diff --git a/Cargo.lock b/Cargo.lock index 99227202a5..f0cef862ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -882,6 +882,7 @@ dependencies = [ "mm2_metrics", "mm2_net", "mm2_number", + "mm2_p2p", "mm2_rpc", "mm2_state_machine", "mm2_test_helpers", @@ -3876,10 +3877,10 @@ dependencies = [ "hex", "instant", "lazy_static", + "libp2p", "mm2_err_handle", "mm2_event_stream", "mm2_metrics", - "mm2_p2p", "mm2_rpc", "primitives", "rand 0.7.3", @@ -4183,10 +4184,8 @@ dependencies = [ "lazy_static", "mm2_core", "mm2_err_handle", - "mm2_event_stream", - "mm2_p2p", + "mm2_number", "mm2_state_machine", - "parking_lot", "pin-project", "prost", "rand 0.7.3", @@ -4235,12 +4234,17 @@ dependencies = [ "lazy_static", "libp2p", "log", + "mm2_core", + "mm2_event_stream", + "mm2_number", + "parking_lot", "rand 0.7.3", "regex", "rmp-serde", "secp256k1 0.20.3", "serde", "serde_bytes", + "serde_json", "sha2 0.10.7", "smallvec 1.6.1", "syn 2.0.38", diff --git a/Cargo.toml b/Cargo.toml index f6f7f67e61..ab18c83da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,9 +43,6 @@ members = [ exclude = [ "mm2src/adex_cli", - "mm2src/floodsub", - "mm2src/gossipsub", - "mm2src/mm2_libp2p", "mm2src/mm2_test_helpers", ] diff --git a/mm2src/adex_cli/Cargo.lock b/mm2src/adex_cli/Cargo.lock index 4de83cbd97..5d5eb5abeb 100644 --- a/mm2src/adex_cli/Cargo.lock +++ b/mm2src/adex_cli/Cargo.lock @@ -21,7 +21,7 @@ dependencies = [ "common", "derive_more", "directories", - "env_logger 0.7.1", + "env_logger", "gstuff", "http 0.2.9", "hyper", @@ -29,6 +29,7 @@ dependencies = [ "inquire", "itertools", "log", + "mm2_core", "mm2_net", "mm2_number", "mm2_rpc", @@ -285,7 +286,7 @@ checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -626,7 +627,7 @@ dependencies = [ "chrono", "crossbeam", "derive_more", - "env_logger 0.9.3", + "env_logger", "findshlibs", "fnv", "futures 0.1.31", @@ -857,7 +858,7 @@ 
dependencies = [ "proc-macro2", "quote 1.0.27", "scratch", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -874,7 +875,7 @@ checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -900,7 +901,7 @@ checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -967,19 +968,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.9.3" @@ -987,7 +975,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", - "humantime 2.1.0", + "humantime", "log", "regex", "termcolor", @@ -1488,15 +1476,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "humantime" version = "2.1.0" @@ -1603,7 +1582,7 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2244,7 +2223,7 @@ dependencies = [ "proc-macro-crate", 
"proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2466,7 +2445,7 @@ dependencies = [ "itertools", "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -2485,12 +2464,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "0.3.15" @@ -3135,7 +3108,7 @@ dependencies = [ "proc-macro2", "quote 1.0.27", "ser_error", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3189,7 +3162,7 @@ checksum = "2dc6b7951b17b051f3210b063f12cc17320e2fe30ae05b0fe2a3abb068551c76" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3392,9 +3365,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.95" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote 1.0.27", @@ -3475,7 +3448,7 @@ checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] @@ -3576,7 +3549,7 @@ checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote 1.0.27", - "syn 1.0.95", + "syn 1.0.109", ] [[package]] diff --git a/mm2src/adex_cli/Cargo.toml b/mm2src/adex_cli/Cargo.toml index d2b38a4cba..cb477cacb0 100644 --- a/mm2src/adex_cli/Cargo.toml +++ b/mm2src/adex_cli/Cargo.toml @@ -7,23 +7,24 @@ description = "Provides a CLI interface and facilitates interoperating to komodo # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [target.'cfg(not(target_arch = 
"wasm32"))'.dependencies] -anyhow = { version = "=1.0.42", features = ["std"] } -async-trait = "=0.1.52" +anyhow = { version = "1.0", features = ["std"] } +async-trait = "0.1" clap = { version = "4.2", features = ["derive"] } common = { path = "../common" } derive_more = "0.99" directories = "5.0" -env_logger = "0.7.1" +env_logger = "0.9.3" http = "0.2" hyper = { version = "0.14.26", features = ["client", "http2", "tcp"] } -hyper-rustls = "0.24.0" -gstuff = { version = "=0.7.4" , features = [ "nightly" ]} +hyper-rustls = "0.24" +gstuff = { version = "0.7" , features = [ "nightly" ]} inquire = "0.6" itertools = "0.10" log = "0.4.21" mm2_net = { path = "../mm2_net" } mm2_number = { path = "../mm2_number" } mm2_rpc = { path = "../mm2_rpc"} +mm2_core = { path = "../mm2_core" } passwords = "3.1" rpc = { path = "../mm2_bitcoin/rpc" } rustls = { version = "0.21", features = [ "dangerous_configuration" ] } @@ -31,8 +32,8 @@ serde = "1.0" serde_json = { version = "1", features = ["preserve_order", "raw_value"] } sysinfo = "0.28" tiny-bip39 = "0.8.0" -tokio = { version = "=1.25.0", features = [ "macros" ] } -uuid = { version = "=1.2.2", features = ["fast-rng", "serde", "v4"] } +tokio = { version = "1.20.0", features = [ "macros" ] } +uuid = { version = "1.2.2", features = ["fast-rng", "serde", "v4"] } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3.3", features = ["processthreadsapi", "winnt"] } diff --git a/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs b/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs index 33d4fbfb62..24c108c6da 100644 --- a/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs +++ b/mm2src/adex_cli/src/adex_proc/adex_proc_impl.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, bail, Result}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use mm2_rpc::data::legacy::{BalanceResponse, CoinInitResponse, GetEnabledResponse, Mm2RpcResult, MmVersionResponse, OrderbookRequest, OrderbookResponse, SellBuyRequest, SellBuyResponse, Status}; use 
serde_json::{json, Value as Json}; @@ -38,7 +38,7 @@ impl AdexProc<'_, '_, let activation_scheme = get_activation_scheme()?; let activation_method = activation_scheme.get_activation_method(asset)?; - + debug!("Got activation scheme for the coin: {}, {:?}", asset, activation_method); let enable = Command::builder() .flatten_data(activation_method) .userpass(self.get_rpc_password()?) diff --git a/mm2src/adex_cli/src/rpc_data.rs b/mm2src/adex_cli/src/rpc_data.rs index a3146cbe47..2c634759ef 100644 --- a/mm2src/adex_cli/src/rpc_data.rs +++ b/mm2src/adex_cli/src/rpc_data.rs @@ -40,6 +40,10 @@ pub(crate) struct ElectrumRequest { #[serde(skip_serializing_if = "Vec::is_empty")] pub(super) servers: Vec, #[serde(skip_serializing_if = "Option::is_none")] + min_connected: Option, + #[serde(skip_serializing_if = "Option::is_none")] + max_connected: Option, + #[serde(skip_serializing_if = "Option::is_none")] mm2: Option, #[serde(default)] tx_history: bool, @@ -62,4 +66,5 @@ pub(super) struct Server { protocol: ElectrumProtocol, #[serde(default)] disable_cert_verification: bool, + pub timeout_sec: Option, } diff --git a/mm2src/coins/Cargo.toml b/mm2src/coins/Cargo.toml index 5f21e8455b..06f250fd49 100644 --- a/mm2src/coins/Cargo.toml +++ b/mm2src/coins/Cargo.toml @@ -67,8 +67,9 @@ mm2_event_stream = { path = "../mm2_event_stream" } mm2_git = { path = "../mm2_git" } mm2_io = { path = "../mm2_io" } mm2_metrics = { path = "../mm2_metrics" } -mm2_net = { path = "../mm2_net", features = ["p2p"] } +mm2_net = { path = "../mm2_net" } mm2_number = { path = "../mm2_number"} +mm2_p2p = { path = "../mm2_p2p" } mm2_rpc = { path = "../mm2_rpc" } mm2_state_machine = { path = "../mm2_state_machine" } mocktopus = { version = "0.8.0", optional = true } diff --git a/mm2src/coins/eth.rs b/mm2src/coins/eth.rs index 84532295d1..15f91fd419 100644 --- a/mm2src/coins/eth.rs +++ b/mm2src/coins/eth.rs @@ -1104,30 +1104,29 @@ impl Deref for EthCoin { #[async_trait] impl SwapOps for EthCoin { - fn 
send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - let address = try_tx_fus!(addr_from_raw_pubkey(self.dex_pubkey())); - - Box::new( - self.send_to_address( - address, - try_tx_fus!(wei_from_big_decimal(&dex_fee.fee_amount().into(), self.decimals)), - ) - .map(TransactionEnum::from), + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + let address = try_tx_s!(addr_from_raw_pubkey(self.dex_pubkey())); + self.send_to_address( + address, + try_tx_s!(wei_from_big_decimal(&dex_fee.fee_amount().into(), self.decimals)), ) + .map(TransactionEnum::from) + .compat() + .await } - fn send_maker_payment(&self, maker_payment: SendPaymentArgs) -> TransactionFut { - Box::new( - self.send_hash_time_locked_payment(maker_payment) - .map(TransactionEnum::from), - ) + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + self.send_hash_time_locked_payment(maker_payment_args) + .compat() + .await + .map(TransactionEnum::from) } - fn send_taker_payment(&self, taker_payment: SendPaymentArgs) -> TransactionFut { - Box::new( - self.send_hash_time_locked_payment(taker_payment) - .map(TransactionEnum::from), - ) + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + self.send_hash_time_locked_payment(taker_payment_args) + .map(TransactionEnum::from) + .compat() + .await } async fn send_maker_spends_taker_payment( @@ -1160,10 +1159,15 @@ impl SwapOps for EthCoin { .map(TransactionEnum::from) } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let tx = match validate_fee_args.fee_tx { TransactionEnum::SignedEthTx(t) => t.clone(), - _ => panic!(), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; validate_fee_impl(self.clone(), EthValidateFeeArgs { fee_tx_hash: &tx.tx_hash(), @@ -1172,6 +1176,8 @@ impl SwapOps for EthCoin { min_block_number: validate_fee_args.min_block_number, uuid: validate_fee_args.uuid, }) + .compat() + .await } #[inline] @@ -1184,70 +1190,62 @@ impl SwapOps for EthCoin { self.validate_payment(input).compat().await } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { - let id = self.etomic_swap_id( - try_fus!(if_my_payment_sent_args.time_lock.try_into()), - if_my_payment_sent_args.secret_hash, - ); - let swap_contract_address = try_fus!(if_my_payment_sent_args.swap_contract_address.try_to_address()); - let selfi = self.clone(); + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; + let id = self.etomic_swap_id(time_lock, if_my_payment_sent_args.secret_hash); + let swap_contract_address = if_my_payment_sent_args.swap_contract_address.try_to_address()?; let from_block = if_my_payment_sent_args.search_from_block; - let fut = async move { - let status = try_s!( - selfi - .payment_status(swap_contract_address, Token::FixedBytes(id.clone())) - .compat() - .await - ); + let status = self + .payment_status(swap_contract_address, Token::FixedBytes(id.clone())) + .compat() + .await?; - if status == U256::from(PaymentState::Uninitialized as u8) { - return Ok(None); - }; + if status == U256::from(PaymentState::Uninitialized as u8) { + return Ok(None); + }; - let mut current_block = try_s!(selfi.current_block().compat().await); - if current_block < from_block { - current_block = from_block; - } + let mut current_block = self.current_block().compat().await?; + if current_block < from_block { + current_block = from_block; + } - let mut from_block = 
from_block; + let mut from_block = from_block; - loop { - let to_block = current_block.min(from_block + selfi.logs_block_range); + loop { + let to_block = current_block.min(from_block + self.logs_block_range); - let events = try_s!( - selfi - .payment_sent_events(swap_contract_address, from_block, to_block) - .compat() - .await - ); + let events = self + .payment_sent_events(swap_contract_address, from_block, to_block) + .compat() + .await?; - let found = events.iter().find(|event| &event.data.0[..32] == id.as_slice()); + let found = events.iter().find(|event| &event.data.0[..32] == id.as_slice()); - match found { - Some(event) => { - let transaction = try_s!( - selfi - .transaction(TransactionId::Hash(event.transaction_hash.unwrap())) - .await - ); - match transaction { - Some(t) => break Ok(Some(try_s!(signed_tx_from_web3_tx(t)).into())), - None => break Ok(None), - } - }, - None => { - if to_block >= current_block { - break Ok(None); - } - from_block = to_block; - }, - } + match found { + Some(event) => { + let transaction = try_s!( + self.transaction(TransactionId::Hash(event.transaction_hash.unwrap())) + .await + ); + match transaction { + Some(t) => break Ok(Some(try_s!(signed_tx_from_web3_tx(t)).into())), + None => break Ok(None), + } + }, + None => { + if to_block >= current_block { + break Ok(None); + } + from_block = to_block; + }, } - }; - Box::new(fut.boxed().compat()) + } } async fn search_for_swap_tx_spend_my( diff --git a/mm2src/coins/eth/eth_tests.rs b/mm2src/coins/eth/eth_tests.rs index 4055e30c70..3a534b603a 100644 --- a/mm2src/coins/eth/eth_tests.rs +++ b/mm2src/coins/eth/eth_tests.rs @@ -590,9 +590,7 @@ fn validate_dex_fee_invalid_sender_eth() { min_block_number: 0, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("was 
sent from wrong address")), _ => panic!("Expected `WrongPaymentTx` wrong sender address, found {:?}", error), @@ -627,9 +625,7 @@ fn validate_dex_fee_invalid_sender_erc() { min_block_number: 0, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("was sent from wrong address")), _ => panic!("Expected `WrongPaymentTx` wrong sender address, found {:?}", error), @@ -668,9 +664,7 @@ fn validate_dex_fee_eth_confirmed_before_min_block() { min_block_number: 11784793, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("confirmed before min_block")), _ => panic!("Expected `WrongPaymentTx` early confirmation, found {:?}", error), @@ -708,9 +702,7 @@ fn validate_dex_fee_erc_confirmed_before_min_block() { min_block_number: 11823975, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("confirmed before min_block")), _ => panic!("Expected `WrongPaymentTx` early confirmation, found {:?}", error), @@ -834,7 +826,7 @@ fn polygon_check_if_my_payment_sent() { amount: &BigDecimal::default(), payment_instructions: &None, }; - let my_payment = block_on_f01(coin.check_if_my_payment_sent(if_my_payment_sent_args)) + let my_payment = block_on(coin.check_if_my_payment_sent(if_my_payment_sent_args)) .unwrap() .unwrap(); let expected_hash = 
BytesJson::from("69a20008cea0c15ee483b5bbdff942752634aa072dfd2ff715fe87eec302de11"); diff --git a/mm2src/coins/eth/v2_activation.rs b/mm2src/coins/eth/v2_activation.rs index 15af41b3c2..576920b030 100644 --- a/mm2src/coins/eth/v2_activation.rs +++ b/mm2src/coins/eth/v2_activation.rs @@ -13,7 +13,7 @@ use instant::Instant; use mm2_err_handle::common_errors::WithInternal; #[cfg(target_arch = "wasm32")] use mm2_metamask::{from_metamask_error, MetamaskError, MetamaskRpcError, WithMetamaskRpcError}; -use mm2_net::p2p::P2PContext; +use mm2_p2p::p2p_ctx::P2PContext; use proxy_signature::RawMessage; use rpc_task::RpcTaskError; use std::sync::atomic::Ordering; diff --git a/mm2src/coins/eth/web3_transport/http_transport.rs b/mm2src/coins/eth/web3_transport/http_transport.rs index 5d6ad98a26..378e384df2 100644 --- a/mm2src/coins/eth/web3_transport/http_transport.rs +++ b/mm2src/coins/eth/web3_transport/http_transport.rs @@ -3,7 +3,7 @@ use common::APPLICATION_JSON; use common::X_AUTH_PAYLOAD; use http::header::CONTENT_TYPE; use jsonrpc_core::{Call, Response}; -use mm2_net::p2p::Keypair; +use mm2_p2p::Keypair; use proxy_signature::RawMessage; use serde_json::Value as Json; use std::ops::Deref; @@ -120,10 +120,10 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result r, Err(err) => { - return Err(request_failed_error(request, Web3RpcError::Transport(err.to_string()))); + return Err(request_failed_error(&request, Web3RpcError::Transport(err.to_string()))); }, }; @@ -160,7 +160,7 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result r, Err(err) => { return Err(request_failed_error( - request, + &request, Web3RpcError::InvalidResponse(format!("Server: '{}', error: {}", transport.node.uri, err)), )); }, @@ -195,10 +195,10 @@ async fn send_request(request: Call, transport: HttpTransport) -> Result Result Ok(response_json), Err(Error::Transport(e)) => Err(request_failed_error( - request, + &request, 
Web3RpcError::Transport(format!("Server: '{}', error: {}", transport.node.uri, e)), )), Err(e) => Err(request_failed_error( - request, + &request, Web3RpcError::InvalidResponse(format!("Server: '{}', error: {}", transport.node.uri, e)), )), } @@ -275,7 +275,7 @@ async fn send_request_once( } } -fn request_failed_error(request: Call, error: Web3RpcError) -> Error { +fn request_failed_error(request: &Call, error: Web3RpcError) -> Error { let error = format!("request {:?} failed: {}", request, error); Error::Transport(TransportError::Message(error)) } diff --git a/mm2src/coins/eth/web3_transport/websocket_transport.rs b/mm2src/coins/eth/web3_transport/websocket_transport.rs index fd1220e92e..6d19573781 100644 --- a/mm2src/coins/eth/web3_transport/websocket_transport.rs +++ b/mm2src/coins/eth/web3_transport/websocket_transport.rs @@ -20,7 +20,7 @@ use futures_ticker::Ticker; use futures_util::{FutureExt, SinkExt, StreamExt}; use instant::{Duration, Instant}; use jsonrpc_core::Call; -use mm2_net::p2p::Keypair; +use mm2_p2p::Keypair; use proxy_signature::{ProxySign, RawMessage}; use std::sync::atomic::AtomicBool; use std::sync::{atomic::{AtomicUsize, Ordering}, @@ -359,7 +359,7 @@ async fn send_request( let mut tx = transport.controller_channel.tx.lock().await; - let (notification_sender, notification_receiver) = futures::channel::oneshot::channel::>(); + let (notification_sender, notification_receiver) = oneshot::channel::>(); event_handlers.on_outgoing_request(&request_bytes); diff --git a/mm2src/coins/lightning.rs b/mm2src/coins/lightning.rs index 117f06ec8a..f3b364e0ef 100644 --- a/mm2src/coins/lightning.rs +++ b/mm2src/coins/lightning.rs @@ -610,43 +610,34 @@ impl LightningCoin { #[async_trait] impl SwapOps for LightningCoin { // Todo: This uses dummy data for now for the sake of swap P.O.C., this should be implemented probably after agreeing on how fees will work for lightning - fn send_taker_fee(&self, _dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> 
TransactionFut { - let fut = async move { Ok(TransactionEnum::LightningPayment(PaymentHash([1; 32]))) }; - Box::new(fut.boxed().compat()) + async fn send_taker_fee(&self, _dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + Ok(TransactionEnum::LightningPayment(PaymentHash([1; 32]))) } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionFut { + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { let invoice = match maker_payment_args.payment_instructions.clone() { Some(PaymentInstructions::Lightning(invoice)) => invoice, - _ => try_tx_fus!(ERR!("Invalid instructions, ligntning invoice is expected")), + _ => try_tx_s!(ERR!("Invalid instructions, ligntning invoice is expected")), }; - let coin = self.clone(); - let fut = async move { - // No need for max_total_cltv_expiry_delta for lightning maker payment since the maker is the side that reveals the secret/preimage - let payment = try_tx_s!(coin.pay_invoice(invoice, None).await); - Ok(payment.payment_hash.into()) - }; - Box::new(fut.boxed().compat()) + // No need for max_total_cltv_expiry_delta for lightning maker payment since the maker is the side that reveals the secret/preimage + let payment = try_tx_s!(self.pay_invoice(invoice, None).await); + Ok(payment.payment_hash.into()) } - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionFut { + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { let invoice = match taker_payment_args.payment_instructions.clone() { Some(PaymentInstructions::Lightning(invoice)) => invoice, - _ => try_tx_fus!(ERR!("Invalid instructions, ligntning invoice is expected")), + _ => try_tx_s!(ERR!("Invalid instructions, ligntning invoice is expected")), }; let max_total_cltv_expiry_delta = self .estimate_blocks_from_duration(taker_payment_args.time_lock_duration) .try_into() .expect("max_total_cltv_expiry_delta 
shouldn't exceed u32::MAX"); - let coin = self.clone(); - let fut = async move { - // Todo: The path/s used is already logged when PaymentPathSuccessful/PaymentPathFailed events are fired, it might be better to save it to the DB and retrieve it with the payment info. - let payment = try_tx_s!(coin.pay_invoice(invoice, Some(max_total_cltv_expiry_delta)).await); - Ok(payment.payment_hash.into()) - }; - Box::new(fut.boxed().compat()) + // Todo: The path/s used is already logged when PaymentPathSuccessful/PaymentPathFailed events are fired, it might be better to save it to the DB and retrieve it with the payment info. + let payment = try_tx_s!(self.pay_invoice(invoice, Some(max_total_cltv_expiry_delta)).await); + Ok(payment.payment_hash.into()) } #[inline] @@ -684,9 +675,7 @@ impl SwapOps for LightningCoin { } // Todo: This validates the dummy fee for now for the sake of swap P.O.C., this should be implemented probably after agreeing on how fees will work for lightning - fn validate_fee(&self, _validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentFut<()> { - Box::new(futures01::future::ok(())) - } + async fn validate_fee(&self, _validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { Ok(()) } #[inline] async fn validate_maker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()> { @@ -698,29 +687,26 @@ impl SwapOps for LightningCoin { self.validate_swap_payment(input).compat().await } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error = String> + Send> { + ) -> Result, String> { let invoice = match if_my_payment_sent_args.payment_instructions.clone() { Some(PaymentInstructions::Lightning(invoice)) => invoice, - _ => try_f!(ERR!("Invalid instructions, ligntning invoice is expected")), + _ => return ERR!("Invalid instructions, ligntning invoice is expected"), }; let payment_hash = PaymentHash((invoice.payment_hash()).into_inner()); let 
payment_hex = hex::encode(payment_hash.0); - let coin = self.clone(); - let fut = async move { - match coin.db.get_payment_from_db(payment_hash).await { - Ok(maybe_payment) => Ok(maybe_payment.map(|p| p.payment_hash.into())), - Err(e) => ERR!( - "Unable to check if payment {} is in db or not error: {}", - payment_hex, - e - ), - } - }; - Box::new(fut.boxed().compat()) + + match self.db.get_payment_from_db(payment_hash).await { + Ok(maybe_payment) => Ok(maybe_payment.map(|p| p.payment_hash.into())), + Err(e) => ERR!( + "Unable to check if payment {} is in db or not error: {}", + payment_hex, + e + ), + } } // Todo: need to also check on-chain spending diff --git a/mm2src/coins/lightning/ln_platform.rs b/mm2src/coins/lightning/ln_platform.rs index 0a5fae0a4f..59e3e19488 100644 --- a/mm2src/coins/lightning/ln_platform.rs +++ b/mm2src/coins/lightning/ln_platform.rs @@ -1,8 +1,8 @@ use super::*; use crate::lightning::ln_errors::{SaveChannelClosingError, SaveChannelClosingResult}; -use crate::utxo::rpc_clients::{BestBlock as RpcBestBlock, BlockHashOrHeight, ConfirmedTransactionInfo, - ElectrumBlockHeader, ElectrumClient, ElectrumNonce, EstimateFeeMethod, - UtxoRpcClientEnum, UtxoRpcResult}; +use crate::lightning::ln_utils::RpcBestBlock; +use crate::utxo::rpc_clients::{BlockHashOrHeight, ConfirmedTransactionInfo, ElectrumBlockHeader, ElectrumClient, + ElectrumNonce, EstimateFeeMethod, UtxoRpcClientEnum, UtxoRpcResult}; use crate::utxo::spv::SimplePaymentVerification; use crate::utxo::utxo_standard::UtxoStandardCoin; use crate::utxo::GetConfirmedTxError; diff --git a/mm2src/coins/lightning/ln_utils.rs b/mm2src/coins/lightning/ln_utils.rs index 693b7c3a4f..5b4ac5698d 100644 --- a/mm2src/coins/lightning/ln_utils.rs +++ b/mm2src/coins/lightning/ln_utils.rs @@ -3,7 +3,7 @@ use crate::lightning::ln_db::LightningDB; use crate::lightning::ln_platform::{get_best_header, ln_best_block_update_loop, update_best_block}; use crate::lightning::ln_sql::SqliteLightningDB; use 
crate::lightning::ln_storage::{LightningStorage, NodesAddressesMap}; -use crate::utxo::rpc_clients::BestBlock as RpcBestBlock; +use crate::utxo::rpc_clients::ElectrumBlockHeader; use bitcoin::hash_types::BlockHash; use bitcoin_hashes::{sha256d, Hash}; use common::executor::SpawnFuture; @@ -38,6 +38,21 @@ pub type ChainMonitor = chainmonitor::ChainMonitor< pub type ChannelManager = SimpleArcChannelManager; pub type Router = DefaultRouter, Arc, Arc>; +#[derive(Debug, PartialEq)] +pub struct RpcBestBlock { + pub height: u64, + pub hash: H256Json, +} + +impl From for RpcBestBlock { + fn from(block_header: ElectrumBlockHeader) -> Self { + RpcBestBlock { + height: block_header.block_height(), + hash: block_header.block_hash(), + } + } +} + #[inline] fn ln_data_dir(ctx: &MmArc, ticker: &str) -> PathBuf { ctx.dbdir().join("LIGHTNING").join(ticker) } diff --git a/mm2src/coins/lp_coins.rs b/mm2src/coins/lp_coins.rs index 69f57d4a81..5387162d1e 100644 --- a/mm2src/coins/lp_coins.rs +++ b/mm2src/coins/lp_coins.rs @@ -1090,11 +1090,11 @@ pub enum WatcherRewardError { // otherwise mocks called from other crates won't work #[cfg_attr(any(test, feature = "mocktopus"), mockable)] pub trait SwapOps { - fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionFut; + async fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionResult; - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionFut; + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult; - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionFut; + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult; async fn send_maker_spends_taker_payment( &self, @@ -1110,16 +1110,16 @@ pub trait SwapOps { async fn send_maker_refunds_payment(&self, maker_refunds_payment_args: RefundPaymentArgs<'_>) -> TransactionResult; - fn 
validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentFut<()>; + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()>; async fn validate_maker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()>; async fn validate_taker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()>; - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error = String> + Send>; + ) -> Result, String>; async fn search_for_swap_tx_spend_my( &self, @@ -1143,14 +1143,13 @@ pub trait SwapOps { /// Whether the refund transaction can be sent now /// For example: there are no additional conditions for ETH, but for some UTXO coins we should wait for /// locktime < MTP - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { + async fn can_refund_htlc(&self, locktime: u64) -> Result { let now = now_sec(); - let result = if now > locktime { - CanRefundHtlc::CanRefundNow + if now > locktime { + Ok(CanRefundHtlc::CanRefundNow) } else { - CanRefundHtlc::HaveToWait(locktime - now + 1) - }; - Box::new(futures01::future::ok(result)) + Ok(CanRefundHtlc::HaveToWait(locktime - now + 1)) + } } /// Whether the swap payment is refunded automatically or not when the locktime expires, or the other side fails the HTLC. @@ -4462,9 +4461,10 @@ pub enum CoinProtocol { }, } -pub type RpcTransportEventHandlerShared = Arc; - -/// Common methods to measure the outgoing requests and incoming responses statistics. +/// Common methods to handle the connection events. +/// +/// Note that the handler methods are sync and shouldn't take long time executing, otherwise it will hurt the performance. +/// If a handler needs to do some heavy work, it should be spawned/done in a separate thread. 
pub trait RpcTransportEventHandler { fn debug_info(&self) -> String; @@ -4472,12 +4472,15 @@ pub trait RpcTransportEventHandler { fn on_incoming_response(&self, data: &[u8]); - fn on_connected(&self, address: String) -> Result<(), String>; + fn on_connected(&self, address: &str) -> Result<(), String>; - fn on_disconnected(&self, address: String) -> Result<(), String>; + fn on_disconnected(&self, address: &str) -> Result<(), String>; } -impl fmt::Debug for dyn RpcTransportEventHandler + Send + Sync { +pub type SharableRpcTransportEventHandler = dyn RpcTransportEventHandler + Send + Sync; +pub type RpcTransportEventHandlerShared = Arc; + +impl fmt::Debug for SharableRpcTransportEventHandler { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.debug_info()) } } @@ -4488,9 +4491,21 @@ impl RpcTransportEventHandler for RpcTransportEventHandlerShared { fn on_incoming_response(&self, data: &[u8]) { self.as_ref().on_incoming_response(data) } - fn on_connected(&self, address: String) -> Result<(), String> { self.as_ref().on_connected(address) } + fn on_connected(&self, address: &str) -> Result<(), String> { self.as_ref().on_connected(address) } - fn on_disconnected(&self, address: String) -> Result<(), String> { self.as_ref().on_disconnected(address) } + fn on_disconnected(&self, address: &str) -> Result<(), String> { self.as_ref().on_disconnected(address) } +} + +impl RpcTransportEventHandler for Box { + fn debug_info(&self) -> String { self.as_ref().debug_info() } + + fn on_outgoing_request(&self, data: &[u8]) { self.as_ref().on_outgoing_request(data) } + + fn on_incoming_response(&self, data: &[u8]) { self.as_ref().on_incoming_response(data) } + + fn on_connected(&self, address: &str) -> Result<(), String> { self.as_ref().on_connected(address) } + + fn on_disconnected(&self, address: &str) -> Result<(), String> { self.as_ref().on_disconnected(address) } } impl RpcTransportEventHandler for Vec { @@ -4511,16 +4526,28 @@ impl 
RpcTransportEventHandler for Vec { } } - fn on_connected(&self, address: String) -> Result<(), String> { + fn on_connected(&self, address: &str) -> Result<(), String> { + let mut errors = vec![]; for handler in self { - try_s!(handler.on_connected(address.clone())) + if let Err(e) = handler.on_connected(address) { + errors.push((handler.debug_info(), e)) + } + } + if !errors.is_empty() { + return Err(format!("Errors: {:?}", errors)); } Ok(()) } - fn on_disconnected(&self, address: String) -> Result<(), String> { + fn on_disconnected(&self, address: &str) -> Result<(), String> { + let mut errors = vec![]; for handler in self { - try_s!(handler.on_disconnected(address.clone())) + if let Err(e) = handler.on_disconnected(address) { + errors.push((handler.debug_info(), e)) + } + } + if !errors.is_empty() { + return Err(format!("Errors: {:?}", errors)); } Ok(()) } @@ -4581,17 +4608,9 @@ impl RpcTransportEventHandler for CoinTransportMetrics { "coin" => self.ticker.to_owned(), "client" => self.client.to_owned()); } - fn on_connected(&self, _address: String) -> Result<(), String> { - // Handle a new connected endpoint if necessary. - // Now just return the Ok - Ok(()) - } + fn on_connected(&self, _address: &str) -> Result<(), String> { Ok(()) } - fn on_disconnected(&self, _address: String) -> Result<(), String> { - // Handle disconnected endpoint if necessary. 
- // Now just return the Ok - Ok(()) - } + fn on_disconnected(&self, _address: &str) -> Result<(), String> { Ok(()) } } #[async_trait] diff --git a/mm2src/coins/nft.rs b/mm2src/coins/nft.rs index 2298ae9648..2dc85e67dc 100644 --- a/mm2src/coins/nft.rs +++ b/mm2src/coins/nft.rs @@ -1,7 +1,7 @@ use http::Uri; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::{MmError, MmResult}; -use mm2_net::p2p::P2PContext; +use mm2_p2p::p2p_ctx::P2PContext; use proxy_signature::{ProxySign, RawMessage}; use url::Url; diff --git a/mm2src/coins/qrc20.rs b/mm2src/coins/qrc20.rs index 93729b8e9d..df1740736e 100644 --- a/mm2src/coins/qrc20.rs +++ b/mm2src/coins/qrc20.rs @@ -56,6 +56,7 @@ use serde_json::{self as json, Value as Json}; use serialization::{deserialize, serialize, CoinVariant}; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; +use std::num::TryFromIntError; use std::ops::{Deref, Neg}; #[cfg(not(target_arch = "wasm32"))] use std::path::PathBuf; use std::str::FromStr; @@ -758,52 +759,36 @@ impl UtxoCommonOps for Qrc20Coin { #[async_trait] impl SwapOps for Qrc20Coin { - fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - let to_address = try_tx_fus!(self.contract_address_from_raw_pubkey(self.dex_pubkey())); - let amount = try_tx_fus!(wei_from_big_decimal(&dex_fee.fee_amount().into(), self.utxo.decimals)); + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + let to_address = try_tx_s!(self.contract_address_from_raw_pubkey(self.dex_pubkey())); + let amount = try_tx_s!(wei_from_big_decimal(&dex_fee.fee_amount().into(), self.utxo.decimals)); let transfer_output = - try_tx_fus!(self.transfer_output(to_address, amount, QRC20_GAS_LIMIT_DEFAULT, QRC20_GAS_PRICE_DEFAULT)); - let outputs = vec![transfer_output]; - - let selfi = self.clone(); - let fut = async move { selfi.send_contract_calls(outputs).await }; - - Box::new(fut.boxed().compat()) + 
try_tx_s!(self.transfer_output(to_address, amount, QRC20_GAS_LIMIT_DEFAULT, QRC20_GAS_PRICE_DEFAULT)); + self.send_contract_calls(vec![transfer_output]).await } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { - let time_lock = try_tx_fus!(maker_payment_args.time_lock.try_into()); - let taker_addr = try_tx_fus!(self.contract_address_from_raw_pubkey(maker_payment_args.other_pubkey)); + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + let time_lock = try_tx_s!(maker_payment_args.time_lock.try_into()); + let taker_addr = try_tx_s!(self.contract_address_from_raw_pubkey(maker_payment_args.other_pubkey)); let id = qrc20_swap_id(time_lock, maker_payment_args.secret_hash); - let value = try_tx_fus!(wei_from_big_decimal(&maker_payment_args.amount, self.utxo.decimals)); + let value = try_tx_s!(wei_from_big_decimal(&maker_payment_args.amount, self.utxo.decimals)); let secret_hash = Vec::from(maker_payment_args.secret_hash); - let swap_contract_address = try_tx_fus!(maker_payment_args.swap_contract_address.try_to_address()); + let swap_contract_address = try_tx_s!(maker_payment_args.swap_contract_address.try_to_address()); - let selfi = self.clone(); - let fut = async move { - selfi - .send_hash_time_locked_payment(id, value, time_lock, secret_hash, taker_addr, swap_contract_address) - .await - }; - Box::new(fut.boxed().compat()) + self.send_hash_time_locked_payment(id, value, time_lock, secret_hash, taker_addr, swap_contract_address) + .await } #[inline] - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { - let time_lock = try_tx_fus!(taker_payment_args.time_lock.try_into()); - let maker_addr = try_tx_fus!(self.contract_address_from_raw_pubkey(taker_payment_args.other_pubkey)); + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + let time_lock = try_tx_s!(taker_payment_args.time_lock.try_into()); + let 
maker_addr = try_tx_s!(self.contract_address_from_raw_pubkey(taker_payment_args.other_pubkey)); let id = qrc20_swap_id(time_lock, taker_payment_args.secret_hash); - let value = try_tx_fus!(wei_from_big_decimal(&taker_payment_args.amount, self.utxo.decimals)); + let value = try_tx_s!(wei_from_big_decimal(&taker_payment_args.amount, self.utxo.decimals)); let secret_hash = Vec::from(taker_payment_args.secret_hash); - let swap_contract_address = try_tx_fus!(taker_payment_args.swap_contract_address.try_to_address()); - - let selfi = self.clone(); - let fut = async move { - selfi - .send_hash_time_locked_payment(id, value, time_lock, secret_hash, maker_addr, swap_contract_address) - .await - }; - Box::new(fut.boxed().compat()) + let swap_contract_address = try_tx_s!(taker_payment_args.swap_contract_address.try_to_address()); + self.send_hash_time_locked_payment(id, value, time_lock, secret_hash, maker_addr, swap_contract_address) + .await } #[inline] @@ -855,39 +840,36 @@ impl SwapOps for Qrc20Coin { } #[inline] - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentFut<()> { - let fee_tx = validate_fee_args.fee_tx; - let min_block_number = validate_fee_args.min_block_number; - let fee_tx = match fee_tx { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { + let fee_tx = match validate_fee_args.fee_tx { TransactionEnum::UtxoTx(tx) => tx, - _ => panic!("Unexpected TransactionEnum"), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; let fee_tx_hash = fee_tx.hash().reversed().into(); - let inputs_signed_by_pub = try_f!(check_all_utxo_inputs_signed_by_pub( - fee_tx, - validate_fee_args.expected_sender - )); + let inputs_signed_by_pub = check_all_utxo_inputs_signed_by_pub(fee_tx, validate_fee_args.expected_sender)?; if !inputs_signed_by_pub { - return Box::new(futures01::future::err( - ValidatePaymentError::WrongPaymentTx("The dex fee was sent from wrong address".to_string()).into(), + return MmError::err(ValidatePaymentError::WrongPaymentTx( + "The dex fee was sent from wrong address".to_string(), )); } - let fee_addr = try_f!(self + let fee_addr = self .contract_address_from_raw_pubkey(self.dex_pubkey()) - .map_to_mm(ValidatePaymentError::WrongPaymentTx)); - let expected_value = try_f!(wei_from_big_decimal( - &validate_fee_args.dex_fee.fee_amount().into(), - self.utxo.decimals - )); - - let selfi = self.clone(); - let fut = async move { - selfi - .validate_fee_impl(fee_tx_hash, fee_addr, expected_value, min_block_number) - .await - .map_to_mm(ValidatePaymentError::WrongPaymentTx) - }; - Box::new(fut.boxed().compat()) + .map_to_mm(ValidatePaymentError::WrongPaymentTx)?; + let expected_value = wei_from_big_decimal(&validate_fee_args.dex_fee.fee_amount().into(), self.utxo.decimals)?; + + self.validate_fee_impl( + fee_tx_hash, + fee_addr, + expected_value, + validate_fee_args.min_block_number, + ) + .await + .map_to_mm(ValidatePaymentError::WrongPaymentTx) } #[inline] @@ -943,24 +925,23 @@ impl SwapOps for Qrc20Coin { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error = String> + Send> { - let search_from_block = if_my_payment_sent_args.search_from_block; - let swap_id = qrc20_swap_id( - try_fus!(if_my_payment_sent_args.time_lock.try_into()), - if_my_payment_sent_args.secret_hash, - ); - let swap_contract_address = 
try_fus!(if_my_payment_sent_args.swap_contract_address.try_to_address()); + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; + let swap_id = qrc20_swap_id(time_lock, if_my_payment_sent_args.secret_hash); + let swap_contract_address = if_my_payment_sent_args.swap_contract_address.try_to_address()?; - let selfi = self.clone(); - let fut = async move { - selfi - .check_if_my_payment_sent_impl(swap_contract_address, swap_id, search_from_block) - .await - }; - Box::new(fut.boxed().compat()) + self.check_if_my_payment_sent_impl( + swap_contract_address, + swap_id, + if_my_payment_sent_args.search_from_block, + ) + .await } #[inline] diff --git a/mm2src/coins/qrc20/qrc20_tests.rs b/mm2src/coins/qrc20/qrc20_tests.rs index c382e37499..976253cf32 100644 --- a/mm2src/coins/qrc20/qrc20_tests.rs +++ b/mm2src/coins/qrc20/qrc20_tests.rs @@ -336,7 +336,7 @@ fn test_validate_fee() { let amount = BigDecimal::from_str("0.01").unwrap(); - let result = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let result = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &sender_pub, dex_fee: &DexFee::Standard(amount.clone().into()), @@ -351,7 +351,7 @@ fn test_validate_fee() { hex::decode("03bc2c7ba671bae4a6fc835244c9762b41647b9827d4780a89a949b984a8ddcc05").unwrap(), ))) }); - let err = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let err = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &sender_pub, dex_fee: &DexFee::Standard(amount.clone().into()), @@ -367,7 +367,7 @@ fn test_validate_fee() { } ::dex_pubkey.clear_mock(); - let err = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let err = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &DEX_FEE_ADDR_RAW_PUBKEY, dex_fee: &DexFee::Standard(amount.clone().into()), @@ -382,7 +382,7 @@ fn test_validate_fee() { _ => panic!("Expected `WrongPaymentTx` wrong sender 
address, found {:?}", err), } - let err = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let err = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &sender_pub, dex_fee: &DexFee::Standard(amount.clone().into()), @@ -398,7 +398,7 @@ fn test_validate_fee() { } let amount_dif = BigDecimal::from_str("0.02").unwrap(); - let err = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let err = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &sender_pub, dex_fee: &DexFee::Standard(amount_dif.into()), @@ -418,7 +418,7 @@ fn test_validate_fee() { // QTUM tx "8a51f0ffd45f34974de50f07c5bf2f0949da4e88433f8f75191953a442cf9310" let tx = TransactionEnum::UtxoTx("020000000113640281c9332caeddd02a8dd0d784809e1ad87bda3c972d89d5ae41f5494b85010000006a47304402207c5c904a93310b8672f4ecdbab356b65dd869a426e92f1064a567be7ccfc61ff02203e4173b9467127f7de4682513a21efb5980e66dbed4da91dff46534b8e77c7ef012102baefe72b3591de2070c0da3853226b00f082d72daa417688b61cb18c1d543d1afeffffff020001b2c4000000001976a9149e032d4b0090a11dc40fe6c47601499a35d55fbb88acbc4dd20c2f0000001976a9144208fa7be80dcf972f767194ad365950495064a488ac76e70800".into()); let sender_pub = hex::decode("02baefe72b3591de2070c0da3853226b00f082d72daa417688b61cb18c1d543d1a").unwrap(); - let err = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let err = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: &sender_pub, dex_fee: &DexFee::Standard(amount.into()), diff --git a/mm2src/coins/qrc20/swap.rs b/mm2src/coins/qrc20/swap.rs index a6162f6421..7370926684 100644 --- a/mm2src/coins/qrc20/swap.rs +++ b/mm2src/coins/qrc20/swap.rs @@ -346,7 +346,23 @@ impl Qrc20Coin { receiver, secret_hash, .. 
- } = try_s!(self.erc20_payment_details_from_tx(&tx).await); + } = try_s!( + retry_on_err!(async { self.erc20_payment_details_from_tx(&tx).await }) + .until_ready() + .repeat_every_secs(check_every) + .until_s(wait_until) + .inspect_err({ + let tx_hash = tx.hash().reversed(); + move |e| { + error!( + "Failed to retrieve QRC20 payment details from transaction {} \ + will retry in {} seconds. Error: {:?}", + tx_hash, check_every, e + ) + } + }) + .await + ); loop { // Try to find a 'receiverSpend' contract call. diff --git a/mm2src/coins/siacoin.rs b/mm2src/coins/siacoin.rs index 49ae52d618..2253d6138c 100644 --- a/mm2src/coins/siacoin.rs +++ b/mm2src/coins/siacoin.rs @@ -401,11 +401,17 @@ impl MarketCoinOps for SiaCoin { #[async_trait] impl SwapOps for SiaCoin { - fn send_taker_fee(&self, _dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { unimplemented!() } + async fn send_taker_fee(&self, _dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + unimplemented!() + } - fn send_maker_payment(&self, _maker_payment_args: SendPaymentArgs) -> TransactionFut { unimplemented!() } + async fn send_maker_payment(&self, _maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + unimplemented!() + } - fn send_taker_payment(&self, _taker_payment_args: SendPaymentArgs) -> TransactionFut { unimplemented!() } + async fn send_taker_payment(&self, _taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + unimplemented!() + } async fn send_maker_spends_taker_payment( &self, @@ -435,7 +441,9 @@ impl SwapOps for SiaCoin { unimplemented!() } - fn validate_fee(&self, _validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { unimplemented!() } + async fn validate_fee(&self, _validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { + unimplemented!() + } async fn validate_maker_payment(&self, _input: ValidatePaymentInput) -> ValidatePaymentResult<()> { unimplemented!() @@ -445,10 +453,10 @@ impl SwapOps for SiaCoin { 
unimplemented!() } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - _if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + _if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { unimplemented!() } @@ -494,9 +502,7 @@ impl SwapOps for SiaCoin { fn derive_htlc_pubkey(&self, _swap_unique_data: &[u8]) -> Vec { unimplemented!() } - fn can_refund_htlc(&self, _locktime: u64) -> Box + Send + '_> { - unimplemented!() - } + async fn can_refund_htlc(&self, _locktime: u64) -> Result { unimplemented!() } fn validate_other_pubkey(&self, _raw_pubkey: &[u8]) -> MmResult<(), ValidateOtherPubKeyErr> { unimplemented!() } diff --git a/mm2src/coins/tendermint/rpc/tendermint_native_rpc.rs b/mm2src/coins/tendermint/rpc/tendermint_native_rpc.rs index 6b4dfb9ca4..5da40d622d 100644 --- a/mm2src/coins/tendermint/rpc/tendermint_native_rpc.rs +++ b/mm2src/coins/tendermint/rpc/tendermint_native_rpc.rs @@ -6,7 +6,7 @@ use cosmrs::tendermint::evidence::Evidence; use cosmrs::tendermint::Genesis; use cosmrs::tendermint::Hash; use http::Uri; -use mm2_net::p2p::Keypair; +use mm2_p2p::Keypair; use serde::{de::DeserializeOwned, Serialize}; use std::fmt; use std::time::Duration; @@ -382,7 +382,7 @@ mod sealed { use hyper::client::HttpConnector; use hyper::{header, Uri}; use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder}; - use mm2_net::p2p::Keypair; + use mm2_p2p::Keypair; use proxy_signature::RawMessage; use std::io::Read; use tendermint_rpc::{Error, Response, SimpleRequest}; diff --git a/mm2src/coins/tendermint/rpc/tendermint_wasm_rpc.rs b/mm2src/coins/tendermint/rpc/tendermint_wasm_rpc.rs index 4b5d6a0652..9224ea997f 100644 --- a/mm2src/coins/tendermint/rpc/tendermint_wasm_rpc.rs +++ b/mm2src/coins/tendermint/rpc/tendermint_wasm_rpc.rs @@ -4,9 +4,9 @@ use derive_more::Display; use http::header::{ACCEPT, CONTENT_TYPE}; use http::uri::InvalidUri; use http::{StatusCode, Uri}; -use mm2_net::p2p::Keypair; use 
mm2_net::transport::SlurpError; use mm2_net::wasm::http::FetchRequest; +use mm2_p2p::Keypair; use proxy_signature::RawMessage; use std::str::FromStr; use tendermint_rpc::endpoint::{abci_info, broadcast}; diff --git a/mm2src/coins/tendermint/tendermint_balance_events.rs b/mm2src/coins/tendermint/tendermint_balance_events.rs index a7a5fd94cf..c512cf8277 100644 --- a/mm2src/coins/tendermint/tendermint_balance_events.rs +++ b/mm2src/coins/tendermint/tendermint_balance_events.rs @@ -7,7 +7,6 @@ use jsonrpc_core::{Id as RpcId, Params as RpcParams, Value as RpcValue, Version use mm2_core::mm_ctx::MmArc; use mm2_event_stream::{behaviour::{EventBehaviour, EventInitStatus}, ErrorEventName, Event, EventName, EventStreamConfiguration}; -use mm2_net::p2p::Keypair; use mm2_number::BigDecimal; use proxy_signature::RawMessage; use std::collections::{HashMap, HashSet}; @@ -24,7 +23,7 @@ impl EventBehaviour for TendermintCoin { async fn handle(self, _interval: f64, tx: oneshot::Sender) { fn generate_subscription_query( query_filter: String, - proxy_sign_keypair: &Option, + proxy_sign_keypair: &Option, uri: &http::Uri, ) -> String { let mut params = serde_json::Map::with_capacity(1); diff --git a/mm2src/coins/tendermint/tendermint_coin.rs b/mm2src/coins/tendermint/tendermint_coin.rs index 1642ed2bf2..076c6c0a81 100644 --- a/mm2src/coins/tendermint/tendermint_coin.rs +++ b/mm2src/coins/tendermint/tendermint_coin.rs @@ -66,8 +66,8 @@ use keys::{KeyPair, Public}; use mm2_core::mm_ctx::{MmArc, MmWeak}; use mm2_err_handle::prelude::*; use mm2_git::{FileMetadata, GitController, GithubClient, RepositoryOperations, GITHUB_API_URI}; -use mm2_net::p2p::P2PContext; use mm2_number::MmNumber; +use mm2_p2p::p2p_ctx::P2PContext; use parking_lot::Mutex as PaMutex; use primitives::hash::H256; use regex::Regex; @@ -2859,11 +2859,13 @@ impl MarketCoinOps for TendermintCoin { #[async_trait] #[allow(unused_variables)] impl SwapOps for TendermintCoin { - fn send_taker_fee(&self, dex_fee: DexFee, uuid: 
&[u8], expire_at: u64) -> TransactionFut { + async fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionResult { self.send_taker_fee_for_denom(&dex_fee, self.denom.clone(), self.decimals, uuid, expire_at) + .compat() + .await } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { self.send_htlc_for_denom( maker_payment_args.time_lock_duration, maker_payment_args.other_pubkey, @@ -2872,9 +2874,11 @@ impl SwapOps for TendermintCoin { self.denom.clone(), self.decimals, ) + .compat() + .await } - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { self.send_htlc_for_denom( taker_payment_args.time_lock_duration, taker_payment_args.other_pubkey, @@ -2883,6 +2887,8 @@ impl SwapOps for TendermintCoin { self.denom.clone(), self.decimals, ) + .compat() + .await } async fn send_maker_spends_taker_payment( @@ -3019,7 +3025,7 @@ impl SwapOps for TendermintCoin { )) } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { self.validate_fee_for_denom( validate_fee_args.fee_tx, validate_fee_args.expected_sender, @@ -3028,6 +3034,8 @@ impl SwapOps for TendermintCoin { validate_fee_args.uuid, self.denom.to_string(), ) + .compat() + .await } async fn validate_maker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()> { @@ -3040,10 +3048,10 @@ impl SwapOps for TendermintCoin { .await } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { 
self.check_if_my_payment_sent_for_denom( self.decimals, self.denom.clone(), @@ -3051,6 +3059,8 @@ impl SwapOps for TendermintCoin { if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.amount, ) + .compat() + .await } async fn search_for_swap_tx_spend_my( @@ -3474,7 +3484,7 @@ pub mod tendermint_coin_tests { use super::*; use crate::DexFeeBurnDestination; - use common::{block_on, block_on_f01, wait_until_ms, DEX_FEE_ADDR_RAW_PUBKEY}; + use common::{block_on, wait_until_ms, DEX_FEE_ADDR_RAW_PUBKEY}; use cosmrs::proto::cosmos::tx::v1beta1::{GetTxRequest, GetTxResponse, GetTxsEventResponse}; use crypto::privkey::key_pair_from_seed; use mocktopus::mocking::{MockResult, Mockable}; @@ -3638,19 +3648,16 @@ pub mod tendermint_coin_tests { }); // >> END HTLC CREATION - let htlc_spent = block_on( - coin.check_if_my_payment_sent(CheckIfMyPaymentSentArgs { - time_lock: 0, - other_pub: IRIS_TESTNET_HTLC_PAIR2_PUB_KEY, - secret_hash: sha256(&sec).as_slice(), - search_from_block: current_block, - swap_contract_address: &None, - swap_unique_data: &[], - amount: &amount_dec, - payment_instructions: &None, - }) - .compat(), - ) + let htlc_spent = block_on(coin.check_if_my_payment_sent(CheckIfMyPaymentSentArgs { + time_lock: 0, + other_pub: IRIS_TESTNET_HTLC_PAIR2_PUB_KEY, + secret_hash: sha256(&sec).as_slice(), + search_from_block: current_block, + swap_contract_address: &None, + swap_unique_data: &[], + amount: &amount_dec, + payment_instructions: &None, + })) .unwrap(); assert!(htlc_spent.is_some()); @@ -3866,7 +3873,7 @@ pub mod tendermint_coin_tests { }); let invalid_amount: MmNumber = 1.into(); - let error = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let error = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &create_htlc_tx, expected_sender: &[], dex_fee: &DexFee::Standard(invalid_amount.clone()), @@ -3907,7 +3914,7 @@ pub mod tendermint_coin_tests { .unwrap(), }); - let error = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let error = 
block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &random_transfer_tx, expected_sender: &[], dex_fee: &DexFee::Standard(invalid_amount.clone()), @@ -3949,7 +3956,7 @@ pub mod tendermint_coin_tests { data: TxRaw::decode(dex_fee_tx_response.tx.as_ref().unwrap().encode_to_vec().as_slice()).unwrap(), }); - let error = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let error = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &dex_fee_tx, expected_sender: &[], dex_fee: &DexFee::Standard(invalid_amount), @@ -3966,7 +3973,7 @@ pub mod tendermint_coin_tests { let valid_amount: BigDecimal = "0.0001".parse().unwrap(); // valid amount but invalid sender - let error = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let error = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &dex_fee_tx, expected_sender: &DEX_FEE_ADDR_RAW_PUBKEY, dex_fee: &DexFee::Standard(valid_amount.clone().into()), @@ -3982,7 +3989,7 @@ pub mod tendermint_coin_tests { } // invalid memo - let error = block_on_f01(coin.validate_fee(ValidateFeeArgs { + let error = block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &dex_fee_tx, expected_sender: &pubkey, dex_fee: &DexFee::Standard(valid_amount.into()), diff --git a/mm2src/coins/tendermint/tendermint_token.rs b/mm2src/coins/tendermint/tendermint_token.rs index 331b424f04..5329a4a1f3 100644 --- a/mm2src/coins/tendermint/tendermint_token.rs +++ b/mm2src/coins/tendermint/tendermint_token.rs @@ -104,31 +104,39 @@ impl TendermintToken { #[async_trait] #[allow(unused_variables)] impl SwapOps for TendermintToken { - fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionFut { + async fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionResult { self.platform_coin .send_taker_fee_for_denom(&dex_fee, self.denom.clone(), self.decimals, uuid, expire_at) + .compat() + .await } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { - 
self.platform_coin.send_htlc_for_denom( - maker_payment_args.time_lock_duration, - maker_payment_args.other_pubkey, - maker_payment_args.secret_hash, - maker_payment_args.amount, - self.denom.clone(), - self.decimals, - ) + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + self.platform_coin + .send_htlc_for_denom( + maker_payment_args.time_lock_duration, + maker_payment_args.other_pubkey, + maker_payment_args.secret_hash, + maker_payment_args.amount, + self.denom.clone(), + self.decimals, + ) + .compat() + .await } - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { - self.platform_coin.send_htlc_for_denom( - taker_payment_args.time_lock_duration, - taker_payment_args.other_pubkey, - taker_payment_args.secret_hash, - taker_payment_args.amount, - self.denom.clone(), - self.decimals, - ) + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + self.platform_coin + .send_htlc_for_denom( + taker_payment_args.time_lock_duration, + taker_payment_args.other_pubkey, + taker_payment_args.secret_hash, + taker_payment_args.amount, + self.denom.clone(), + self.decimals, + ) + .compat() + .await } async fn send_maker_spends_taker_payment( @@ -161,15 +169,18 @@ impl SwapOps for TendermintToken { )) } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { - self.platform_coin.validate_fee_for_denom( - validate_fee_args.fee_tx, - validate_fee_args.expected_sender, - validate_fee_args.dex_fee, - self.decimals, - validate_fee_args.uuid, - self.denom.to_string(), - ) + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { + self.platform_coin + .validate_fee_for_denom( + validate_fee_args.fee_tx, + validate_fee_args.expected_sender, + validate_fee_args.dex_fee, + self.decimals, + validate_fee_args.uuid, + self.denom.to_string(), + ) + .compat() + .await } async fn 
validate_maker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()> { @@ -184,17 +195,20 @@ impl SwapOps for TendermintToken { .await } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { - self.platform_coin.check_if_my_payment_sent_for_denom( - self.decimals, - self.denom.clone(), - if_my_payment_sent_args.other_pub, - if_my_payment_sent_args.secret_hash, - if_my_payment_sent_args.amount, - ) + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + self.platform_coin + .check_if_my_payment_sent_for_denom( + self.decimals, + self.denom.clone(), + if_my_payment_sent_args.other_pub, + if_my_payment_sent_args.secret_hash, + if_my_payment_sent_args.amount, + ) + .compat() + .await } async fn search_for_swap_tx_spend_my( diff --git a/mm2src/coins/test_coin.rs b/mm2src/coins/test_coin.rs index 906bafc52d..f03767bf5b 100644 --- a/mm2src/coins/test_coin.rs +++ b/mm2src/coins/test_coin.rs @@ -118,11 +118,17 @@ impl MarketCoinOps for TestCoin { #[async_trait] #[cfg_attr(any(test, feature = "mocktopus"), mockable)] impl SwapOps for TestCoin { - fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], _expire_at: u64) -> TransactionFut { unimplemented!() } + async fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], expire_at: u64) -> TransactionResult { + unimplemented!() + } - fn send_maker_payment(&self, _maker_payment_args: SendPaymentArgs) -> TransactionFut { unimplemented!() } + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + unimplemented!() + } - fn send_taker_payment(&self, _taker_payment_args: SendPaymentArgs) -> TransactionFut { unimplemented!() } + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + unimplemented!() + } async fn send_maker_spends_taker_payment( &self, @@ -152,7 +158,9 @@ impl SwapOps for 
TestCoin { unimplemented!() } - fn validate_fee(&self, _validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { unimplemented!() } + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { + unimplemented!() + } async fn validate_maker_payment(&self, _input: ValidatePaymentInput) -> ValidatePaymentResult<()> { unimplemented!() @@ -162,10 +170,10 @@ impl SwapOps for TestCoin { unimplemented!() } - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - _if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { unimplemented!() } @@ -211,9 +219,7 @@ impl SwapOps for TestCoin { fn derive_htlc_pubkey(&self, _swap_unique_data: &[u8]) -> Vec { unimplemented!() } - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { - unimplemented!() - } + async fn can_refund_htlc(&self, locktime: u64) -> Result { unimplemented!() } fn validate_other_pubkey(&self, raw_pubkey: &[u8]) -> MmResult<(), ValidateOtherPubKeyErr> { unimplemented!() } diff --git a/mm2src/coins/utxo.rs b/mm2src/coins/utxo.rs index 328863865c..6d98451c7f 100644 --- a/mm2src/coins/utxo.rs +++ b/mm2src/coins/utxo.rs @@ -99,16 +99,15 @@ use utxo_hd_wallet::UtxoHDWallet; use utxo_signer::with_key_pair::sign_tx; use utxo_signer::{TxProvider, TxProviderError, UtxoSignTxError, UtxoSignTxResult}; -use self::rpc_clients::{electrum_script_hash, ElectrumClient, ElectrumRpcRequest, EstimateFeeMethod, EstimateFeeMode, - NativeClient, UnspentInfo, UnspentMap, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, - UtxoRpcResult}; +use self::rpc_clients::{electrum_script_hash, ElectrumClient, ElectrumConnectionSettings, EstimateFeeMethod, + EstimateFeeMode, NativeClient, UnspentInfo, UnspentMap, UtxoRpcClientEnum, UtxoRpcError, + UtxoRpcFut, UtxoRpcResult}; use super::{big_decimal_from_sat_unsigned, BalanceError, BalanceFut, BalanceResult, 
CoinBalance, CoinFutSpawner, CoinsContext, DerivationMethod, FeeApproxStage, FoundSwapTxSpend, HistorySyncState, KmdRewardsDetails, MarketCoinOps, MmCoin, NumConversError, NumConversResult, PrivKeyActivationPolicy, PrivKeyPolicy, - PrivKeyPolicyNotAllowed, RawTransactionFut, RpcTransportEventHandler, RpcTransportEventHandlerShared, - TradeFee, TradePreimageError, TradePreimageFut, TradePreimageResult, Transaction, TransactionDetails, - TransactionEnum, TransactionErr, UnexpectedDerivationMethod, VerificationError, WithdrawError, - WithdrawRequest}; + PrivKeyPolicyNotAllowed, RawTransactionFut, TradeFee, TradePreimageError, TradePreimageFut, + TradePreimageResult, Transaction, TransactionDetails, TransactionEnum, TransactionErr, + UnexpectedDerivationMethod, VerificationError, WithdrawError, WithdrawRequest}; use crate::coin_balance::{EnableCoinScanPolicy, EnabledCoinBalanceParams, HDAddressBalanceScanner}; use crate::hd_wallet::{HDAccountOps, HDAddressOps, HDPathAccountToAddressId, HDWalletCoinOps, HDWalletOps}; use crate::utxo::tx_cache::UtxoVerboseCacheShared; @@ -143,7 +142,6 @@ pub type RecentlySpentOutPointsGuard<'a> = AsyncMutexGuard<'a, RecentlySpentOutP pub enum ScripthashNotification { Triggered(String), SubscribeToAddresses(HashSet
), - RefreshSubscriptions, } pub type ScripthashNotificationSender = Option>; @@ -1386,43 +1384,6 @@ pub fn coin_daemon_data_dir(name: &str, is_asset_chain: bool) -> PathBuf { data_dir } -enum ElectrumProtoVerifierEvent { - Connected(String), - Disconnected(String), -} - -/// Electrum protocol version verifier. -/// The structure is used to handle the `on_connected` event and notify `electrum_version_loop`. -struct ElectrumProtoVerifier { - on_event_tx: UnboundedSender, -} - -impl ElectrumProtoVerifier { - fn into_shared(self) -> RpcTransportEventHandlerShared { Arc::new(self) } -} - -impl RpcTransportEventHandler for ElectrumProtoVerifier { - fn debug_info(&self) -> String { "ElectrumProtoVerifier".into() } - - fn on_outgoing_request(&self, _data: &[u8]) {} - - fn on_incoming_response(&self, _data: &[u8]) {} - - fn on_connected(&self, address: String) -> Result<(), String> { - try_s!(self - .on_event_tx - .unbounded_send(ElectrumProtoVerifierEvent::Connected(address))); - Ok(()) - } - - fn on_disconnected(&self, address: String) -> Result<(), String> { - try_s!(self - .on_event_tx - .unbounded_send(ElectrumProtoVerifierEvent::Disconnected(address))); - Ok(()) - } -} - #[derive(Clone, Debug, Deserialize, Serialize)] pub struct UtxoActivationParams { pub mode: UtxoRpcMode, @@ -1472,7 +1433,13 @@ impl UtxoActivationParams { Some("electrum") => { let servers = json::from_value(req["servers"].clone()).map_to_mm(UtxoFromLegacyReqErr::InvalidElectrumServers)?; - UtxoRpcMode::Electrum { servers } + let min_connected = req["min_connected"].as_u64().map(|m| m as usize); + let max_connected = req["max_connected"].as_u64().map(|m| m as usize); + UtxoRpcMode::Electrum { + servers, + min_connected, + max_connected, + } }, _ => return MmError::err(UtxoFromLegacyReqErr::UnexpectedMethod), }; @@ -1524,7 +1491,14 @@ impl UtxoActivationParams { #[serde(tag = "rpc", content = "rpc_data")] pub enum UtxoRpcMode { Native, - Electrum { servers: Vec }, + Electrum { + /// The settings of 
each electrum server. + servers: Vec, + /// The minimum number of connections to electrum servers to keep alive/maintained at all times. + min_connected: Option, + /// The maximum number of connections to electrum servers to not exceed at any time. + max_connected: Option, + }, } impl UtxoRpcMode { diff --git a/mm2src/coins/utxo/bch.rs b/mm2src/coins/utxo/bch.rs index 1182cdc882..2a070a3847 100644 --- a/mm2src/coins/utxo/bch.rs +++ b/mm2src/coins/utxo/bch.rs @@ -870,18 +870,22 @@ impl UtxoCommonOps for BchCoin { #[async_trait] impl SwapOps for BchCoin { #[inline] - fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - utxo_common::send_taker_fee(self.clone(), dex_fee) + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + utxo_common::send_taker_fee(self.clone(), dex_fee).compat().await } #[inline] - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_maker_payment(self.clone(), maker_payment_args) + .compat() + .await } #[inline] - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_taker_payment(self.clone(), taker_payment_args) + .compat() + .await } #[inline] @@ -910,10 +914,15 @@ impl SwapOps for BchCoin { utxo_common::send_maker_refunds_payment(self.clone(), maker_refunds_payment_args).await } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let tx = match validate_fee_args.fee_tx { TransactionEnum::UtxoTx(tx) => tx.clone(), - _ => panic!(), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + 
"Invalid fee tx type. fee tx: {:?}", + fee_tx + ))) + }, }; utxo_common::validate_fee( self.clone(), @@ -923,6 +932,8 @@ impl SwapOps for BchCoin { validate_fee_args.dex_fee.clone(), validate_fee_args.min_block_number, ) + .compat() + .await } #[inline] @@ -936,17 +947,23 @@ impl SwapOps for BchCoin { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; utxo_common::check_if_my_payment_sent( self.clone(), - try_fus!(if_my_payment_sent_args.time_lock.try_into()), + time_lock, if_my_payment_sent_args.other_pub, if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.swap_unique_data, ) + .compat() + .await } #[inline] @@ -989,13 +1006,10 @@ impl SwapOps for BchCoin { } #[inline] - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { - Box::new( - utxo_common::can_refund_htlc(self, locktime) - .boxed() - .map_err(|e| ERRL!("{}", e)) - .compat(), - ) + async fn can_refund_htlc(&self, locktime: u64) -> Result { + utxo_common::can_refund_htlc(self, locktime) + .await + .map_err(|e| ERRL!("{}", e)) } #[inline] diff --git a/mm2src/coins/utxo/qtum.rs b/mm2src/coins/utxo/qtum.rs index 36e2f3dd2e..99a69debe3 100644 --- a/mm2src/coins/utxo/qtum.rs +++ b/mm2src/coins/utxo/qtum.rs @@ -510,18 +510,22 @@ impl UtxoStandardOps for QtumCoin { #[async_trait] impl SwapOps for QtumCoin { #[inline] - fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - utxo_common::send_taker_fee(self.clone(), dex_fee) + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + utxo_common::send_taker_fee(self.clone(), dex_fee).compat().await } #[inline] - fn send_maker_payment(&self, 
maker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_maker_payment(self.clone(), maker_payment_args) + .compat() + .await } #[inline] - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_taker_payment(self.clone(), taker_payment_args) + .compat() + .await } #[inline] @@ -550,10 +554,15 @@ impl SwapOps for QtumCoin { utxo_common::send_maker_refunds_payment(self.clone(), maker_refunds_payment_args).await } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let tx = match validate_fee_args.fee_tx { TransactionEnum::UtxoTx(tx) => tx.clone(), - _ => panic!(), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; utxo_common::validate_fee( self.clone(), @@ -563,6 +572,8 @@ impl SwapOps for QtumCoin { validate_fee_args.dex_fee.clone(), validate_fee_args.min_block_number, ) + .compat() + .await } #[inline] @@ -576,17 +587,23 @@ impl SwapOps for QtumCoin { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; utxo_common::check_if_my_payment_sent( self.clone(), - try_fus!(if_my_payment_sent_args.time_lock.try_into()), + time_lock, if_my_payment_sent_args.other_pub, if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.swap_unique_data, ) + .compat() + .await } #[inline] @@ -629,13 +646,10 @@ impl SwapOps for QtumCoin { } #[inline] - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { - Box::new( - utxo_common::can_refund_htlc(self, locktime) - .boxed() - .map_err(|e| ERRL!("{}", e)) - .compat(), - ) + async fn can_refund_htlc(&self, locktime: u64) -> Result { + utxo_common::can_refund_htlc(self, locktime) + .await + .map_err(|e| ERRL!("{}", e)) } #[inline] diff --git a/mm2src/coins/utxo/rpc_clients.rs b/mm2src/coins/utxo/rpc_clients.rs index f5150e4a9a..fce6afa82a 100644 --- a/mm2src/coins/utxo/rpc_clients.rs +++ b/mm2src/coins/utxo/rpc_clients.rs @@ -1,78 +1,51 @@ #![cfg_attr(target_arch = "wasm32", allow(unused_macros))] #![cfg_attr(target_arch = "wasm32", allow(dead_code))] -use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; -use crate::utxo::{output_script, output_script_p2pk, sat_from_big_decimal, GetBlockHeaderError, GetConfirmedTxError, - GetTxError, GetTxHeightError, NumConversResult, ScripthashNotification}; -use crate::{big_decimal_from_sat_unsigned, MyAddressError, 
NumConversError, RpcTransportEventHandler, - RpcTransportEventHandlerShared}; -use async_trait::async_trait; -use chain::{BlockHeader, BlockHeaderBits, BlockHeaderNonce, OutPoint, Transaction as UtxoTx, TransactionInput, - TxHashAlgo}; -use common::custom_futures::{select_ok_sequential, timeout::FutureTimerExt}; +mod electrum_rpc; +pub use electrum_rpc::*; + +use crate::utxo::{sat_from_big_decimal, GetBlockHeaderError, GetTxError, NumConversError, NumConversResult}; +use crate::{big_decimal_from_sat_unsigned, MyAddressError, RpcTransportEventHandlerShared}; +use chain::{OutPoint, Transaction as UtxoTx, TransactionInput, TxHashAlgo}; use common::custom_iter::TryIntoGroupMap; -use common::executor::{abortable_queue, abortable_queue::AbortableQueue, AbortableSystem, SpawnFuture, Timer}; -use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcBatchResponse, JsonRpcClient, JsonRpcError, JsonRpcErrorType, - JsonRpcId, JsonRpcMultiClient, JsonRpcRemoteAddr, JsonRpcRequest, JsonRpcRequestEnum, - JsonRpcResponse, JsonRpcResponseEnum, JsonRpcResponseFut, RpcRes}; -use common::log::{debug, LogOnError}; +use common::executor::Timer; +use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcClient, JsonRpcError, JsonRpcErrorType, JsonRpcRequest, + JsonRpcRequestEnum, JsonRpcResponseFut, RpcRes}; use common::log::{error, info, warn}; -use common::{median, now_float, now_ms, now_sec, OrdRange}; -use derive_more::Display; +use common::{median, now_sec}; use enum_derives::EnumFromStringify; -use futures::channel::oneshot as async_oneshot; -use futures::compat::{Future01CompatExt, Stream01CompatExt}; -use futures::future::{join_all, FutureExt, TryFutureExt}; -use futures::lock::Mutex as AsyncMutex; -use futures::{select, StreamExt}; -use futures01::future::select_ok; -use futures01::sync::mpsc; -use futures01::{Future, Sink, Stream}; -use http::Uri; -use itertools::Itertools; use keys::hash::H256; use keys::Address; use mm2_err_handle::prelude::*; -use mm2_number::{BigDecimal, 
BigInt, MmNumber}; -use mm2_rpc::data::legacy::ElectrumProtocol; -#[cfg(test)] use mocktopus::macros::*; +use mm2_number::{BigDecimal, MmNumber}; use rpc::v1::types::{Bytes as BytesJson, Transaction as RpcTransaction, H256 as H256Json}; use script::Script; -use serde_json::{self as json, Value as Json}; -use serialization::{deserialize, serialize, serialize_with_flags, CoinVariant, CompactInteger, Reader, - SERIALIZE_TRANSACTION_WITNESS}; -use sha2::{Digest, Sha256}; -use spv_validation::helpers_validation::SPVError; -use spv_validation::storage::BlockHeaderStorageOps; -use std::collections::hash_map::Entry; +use serialization::{deserialize, serialize, serialize_with_flags, CoinVariant, SERIALIZE_TRANSACTION_WITNESS}; + use std::collections::HashMap; -use std::convert::TryInto; use std::fmt; -use std::io; -use std::net::{SocketAddr, ToSocketAddrs}; +use std::fmt::Debug; use std::num::NonZeroU64; use std::ops::Deref; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::Arc; -use std::time::Duration; -use super::ScripthashNotificationSender; +use async_trait::async_trait; +use derive_more::Display; +use futures::channel::oneshot as async_oneshot; +use futures::compat::Future01CompatExt; +use futures::future::{FutureExt, TryFutureExt}; +use futures::lock::Mutex as AsyncMutex; +use futures01::Future; +#[cfg(test)] use mocktopus::macros::*; +use serde_json::{self as json, Value as Json}; cfg_native! 
{ - use futures::future::Either; - use futures::io::Error; + use crate::RpcTransportEventHandler; + use common::jsonrpc_client::{JsonRpcRemoteAddr, JsonRpcResponseEnum}; + use http::header::AUTHORIZATION; use http::{Request, StatusCode}; - use rustls::client::ServerCertVerified; - use rustls::{Certificate, ClientConfig, ServerName, OwnedTrustAnchor, RootCertStore}; - use std::convert::TryFrom; - use std::pin::Pin; - use std::task::{Context, Poll}; - use std::time::SystemTime; - use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWrite, AsyncWriteExt, BufReader, ReadBuf}; - use tokio::net::TcpStream; - use tokio_rustls::{client::TlsStream, TlsConnector}; - use webpki_roots::TLS_SERVER_ROOTS; } pub const NO_TX_ERROR_CODE: &str = "'code': -5"; @@ -80,39 +53,15 @@ const RESPONSE_TOO_LARGE_CODE: i16 = -32600; const TX_NOT_FOUND_RETRIES: u8 = 10; pub type AddressesByLabelResult = HashMap; -pub type JsonRpcPendingRequestsShared = Arc>; -pub type JsonRpcPendingRequests = HashMap>; pub type UnspentMap = HashMap>; -type ElectrumTxHistory = Vec; -type ElectrumScriptHash = String; -type ScriptHashUnspents = Vec; - #[derive(Debug, Deserialize)] #[allow(dead_code)] pub struct AddressPurpose { purpose: String, } -/// Skips the server certificate verification on TLS connection -pub struct NoCertificateVerification {} - -#[cfg(not(target_arch = "wasm32"))] -impl rustls::client::ServerCertVerifier for NoCertificateVerification { - fn verify_server_cert( - &self, - _: &Certificate, - _: &[Certificate], - _: &ServerName, - _: &mut dyn Iterator, - _: &[u8], - _: SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) - } -} - -#[derive(Debug)] +#[derive(Clone, Debug)] pub enum UtxoRpcClientEnum { Native(NativeClient), Electrum(ElectrumClient), @@ -145,15 +94,6 @@ impl Deref for UtxoRpcClientEnum { } } -impl Clone for UtxoRpcClientEnum { - fn clone(&self) -> Self { - match self { - UtxoRpcClientEnum::Native(c) => UtxoRpcClientEnum::Native(c.clone()), - 
UtxoRpcClientEnum::Electrum(c) => UtxoRpcClientEnum::Electrum(c.clone()), - } - } -} - impl UtxoRpcClientEnum { pub fn wait_for_confirmations( &self, @@ -306,6 +246,14 @@ pub struct SpentOutputInfo { pub spent_in_block: BlockHashOrHeight, } +#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Deserialize, Serialize)] +pub enum EstimateFeeMode { + ECONOMICAL, + CONSERVATIVE, + UNSET, +} + pub type UtxoRpcResult = Result>; pub type UtxoRpcFut = Box> + Send + 'static>; @@ -381,7 +329,8 @@ pub trait UtxoRpcClientOps: fmt::Debug + Send + Sync + 'static { /// Submits the raw `tx` transaction (serialized, hex-encoded) to blockchain network. fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut; - fn blockchain_scripthash_subscribe(&self, scripthash: String) -> UtxoRpcFut; + /// Subscribe to scripthash notifications from `server_address` for the given `scripthash`. + fn blockchain_scripthash_subscribe_using(&self, server_address: &str, scripthash: String) -> UtxoRpcFut; /// Returns raw transaction (serialized, hex-encoded) by the given `txid`. 
fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut; @@ -656,6 +605,66 @@ pub struct ListUnspentArgs { addresses: Vec, } +#[derive(Debug)] +struct ConcurrentRequestState { + is_running: bool, + subscribers: Vec>, +} + +impl ConcurrentRequestState { + fn new() -> Self { + ConcurrentRequestState { + is_running: false, + subscribers: Vec::new(), + } + } +} + +#[derive(Debug)] +pub struct ConcurrentRequestMap { + inner: AsyncMutex>>, +} + +impl Default for ConcurrentRequestMap { + fn default() -> Self { + ConcurrentRequestMap { + inner: AsyncMutex::new(HashMap::new()), + } + } +} + +impl ConcurrentRequestMap { + pub fn new() -> ConcurrentRequestMap { ConcurrentRequestMap::default() } + + async fn wrap_request(&self, request_arg: K, request_fut: RpcRes) -> Result { + let mut map = self.inner.lock().await; + let state = map + .entry(request_arg.clone()) + .or_insert_with(ConcurrentRequestState::new); + if state.is_running { + let (tx, rx) = async_oneshot::channel(); + state.subscribers.push(tx); + // drop here to avoid holding the lock during await + drop(map); + rx.await.unwrap() + } else { + state.is_running = true; + // drop here to avoid holding the lock during await + drop(map); + let request_res = request_fut.compat().await; + let mut map = self.inner.lock().await; + let state = map.get_mut(&request_arg).unwrap(); + for sub in state.subscribers.drain(..) 
{ + if sub.send(request_res.clone()).is_err() { + warn!("subscriber is dropped"); + } + } + state.is_running = false; + request_res + } + } +} + /// RPC client for UTXO based coins /// https://developer.bitcoin.org/reference/rpc/index.html - Bitcoin RPC API reference /// Other coins have additional methods or miss some of these @@ -711,7 +720,7 @@ impl UtxoJsonRpcClientInfo for NativeClientImpl { impl JsonRpcClient for NativeClientImpl { fn version(&self) -> &'static str { "1.0" } - fn next_id(&self) -> String { self.request_id.fetch_add(1, AtomicOrdering::Relaxed).to_string() } + fn next_id(&self) -> u64 { self.request_id.fetch_add(1, AtomicOrdering::Relaxed) } fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } @@ -732,10 +741,10 @@ impl JsonRpcClient for NativeClientImpl { self.event_handlers.on_outgoing_request(request_body.as_bytes()); let uri = self.uri.clone(); - + let auth = self.auth.clone(); let http_request = try_f!(Request::builder() .method("POST") - .header(AUTHORIZATION, self.auth.clone()) + .header(AUTHORIZATION, auth) .uri(uri.clone()) .body(Vec::from(request_body)) .map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))); @@ -828,7 +837,7 @@ impl UtxoRpcClientOps for NativeClient { Box::new(rpc_func!(self, "sendrawtransaction", tx).map_to_mm_fut(UtxoRpcError::from)) } - fn blockchain_scripthash_subscribe(&self, _scripthash: String) -> UtxoRpcFut { + fn blockchain_scripthash_subscribe_using(&self, _: &str, _scripthash: String) -> UtxoRpcFut { Box::new(futures01::future::err( UtxoRpcError::Internal("blockchain_scripthash_subscribe` is not supported for Native Clients".to_owned()) .into(), @@ -1225,1858 +1234,6 @@ impl NativeClientImpl { } } -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumUnspent { - pub height: Option, - pub tx_hash: H256Json, - pub tx_pos: u32, - pub value: u64, -} - -#[derive(Clone, Debug, Deserialize)] -#[serde(untagged)] -pub enum ElectrumNonce { - Number(u64), - Hash(H256Json), -} - 
-#[allow(clippy::from_over_into)] -impl Into for ElectrumNonce { - fn into(self) -> BlockHeaderNonce { - match self { - ElectrumNonce::Number(n) => BlockHeaderNonce::U32(n as u32), - ElectrumNonce::Hash(h) => BlockHeaderNonce::H256(h.into()), - } - } -} - -#[derive(Debug, Deserialize)] -pub struct ElectrumBlockHeadersRes { - pub count: u64, - pub hex: BytesJson, - #[allow(dead_code)] - max: u64, -} - -/// The block header compatible with Electrum 1.2 -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBlockHeaderV12 { - pub bits: u64, - pub block_height: u64, - pub merkle_root: H256Json, - pub nonce: ElectrumNonce, - pub prev_block_hash: H256Json, - pub timestamp: u64, - pub version: u64, -} - -impl ElectrumBlockHeaderV12 { - fn as_block_header(&self) -> BlockHeader { - BlockHeader { - version: self.version as u32, - previous_header_hash: self.prev_block_hash.into(), - merkle_root_hash: self.merkle_root.into(), - claim_trie_root: None, - hash_final_sapling_root: None, - time: self.timestamp as u32, - bits: BlockHeaderBits::U32(self.bits as u32), - nonce: self.nonce.clone().into(), - solution: None, - aux_pow: None, - prog_pow: None, - mtp_pow: None, - is_verus: false, - hash_state_root: None, - hash_utxo_root: None, - prevout_stake: None, - vch_block_sig_dlgt: None, - n_height: None, - n_nonce_u64: None, - mix_hash: None, - } - } - - #[inline] - pub fn as_hex(&self) -> String { - let block_header = self.as_block_header(); - let serialized = serialize(&block_header); - hex::encode(serialized) - } - - #[inline] - pub fn hash(&self) -> H256Json { - let block_header = self.as_block_header(); - BlockHeader::hash(&block_header).into() - } -} - -/// The block header compatible with Electrum 1.4 -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBlockHeaderV14 { - pub height: u64, - pub hex: BytesJson, -} - -impl ElectrumBlockHeaderV14 { - pub fn hash(&self) -> H256Json { self.hex.clone().into_vec()[..].into() } -} - -#[derive(Clone, Debug, Deserialize)] 
-#[serde(untagged)] -pub enum ElectrumBlockHeader { - V12(ElectrumBlockHeaderV12), - V14(ElectrumBlockHeaderV14), -} - -/// The merkle branch of a confirmed transaction -#[derive(Clone, Debug, Deserialize)] -pub struct TxMerkleBranch { - pub merkle: Vec, - pub block_height: u64, - pub pos: usize, -} - -#[derive(Clone)] -pub struct ConfirmedTransactionInfo { - pub tx: UtxoTx, - pub header: BlockHeader, - pub index: u64, - pub height: u64, -} - -#[derive(Debug, PartialEq)] -pub struct BestBlock { - pub height: u64, - pub hash: H256Json, -} - -impl From for BestBlock { - fn from(block_header: ElectrumBlockHeader) -> Self { - BestBlock { - height: block_header.block_height(), - hash: block_header.block_hash(), - } - } -} - -#[allow(clippy::upper_case_acronyms)] -#[derive(Debug, Deserialize, Serialize)] -pub enum EstimateFeeMode { - ECONOMICAL, - CONSERVATIVE, - UNSET, -} - -impl ElectrumBlockHeader { - pub fn block_height(&self) -> u64 { - match self { - ElectrumBlockHeader::V12(h) => h.block_height, - ElectrumBlockHeader::V14(h) => h.height, - } - } - - fn block_hash(&self) -> H256Json { - match self { - ElectrumBlockHeader::V12(h) => h.hash(), - ElectrumBlockHeader::V14(h) => h.hash(), - } - } -} - -#[derive(Debug, Deserialize)] -pub struct ElectrumTxHistoryItem { - pub height: i64, - pub tx_hash: H256Json, - pub fee: Option, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct ElectrumBalance { - pub(crate) confirmed: i128, - pub(crate) unconfirmed: i128, -} - -impl ElectrumBalance { - #[inline] - pub fn to_big_decimal(&self, decimals: u8) -> BigDecimal { - let balance_sat = BigInt::from(self.confirmed) + BigInt::from(self.unconfirmed); - BigDecimal::from(balance_sat) / BigDecimal::from(10u64.pow(decimals as u32)) - } -} - -#[inline] -fn sha_256(input: &[u8]) -> Vec { - let mut sha = Sha256::new(); - sha.update(input); - sha.finalize().to_vec() -} - -#[inline] -pub fn electrum_script_hash(script: &[u8]) -> Vec { - let mut result = sha_256(script); - 
result.reverse(); - result -} - -#[derive(Debug, Deserialize, Serialize)] -/// Deserializable Electrum protocol version representation for RPC -/// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#server.version -pub struct ElectrumProtocolVersion { - pub server_software_version: String, - pub protocol_version: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -/// Electrum request RPC representation -pub struct ElectrumRpcRequest { - pub url: String, - #[serde(default)] - pub protocol: ElectrumProtocol, - #[serde(default)] - pub disable_cert_verification: bool, -} - -/// Electrum client configuration -#[allow(clippy::upper_case_acronyms)] -#[cfg(not(target_arch = "wasm32"))] -#[derive(Clone, Debug, Serialize)] -enum ElectrumConfig { - TCP, - SSL { dns_name: String, skip_validation: bool }, -} - -/// Electrum client configuration -#[allow(clippy::upper_case_acronyms)] -#[cfg(target_arch = "wasm32")] -#[derive(Clone, Debug, Serialize)] -enum ElectrumConfig { - WS, - WSS, -} - -fn addr_to_socket_addr(input: &str) -> Result { - let mut addr = match input.to_socket_addrs() { - Ok(a) => a, - Err(e) => return ERR!("{} resolve error {:?}", input, e), - }; - match addr.next() { - Some(a) => Ok(a), - None => ERR!("{} resolved to None.", input), - } -} - -#[cfg(not(target_arch = "wasm32"))] -fn server_name_from_domain(dns_name: &str) -> Result { - match ServerName::try_from(dns_name) { - Ok(dns_name) if matches!(dns_name, ServerName::DnsName(_)) => Ok(dns_name), - _ => ERR!("Couldn't parse DNS name from '{}'", dns_name), - } -} - -/// Attempts to process the request (parse url, etc), build up the config and create new electrum connection -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. 
-#[cfg(not(target_arch = "wasm32"))] -pub fn spawn_electrum( - req: &ElectrumRpcRequest, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> Result { - let config = match req.protocol { - ElectrumProtocol::TCP => ElectrumConfig::TCP, - ElectrumProtocol::SSL => { - let uri: Uri = try_s!(req.url.parse()); - let host = uri - .host() - .ok_or(ERRL!("Couldn't retrieve host from addr {}", req.url))?; - - try_s!(server_name_from_domain(host)); - - ElectrumConfig::SSL { - dns_name: host.into(), - skip_validation: req.disable_cert_verification, - } - }, - ElectrumProtocol::WS | ElectrumProtocol::WSS => { - return ERR!("'ws' and 'wss' protocols are not supported yet. Consider using 'TCP' or 'SSL'") - }, - }; - - Ok(electrum_connect( - req.url.clone(), - config, - event_handlers, - scripthash_notification_sender, - abortable_system, - )) -} - -/// Attempts to process the request (parse url, etc), build up the config and create new electrum connection -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. -#[cfg(target_arch = "wasm32")] -pub fn spawn_electrum( - req: &ElectrumRpcRequest, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> Result { - let mut url = req.url.clone(); - let uri: Uri = try_s!(req.url.parse()); - - if uri.scheme().is_some() { - return ERR!( - "There has not to be a scheme in the url: {}. \ - 'ws://' scheme is used by default. 
\ - Consider using 'protocol: \"WSS\"' in the electrum request to switch to the 'wss://' scheme.", - url - ); - } - - let config = match req.protocol { - ElectrumProtocol::WS => { - url.insert_str(0, "ws://"); - ElectrumConfig::WS - }, - ElectrumProtocol::WSS => { - url.insert_str(0, "wss://"); - ElectrumConfig::WSS - }, - ElectrumProtocol::TCP | ElectrumProtocol::SSL => { - return ERR!("'TCP' and 'SSL' are not supported in a browser. Please use 'WS' or 'WSS' protocols"); - }, - }; - - Ok(electrum_connect( - url, - config, - event_handlers, - scripthash_notification_sender, - abortable_system, - )) -} - -/// Represents the active Electrum connection to selected address -pub struct ElectrumConnection { - /// The client connected to this SocketAddr - addr: String, - /// Configuration - #[allow(dead_code)] - config: ElectrumConfig, - /// The Sender forwarding requests to writing part of underlying stream - tx: Arc>>>>, - /// Responses are stored here - responses: JsonRpcPendingRequestsShared, - /// Selected protocol version. The value is initialized after the server.version RPC call. - protocol_version: AsyncMutex>, - /// This spawner is used to spawn Electrum's related futures that should be aborted on coin deactivation. - /// and on [`MmArc::stop`]. - /// This field is not used directly, but it holds all abort handles of futures spawned at `electrum_connect`. - /// - /// Please also note that this abortable system is a subsystem of [`ElectrumClientImpl::abortable_system`]. - /// For more info see [`ElectrumClientImpl::add_server`]. 
- _abortable_system: AbortableQueue, -} - -impl ElectrumConnection { - async fn is_connected(&self) -> bool { self.tx.lock().await.is_some() } - - async fn set_protocol_version(&self, version: f32) { self.protocol_version.lock().await.replace(version); } - - async fn reset_protocol_version(&self) { *self.protocol_version.lock().await = None; } -} - -#[derive(Debug)] -struct ConcurrentRequestState { - is_running: bool, - subscribers: Vec>, -} - -impl ConcurrentRequestState { - fn new() -> Self { - ConcurrentRequestState { - is_running: false, - subscribers: Vec::new(), - } - } -} - -#[derive(Debug)] -pub struct ConcurrentRequestMap { - inner: AsyncMutex>>, -} - -impl Default for ConcurrentRequestMap { - fn default() -> Self { - ConcurrentRequestMap { - inner: AsyncMutex::new(HashMap::new()), - } - } -} - -impl ConcurrentRequestMap { - pub fn new() -> ConcurrentRequestMap { ConcurrentRequestMap::default() } - - async fn wrap_request(&self, request_arg: K, request_fut: RpcRes) -> Result { - let mut map = self.inner.lock().await; - let state = map - .entry(request_arg.clone()) - .or_insert_with(ConcurrentRequestState::new); - if state.is_running { - let (tx, rx) = async_oneshot::channel(); - state.subscribers.push(tx); - // drop here to avoid holding the lock during await - drop(map); - rx.await.unwrap() - } else { - // drop here to avoid holding the lock during await - drop(map); - let request_res = request_fut.compat().await; - let mut map = self.inner.lock().await; - let state = map.get_mut(&request_arg).unwrap(); - for sub in state.subscribers.drain(..) 
{ - if sub.send(request_res.clone()).is_err() { - warn!("subscriber is dropped"); - } - } - state.is_running = false; - request_res - } - } -} - -#[derive(Debug)] -pub struct ElectrumClientImpl { - coin_ticker: String, - connections: AsyncMutex>, - next_id: AtomicU64, - event_handlers: Vec, - protocol_version: OrdRange, - get_balance_concurrent_map: ConcurrentRequestMap, - list_unspent_concurrent_map: ConcurrentRequestMap>, - block_headers_storage: BlockHeaderStorage, - /// This spawner is used to spawn Electrum's related futures that should be aborted on coin deactivation, - /// and on [`MmArc::stop`]. - /// - /// Please also note that this abortable system is a subsystem of [`UtxoCoinFields::abortable_system`]. - abortable_system: AbortableQueue, - negotiate_version: bool, - /// This is used for balance event streaming implementation for UTXOs. - /// If balance event streaming isn't enabled, this value will always be `None`; otherwise, - /// it will be used for sending scripthash messages to trigger re-connections, re-fetching the balances, etc. 
- pub(crate) scripthash_notification_sender: ScripthashNotificationSender, -} - -async fn electrum_request_multi( - client: ElectrumClient, - request: JsonRpcRequestEnum, -) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { - let mut futures = vec![]; - let connections = client.connections.lock().await; - for (i, connection) in connections.iter().enumerate() { - if client.negotiate_version && connection.protocol_version.lock().await.is_none() { - continue; - } - - let connection_addr = connection.addr.clone(); - let json = json::to_string(&request).map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))?; - if let Some(tx) = &*connection.tx.lock().await { - let fut = electrum_request( - json, - request.rpc_id(), - tx.clone(), - connection.responses.clone(), - ELECTRUM_TIMEOUT / (connections.len() - i) as u64, - ) - .map(|response| (JsonRpcRemoteAddr(connection_addr), response)); - futures.push(fut) - } - } - drop(connections); - - if futures.is_empty() { - return Err(JsonRpcErrorType::Transport( - "All electrums are currently disconnected".to_string(), - )); - } - - if let JsonRpcRequestEnum::Single(single) = &request { - if single.method == "server.ping" { - // server.ping must be sent to all servers to keep all connections alive - return select_ok(futures).map(|(result, _)| result).compat().await; - } - } - - let (res, no_of_failed_requests) = select_ok_sequential(futures) - .compat() - .await - .map_err(|e| JsonRpcErrorType::Transport(format!("{:?}", e)))?; - client.rotate_servers(no_of_failed_requests).await; - - Ok(res) -} - -async fn electrum_request_to( - client: ElectrumClient, - request: JsonRpcRequestEnum, - to_addr: String, -) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { - let (tx, responses) = { - let connections = client.connections.lock().await; - let connection = connections - .iter() - .find(|c| c.addr == to_addr) - .ok_or_else(|| JsonRpcErrorType::Internal(format!("Unknown destination address 
{}", to_addr)))?; - let responses = connection.responses.clone(); - let tx = { - match &*connection.tx.lock().await { - Some(tx) => tx.clone(), - None => { - return Err(JsonRpcErrorType::Transport(format!( - "Connection {} is not established yet", - to_addr - ))) - }, - } - }; - (tx, responses) - }; - let json = json::to_string(&request).map_err(|err| JsonRpcErrorType::InvalidRequest(err.to_string()))?; - let response = electrum_request(json, request.rpc_id(), tx, responses, ELECTRUM_TIMEOUT) - .compat() - .await?; - Ok((JsonRpcRemoteAddr(to_addr.to_owned()), response)) -} - -impl ElectrumClientImpl { - pub fn spawner(&self) -> abortable_queue::WeakSpawner { self.abortable_system.weak_spawner() } - - /// Create an Electrum connection and spawn a green thread actor to handle it. - pub async fn add_server(&self, req: &ElectrumRpcRequest) -> Result<(), String> { - let subsystem = try_s!(self.abortable_system.create_subsystem()); - let connection = try_s!(spawn_electrum( - req, - self.event_handlers.clone(), - &self.scripthash_notification_sender, - subsystem, - )); - self.connections.lock().await.push(connection); - Ok(()) - } - - /// Remove an Electrum connection and stop corresponding spawned actor. - pub async fn remove_server(&self, server_addr: &str) -> Result<(), String> { - let mut connections = self.connections.lock().await; - // do not use retain, we would have to return an error if we did not find connection by the passd address - let pos = connections - .iter() - .position(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - // shutdown_tx will be closed immediately on the connection drop - connections.remove(pos); - Ok(()) - } - - /// Moves the Electrum servers that fail in a multi request to the end. 
- pub async fn rotate_servers(&self, no_of_rotations: usize) { - let mut connections = self.connections.lock().await; - connections.rotate_left(no_of_rotations); - } - - /// Check if one of the spawned connections is connected. - pub async fn is_connected(&self) -> bool { - for connection in self.connections.lock().await.iter() { - if connection.is_connected().await { - return true; - } - } - false - } - - /// Check if all connections have been removed. - pub async fn is_connections_pool_empty(&self) -> bool { self.connections.lock().await.is_empty() } - - pub async fn count_connections(&self) -> usize { self.connections.lock().await.len() } - - /// Check if the protocol version was checked for one of the spawned connections. - pub async fn is_protocol_version_checked(&self) -> bool { - for connection in self.connections.lock().await.iter() { - if connection.protocol_version.lock().await.is_some() { - return true; - } - } - false - } - - /// Set the protocol version for the specified server. - pub async fn set_protocol_version(&self, server_addr: &str, version: f32) -> Result<(), String> { - let connections = self.connections.lock().await; - let con = connections - .iter() - .find(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - con.set_protocol_version(version).await; - - if let Some(sender) = &self.scripthash_notification_sender { - sender - .unbounded_send(ScripthashNotification::RefreshSubscriptions) - .map_err(|e| ERRL!("Failed sending scripthash message. {}", e))?; - } - - Ok(()) - } - - /// Reset the protocol version for the specified server. - pub async fn reset_protocol_version(&self, server_addr: &str) -> Result<(), String> { - let connections = self.connections.lock().await; - let con = connections - .iter() - .find(|con| con.addr == server_addr) - .ok_or(ERRL!("Unknown electrum address {}", server_addr))?; - con.reset_protocol_version().await; - Ok(()) - } - - /// Get available protocol versions. 
- pub fn protocol_version(&self) -> &OrdRange { &self.protocol_version } - - /// Get block headers storage. - pub fn block_headers_storage(&self) -> &BlockHeaderStorage { &self.block_headers_storage } -} - -#[derive(Clone, Debug)] -pub struct ElectrumClient(pub Arc); -impl Deref for ElectrumClient { - type Target = ElectrumClientImpl; - fn deref(&self) -> &ElectrumClientImpl { &self.0 } -} - -const BLOCKCHAIN_HEADERS_SUB_ID: &str = "blockchain.headers.subscribe"; - -const BLOCKCHAIN_SCRIPTHASH_SUB_ID: &str = "blockchain.scripthash.subscribe"; - -impl UtxoJsonRpcClientInfo for ElectrumClient { - fn coin_name(&self) -> &str { self.coin_ticker.as_str() } -} - -impl JsonRpcClient for ElectrumClient { - fn version(&self) -> &'static str { "2.0" } - - fn next_id(&self) -> String { self.next_id.fetch_add(1, AtomicOrdering::Relaxed).to_string() } - - fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } - - fn transport(&self, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { - Box::new(electrum_request_multi(self.clone(), request).boxed().compat()) - } -} - -impl JsonRpcBatchClient for ElectrumClient {} - -impl JsonRpcMultiClient for ElectrumClient { - fn transport_exact(&self, to_addr: String, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { - Box::new(electrum_request_to(self.clone(), request, to_addr).boxed().compat()) - } -} - -impl ElectrumClient { - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-ping - pub fn server_ping(&self) -> RpcRes<()> { rpc_func!(self, "server.ping") } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-version - pub fn server_version( - &self, - server_address: &str, - client_name: &str, - version: &OrdRange, - ) -> RpcRes { - let protocol_version: Vec = version.flatten().into_iter().map(|v| format!("{}", v)).collect(); - rpc_func_from!(self, server_address, "server.version", client_name, protocol_version) - } - - /// 
https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe - pub fn get_block_count_from(&self, server_address: &str) -> RpcRes { - Box::new( - rpc_func_from!(self, server_address, BLOCKCHAIN_HEADERS_SUB_ID) - .map(|r: ElectrumBlockHeader| r.block_height()), - ) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers - pub fn get_block_headers_from( - &self, - server_address: &str, - start_height: u64, - count: NonZeroU64, - ) -> RpcRes { - rpc_func_from!(self, server_address, "blockchain.block.headers", start_height, count) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent - /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 - /// We should remove them to build valid transactions - pub fn scripthash_list_unspent(&self, hash: &str) -> RpcRes> { - let request_fut = Box::new(rpc_func!(self, "blockchain.scripthash.listunspent", hash).and_then( - move |unspents: Vec| { - let mut map: HashMap<(H256Json, u32), bool> = HashMap::new(); - let unspents = unspents - .into_iter() - .filter(|unspent| match map.entry((unspent.tx_hash, unspent.tx_pos)) { - Entry::Occupied(_) => false, - Entry::Vacant(e) => { - e.insert(true); - true - }, - }) - .collect(); - Ok(unspents) - }, - )); - let arc = self.clone(); - let hash = hash.to_owned(); - let fut = async move { arc.list_unspent_concurrent_map.wrap_request(hash, request_fut).await }; - Box::new(fut.boxed().compat()) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent - /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 - /// We should remove them to build valid transactions. - /// Please note the function returns `ScriptHashUnspents` elements in the same order in which they were requested. 
- pub fn scripthash_list_unspent_batch(&self, hashes: Vec) -> RpcRes> { - let requests = hashes - .iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.listunspent", hash)); - Box::new(self.batch_rpc(requests).map(move |unspents: Vec| { - unspents - .into_iter() - .map(|hash_unspents| { - hash_unspents - .into_iter() - .unique_by(|unspent| (unspent.tx_hash, unspent.tx_pos)) - .collect::>() - }) - .collect() - })) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history - pub fn scripthash_get_history(&self, hash: &str) -> RpcRes { - rpc_func!(self, "blockchain.scripthash.get_history", hash) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history - /// Requests history of the `hashes` in a batch and returns them in the same order they were requested. - pub fn scripthash_get_history_batch(&self, hashes: I) -> RpcRes> - where - I: IntoIterator, - { - let requests = hashes - .into_iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.get_history", hash)); - self.batch_rpc(requests) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory - pub fn scripthash_get_balance(&self, hash: &str) -> RpcRes { - let arc = self.clone(); - let hash = hash.to_owned(); - let fut = async move { - let request = rpc_func!(arc, "blockchain.scripthash.get_balance", &hash); - arc.get_balance_concurrent_map.wrap_request(hash, request).await - }; - Box::new(fut.boxed().compat()) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory - /// Requests balances in a batch and returns them in the same order they were requested. 
- pub fn scripthash_get_balances(&self, hashes: I) -> RpcRes> - where - I: IntoIterator, - { - let requests = hashes - .into_iter() - .map(|hash| rpc_req!(self, "blockchain.scripthash.get_balance", &hash)); - self.batch_rpc(requests) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe - pub fn blockchain_headers_subscribe(&self) -> RpcRes { - rpc_func!(self, BLOCKCHAIN_HEADERS_SUB_ID) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-broadcast - pub fn blockchain_transaction_broadcast(&self, tx: BytesJson) -> RpcRes { - rpc_func!(self, "blockchain.transaction.broadcast", tx) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-estimatefee - /// It is recommended to set n_blocks as low as possible. - /// However, in some cases, n_blocks = 1 leads to an unreasonably high fee estimation. - /// https://github.com/KomodoPlatform/atomicDEX-API/issues/656#issuecomment-743759659 - pub fn estimate_fee(&self, mode: &Option, n_blocks: u32) -> UtxoRpcFut { - match mode { - Some(m) => { - Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks, m).map_to_mm_fut(UtxoRpcError::from)) - }, - None => Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks).map_to_mm_fut(UtxoRpcError::from)), - } - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-header - pub fn blockchain_block_header(&self, height: u64) -> RpcRes { - rpc_func!(self, "blockchain.block.header", height) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers - pub fn blockchain_block_headers(&self, start_height: u64, count: NonZeroU64) -> RpcRes { - rpc_func!(self, "blockchain.block.headers", start_height, count) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get-merkle - pub fn blockchain_transaction_get_merkle(&self, txid: 
H256Json, height: u64) -> RpcRes { - rpc_func!(self, "blockchain.transaction.get_merkle", txid, height) - } - - // get_tx_height_from_rpc is costly since it loops through history after requesting the whole history of the script pubkey - // This method should always be used if the block headers are saved to the DB - async fn get_tx_height_from_storage(&self, tx: &UtxoTx) -> Result> { - let tx_hash = tx.hash().reversed(); - let blockhash = self.get_verbose_transaction(&tx_hash.into()).compat().await?.blockhash; - Ok(self - .block_headers_storage() - .get_block_height_by_hash(blockhash.into()) - .await? - .ok_or_else(|| { - GetTxHeightError::HeightNotFound(format!( - "Transaction block header is not found in storage for {}", - self.0.coin_ticker - )) - })? - .try_into()?) - } - - // get_tx_height_from_storage is always preferred to be used instead of this, but if there is no headers in storage (storing headers is not enabled) - // this function can be used instead - async fn get_tx_height_from_rpc(&self, tx: &UtxoTx) -> Result { - for output in tx.outputs.clone() { - let script_pubkey_str = hex::encode(electrum_script_hash(&output.script_pubkey)); - if let Ok(history) = self.scripthash_get_history(script_pubkey_str.as_str()).compat().await { - if let Some(item) = history - .into_iter() - .find(|item| item.tx_hash.reversed() == H256Json(*tx.hash()) && item.height > 0) - { - return Ok(item.height as u64); - } - } - } - Err(GetTxHeightError::HeightNotFound(format!( - "Couldn't find height through electrum for {}", - self.coin_ticker - ))) - } - - async fn block_header_from_storage(&self, height: u64) -> Result> { - self.block_headers_storage() - .get_block_header(height) - .await? 
- .ok_or_else(|| { - GetBlockHeaderError::Internal(format!("Header not found in storage for {}", self.coin_ticker)).into() - }) - } - - async fn block_header_from_storage_or_rpc(&self, height: u64) -> Result> { - match self.block_header_from_storage(height).await { - Ok(h) => Ok(h), - Err(_) => Ok(deserialize( - self.blockchain_block_header(height).compat().await?.as_slice(), - )?), - } - } - - pub async fn get_confirmed_tx_info_from_rpc( - &self, - tx: &UtxoTx, - ) -> Result { - let height = self.get_tx_height_from_rpc(tx).await?; - - let merkle_branch = self - .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) - .compat() - .await?; - - let header = deserialize(self.blockchain_block_header(height).compat().await?.as_slice())?; - - Ok(ConfirmedTransactionInfo { - tx: tx.clone(), - header, - index: merkle_branch.pos as u64, - height, - }) - } - - pub async fn get_merkle_and_validated_header( - &self, - tx: &UtxoTx, - ) -> Result<(TxMerkleBranch, BlockHeader, u64), MmError> { - let height = self.get_tx_height_from_storage(tx).await?; - - let merkle_branch = self - .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) - .compat() - .await - .map_to_mm(|err| SPVError::UnableToGetMerkle { - coin: self.coin_ticker.clone(), - err: err.to_string(), - })?; - - let header = self.block_header_from_storage(height).await?; - - Ok((merkle_branch, header, height)) - } -} - -#[cfg_attr(test, mockable)] -impl ElectrumClient { - pub fn retrieve_headers_from( - &self, - server_address: &str, - from_height: u64, - to_height: u64, - ) -> UtxoRpcFut<(HashMap, Vec)> { - let coin_name = self.coin_ticker.clone(); - if from_height == 0 || to_height < from_height { - return Box::new(futures01::future::err( - UtxoRpcError::Internal("Invalid values for from/to parameters".to_string()).into(), - )); - } - let count: NonZeroU64 = match (to_height - from_height + 1).try_into() { - Ok(c) => c, - Err(e) => return 
Box::new(futures01::future::err(UtxoRpcError::Internal(e.to_string()).into())), - }; - Box::new( - self.get_block_headers_from(server_address, from_height, count) - .map_to_mm_fut(UtxoRpcError::from) - .and_then(move |headers| { - let (block_registry, block_headers) = { - if headers.count == 0 { - return MmError::err(UtxoRpcError::Internal("No headers available".to_string())); - } - let len = CompactInteger::from(headers.count); - let mut serialized = serialize(&len).take(); - serialized.extend(headers.hex.0.into_iter()); - drop_mutability!(serialized); - let mut reader = - Reader::new_with_coin_variant(serialized.as_slice(), coin_name.as_str().into()); - let maybe_block_headers = reader.read_list::(); - let block_headers = match maybe_block_headers { - Ok(headers) => headers, - Err(e) => return MmError::err(UtxoRpcError::InvalidResponse(format!("{:?}", e))), - }; - let mut block_registry: HashMap = HashMap::with_capacity(block_headers.len()); - let mut starting_height = from_height; - for block_header in &block_headers { - block_registry.insert(starting_height, block_header.clone()); - starting_height += 1; - } - (block_registry, block_headers) - }; - Ok((block_registry, block_headers)) - }), - ) - } - - pub(crate) fn get_servers_with_latest_block_count(&self) -> UtxoRpcFut<(Vec, u64)> { - let selfi = self.clone(); - let fut = async move { - let connections = selfi.connections.lock().await; - let futures = connections - .iter() - .map(|connection| { - let addr = connection.addr.clone(); - selfi - .get_block_count_from(&addr) - .map(|response| (addr, response)) - .compat() - }) - .collect::>(); - drop(connections); - - let responses = join_all(futures).await; - - // First, we use filter_map to get rid of any errors and collect the - // server addresses and block counts into two vectors - let (responding_servers, block_counts_from_all_servers): (Vec<_>, Vec<_>) = - responses.clone().into_iter().filter_map(|res| res.ok()).unzip(); - - // Next, we use max to find the 
maximum block count from all servers - if let Some(max_block_count) = block_counts_from_all_servers.clone().iter().max() { - // Then, we use filter and collect to get the servers that have the maximum block count - let servers_with_max_count: Vec<_> = responding_servers - .into_iter() - .zip(block_counts_from_all_servers) - .filter(|(_, count)| count == max_block_count) - .map(|(addr, _)| addr) - .collect(); - - // Finally, we return a tuple of servers with max count and the max count - return Ok((servers_with_max_count, *max_block_count)); - } - - Err(MmError::new(UtxoRpcError::Internal(format!( - "Couldn't get block count from any server for {}, responses: {:?}", - &selfi.coin_ticker, responses - )))) - }; - - Box::new(fut.boxed().compat()) - } -} - -// if mockable is placed before async_trait there is `munmap_chunk(): invalid pointer` error on async fn mocking attempt -#[async_trait] -#[cfg_attr(test, mockable)] -impl UtxoRpcClientOps for ElectrumClient { - fn list_unspent(&self, address: &Address, _decimals: u8) -> UtxoRpcFut> { - let mut output_scripts = vec![try_f!(output_script(address))]; - - // If the plain pubkey is available, fetch the UTXOs found in P2PK outputs as well (if any). - if let Some(pubkey) = address.pubkey() { - let p2pk_output_script = output_script_p2pk(pubkey); - output_scripts.push(p2pk_output_script); - } - - let this = self.clone(); - let fut = async move { - let hashes = output_scripts - .iter() - .map(|s| hex::encode(electrum_script_hash(s))) - .collect(); - let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; - - let unspents = unspents - .into_iter() - .zip(output_scripts) - .flat_map(|(unspents, output_script)| { - unspents - .into_iter() - .map(move |unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) - }) - .collect(); - Ok(unspents) - }; - - Box::new(fut.boxed().compat()) - } - - fn list_unspent_group(&self, addresses: Vec
, _decimals: u8) -> UtxoRpcFut { - let output_scripts = try_f!(addresses - .iter() - .map(output_script) - .collect::, keys::Error>>()); - - let this = self.clone(); - let fut = async move { - let hashes = output_scripts - .iter() - .map(|s| hex::encode(electrum_script_hash(s))) - .collect(); - let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; - - let unspents: Vec> = unspents - .into_iter() - .zip(output_scripts) - .map(|(unspents, output_script)| { - unspents - .into_iter() - .map(|unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) - .collect() - }) - .collect(); - - let unspent_map = addresses - .into_iter() - // `scripthash_list_unspent_batch` returns `ScriptHashUnspents` elements in the same order in which they were requested. - // So we can zip `addresses` and `unspents` into one iterator. - .zip(unspents) - .collect(); - Ok(unspent_map) - }; - Box::new(fut.boxed().compat()) - } - - fn send_transaction(&self, tx: &UtxoTx) -> UtxoRpcFut { - let bytes = if tx.has_witness() { - BytesJson::from(serialize_with_flags(tx, SERIALIZE_TRANSACTION_WITNESS)) - } else { - BytesJson::from(serialize(tx)) - }; - Box::new( - self.blockchain_transaction_broadcast(bytes) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut { - Box::new( - self.blockchain_transaction_broadcast(tx) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn blockchain_scripthash_subscribe(&self, scripthash: String) -> UtxoRpcFut { - Box::new(rpc_func!(self, BLOCKCHAIN_SCRIPTHASH_SUB_ID, scripthash).map_to_mm_fut(UtxoRpcError::from)) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// returns transaction bytes by default - fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut { - let verbose = false; - Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) - } - - /// 
https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// returns verbose transaction by default - fn get_verbose_transaction(&self, txid: &H256Json) -> UtxoRpcFut { - let verbose = true; - Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) - } - - /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get - /// Returns verbose transactions in a batch. - fn get_verbose_transactions(&self, tx_ids: &[H256Json]) -> UtxoRpcFut> { - let verbose = true; - let requests = tx_ids - .iter() - .map(|txid| rpc_req!(self, "blockchain.transaction.get", txid, verbose)); - Box::new(self.batch_rpc(requests).map_to_mm_fut(UtxoRpcError::from)) - } - - fn get_block_count(&self) -> UtxoRpcFut { - Box::new( - self.blockchain_headers_subscribe() - .map(|r| r.block_height()) - .map_to_mm_fut(UtxoRpcError::from), - ) - } - - fn display_balance(&self, address: Address, decimals: u8) -> RpcRes { - let output_script = try_f!(output_script(&address).map_err(|err| JsonRpcError::new( - UtxoJsonRpcClientInfo::client_info(self), - rpc_req!(self, "blockchain.scripthash.get_balance").into(), - JsonRpcErrorType::Internal(err.to_string()) - ))); - let mut hashes = vec![hex::encode(electrum_script_hash(&output_script))]; - - // If the plain pubkey is available, fetch the balance found in P2PK output as well (if any). - if let Some(pubkey) = address.pubkey() { - let p2pk_output_script = output_script_p2pk(pubkey); - hashes.push(hex::encode(electrum_script_hash(&p2pk_output_script))); - } - - let this = self.clone(); - let fut = async move { - Ok(this - .scripthash_get_balances(hashes) - .compat() - .await? - .into_iter() - .fold(BigDecimal::from(0), |sum, electrum_balance| { - sum + electrum_balance.to_big_decimal(decimals) - })) - }; - Box::new(fut.boxed().compat()) - } - - fn display_balances(&self, addresses: Vec
, decimals: u8) -> UtxoRpcFut> { - let this = self.clone(); - let fut = async move { - let hashes = addresses - .iter() - .map(|address| { - let output_script = output_script(address)?; - let hash = electrum_script_hash(&output_script); - - Ok(hex::encode(hash)) - }) - .collect::, keys::Error>>()?; - - let electrum_balances = this.scripthash_get_balances(hashes).compat().await?; - let balances = electrum_balances - .into_iter() - // `scripthash_get_balances` returns `ElectrumBalance` elements in the same order in which they were requested. - // So we can zip `addresses` and the balances into one iterator. - .zip(addresses) - .map(|(electrum_balance, address)| (address, electrum_balance.to_big_decimal(decimals))) - .collect(); - Ok(balances) - }; - - Box::new(fut.boxed().compat()) - } - - fn estimate_fee_sat( - &self, - decimals: u8, - _fee_method: &EstimateFeeMethod, - mode: &Option, - n_blocks: u32, - ) -> UtxoRpcFut { - Box::new(self.estimate_fee(mode, n_blocks).map(move |fee| { - if fee > 0.00001 { - (fee * 10.0_f64.powf(decimals as f64)) as u64 - } else { - 1000 - } - })) - } - - fn get_relay_fee(&self) -> RpcRes { rpc_func!(self, "blockchain.relayfee") } - - fn find_output_spend( - &self, - tx_hash: H256, - script_pubkey: &[u8], - vout: usize, - _from_block: BlockHashOrHeight, - tx_hash_algo: TxHashAlgo, - ) -> Box, Error = String> + Send> { - let selfi = self.clone(); - let script_hash = hex::encode(electrum_script_hash(script_pubkey)); - let fut = async move { - let history = try_s!(selfi.scripthash_get_history(&script_hash).compat().await); - - if history.len() < 2 { - return Ok(None); - } - - for item in history.iter() { - let transaction = try_s!(selfi.get_transaction_bytes(&item.tx_hash).compat().await); - - let mut maybe_spend_tx: UtxoTx = - try_s!(deserialize(transaction.as_slice()).map_err(|e| ERRL!("{:?}", e))); - maybe_spend_tx.tx_hash_algo = tx_hash_algo; - drop_mutability!(maybe_spend_tx); - - for (index, input) in 
maybe_spend_tx.inputs.iter().enumerate() { - if input.previous_output.hash == tx_hash && input.previous_output.index == vout as u32 { - return Ok(Some(SpentOutputInfo { - input: input.clone(), - input_index: index, - spending_tx: maybe_spend_tx, - spent_in_block: BlockHashOrHeight::Height(item.height), - })); - } - } - } - Ok(None) - }; - Box::new(fut.boxed().compat()) - } - - fn get_median_time_past( - &self, - starting_block: u64, - count: NonZeroU64, - coin_variant: CoinVariant, - ) -> UtxoRpcFut { - let from = if starting_block <= count.get() { - 0 - } else { - starting_block - count.get() + 1 - }; - Box::new( - self.blockchain_block_headers(from, count) - .map_to_mm_fut(UtxoRpcError::from) - .and_then(|res| { - if res.count == 0 { - return MmError::err(UtxoRpcError::InvalidResponse("Server returned zero count".to_owned())); - } - let len = CompactInteger::from(res.count); - let mut serialized = serialize(&len).take(); - serialized.extend(res.hex.0.into_iter()); - let mut reader = Reader::new_with_coin_variant(serialized.as_slice(), coin_variant); - let headers = reader.read_list::()?; - let mut timestamps: Vec<_> = headers.into_iter().map(|block| block.time).collect(); - // can unwrap because count is non zero - Ok(median(timestamps.as_mut_slice()).unwrap()) - }), - ) - } - - async fn get_block_timestamp(&self, height: u64) -> Result> { - Ok(self.block_header_from_storage_or_rpc(height).await?.time as u64) - } -} - -#[cfg_attr(test, mockable)] -impl ElectrumClientImpl { - pub fn new( - coin_ticker: String, - event_handlers: Vec, - block_headers_storage: BlockHeaderStorage, - abortable_system: AbortableQueue, - negotiate_version: bool, - scripthash_notification_sender: ScripthashNotificationSender, - ) -> ElectrumClientImpl { - let protocol_version = OrdRange::new(1.2, 1.4).unwrap(); - ElectrumClientImpl { - coin_ticker, - connections: AsyncMutex::new(vec![]), - next_id: 0.into(), - event_handlers, - protocol_version, - get_balance_concurrent_map: 
ConcurrentRequestMap::new(), - list_unspent_concurrent_map: ConcurrentRequestMap::new(), - block_headers_storage, - abortable_system, - negotiate_version, - scripthash_notification_sender, - } - } - - #[cfg(test)] - pub fn with_protocol_version( - coin_ticker: String, - event_handlers: Vec, - protocol_version: OrdRange, - block_headers_storage: BlockHeaderStorage, - abortable_system: AbortableQueue, - scripthash_notification_sender: ScripthashNotificationSender, - ) -> ElectrumClientImpl { - ElectrumClientImpl { - protocol_version, - ..ElectrumClientImpl::new( - coin_ticker, - event_handlers, - block_headers_storage, - abortable_system, - false, - scripthash_notification_sender, - ) - } - } -} - -/// Helper function casting mpsc::Receiver as Stream. -fn rx_to_stream(rx: mpsc::Receiver>) -> impl Stream, Error = io::Error> { - rx.map_err(|_| panic!("errors not possible on rx")) -} - -async fn electrum_process_json( - raw_json: Json, - arc: &JsonRpcPendingRequestsShared, - scripthash_notification_sender: &ScripthashNotificationSender, -) { - // detect if we got standard JSONRPC response or subscription response as JSONRPC request - #[derive(Deserialize)] - #[serde(untagged)] - enum ElectrumRpcResponseEnum { - /// The subscription response as JSONRPC request. - /// - /// NOTE Because JsonRpcResponse uses default values for each of its field, - /// this variant has to stay at top in this enumeration to be properly deserialized - /// from serde. - SubscriptionNotification(JsonRpcRequest), - /// The standard JSONRPC single response. - SingleResponse(JsonRpcResponse), - /// The batch of standard JSONRPC responses. 
- BatchResponses(JsonRpcBatchResponse), - } - - let response: ElectrumRpcResponseEnum = match json::from_value(raw_json) { - Ok(res) => res, - Err(e) => { - error!("{}", e); - return; - }, - }; - - let response = match response { - ElectrumRpcResponseEnum::SingleResponse(single) => JsonRpcResponseEnum::Single(single), - ElectrumRpcResponseEnum::BatchResponses(batch) => JsonRpcResponseEnum::Batch(batch), - ElectrumRpcResponseEnum::SubscriptionNotification(req) => { - let id = match req.method.as_ref() { - BLOCKCHAIN_HEADERS_SUB_ID => BLOCKCHAIN_HEADERS_SUB_ID, - BLOCKCHAIN_SCRIPTHASH_SUB_ID => { - let scripthash = match req.params.first() { - Some(t) => t.as_str().unwrap_or_default(), - None => { - debug!("Notification must contain the scripthash value."); - return; - }, - }; - - if let Some(sender) = scripthash_notification_sender { - debug!("Sending scripthash message"); - if let Err(e) = sender.unbounded_send(ScripthashNotification::Triggered(scripthash.to_string())) - { - error!("Failed sending scripthash message. {e}"); - return; - }; - }; - BLOCKCHAIN_SCRIPTHASH_SUB_ID - }, - _ => { - error!("Couldn't get id of request {:?}", req); - return; - }, - }; - JsonRpcResponseEnum::Single(JsonRpcResponse { - id: id.into(), - jsonrpc: "2.0".into(), - result: req.params[0].clone(), - error: Json::Null, - }) - }, - }; - - // the corresponding sender may not exist, receiver may be dropped - // these situations are not considered as errors so we just silently skip them - let mut pending = arc.lock().await; - if let Some(tx) = pending.remove(&response.rpc_id()) { - tx.send(response).ok(); - } -} - -async fn electrum_process_chunk( - chunk: &[u8], - arc: &JsonRpcPendingRequestsShared, - scripthash_notification_sender: ScripthashNotificationSender, -) { - // we should split the received chunk because we can get several responses in 1 chunk. 
- let split = chunk.split(|item| *item == b'\n'); - for chunk in split { - // split returns empty slice if it ends with separator which is our case - if !chunk.is_empty() { - let raw_json: Json = match json::from_slice(chunk) { - Ok(json) => json, - Err(e) => { - error!("{}", e); - return; - }, - }; - electrum_process_json(raw_json, arc, &scripthash_notification_sender).await - } - } -} - -fn increase_delay(delay: &AtomicU64) { - if delay.load(AtomicOrdering::Relaxed) < 60 { - delay.fetch_add(5, AtomicOrdering::Relaxed); - } -} - -macro_rules! try_loop { - ($e:expr, $addr: ident, $delay: ident) => { - match $e { - Ok(res) => res, - Err(e) => { - error!("{:?} error {:?}", $addr, e); - increase_delay(&$delay); - continue; - }, - } - }; -} - -/// The enum wrapping possible variants of underlying Streams -#[cfg(not(target_arch = "wasm32"))] -#[allow(clippy::large_enum_variant)] -enum ElectrumStream { - Tcp(TcpStream), - Tls(TlsStream), -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsRef for ElectrumStream { - fn as_ref(&self) -> &TcpStream { - match self { - ElectrumStream::Tcp(stream) => stream, - ElectrumStream::Tls(stream) => stream.get_ref().0, - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsyncRead for ElectrumStream { - fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), - ElectrumStream::Tls(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -impl AsyncWrite for ElectrumStream { - fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), - ElectrumStream::Tls(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match 
self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), - ElectrumStream::Tls(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), - } - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.get_mut() { - ElectrumStream::Tcp(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), - ElectrumStream::Tls(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), - } - } -} - -const ELECTRUM_TIMEOUT: u64 = 60; - -async fn electrum_last_chunk_loop(last_chunk: Arc) { - loop { - Timer::sleep(ELECTRUM_TIMEOUT as f64).await; - let last = (last_chunk.load(AtomicOrdering::Relaxed) / 1000) as f64; - if now_float() - last > ELECTRUM_TIMEOUT as f64 { - warn!( - "Didn't receive any data since {}. Shutting down the connection.", - last as i64 - ); - break; - } - } -} - -#[cfg(not(target_arch = "wasm32"))] -fn rustls_client_config(unsafe_conf: bool) -> Arc { - let mut cert_store = RootCertStore::empty(); - - cert_store.add_trust_anchors( - TLS_SERVER_ROOTS - .iter() - .map(|ta| OwnedTrustAnchor::from_subject_spki_name_constraints(ta.subject, ta.spki, ta.name_constraints)), - ); - - let mut tls_config = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(cert_store) - .with_no_client_auth(); - - if unsafe_conf { - tls_config - .dangerous() - .set_certificate_verifier(Arc::new(NoCertificateVerification {})); - } - Arc::new(tls_config) -} - -#[cfg(not(target_arch = "wasm32"))] -lazy_static! 
{ - static ref SAFE_TLS_CONFIG: Arc = rustls_client_config(false); - static ref UNSAFE_TLS_CONFIG: Arc = rustls_client_config(true); -} - -#[cfg(not(target_arch = "wasm32"))] -async fn connect_loop( - config: ElectrumConfig, - addr: String, - responses: JsonRpcPendingRequestsShared, - connection_tx: Arc>>>>, - event_handlers: Vec, - scripthash_notification_sender: ScripthashNotificationSender, - _spawner: Spawner, -) -> Result<(), ()> { - let delay = Arc::new(AtomicU64::new(0)); - - loop { - let current_delay = delay.load(AtomicOrdering::Relaxed); - if current_delay > 0 { - Timer::sleep(current_delay as f64).await; - }; - - let socket_addr = addr_to_socket_addr(&addr).map_err(|e| { - error!("{:?} error {:?}", addr, e); - })?; - - let connect_f = match config.clone() { - ElectrumConfig::TCP => Either::Left(TcpStream::connect(&socket_addr).map_ok(ElectrumStream::Tcp)), - ElectrumConfig::SSL { - dns_name, - skip_validation, - } => { - let tls_connector = if skip_validation { - TlsConnector::from(UNSAFE_TLS_CONFIG.clone()) - } else { - TlsConnector::from(SAFE_TLS_CONFIG.clone()) - }; - // The address should always be correct since we checked it beforehand in initializaiton. 
- let dns = server_name_from_domain(dns_name.as_str()).map_err(|e| { - error!("{:?} error {:?}", addr, e); - })?; - - Either::Right( - TcpStream::connect(&socket_addr) - .and_then(move |stream| tls_connector.connect(dns, stream).map_ok(ElectrumStream::Tls)), - ) - }, - }; - - let stream = try_loop!(connect_f.await, addr, delay); - try_loop!(stream.as_ref().set_nodelay(true), addr, delay); - info!("Electrum client connected to {}", addr); - try_loop!(event_handlers.on_connected(addr.clone()), addr, delay); - let last_chunk = Arc::new(AtomicU64::new(now_ms())); - let mut last_chunk_f = electrum_last_chunk_loop(last_chunk.clone()).boxed().fuse(); - - let (tx, rx) = mpsc::channel(0); - *connection_tx.lock().await = Some(tx); - let rx = rx_to_stream(rx).inspect(|data| { - // measure the length of each sent packet - event_handlers.on_outgoing_request(data); - }); - - let (read, mut write) = tokio::io::split(stream); - let recv_f = { - let delay = delay.clone(); - let addr = addr.clone(); - let responses = responses.clone(); - let scripthash_notification_sender = scripthash_notification_sender.clone(); - let event_handlers = event_handlers.clone(); - async move { - let mut buffer = String::with_capacity(1024); - let mut buf_reader = BufReader::new(read); - loop { - match buf_reader.read_line(&mut buffer).await { - Ok(c) => { - if c == 0 { - info!("EOF from {}", addr); - break; - } - // reset the delay if we've connected successfully and only if we received some data from connection - delay.store(0, AtomicOrdering::Relaxed); - }, - Err(e) => { - error!("Error on read {} from {}", e, addr); - break; - }, - }; - // measure the length of each incoming packet - event_handlers.on_incoming_response(buffer.as_bytes()); - last_chunk.store(now_ms(), AtomicOrdering::Relaxed); - - electrum_process_chunk(buffer.as_bytes(), &responses, scripthash_notification_sender.clone()).await; - buffer.clear(); - } - } - }; - let mut recv_f = Box::pin(recv_f).fuse(); - - let send_f = { - let addr 
= addr.clone(); - let mut rx = rx.compat(); - async move { - while let Some(Ok(bytes)) = rx.next().await { - if let Err(e) = write.write_all(&bytes).await { - error!("Write error {} to {}", e, addr); - } - } - } - }; - let mut send_f = Box::pin(send_f).fuse(); - macro_rules! reset_tx_and_continue { - () => { - info!("{} connection dropped", addr); - event_handlers.on_disconnected(addr.clone()).error_log(); - *connection_tx.lock().await = None; - increase_delay(&delay); - continue; - }; - } - - select! { - _last_chunk = last_chunk_f => { reset_tx_and_continue!(); }, - _recv = recv_f => { reset_tx_and_continue!(); }, - _send = send_f => { reset_tx_and_continue!(); }, - } - } -} - -#[cfg(target_arch = "wasm32")] -async fn connect_loop( - _config: ElectrumConfig, - addr: String, - responses: JsonRpcPendingRequestsShared, - connection_tx: Arc>>>>, - event_handlers: Vec, - scripthash_notification_sender: ScripthashNotificationSender, - spawner: Spawner, -) -> Result<(), ()> { - use std::sync::atomic::AtomicUsize; - - lazy_static! 
{ - static ref CONN_IDX: Arc = Arc::new(AtomicUsize::new(0)); - } - - use mm2_net::wasm::wasm_ws::ws_transport; - - let delay = Arc::new(AtomicU64::new(0)); - loop { - let current_delay = delay.load(AtomicOrdering::Relaxed); - if current_delay > 0 { - Timer::sleep(current_delay as f64).await; - } - - let conn_idx = CONN_IDX.fetch_add(1, AtomicOrdering::Relaxed); - let (mut transport_tx, mut transport_rx) = - try_loop!(ws_transport(conn_idx, &addr, &spawner).await, addr, delay); - - info!("Electrum client connected to {}", addr); - try_loop!(event_handlers.on_connected(addr.clone()), addr, delay); - - let last_chunk = Arc::new(AtomicU64::new(now_ms())); - let mut last_chunk_fut = electrum_last_chunk_loop(last_chunk.clone()).boxed().fuse(); - - let (outgoing_tx, outgoing_rx) = mpsc::channel(0); - *connection_tx.lock().await = Some(outgoing_tx); - - let incoming_fut = { - let delay = delay.clone(); - let addr = addr.clone(); - let responses = responses.clone(); - let scripthash_notification_sender = scripthash_notification_sender.clone(); - let event_handlers = event_handlers.clone(); - async move { - while let Some(incoming_res) = transport_rx.next().await { - last_chunk.store(now_ms(), AtomicOrdering::Relaxed); - match incoming_res { - Ok(incoming_json) => { - // reset the delay if we've connected successfully and only if we received some data from connection - delay.store(0, AtomicOrdering::Relaxed); - // measure the length of each incoming packet - let incoming_str = incoming_json.to_string(); - event_handlers.on_incoming_response(incoming_str.as_bytes()); - - electrum_process_json(incoming_json, &responses, &scripthash_notification_sender).await; - }, - Err(e) => { - error!("{} error: {:?}", addr, e); - }, - } - } - } - }; - let mut incoming_fut = Box::pin(incoming_fut).fuse(); - - let outgoing_fut = { - let addr = addr.clone(); - let mut outgoing_rx = rx_to_stream(outgoing_rx).compat(); - let event_handlers = event_handlers.clone(); - async move { - while let 
Some(Ok(data)) = outgoing_rx.next().await { - let raw_json: Json = match json::from_slice(&data) { - Ok(js) => js, - Err(e) => { - error!("Error {} deserializing the outgoing data: {:?}", e, data); - continue; - }, - }; - // measure the length of each sent packet - event_handlers.on_outgoing_request(&data); - - if let Err(e) = transport_tx.send(raw_json).await { - error!("Error sending to {}: {:?}", addr, e); - } - } - } - }; - let mut outgoing_fut = Box::pin(outgoing_fut).fuse(); - - macro_rules! reset_tx_and_continue { - () => { - info!("{} connection dropped", addr); - *connection_tx.lock().await = None; - event_handlers.on_disconnected(addr.clone()).error_log(); - increase_delay(&delay); - continue; - }; - } - - select! { - _last_chunk = last_chunk_fut => { reset_tx_and_continue!(); }, - _incoming = incoming_fut => { reset_tx_and_continue!(); }, - _outgoing = outgoing_fut => { reset_tx_and_continue!(); }, - } - } -} - -/// Builds up the electrum connection, spawns endless loop that attempts to reconnect to the server -/// in case of connection errors. -/// The function takes `abortable_system` that will be used to spawn Electrum's related futures. 
-fn electrum_connect( - addr: String, - config: ElectrumConfig, - event_handlers: Vec, - scripthash_notification_sender: &ScripthashNotificationSender, - abortable_system: AbortableQueue, -) -> ElectrumConnection { - let responses = Arc::new(AsyncMutex::new(JsonRpcPendingRequests::default())); - let tx = Arc::new(AsyncMutex::new(None)); - - let spawner = abortable_system.weak_spawner(); - let fut = connect_loop( - config.clone(), - addr.clone(), - responses.clone(), - tx.clone(), - event_handlers, - scripthash_notification_sender.clone(), - spawner.clone(), - ) - .then(|_| futures::future::ready(())); - - spawner.spawn(fut); - ElectrumConnection { - addr, - config, - tx, - responses, - protocol_version: AsyncMutex::new(None), - _abortable_system: abortable_system, - } -} - -/// # Important -/// `electrum_request` should always return [`JsonRpcErrorType::Transport`] error. -fn electrum_request( - mut req_json: String, - rpc_id: JsonRpcId, - tx: mpsc::Sender>, - responses: JsonRpcPendingRequestsShared, - timeout: u64, -) -> Box + Send + 'static> { - let send_fut = async move { - #[cfg(not(target_arch = "wasm"))] - { - // Electrum request and responses must end with \n - // https://electrumx.readthedocs.io/en/latest/protocol-basics.html#message-stream - req_json.push('\n'); - } - let (req_tx, resp_rx) = async_oneshot::channel(); - responses.lock().await.insert(rpc_id, req_tx); - tx.send(req_json.into_bytes()) - .compat() - .await - .map_err(|err| JsonRpcErrorType::Transport(err.to_string()))?; - let resps = resp_rx.await.map_err(|e| JsonRpcErrorType::Transport(e.to_string()))?; - Ok(resps) - }; - let send_fut = send_fut - .boxed() - .timeout(Duration::from_secs(timeout)) - .compat() - .then(move |res| res.map_err(|err| JsonRpcErrorType::Transport(err.to_string()))?); - Box::new(send_fut) -} - fn address_balance_from_unspent_map(address: &Address, unspent_map: &UnspentMap, decimals: u8) -> BigDecimal { let unspents = match unspent_map.get(address) { Some(unspents) => 
unspents, diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs new file mode 100644 index 0000000000..ce0498cc31 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/client.rs @@ -0,0 +1,1068 @@ +use super::super::{BlockHashOrHeight, EstimateFeeMethod, EstimateFeeMode, SpentOutputInfo, UnspentInfo, UnspentMap, + UtxoJsonRpcClientInfo, UtxoRpcClientOps, UtxoRpcError, UtxoRpcFut}; +use super::connection::{ElectrumConnection, ElectrumConnectionErr, ElectrumConnectionSettings}; +use super::connection_manager::ConnectionManager; +use super::constants::{BLOCKCHAIN_HEADERS_SUB_ID, BLOCKCHAIN_SCRIPTHASH_SUB_ID, ELECTRUM_REQUEST_TIMEOUT, + NO_FORCE_CONNECT_METHODS, SEND_TO_ALL_METHODS}; +use super::electrum_script_hash; +use super::event_handlers::{ElectrumConnectionManagerNotifier, ElectrumScriptHashNotificationBridge}; +use super::rpc_responses::*; + +use crate::utxo::rpc_clients::ConcurrentRequestMap; +use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; +use crate::utxo::{output_script, output_script_p2pk, GetBlockHeaderError, GetConfirmedTxError, GetTxHeightError, + ScripthashNotification}; +use crate::RpcTransportEventHandler; +use crate::SharableRpcTransportEventHandler; +use chain::{BlockHeader, Transaction as UtxoTx, TxHashAlgo}; +use common::executor::abortable_queue::{AbortableQueue, WeakSpawner}; +use common::jsonrpc_client::{JsonRpcBatchClient, JsonRpcClient, JsonRpcError, JsonRpcErrorType, JsonRpcId, + JsonRpcMultiClient, JsonRpcRemoteAddr, JsonRpcRequest, JsonRpcRequestEnum, + JsonRpcResponseEnum, JsonRpcResponseFut, RpcRes}; +use common::log::warn; +use common::{median, OrdRange}; +use keys::hash::H256; +use keys::Address; +use mm2_err_handle::prelude::*; +use mm2_number::BigDecimal; +#[cfg(test)] use mocktopus::macros::*; +use rpc::v1::types::{Bytes as BytesJson, Transaction as RpcTransaction, H256 as H256Json}; +use serialization::{deserialize, serialize, 
serialize_with_flags, CoinVariant, CompactInteger, Reader, + SERIALIZE_TRANSACTION_WITNESS}; +use spv_validation::helpers_validation::SPVError; +use spv_validation::storage::BlockHeaderStorageOps; + +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::HashSet; +use std::convert::TryInto; +use std::fmt::Debug; +use std::iter::FromIterator; +use std::num::NonZeroU64; +use std::ops::Deref; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::sync::Arc; + +use async_trait::async_trait; +use futures::channel::mpsc::UnboundedSender; +use futures::compat::Future01CompatExt; +use futures::future::{join_all, FutureExt, TryFutureExt}; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use futures01::Future; +use itertools::Itertools; +use serde_json::{self as json, Value as Json}; + +type ElectrumTxHistory = Vec; +type ElectrumScriptHash = String; +type ScriptHashUnspents = Vec; + +#[derive(Debug)] +pub struct ElectrumClientSettings { + pub client_name: String, + pub servers: Vec, + pub coin_ticker: String, + pub negotiate_version: bool, + pub spawn_ping: bool, + /// Minimum number of connections to keep alive at all times (best effort). + pub min_connected: usize, + /// Maximum number of connections to keep alive at any time. + pub max_connected: usize, +} + +#[derive(Debug)] +pub struct ElectrumClientImpl { + client_name: String, + coin_ticker: String, + pub connection_manager: ConnectionManager, + next_id: AtomicU64, + negotiate_version: bool, + protocol_version: OrdRange, + get_balance_concurrent_map: ConcurrentRequestMap, + list_unspent_concurrent_map: ConcurrentRequestMap>, + block_headers_storage: BlockHeaderStorage, + /// Event handlers that are triggered on (dis)connection & transport events. They are wrapped + /// in an `Arc` since they are shared outside `ElectrumClientImpl`. They are handed to each active + /// `ElectrumConnection` to notify them about the events. 
+ event_handlers: Arc>>, + pub scripthash_notification_sender: Option>, + abortable_system: AbortableQueue, +} + +#[cfg_attr(test, mockable)] +impl ElectrumClientImpl { + /// Returns a new instance of `ElectrumClientImpl`. + /// + /// This doesn't initialize the connection manager contained within `ElectrumClientImpl`. + /// Use `try_new_arc` to create an Arc-wrapped instance with an initialized connection manager. + fn try_new( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + mut event_handlers: Vec>, + scripthash_notification_sender: Option>, + ) -> Result { + // This is used for balance event streaming implementation for UTXOs. + // Will be used for sending scripthash messages to trigger re-connections, re-fetching the balances, etc. + if let Some(scripthash_notification_sender) = scripthash_notification_sender.clone() { + event_handlers.push(Box::new(ElectrumScriptHashNotificationBridge { + scripthash_notification_sender, + })); + } + + let connection_manager = ConnectionManager::try_new( + client_settings.servers, + client_settings.spawn_ping, + (client_settings.min_connected, client_settings.max_connected), + &abortable_system, + )?; + + event_handlers.push(Box::new(ElectrumConnectionManagerNotifier { + connection_manager: connection_manager.clone(), + })); + + Ok(ElectrumClientImpl { + client_name: client_settings.client_name, + coin_ticker: client_settings.coin_ticker, + connection_manager, + next_id: 0.into(), + negotiate_version: client_settings.negotiate_version, + protocol_version: OrdRange::new(1.2, 1.4).unwrap(), + get_balance_concurrent_map: ConcurrentRequestMap::new(), + list_unspent_concurrent_map: ConcurrentRequestMap::new(), + block_headers_storage, + abortable_system, + scripthash_notification_sender, + event_handlers: Arc::new(event_handlers), + }) + } + + /// Create a new Electrum client instance. 
+ /// This function initializes the connection manager and starts the connection process. + pub fn try_new_arc( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + event_handlers: Vec>, + scripthash_notification_sender: Option>, + ) -> Result, String> { + let client_impl = Arc::new(ElectrumClientImpl::try_new( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )?); + // Initialize the connection manager. + client_impl + .connection_manager + .initialize(Arc::downgrade(&client_impl)) + .map_err(|e| e.to_string())?; + + Ok(client_impl) + } + + /// Remove an Electrum connection and stop corresponding spawned actor. + pub fn remove_server(&self, server_addr: &str) -> Result, String> { + self.connection_manager + .remove_connection(server_addr) + .map_err(|err| err.to_string()) + } + + /// Check if all connections have been removed. + pub fn is_connections_pool_empty(&self) -> bool { self.connection_manager.is_connections_pool_empty() } + + /// Get available protocol versions. + pub fn protocol_version(&self) -> &OrdRange { &self.protocol_version } + + pub fn coin_ticker(&self) -> &str { &self.coin_ticker } + + /// Whether to negotiate the protocol version. + pub fn negotiate_version(&self) -> bool { self.negotiate_version } + + /// Get the event handlers. + pub fn event_handlers(&self) -> Arc>> { self.event_handlers.clone() } + + /// Sends a list of addresses through the scripthash notification sender to subscribe to their scripthash notifications. + pub fn subscribe_addresses(&self, addresses: HashSet
) -> Result<(), String> { + if let Some(sender) = &self.scripthash_notification_sender { + sender + .unbounded_send(ScripthashNotification::SubscribeToAddresses(addresses)) + .map_err(|e| ERRL!("Failed sending scripthash message. {}", e))?; + } + + Ok(()) + } + + /// Get block headers storage. + pub fn block_headers_storage(&self) -> &BlockHeaderStorage { &self.block_headers_storage } + + pub fn weak_spawner(&self) -> WeakSpawner { self.abortable_system.weak_spawner() } + + #[cfg(test)] + pub fn with_protocol_version( + client_settings: ElectrumClientSettings, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + event_handlers: Vec>, + scripthash_notification_sender: Option>, + protocol_version: OrdRange, + ) -> Result, String> { + let client_impl = Arc::new(ElectrumClientImpl { + protocol_version, + ..ElectrumClientImpl::try_new( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )? + }); + // Initialize the connection manager. 
+ client_impl + .connection_manager + .initialize(Arc::downgrade(&client_impl)) + .map_err(|e| e.to_string())?; + + Ok(client_impl) + } +} + +#[derive(Clone, Debug)] +pub struct ElectrumClient(pub Arc); + +impl Deref for ElectrumClient { + type Target = ElectrumClientImpl; + fn deref(&self) -> &ElectrumClientImpl { &self.0 } +} + +impl UtxoJsonRpcClientInfo for ElectrumClient { + fn coin_name(&self) -> &str { self.coin_ticker.as_str() } +} + +impl JsonRpcClient for ElectrumClient { + fn version(&self) -> &'static str { "2.0" } + + fn next_id(&self) -> u64 { self.next_id.fetch_add(1, AtomicOrdering::Relaxed) } + + fn client_info(&self) -> String { UtxoJsonRpcClientInfo::client_info(self) } + + fn transport(&self, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { + Box::new(self.clone().electrum_request_multi(request).boxed().compat()) + } +} + +impl JsonRpcBatchClient for ElectrumClient {} + +impl JsonRpcMultiClient for ElectrumClient { + fn transport_exact(&self, to_addr: String, request: JsonRpcRequestEnum) -> JsonRpcResponseFut { + Box::new( + self.clone() + .electrum_request_to(to_addr.clone(), request) + .map_ok(|response| (JsonRpcRemoteAddr(to_addr), response)) + .boxed() + .compat(), + ) + } +} + +#[cfg_attr(test, mockable)] +impl ElectrumClient { + pub fn try_new( + client_settings: ElectrumClientSettings, + event_handlers: Vec>, + block_headers_storage: BlockHeaderStorage, + abortable_system: AbortableQueue, + scripthash_notification_sender: Option>, + ) -> Result { + let client = ElectrumClient(ElectrumClientImpl::try_new_arc( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + )?); + + Ok(client) + } + + /// Sends a JSONRPC request to all the connected servers. + /// + /// This method will block until a response is received from at least one server. 
+ async fn electrum_request_multi( + self, + request: JsonRpcRequestEnum, + ) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), JsonRpcErrorType> { + // Whether to send the request to all active connections or not. + let send_to_all = matches!(request, JsonRpcRequestEnum::Single(ref req) if SEND_TO_ALL_METHODS.contains(&req.method.as_str())); + // Request id and serialized request. + let req_id = request.rpc_id(); + let request = json::to_string(&request).map_err(|e| JsonRpcErrorType::InvalidRequest(e.to_string()))?; + let request = (req_id, request); + // Use the active connections for this request. + let connections = self.connection_manager.get_active_connections(); + // Maximum number of connections to establish or use in request concurrently. Could be up to connections.len(). + let concurrency = if send_to_all { connections.len() } else { 1 }; + match self + .send_request_using(&request, connections, send_to_all, concurrency) + .await + { + Ok(response) => Ok(response), + // If we failed the request using only the active connections, try again using all connections. + Err(_) if !send_to_all => { + warn!( + "[coin={}] Failed to send the request using active connections, trying all connections.", + self.coin_ticker() + ); + let connections = self.connection_manager.get_all_connections(); + // At this point we should have all the connections disconnected since all + // the active connections failed (and we disconnected them in the process). + // So use a higher concurrency to speed up the response time. + // + // Note that a side effect of this is that we might break the `max_connected` threshold for + // a short time since the connection manager's background task will be trying to establish + // connections at the same time. This is not as bad though since the manager's background task + // tries connections sequentially and we are expected for finish much quicker due to parallelizing. 
+ let concurrency = self.connection_manager.config().max_connected; + match self.send_request_using(&request, connections, false, concurrency).await { + Ok(response) => Ok(response), + Err(err_vec) => Err(JsonRpcErrorType::Internal(format!("All servers errored: {err_vec:?}"))), + } + }, + Err(e) => Err(JsonRpcErrorType::Internal(format!("All servers errored: {e:?}"))), + } + } + + /// Sends a JSONRPC request to a specific electrum server. + /// + /// This will try to wake up the server connection if it's not connected. + async fn electrum_request_to( + self, + to_addr: String, + request: JsonRpcRequestEnum, + ) -> Result { + // Whether to force the connection to be established (if not) before sending the request. + let force_connect = !matches!(request, JsonRpcRequestEnum::Single(ref req) if NO_FORCE_CONNECT_METHODS.contains(&req.method.as_str())); + let json = json::to_string(&request).map_err(|err| JsonRpcErrorType::InvalidRequest(err.to_string()))?; + + let connection = self + .connection_manager + .get_connection_by_address(&to_addr, force_connect) + .await + .map_err(|err| JsonRpcErrorType::Internal(err.to_string()))?; + + let response = connection + .electrum_request(json, request.rpc_id(), ELECTRUM_REQUEST_TIMEOUT) + .await; + // If the request was not forcefully connected, we shouldn't inform the connection manager that it's + // not needed anymore, as we didn't force spawn it in the first place. + // This fixes dropping the connection after the version check request, as we don't mark the connection + // maintained till after the version is checked. + if force_connect { + // Inform the connection manager that the connection was queried and no longer needed now. + self.connection_manager.not_needed(&to_addr); + } + + response + } + + /// Sends a JSONRPC request to all the given connections in parallel and returns + /// the first successful response if there is any, or a vector of errors otherwise. 
+ /// + /// If `send_to_all` is set to `true`, we won't return on first successful response but + /// wait for all responses to come back first. + async fn send_request_using( + &self, + request: &(JsonRpcId, String), + connections: Vec>, + send_to_all: bool, + max_concurrency: usize, + ) -> Result<(JsonRpcRemoteAddr, JsonRpcResponseEnum), Vec<(JsonRpcRemoteAddr, JsonRpcErrorType)>> { + let max_concurrency = max_concurrency.max(1); + // Create the request + let chunked_requests = connections.chunks(max_concurrency).map(|chunk| { + FuturesUnordered::from_iter(chunk.iter().map(|connection| { + let client = self.clone(); + let req_id = request.0; + let req_json = request.1.clone(); + async move { + let connection_is_established = connection + // We first make sure that the connection loop is established before sending the request. + .establish_connection_loop(client) + .await + .map_err(|e| JsonRpcErrorType::Transport(format!("Failed to establish connection: {e:?}"))); + let response = match connection_is_established { + Ok(_) => { + // Perform the request. + connection + .electrum_request(req_json, req_id, ELECTRUM_REQUEST_TIMEOUT) + .await + }, + Err(e) => Err(e), + }; + (response, connection.clone()) + } + })) + }); + let client = self.clone(); + let mut final_response = None; + let mut errors = Vec::new(); + // Iterate over the request chunks sequentially. + for mut requests in chunked_requests { + // For each chunk, iterate over the requests in parallel. 
+ while let Some((response, connection)) = requests.next().await { + let address = JsonRpcRemoteAddr(connection.address().to_string()); + match response { + Ok(response) => { + if final_response.is_none() { + final_response = Some((address, response)); + } + client.connection_manager.not_needed(connection.address()); + if !send_to_all && final_response.is_some() { + return Ok(final_response.unwrap()); + } + }, + Err(e) => { + warn!( + "[coin={}], Error while sending request to {address:?}: {e:?}", + client.coin_ticker() + ); + connection.disconnect(Some(ElectrumConnectionErr::Temporary(format!( + "Forcefully disconnected for erroring: {e:?}." + )))); + client.event_handlers.on_disconnected(connection.address()).ok(); + errors.push((address, e)) + }, + } + } + } + final_response.ok_or(errors) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-ping + pub fn server_ping(&self) -> RpcRes<()> { rpc_func!(self, "server.ping") } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-version + pub fn server_version(&self, server_address: &str, version: &OrdRange) -> RpcRes { + let protocol_version: Vec = version.flatten().into_iter().map(|v| format!("{}", v)).collect(); + rpc_func_from!( + self, + server_address, + "server.version", + &self.client_name, + protocol_version + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe + pub fn get_block_count_from(&self, server_address: &str) -> RpcRes { + Box::new( + rpc_func_from!(self, server_address, BLOCKCHAIN_HEADERS_SUB_ID) + .map(|r: ElectrumBlockHeader| r.block_height()), + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers + pub fn get_block_headers_from( + &self, + server_address: &str, + start_height: u64, + count: NonZeroU64, + ) -> RpcRes { + rpc_func_from!(self, server_address, "blockchain.block.headers", start_height, count) + } + + /// 
https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent + /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 + /// We should remove them to build valid transactions + pub fn scripthash_list_unspent(&self, hash: &str) -> RpcRes> { + let request_fut = Box::new(rpc_func!(self, "blockchain.scripthash.listunspent", hash).and_then( + move |unspents: Vec| { + let mut map: HashMap<(H256Json, u32), bool> = HashMap::new(); + let unspents = unspents + .into_iter() + .filter(|unspent| match map.entry((unspent.tx_hash, unspent.tx_pos)) { + Entry::Occupied(_) => false, + Entry::Vacant(e) => { + e.insert(true); + true + }, + }) + .collect(); + Ok(unspents) + }, + )); + let arc = self.clone(); + let hash = hash.to_owned(); + let fut = async move { arc.list_unspent_concurrent_map.wrap_request(hash, request_fut).await }; + Box::new(fut.boxed().compat()) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-listunspent + /// It can return duplicates sometimes: https://github.com/artemii235/SuperNET/issues/269 + /// We should remove them to build valid transactions. + /// Please note the function returns `ScriptHashUnspents` elements in the same order in which they were requested. 
+ pub fn scripthash_list_unspent_batch(&self, hashes: Vec) -> RpcRes> { + let requests = hashes + .iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.listunspent", hash)); + Box::new(self.batch_rpc(requests).map(move |unspents: Vec| { + unspents + .into_iter() + .map(|hash_unspents| { + hash_unspents + .into_iter() + .unique_by(|unspent| (unspent.tx_hash, unspent.tx_pos)) + .collect::>() + }) + .collect() + })) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history + pub fn scripthash_get_history(&self, hash: &str) -> RpcRes { + rpc_func!(self, "blockchain.scripthash.get_history", hash) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-get-history + /// Requests history of the `hashes` in a batch and returns them in the same order they were requested. + pub fn scripthash_get_history_batch(&self, hashes: I) -> RpcRes> + where + I: IntoIterator, + { + let requests = hashes + .into_iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.get_history", hash)); + self.batch_rpc(requests) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory + pub fn scripthash_get_balance(&self, hash: &str) -> RpcRes { + let arc = self.clone(); + let hash = hash.to_owned(); + let fut = async move { + let request = rpc_func!(arc, "blockchain.scripthash.get_balance", &hash); + arc.get_balance_concurrent_map.wrap_request(hash, request).await + }; + Box::new(fut.boxed().compat()) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-scripthash-gethistory + /// Requests balances in a batch and returns them in the same order they were requested. 
+ pub fn scripthash_get_balances(&self, hashes: I) -> RpcRes> + where + I: IntoIterator, + { + let requests = hashes + .into_iter() + .map(|hash| rpc_req!(self, "blockchain.scripthash.get_balance", &hash)); + self.batch_rpc(requests) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-headers-subscribe + pub fn blockchain_headers_subscribe(&self) -> RpcRes { + rpc_func!(self, BLOCKCHAIN_HEADERS_SUB_ID) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-broadcast + pub fn blockchain_transaction_broadcast(&self, tx: BytesJson) -> RpcRes { + rpc_func!(self, "blockchain.transaction.broadcast", tx) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-estimatefee + /// It is recommended to set n_blocks as low as possible. + /// However, in some cases, n_blocks = 1 leads to an unreasonably high fee estimation. + /// https://github.com/KomodoPlatform/atomicDEX-API/issues/656#issuecomment-743759659 + pub fn estimate_fee(&self, mode: &Option, n_blocks: u32) -> UtxoRpcFut { + match mode { + Some(m) => { + Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks, m).map_to_mm_fut(UtxoRpcError::from)) + }, + None => Box::new(rpc_func!(self, "blockchain.estimatefee", n_blocks).map_to_mm_fut(UtxoRpcError::from)), + } + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-header + pub fn blockchain_block_header(&self, height: u64) -> RpcRes { + rpc_func!(self, "blockchain.block.header", height) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-block-headers + pub fn blockchain_block_headers(&self, start_height: u64, count: NonZeroU64) -> RpcRes { + rpc_func!(self, "blockchain.block.headers", start_height, count) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get-merkle + pub fn blockchain_transaction_get_merkle(&self, txid: 
H256Json, height: u64) -> RpcRes { + rpc_func!(self, "blockchain.transaction.get_merkle", txid, height) + } + + // get_tx_height_from_rpc is costly since it loops through history after requesting the whole history of the script pubkey + // This method should always be used if the block headers are saved to the DB + async fn get_tx_height_from_storage(&self, tx: &UtxoTx) -> Result> { + let tx_hash = tx.hash().reversed(); + let blockhash = self.get_verbose_transaction(&tx_hash.into()).compat().await?.blockhash; + Ok(self + .block_headers_storage() + .get_block_height_by_hash(blockhash.into()) + .await? + .ok_or_else(|| { + GetTxHeightError::HeightNotFound(format!( + "Transaction block header is not found in storage for {}", + self.coin_ticker() + )) + })? + .try_into()?) + } + + // get_tx_height_from_storage is always preferred to be used instead of this, but if there is no headers in storage (storing headers is not enabled) + // this function can be used instead + async fn get_tx_height_from_rpc(&self, tx: &UtxoTx) -> Result { + let selfi = self; + for output in tx.outputs.clone() { + let script_pubkey_str = hex::encode(electrum_script_hash(&output.script_pubkey)); + if let Ok(history) = selfi.scripthash_get_history(script_pubkey_str.as_str()).compat().await { + if let Some(item) = history + .into_iter() + .find(|item| item.tx_hash.reversed() == H256Json(*tx.hash()) && item.height > 0) + { + return Ok(item.height as u64); + } + } + } + Err(GetTxHeightError::HeightNotFound(format!( + "Couldn't find height through electrum for {}", + selfi.coin_ticker + ))) + } + + async fn block_header_from_storage(&self, height: u64) -> Result> { + self.block_headers_storage() + .get_block_header(height) + .await? 
+ .ok_or_else(|| { + GetBlockHeaderError::Internal(format!("Header not found in storage for {}", self.coin_ticker)).into() + }) + } + + async fn block_header_from_storage_or_rpc(&self, height: u64) -> Result> { + match self.block_header_from_storage(height).await { + Ok(h) => Ok(h), + Err(_) => Ok(deserialize( + self.blockchain_block_header(height).compat().await?.as_slice(), + )?), + } + } + + pub async fn get_confirmed_tx_info_from_rpc( + &self, + tx: &UtxoTx, + ) -> Result { + let height = self.get_tx_height_from_rpc(tx).await?; + + let merkle_branch = self + .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) + .compat() + .await?; + + let header = deserialize(self.blockchain_block_header(height).compat().await?.as_slice())?; + + Ok(ConfirmedTransactionInfo { + tx: tx.clone(), + header, + index: merkle_branch.pos as u64, + height, + }) + } + + pub async fn get_merkle_and_validated_header( + &self, + tx: &UtxoTx, + ) -> Result<(TxMerkleBranch, BlockHeader, u64), MmError> { + let height = self.get_tx_height_from_storage(tx).await?; + + let merkle_branch = self + .blockchain_transaction_get_merkle(tx.hash().reversed().into(), height) + .compat() + .await + .map_to_mm(|err| SPVError::UnableToGetMerkle { + coin: self.coin_ticker.clone(), + err: err.to_string(), + })?; + + let header = self.block_header_from_storage(height).await?; + + Ok((merkle_branch, header, height)) + } + + pub fn retrieve_headers_from( + &self, + server_address: &str, + from_height: u64, + to_height: u64, + ) -> UtxoRpcFut<(HashMap, Vec)> { + let coin_name = self.coin_ticker.clone(); + if from_height == 0 || to_height < from_height { + return Box::new(futures01::future::err( + UtxoRpcError::Internal("Invalid values for from/to parameters".to_string()).into(), + )); + } + let count: NonZeroU64 = match (to_height - from_height + 1).try_into() { + Ok(c) => c, + Err(e) => return Box::new(futures01::future::err(UtxoRpcError::Internal(e.to_string()).into())), + }; + Box::new( + 
self.get_block_headers_from(server_address, from_height, count) + .map_to_mm_fut(UtxoRpcError::from) + .and_then(move |headers| { + let (block_registry, block_headers) = { + if headers.count == 0 { + return MmError::err(UtxoRpcError::Internal("No headers available".to_string())); + } + let len = CompactInteger::from(headers.count); + let mut serialized = serialize(&len).take(); + serialized.extend(headers.hex.0.into_iter()); + drop_mutability!(serialized); + let mut reader = + Reader::new_with_coin_variant(serialized.as_slice(), coin_name.as_str().into()); + let maybe_block_headers = reader.read_list::(); + let block_headers = match maybe_block_headers { + Ok(headers) => headers, + Err(e) => return MmError::err(UtxoRpcError::InvalidResponse(format!("{:?}", e))), + }; + let mut block_registry: HashMap = HashMap::new(); + let mut starting_height = from_height; + for block_header in &block_headers { + block_registry.insert(starting_height, block_header.clone()); + starting_height += 1; + } + (block_registry, block_headers) + }; + Ok((block_registry, block_headers)) + }), + ) + } + + pub(crate) fn get_servers_with_latest_block_count(&self) -> UtxoRpcFut<(Vec, u64)> { + let selfi = self.clone(); + let fut = async move { + let addresses = selfi.connection_manager.get_all_server_addresses(); + let futures = addresses + .into_iter() + .map(|address| { + selfi + .get_block_count_from(&address) + .map(|response| (address, response)) + .compat() + }) + .collect::>(); + + let responses = join_all(futures).await; + + // First, we use filter_map to get rid of any errors and collect the + // server addresses and block counts into two vectors + let (responding_servers, block_counts_from_all_servers): (Vec<_>, Vec<_>) = + responses.clone().into_iter().filter_map(|res| res.ok()).unzip(); + + // Next, we use max to find the maximum block count from all servers + if let Some(max_block_count) = block_counts_from_all_servers.clone().iter().max() { + // Then, we use filter and collect to 
get the servers that have the maximum block count + let servers_with_max_count: Vec<_> = responding_servers + .into_iter() + .zip(block_counts_from_all_servers) + .filter(|(_, count)| count == max_block_count) + .map(|(addr, _)| addr) + .collect(); + + // Finally, we return a tuple of servers with max count and the max count + return Ok((servers_with_max_count, *max_block_count)); + } + + Err(MmError::new(UtxoRpcError::Internal(format!( + "Couldn't get block count from any server for {}, responses: {:?}", + &selfi.coin_ticker, responses + )))) + }; + + Box::new(fut.boxed().compat()) + } +} + +// if mockable is placed before async_trait there is `munmap_chunk(): invalid pointer` error on async fn mocking attempt +#[async_trait] +#[cfg_attr(test, mockable)] +impl UtxoRpcClientOps for ElectrumClient { + fn list_unspent(&self, address: &Address, _decimals: u8) -> UtxoRpcFut> { + let mut output_scripts = vec![try_f!(output_script(address))]; + + // If the plain pubkey is available, fetch the UTXOs found in P2PK outputs as well (if any). + if let Some(pubkey) = address.pubkey() { + let p2pk_output_script = output_script_p2pk(pubkey); + output_scripts.push(p2pk_output_script); + } + + let this = self.clone(); + let fut = async move { + let hashes = output_scripts + .iter() + .map(|s| hex::encode(electrum_script_hash(s))) + .collect(); + let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; + + let unspents = unspents + .into_iter() + .zip(output_scripts) + .flat_map(|(unspents, output_script)| { + unspents + .into_iter() + .map(move |unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) + }) + .collect(); + Ok(unspents) + }; + + Box::new(fut.boxed().compat()) + } + + fn list_unspent_group(&self, addresses: Vec
, _decimals: u8) -> UtxoRpcFut { + let output_scripts = try_f!(addresses + .iter() + .map(output_script) + .collect::, keys::Error>>()); + + let this = self.clone(); + let fut = async move { + let hashes = output_scripts + .iter() + .map(|s| hex::encode(electrum_script_hash(s))) + .collect(); + let unspents = this.scripthash_list_unspent_batch(hashes).compat().await?; + + let unspents: Vec> = unspents + .into_iter() + .zip(output_scripts) + .map(|(unspents, output_script)| { + unspents + .into_iter() + .map(|unspent| UnspentInfo::from_electrum(unspent, output_script.clone())) + .collect() + }) + .collect(); + + let unspent_map = addresses + .into_iter() + // `scripthash_list_unspent_batch` returns `ScriptHashUnspents` elements in the same order in which they were requested. + // So we can zip `addresses` and `unspents` into one iterator. + .zip(unspents) + .collect(); + Ok(unspent_map) + }; + Box::new(fut.boxed().compat()) + } + + fn send_transaction(&self, tx: &UtxoTx) -> UtxoRpcFut { + let bytes = if tx.has_witness() { + BytesJson::from(serialize_with_flags(tx, SERIALIZE_TRANSACTION_WITNESS)) + } else { + BytesJson::from(serialize(tx)) + }; + Box::new( + self.blockchain_transaction_broadcast(bytes) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn send_raw_transaction(&self, tx: BytesJson) -> UtxoRpcFut { + Box::new( + self.blockchain_transaction_broadcast(tx) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn blockchain_scripthash_subscribe_using(&self, server_address: &str, scripthash: String) -> UtxoRpcFut { + Box::new( + rpc_func_from!(self, server_address, BLOCKCHAIN_SCRIPTHASH_SUB_ID, scripthash) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// returns transaction bytes by default + fn get_transaction_bytes(&self, txid: &H256Json) -> UtxoRpcFut { + let verbose = false; + Box::new(rpc_func!(self, "blockchain.transaction.get", txid, 
verbose).map_to_mm_fut(UtxoRpcError::from)) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// returns verbose transaction by default + fn get_verbose_transaction(&self, txid: &H256Json) -> UtxoRpcFut { + let verbose = true; + Box::new(rpc_func!(self, "blockchain.transaction.get", txid, verbose).map_to_mm_fut(UtxoRpcError::from)) + } + + /// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#blockchain-transaction-get + /// Returns verbose transactions in a batch. + fn get_verbose_transactions(&self, tx_ids: &[H256Json]) -> UtxoRpcFut> { + let verbose = true; + let requests = tx_ids + .iter() + .map(|txid| rpc_req!(self, "blockchain.transaction.get", txid, verbose)); + Box::new(self.batch_rpc(requests).map_to_mm_fut(UtxoRpcError::from)) + } + + fn get_block_count(&self) -> UtxoRpcFut { + Box::new( + self.blockchain_headers_subscribe() + .map(|r| r.block_height()) + .map_to_mm_fut(UtxoRpcError::from), + ) + } + + fn display_balance(&self, address: Address, decimals: u8) -> RpcRes { + let output_script = try_f!(output_script(&address).map_err(|err| JsonRpcError::new( + UtxoJsonRpcClientInfo::client_info(self), + rpc_req!(self, "blockchain.scripthash.get_balance").into(), + JsonRpcErrorType::Internal(err.to_string()) + ))); + let mut hashes = vec![hex::encode(electrum_script_hash(&output_script))]; + + // If the plain pubkey is available, fetch the balance found in P2PK output as well (if any). + if let Some(pubkey) = address.pubkey() { + let p2pk_output_script = output_script_p2pk(pubkey); + hashes.push(hex::encode(electrum_script_hash(&p2pk_output_script))); + } + + let this = self.clone(); + let fut = async move { + Ok(this + .scripthash_get_balances(hashes) + .compat() + .await? + .into_iter() + .fold(BigDecimal::from(0), |sum, electrum_balance| { + sum + electrum_balance.to_big_decimal(decimals) + })) + }; + Box::new(fut.boxed().compat()) + } + + fn display_balances(&self, addresses: Vec
, decimals: u8) -> UtxoRpcFut> { + let this = self.clone(); + let fut = async move { + let hashes = addresses + .iter() + .map(|address| { + let output_script = output_script(address)?; + let hash = electrum_script_hash(&output_script); + + Ok(hex::encode(hash)) + }) + .collect::, keys::Error>>()?; + + let electrum_balances = this.scripthash_get_balances(hashes).compat().await?; + let balances = electrum_balances + .into_iter() + // `scripthash_get_balances` returns `ElectrumBalance` elements in the same order in which they were requested. + // So we can zip `addresses` and the balances into one iterator. + .zip(addresses) + .map(|(electrum_balance, address)| (address, electrum_balance.to_big_decimal(decimals))) + .collect(); + Ok(balances) + }; + + Box::new(fut.boxed().compat()) + } + + fn estimate_fee_sat( + &self, + decimals: u8, + _fee_method: &EstimateFeeMethod, + mode: &Option, + n_blocks: u32, + ) -> UtxoRpcFut { + Box::new(self.estimate_fee(mode, n_blocks).map(move |fee| { + if fee > 0.00001 { + (fee * 10.0_f64.powf(decimals as f64)) as u64 + } else { + 1000 + } + })) + } + + fn get_relay_fee(&self) -> RpcRes { rpc_func!(self, "blockchain.relayfee") } + + fn find_output_spend( + &self, + tx_hash: H256, + script_pubkey: &[u8], + vout: usize, + _from_block: BlockHashOrHeight, + tx_hash_algo: TxHashAlgo, + ) -> Box, Error = String> + Send> { + let selfi = self.clone(); + let script_hash = hex::encode(electrum_script_hash(script_pubkey)); + let fut = async move { + let history = try_s!(selfi.scripthash_get_history(&script_hash).compat().await); + + if history.len() < 2 { + return Ok(None); + } + + for item in history.iter() { + let transaction = try_s!(selfi.get_transaction_bytes(&item.tx_hash).compat().await); + + let mut maybe_spend_tx: UtxoTx = + try_s!(deserialize(transaction.as_slice()).map_err(|e| ERRL!("{:?}", e))); + maybe_spend_tx.tx_hash_algo = tx_hash_algo; + drop_mutability!(maybe_spend_tx); + + for (index, input) in 
maybe_spend_tx.inputs.iter().enumerate() { + if input.previous_output.hash == tx_hash && input.previous_output.index == vout as u32 { + return Ok(Some(SpentOutputInfo { + input: input.clone(), + input_index: index, + spending_tx: maybe_spend_tx, + spent_in_block: BlockHashOrHeight::Height(item.height), + })); + } + } + } + Ok(None) + }; + Box::new(fut.boxed().compat()) + } + + fn get_median_time_past( + &self, + starting_block: u64, + count: NonZeroU64, + coin_variant: CoinVariant, + ) -> UtxoRpcFut { + let from = if starting_block <= count.get() { + 0 + } else { + starting_block - count.get() + 1 + }; + Box::new( + self.blockchain_block_headers(from, count) + .map_to_mm_fut(UtxoRpcError::from) + .and_then(|res| { + if res.count == 0 { + return MmError::err(UtxoRpcError::InvalidResponse("Server returned zero count".to_owned())); + } + let len = CompactInteger::from(res.count); + let mut serialized = serialize(&len).take(); + serialized.extend(res.hex.0.into_iter()); + let mut reader = Reader::new_with_coin_variant(serialized.as_slice(), coin_variant); + let headers = reader.read_list::()?; + let mut timestamps: Vec<_> = headers.into_iter().map(|block| block.time).collect(); + // can unwrap because count is non zero + Ok(median(timestamps.as_mut_slice()).unwrap()) + }), + ) + } + + async fn get_block_timestamp(&self, height: u64) -> Result> { + Ok(self.block_header_from_storage_or_rpc(height).await?.time as u64) + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs new file mode 100644 index 0000000000..2b9a3ada48 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection.rs @@ -0,0 +1,730 @@ +use super::client::ElectrumClient; +use super::constants::{BLOCKCHAIN_HEADERS_SUB_ID, BLOCKCHAIN_SCRIPTHASH_SUB_ID, CUTOFF_TIMEOUT, + DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT}; + +use crate::{RpcTransportEventHandler, SharableRpcTransportEventHandler}; +use 
common::custom_futures::timeout::FutureTimerExt; +use common::executor::{abortable_queue::AbortableQueue, abortable_queue::WeakSpawner, AbortableSystem, SpawnFuture, + Timer}; +use common::expirable_map::ExpirableMap; +use common::jsonrpc_client::{JsonRpcBatchResponse, JsonRpcErrorType, JsonRpcId, JsonRpcRequest, JsonRpcResponse, + JsonRpcResponseEnum}; +use common::log::{error, info}; +use common::{now_float, now_ms}; +use mm2_rpc::data::legacy::ElectrumProtocol; + +use std::io; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use futures::channel::oneshot as async_oneshot; +use futures::compat::{Future01CompatExt, Stream01CompatExt}; +use futures::future::FutureExt; +use futures::lock::Mutex as AsyncMutex; +use futures::select; +use futures::stream::StreamExt; +use futures01::sync::mpsc; +use futures01::{Sink, Stream}; +use http::Uri; +use instant::Instant; +use serde::Serialize; + +cfg_native! { + use super::tcp_stream::*; + + use std::convert::TryFrom; + use std::net::ToSocketAddrs; + use futures::future::{Either, TryFutureExt}; + use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader, WriteHalf, ReadHalf}; + use tokio::net::TcpStream; + use tokio_rustls::{TlsConnector}; + use rustls::{ServerName}; +} + +cfg_wasm32! { + use mm2_net::wasm::wasm_ws::{ws_transport,WsOutgoingSender,WsIncomingReceiver}; + + use std::sync::atomic::AtomicUsize; +} + +pub type JsonRpcPendingRequests = ExpirableMap>; + +macro_rules! disconnect_and_return { + ($typ:tt, $err:expr, $conn:expr, $handlers:expr) => {{ + let err = ElectrumConnectionErr::$typ(format!("{:?}", $err)); + disconnect_and_return!(err, $conn, $handlers); + }}; + ($err:expr, $conn:expr, $handlers:expr) => {{ + // Inform the event handlers of the disconnection. + $handlers.on_disconnected(&$conn.address()).ok(); + // Disconnect the connection. + $conn.disconnect(Some($err.clone())); + return Err($err); + }}; +} + +macro_rules! 
disconnect_and_return_if_err { + ($ex:expr, $typ:tt, $conn:expr, $handlers:expr) => {{ + match $ex { + Ok(res) => res, + Err(e) => { + disconnect_and_return!($typ, e, $conn, $handlers); + }, + } + }}; + ($ex:expr, $conn:expr, $handlers:expr) => {{ + match $ex { + Ok(res) => res, + Err(e) => { + disconnect_and_return!(e, $conn, $handlers); + }, + } + }}; +} + +macro_rules! wrap_timeout { + ($call:expr, $timeout:expr, $conn:expr, $handlers:expr) => {{ + let now = Instant::now(); + let res = match $call.timeout_secs($timeout).await { + Ok(res) => res, + Err(_) => { + disconnect_and_return!( + ElectrumConnectionErr::Timeout(stringify!($call), $timeout), + $conn, + $handlers + ); + }, + }; + // Remaining timeout after executing `$call`. + let timeout = ($timeout - now.elapsed().as_secs_f64()).max(0.0); + (timeout, res) + }}; +} + +/// Helper function casting mpsc::Receiver as Stream. +fn rx_to_stream(rx: mpsc::Receiver>) -> impl Stream, Error = io::Error> { + rx.map_err(|_| panic!("errors not possible on rx")) +} + +#[cfg(not(target_arch = "wasm32"))] +/// Helper function to parse a a string DNS name into a ServerName. +fn server_name_from_domain(dns_name: &str) -> Result { + match ServerName::try_from(dns_name) { + // The `ServerName` must be `DnsName` variant, SSL works with domain names and not IPs. + Ok(dns_name) if matches!(dns_name, ServerName::DnsName(_)) => Ok(dns_name), + _ => ERR!("Couldn't parse DNS name from '{}'", dns_name), + } +} + +/// Electrum request RPC representation +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ElectrumConnectionSettings { + pub url: String, + #[serde(default)] + pub protocol: ElectrumProtocol, + #[serde(default)] + pub disable_cert_verification: bool, + pub timeout_sec: Option, +} + +/// Possible connection errors when connection to an Electrum server. +#[derive(Clone, Debug)] +pub enum ElectrumConnectionErr { + /// Couldn't connect to the server within the provided timeout. 
+ /// The first argument is the call (stringified) that timed out. + /// The second argument is the time limit it had to finish within, in seconds. + Timeout(&'static str, f64), + /// A temporary error that might be resolved later on. + Temporary(String), + /// An error that can't be resolved by retrying. + Irrecoverable(String), + /// The server's version doesn't match the client's version. + VersionMismatch(String), +} + +impl ElectrumConnectionErr { + pub fn is_recoverable(&self) -> bool { + match self { + ElectrumConnectionErr::Irrecoverable(_) | ElectrumConnectionErr::VersionMismatch(_) => false, + ElectrumConnectionErr::Timeout(_, _) | ElectrumConnectionErr::Temporary(_) => true, + } + } +} + +/// Represents the active Electrum connection to selected address +#[derive(Debug)] +pub struct ElectrumConnection { + /// The client connected to this SocketAddr + settings: ElectrumConnectionSettings, + /// The Sender forwarding requests to writing part of underlying stream + tx: Mutex>>>, + /// A lock to prevent multiple connection establishments happening concurrently. + establishing_connection: AsyncMutex<()>, + /// Responses are stored here + responses: Mutex, + /// Selected protocol version. The value is initialized after the server.version RPC call. + protocol_version: Mutex>, + /// Why was the connection disconnected the last time? + last_error: Mutex>, + /// An abortable system for connection specific tasks to run on. 
+ abortable_system: AbortableQueue, +} + +impl ElectrumConnection { + pub fn new(settings: ElectrumConnectionSettings, abortable_system: AbortableQueue) -> Self { + ElectrumConnection { + settings, + tx: Mutex::new(None), + establishing_connection: AsyncMutex::new(()), + responses: Mutex::new(JsonRpcPendingRequests::new()), + protocol_version: Mutex::new(None), + last_error: Mutex::new(None), + abortable_system, + } + } + + pub fn address(&self) -> &str { &self.settings.url } + + fn weak_spawner(&self) -> WeakSpawner { self.abortable_system.weak_spawner() } + + fn is_connected(&self) -> bool { self.tx.lock().unwrap().is_some() } + + fn set_protocol_version(&self, version: f32) { + let mut protocol_version = self.protocol_version.lock().unwrap(); + if protocol_version.is_none() { + *protocol_version = Some(version); + } + } + + fn clear_protocol_version(&self) { self.protocol_version.lock().unwrap().take(); } + + fn set_last_error(&self, reason: ElectrumConnectionErr) { + let mut last_error = self.last_error.lock().unwrap(); + if last_error.is_none() { + *last_error = Some(reason); + } + } + + fn clear_last_error(&self) { self.last_error.lock().unwrap().take(); } + + fn last_error(&self) -> Option { self.last_error.lock().unwrap().clone() } + + /// Connects to the electrum server by setting the `tx` sender channel. + /// + /// # Safety: + /// For this to be atomic, the caller must have acquired the lock to `establishing_connection`. + fn connect(&self, tx: mpsc::Sender>) { + self.tx.lock().unwrap().replace(tx); + self.clear_last_error(); + } + + /// Disconnect and clear the connection state. + pub fn disconnect(&self, reason: Option) { + self.tx.lock().unwrap().take(); + self.responses.lock().unwrap().clear(); + self.clear_protocol_version(); + if let Some(reason) = reason { + self.set_last_error(reason); + } + self.abortable_system.abort_all_and_reset().ok(); + } + + /// Sends a request to the electrum server and waits for the response. 
+ /// + /// ## Important: This should always return [`JsonRpcErrorType::Transport`] error. + pub async fn electrum_request( + &self, + mut req_json: String, + rpc_id: JsonRpcId, + timeout: f64, + ) -> Result { + #[cfg(not(target_arch = "wasm"))] + { + // Electrum request and responses must end with \n + // https://electrumx.readthedocs.io/en/latest/protocol-basics.html#message-stream + req_json.push('\n'); + } + + // Create a oneshot channel to receive the response in. + let (req_tx, res_rx) = async_oneshot::channel(); + self.responses + .lock() + .unwrap() + .insert(rpc_id, req_tx, Duration::from_secs_f64(timeout)); + let tx = self + .tx + .lock() + .unwrap() + // Clone to not to hold the lock while sending the request. + .clone() + .ok_or_else(|| JsonRpcErrorType::Transport("Connection is not established".to_string()))?; + + // Send the request to the electrum server. + tx.send(req_json.into_bytes()) + .compat() + .await + .map_err(|e| JsonRpcErrorType::Transport(e.to_string()))?; + + // Wait for the response to be processed and sent back to us. + res_rx + .timeout_secs(timeout) + .await + .map_err(|e| JsonRpcErrorType::Transport(e.to_string()))? + .map_err(|_e| JsonRpcErrorType::Transport("The sender didn't send".to_string())) + } + + /// Process an incoming JSONRPC response from the electrum server. + fn process_electrum_response(&self, bytes: &[u8], event_handlers: &Vec>) { + // Inform the event handlers. + event_handlers.on_incoming_response(bytes); + + // detect if we got standard JSONRPC response or subscription response as JSONRPC request + #[derive(Deserialize)] + #[serde(untagged)] + enum ElectrumRpcResponseEnum { + /// The subscription response as JSONRPC request. + /// + /// NOTE Because JsonRpcResponse uses default values for each of its field, + /// this variant has to stay at top in this enumeration to be properly deserialized + /// from serde. + SubscriptionNotification(JsonRpcRequest), + /// The standard JSONRPC single response. 
+ SingleResponse(JsonRpcResponse), + /// The batch of standard JSONRPC responses. + BatchResponses(JsonRpcBatchResponse), + } + + let response: ElectrumRpcResponseEnum = match serde_json::from_slice(bytes) { + Ok(res) => res, + Err(e) => { + error!("{}", e); + return; + }, + }; + + let response = match response { + ElectrumRpcResponseEnum::SingleResponse(single) => JsonRpcResponseEnum::Single(single), + ElectrumRpcResponseEnum::BatchResponses(batch) => JsonRpcResponseEnum::Batch(batch), + ElectrumRpcResponseEnum::SubscriptionNotification(req) => { + match req.method.as_str() { + // NOTE: Sending a script hash notification is handled in it's own event handler. + BLOCKCHAIN_SCRIPTHASH_SUB_ID | BLOCKCHAIN_HEADERS_SUB_ID => {}, + _ => { + error!("Unexpected notification method: {}", req.method); + }, + } + return; + }, + }; + + // the corresponding sender may not exist, receiver may be dropped + // these situations are not considered as errors so we just silently skip them + let pending = self.responses.lock().unwrap().remove(&response.rpc_id()); + if let Some(tx) = pending { + tx.send(response).ok(); + } + } + + /// Process a bulk response from the electrum server. + /// + /// A bulk response is a response that contains multiple JSONRPC responses. + fn process_electrum_bulk_response( + &self, + bulk_response: &[u8], + event_handlers: &Vec>, + ) { + // We should split the received response because we can get several responses in bulk. + let responses = bulk_response.split(|item| *item == b'\n'); + + for response in responses { + // `split` returns empty slice if it ends with separator which is our case. + if !response.is_empty() { + self.process_electrum_response(response, event_handlers) + } + } + } +} + +// Connection loop establishment methods. +impl ElectrumConnection { + /// Tries to establish a connection to the server. + /// + /// Returns the tokio stream with the server and the remaining timeout + /// left from the input timeout. 
+ #[cfg(not(target_arch = "wasm32"))] + async fn establish_connection(connection: &ElectrumConnection) -> Result { + let address = connection.address(); + + let socket_addr = match address.to_socket_addrs() { + Err(e) if matches!(e.kind(), std::io::ErrorKind::InvalidInput) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!( + "Invalid address format: {e:?}" + ))); + }, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Resolve error in address: {e:?}" + ))); + }, + Ok(mut addr) => match addr.next() { + None => { + return Err(ElectrumConnectionErr::Temporary("Address resolved to None".to_string())); + }, + Some(addr) => addr, + }, + }; + + let connect_f = match connection.settings.protocol { + ElectrumProtocol::TCP => Either::Left(TcpStream::connect(&socket_addr).map_ok(ElectrumStream::Tcp)), + ElectrumProtocol::SSL => { + let uri: Uri = match address.parse() { + Ok(uri) => uri, + Err(e) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!("URL parse error: {e:?}"))); + }, + }; + + let Some(dns_name) = uri.host().map(String::from) else { + return Err(ElectrumConnectionErr::Irrecoverable("Couldn't retrieve host from address".to_string())); + }; + + let Ok(dns) = server_name_from_domain(dns_name.as_str()) else { + return Err(ElectrumConnectionErr::Irrecoverable("Address isn't a valid domain name".to_string())); + }; + + let tls_connector = if connection.settings.disable_cert_verification { + TlsConnector::from(UNSAFE_TLS_CONFIG.clone()) + } else { + TlsConnector::from(SAFE_TLS_CONFIG.clone()) + }; + + Either::Right( + TcpStream::connect(&socket_addr) + .and_then(move |stream| tls_connector.connect(dns, stream).map_ok(ElectrumStream::Tls)), + ) + }, + ElectrumProtocol::WS | ElectrumProtocol::WSS => { + return Err(ElectrumConnectionErr::Irrecoverable( + "Incorrect protocol for native connection ('WS'/'WSS'). Use 'TCP' or 'SSL' instead.".to_string(), + )); + }, + }; + + // Try to connect to the server. 
+ let stream = match connect_f.await { + Ok(stream) => stream, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Couldn't connect to the electrum server: {e:?}" + ))) + }, + }; + if let Err(e) = stream.as_ref().set_nodelay(true) { + return Err(ElectrumConnectionErr::Temporary(format!( + "Setting TCP_NODELAY failed: {e:?}" + ))); + }; + + Ok(stream) + } + + #[cfg(target_arch = "wasm32")] + async fn establish_connection( + connection: &ElectrumConnection, + ) -> Result<(WsIncomingReceiver, WsOutgoingSender), ElectrumConnectionErr> { + lazy_static! { + static ref CONN_IDX: Arc = Arc::new(AtomicUsize::new(0)); + } + + let address = connection.address(); + let uri: Uri = match address.parse() { + Ok(uri) => uri, + Err(e) => { + return Err(ElectrumConnectionErr::Irrecoverable(format!( + "Failed to parse the address: {e:?}" + ))); + }, + }; + if uri.scheme().is_some() { + return Err(ElectrumConnectionErr::Irrecoverable( + "There has not to be a scheme in the url. 'ws://' scheme is used by default. Consider using 'protocol: \"WSS\"' in the electrum request to switch to the 'wss://' scheme.".to_string(), + ) + ); + } + + let protocol_prefixed_address = match connection.settings.protocol { + ElectrumProtocol::WS => { + format!("ws://{address}") + }, + ElectrumProtocol::WSS => { + format!("wss://{address}") + }, + ElectrumProtocol::TCP | ElectrumProtocol::SSL => { + return Err(ElectrumConnectionErr::Irrecoverable( + "'TCP' and 'SSL' are not supported in a browser. Please use 'WS' or 'WSS' protocols".to_string(), + )); + }, + }; + + let spawner = connection.weak_spawner(); + let connect_f = ws_transport( + CONN_IDX.fetch_add(1, AtomicOrdering::Relaxed), + &protocol_prefixed_address, + &spawner, + ); + + // Try to connect to the server. 
+ let (transport_tx, transport_rx) = match connect_f.await { + Ok(stream) => stream, + Err(e) => { + return Err(ElectrumConnectionErr::Temporary(format!( + "Couldn't connect to the electrum server: {e:?}" + ))) + }, + }; + + Ok((transport_rx, transport_tx)) + } + + /// Waits until `last_response` time is too old in the past then returns a temporary error. + async fn timeout_loop(last_response: Arc) -> ElectrumConnectionErr { + loop { + Timer::sleep(CUTOFF_TIMEOUT).await; + let last_sec = (last_response.load(AtomicOrdering::Relaxed) / 1000) as f64; + if now_float() - last_sec > CUTOFF_TIMEOUT { + break ElectrumConnectionErr::Temporary(format!( + "Server didn't respond for too long ({}s).", + now_float() - last_sec + )); + } + } + } + + /// Runs the send loop that sends outgoing requests to the server. + /// + /// This runs until the sender is disconnected. + async fn send_loop( + address: String, + event_handlers: Arc>>, + #[cfg(not(target_arch = "wasm32"))] mut write: WriteHalf, + #[cfg(target_arch = "wasm32")] mut write: WsOutgoingSender, + rx: mpsc::Receiver>, + ) -> ElectrumConnectionErr { + let mut rx = rx_to_stream(rx).compat(); + while let Some(Ok(bytes)) = rx.next().await { + // NOTE: We shouldn't really notify on going request yet since we don't know + // if sending will error. We do that early though to avoid cloning the bytes on wasm. + event_handlers.on_outgoing_request(&bytes); + + #[cfg(not(target_arch = "wasm32"))] + let send_result = write.write_all(&bytes).await; + #[cfg(target_arch = "wasm32")] + let send_result = write.send(bytes).await; + + if let Err(e) = send_result { + error!("Write error {e} to {address}"); + } + } + ElectrumConnectionErr::Temporary("Sender disconnected".to_string()) + } + + /// Runs the receive loop that reads incoming responses from the server. + /// + /// This runs until the electrum server sends an empty response (signaling disconnection), + /// or if we encounter an error while reading from the stream. 
+ #[cfg(not(target_arch = "wasm32"))] + async fn recv_loop( + connection: Arc, + event_handlers: Arc>>, + read: ReadHalf, + last_response: Arc, + ) -> ElectrumConnectionErr { + let mut buffer = String::with_capacity(1024); + let mut buf_reader = BufReader::new(read); + loop { + match buf_reader.read_line(&mut buffer).await { + Ok(c) => { + if c == 0 { + break ElectrumConnectionErr::Temporary("EOF".to_string()); + } + }, + Err(e) => { + break ElectrumConnectionErr::Temporary(format!("Error on read {e:?}")); + }, + }; + + last_response.store(now_ms(), AtomicOrdering::Relaxed); + connection.process_electrum_bulk_response(buffer.as_bytes(), &event_handlers); + buffer.clear(); + } + } + + #[cfg(target_arch = "wasm32")] + async fn recv_loop( + connection: Arc, + event_handlers: Arc>>, + mut read: WsIncomingReceiver, + last_response: Arc, + ) -> ElectrumConnectionErr { + let address = connection.address(); + while let Some(response) = read.next().await { + match response { + Ok(bytes) => { + last_response.store(now_ms(), AtomicOrdering::Relaxed); + connection.process_electrum_response(&bytes, &event_handlers); + }, + Err(e) => { + error!("{address} error: {e:?}"); + }, + } + } + ElectrumConnectionErr::Temporary("Receiver disconnected".to_string()) + } + + /// Checks the server version against the range of accepted versions and disconnects the server + /// if the version is not supported. + async fn check_server_version( + connection: &ElectrumConnection, + client: &ElectrumClient, + ) -> Result<(), ElectrumConnectionErr> { + let address = connection.address(); + + // Don't query for the version if the client doesn't care about it, as querying for the version might + // fail with the protocol range we will provide. 
+ if !client.negotiate_version() { + return Ok(()); + } + + match client.server_version(address, client.protocol_version()).compat().await { + Ok(version_str) => match version_str.protocol_version.parse::() { + Ok(version_f32) => { + connection.set_protocol_version(version_f32); + Ok(()) + }, + Err(e) => Err(ElectrumConnectionErr::Temporary(format!( + "Failed to parse electrum server version {e:?}" + ))), + }, + // If the version we provided isn't supported by the server, it returns a JSONRPC response error. + Err(e) if matches!(e.error, JsonRpcErrorType::Response(..)) => { + Err(ElectrumConnectionErr::VersionMismatch(format!("{e:?}"))) + }, + Err(e) => Err(ElectrumConnectionErr::Temporary(format!( + "Failed to get electrum server version {e:?}" + ))), + } + } + + /// Starts the connection loop that keeps an active connection to the electrum server. + /// If this connection is already connected, nothing is performed and `Ok(())` is returned. + /// + /// This will first try to connect to the server and use that connection to query its version. + /// If version checks succeed, the connection will be kept alive, otherwise, it will be terminated. + pub async fn establish_connection_loop( + self: &Arc, + client: ElectrumClient, + ) -> Result<(), ElectrumConnectionErr> { + let connection = self.clone(); + let address = connection.address().to_string(); + let event_handlers = client.event_handlers(); + // This is the timeout for connection establishment and version querying (i.e. the whole method). + // The caller is guaranteed that the method will return within this time. + let timeout = connection + .settings + .timeout_sec + .unwrap_or(DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT); + + // Locking `establishing_connection` will prevent other threads from establishing a connection concurrently. 
+ let (timeout, _establishing_connection) = wrap_timeout!( + connection.establishing_connection.lock(), + timeout, + connection, + event_handlers + ); + + // Check if we are already connected. + if connection.is_connected() { + return Ok(()); + } + + // Check why we errored the last time, don't try to reconnect if it was an irrecoverable error. + if let Some(last_error) = connection.last_error() { + if !last_error.is_recoverable() { + return Err(last_error); + } + } + + let (timeout, stream_res) = wrap_timeout!( + Self::establish_connection(&connection).boxed(), + timeout, + connection, + event_handlers + ); + let stream = disconnect_and_return_if_err!(stream_res, connection, event_handlers); + + let (connection_ready_signal, wait_for_connection_ready) = async_oneshot::channel(); + let connection_loop = { + // Branch 1: Disconnect after not receiving responses for too long. + let last_response = Arc::new(AtomicU64::new(now_ms())); + let timeout_branch = Self::timeout_loop(last_response.clone()).boxed(); + + // Branch 2: Read incoming responses from the server. + #[cfg(not(target_arch = "wasm32"))] + let (read, write) = tokio::io::split(stream); + #[cfg(target_arch = "wasm32")] + let (read, write) = stream; + let recv_branch = Self::recv_loop(connection.clone(), event_handlers.clone(), read, last_response).boxed(); + + // Branch 3: Send outgoing requests to the server. + let (tx, rx) = mpsc::channel(0); + let send_branch = Self::send_loop(address.clone(), event_handlers.clone(), write, rx).boxed(); + + let connection = connection.clone(); + let event_handlers = event_handlers.clone(); + async move { + connection.connect(tx); + // Signal that the connection is up and ready so to start the version querying. 
+ connection_ready_signal.send(()).ok(); + event_handlers.on_connected(&address).ok(); + let via = match connection.settings.protocol { + ElectrumProtocol::TCP => "via TCP", + ElectrumProtocol::SSL if connection.settings.disable_cert_verification => { + "via SSL *with disabled certificate verification*" + }, + ElectrumProtocol::SSL => "via SSL", + ElectrumProtocol::WS => "via WS", + ElectrumProtocol::WSS => "via WSS", + }; + info!("{address} is now connected {via}."); + + let err = select! { + e = timeout_branch.fuse() => e, + e = recv_branch.fuse() => e, + e = send_branch.fuse() => e, + }; + + error!("{address} connection dropped due to: {err:?}"); + event_handlers.on_disconnected(&address).ok(); + connection.disconnect(Some(err)); + } + }; + // Start the connection loop on a weak spawner. + connection.weak_spawner().spawn(connection_loop); + + // Wait for the connection to be ready before querying the version. + let (timeout, connection_ready_res) = + wrap_timeout!(wait_for_connection_ready, timeout, connection, event_handlers); + disconnect_and_return_if_err!(connection_ready_res, Temporary, connection, event_handlers); + + let (_, version_res) = wrap_timeout!( + Self::check_server_version(&connection, &client).boxed(), + timeout, + connection, + event_handlers + ); + disconnect_and_return_if_err!(version_res, connection, event_handlers); + + Ok(()) + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs new file mode 100644 index 0000000000..17f3495b85 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/connection_context.rs @@ -0,0 +1,91 @@ +use std::collections::HashSet; +use std::mem; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; + +use super::super::connection::ElectrumConnection; +use super::super::constants::FIRST_SUSPEND_TIME; + +use common::now_ms; +use 
keys::Address; + +#[derive(Debug)] +struct SuspendTimer { + /// When was the connection last disconnected. + disconnected_at: AtomicU64, + /// How long to suspend the server the next time it disconnects (in milliseconds). + next_suspend_time: AtomicU64, +} + +impl SuspendTimer { + /// Creates a new suspend timer. + fn new() -> Self { + SuspendTimer { + disconnected_at: AtomicU64::new(0), + next_suspend_time: AtomicU64::new(FIRST_SUSPEND_TIME), + } + } + + /// Resets the suspend time and disconnection time. + fn reset(&self) { + self.disconnected_at.store(0, Ordering::SeqCst); + self.next_suspend_time.store(FIRST_SUSPEND_TIME, Ordering::SeqCst); + } + + /// Doubles the suspend time and sets the disconnection time to `now`. + fn double(&self) { + // The max suspend time, 12h. + const MAX_SUSPEND_TIME: u64 = 12 * 60 * 60; + self.disconnected_at.store(now_ms(), Ordering::SeqCst); + let mut next_suspend_time = self.next_suspend_time.load(Ordering::SeqCst); + next_suspend_time = (next_suspend_time * 2).min(MAX_SUSPEND_TIME); + self.next_suspend_time.store(next_suspend_time, Ordering::SeqCst); + } + + /// Returns the time until when the server should be suspended in milliseconds. + fn get_suspend_until(&self) -> u64 { + self.disconnected_at.load(Ordering::SeqCst) + self.next_suspend_time.load(Ordering::SeqCst) * 1000 + } +} + +/// A struct that encapsulates an Electrum connection and its information. +#[derive(Debug)] +pub struct ConnectionContext { + /// The electrum connection. + pub connection: Arc, + /// The list of addresses subscribed to the connection. + subs: Mutex>, + /// The timer deciding when the connection is ready to be used again. + suspend_timer: SuspendTimer, + /// The ID of this connection which also serves as a priority (lower is better). + pub id: u32, +} + +impl ConnectionContext { + /// Creates a new connection context. 
+    pub(super) fn new(connection: ElectrumConnection, id: u32) -> Self {
+        ConnectionContext {
+            connection: Arc::new(connection),
+            subs: Mutex::new(HashSet::new()),
+            suspend_timer: SuspendTimer::new(),
+            id,
+        }
+    }
+
+    /// Resets the suspend time.
+    pub(super) fn connected(&self) { self.suspend_timer.reset(); }
+
+    /// Inform the connection context that the connection has been disconnected.
+    ///
+    /// Doubles the suspend time and clears the subs list and returns it.
+    pub(super) fn disconnected(&self) -> HashSet<Address> {
+        self.suspend_timer.double();
+        mem::take(&mut self.subs.lock().unwrap())
+    }
+
+    /// Returns the time the server should be suspended until (when to take it up) in milliseconds.
+    pub(super) fn suspended_till(&self) -> u64 { self.suspend_timer.get_suspend_until() }
+
+    /// Adds a subscription to the connection context.
+    pub(super) fn add_sub(&self, address: Address) { self.subs.lock().unwrap().insert(address); }
+}
diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs
new file mode 100644
index 0000000000..b06628fd60
--- /dev/null
+++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/manager.rs
@@ -0,0 +1,528 @@
+use std::collections::{BTreeMap, HashMap};
+use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak};
+
+use super::super::client::{ElectrumClient, ElectrumClientImpl};
+use super::super::connection::{ElectrumConnection, ElectrumConnectionErr, ElectrumConnectionSettings};
+use super::super::constants::{BACKGROUND_TASK_WAIT_TIMEOUT, PING_INTERVAL};
+use super::connection_context::ConnectionContext;
+
+use crate::utxo::rpc_clients::UtxoRpcClientOps;
+use common::executor::abortable_queue::AbortableQueue;
+use common::executor::{AbortableSystem, SpawnFuture, Timer};
+use common::log::{debug, error};
+use common::notifier::{Notifiee, Notifier};
+use common::now_ms;
+use keys::Address;
+
+use futures::compat::Future01CompatExt;
+use futures::FutureExt;
+
+/// A macro to unwrap an option and *execute* some code if the option is None.
+macro_rules! unwrap_or_else {
+    ($option:expr, $($action:tt)*) => {{
+        match $option {
+            Some(some_val) => some_val,
+            None => { $($action)* }
+        }
+    }};
+}
+
+macro_rules! unwrap_or_continue {
+    ($option:expr) => {
+        unwrap_or_else!($option, continue)
+    };
+}
+
+macro_rules! unwrap_or_return {
+    ($option:expr, $ret:expr) => {
+        unwrap_or_else!($option, return $ret)
+    };
+    ($option:expr) => {
+        unwrap_or_else!($option, return)
+    };
+}
+
+/// The ID of a connection (and also its priority, lower is better).
+type ID = u32;
+
+#[derive(Debug, Display)]
+pub enum ConnectionManagerErr {
+    #[display(fmt = "Unknown server address")]
+    UnknownAddress,
+    #[display(fmt = "Failed to connect to the server due to {:?}", _0)]
+    ConnectingError(ElectrumConnectionErr),
+    #[display(fmt = "No client found, connection manager isn't initialized properly")]
+    NoClient,
+    #[display(fmt = "Connection manager is already initialized")]
+    AlreadyInitialized,
+}
+
+/// The configuration parameter for a connection manager.
+#[derive(Debug)]
+pub struct ManagerConfig {
+    /// A flag to spawn a ping loop task for active connections.
+    pub spawn_ping: bool,
+    /// The minimum number of connections that should be connected at all times.
+    pub min_connected: usize,
+    /// The maximum number of connections that can be connected at any given time.
+    pub max_connected: usize,
+}
+
+#[derive(Debug)]
+/// A connection manager that maintains a set of connections to electrum servers and
+/// handles reconnecting, address subscription distribution, etc...
+struct ConnectionManagerImpl {
+    /// The configuration for the connection manager.
+    config: ManagerConfig,
+    /// The set of addresses that are currently connected.
+    ///
+    /// This set's size should satisfy: `min_connected <= maintained_connections.len() <= max_connected`.
+    ///
+    /// It is actually represented as a sorted map from connection ID (u32, also represents connection priority)
+    /// to address so we can easily/cheaply pop low priority connections and add high priority ones.
+    maintained_connections: RwLock<BTreeMap<ID, String>>,
+    /// A map for server addresses to their corresponding connections.
+    connections: RwLock<HashMap<String, ConnectionContext>>,
+    /// A weak reference to the electrum client that owns this connection manager.
+    /// It is used to send electrum requests during connection establishment (version querying).
+    // TODO: This field might not be necessary if the [`ElectrumConnection`] object is used to send
+    // electrum requests on its own, i.e. implement [`JsonRpcClient`] & [`UtxoRpcClientOps`].
+    electrum_client: RwLock<Option<Weak<ElectrumClientImpl>>>,
+    /// A notification sender to notify the background task when we have less than `min_connected` connections.
+    below_min_connected_notifier: Notifier,
+    /// A notification receiver to be used by the background task to receive notifications of when
+    /// we have less than `min_connected` maintained connections.
+    ///
+    /// Wrapped inside a `Mutex<Option<..>>` so the background task can take it out (it is used only once).
+    below_min_connected_notifiee: Mutex<Option<Notifiee>>,
+}
+
+#[derive(Clone, Debug)]
+pub struct ConnectionManager(Arc<ConnectionManagerImpl>);
+
+// Public interface.
+impl ConnectionManager {
+    pub fn try_new(
+        servers: Vec<ElectrumConnectionSettings>,
+        spawn_ping: bool,
+        (min_connected, max_connected): (usize, usize),
+        abortable_system: &AbortableQueue,
+    ) -> Result<ConnectionManager, String> {
+        let mut connections = HashMap::with_capacity(servers.len());
+        // Priority is assumed to be the order of the servers in the list as they appear.
+ for (priority, connection_settings) in servers.into_iter().enumerate() { + let subsystem = abortable_system.create_subsystem().map_err(|e| { + ERRL!( + "Failed to create abortable subsystem for connection: {}, error: {:?}", + connection_settings.url, + e + ) + })?; + let connection = ElectrumConnection::new(connection_settings, subsystem); + connections.insert( + connection.address().to_string(), + ConnectionContext::new(connection, priority as u32), + ); + } + + if min_connected == 0 { + return Err(ERRL!("min_connected should be greater than 0")); + } + if min_connected > max_connected { + return Err(ERRL!( + "min_connected ({}) must be <= max_connected ({})", + min_connected, + max_connected + )); + } + + let (notifier, notifiee) = Notifier::new(); + Ok(ConnectionManager(Arc::new(ConnectionManagerImpl { + config: ManagerConfig { + spawn_ping, + min_connected, + max_connected, + }, + connections: RwLock::new(connections), + maintained_connections: RwLock::new(BTreeMap::new()), + electrum_client: RwLock::new(None), + below_min_connected_notifier: notifier, + below_min_connected_notifiee: Mutex::new(Some(notifiee)), + }))) + } + + /// Initializes the connection manager by connecting the electrum connections. + /// This must be called and only be called once to have a functioning connection manager. + pub fn initialize(&self, weak_client: Weak) -> Result<(), ConnectionManagerErr> { + // Disallow reusing the same manager with another client. + if self.weak_client().read().unwrap().is_some() { + return Err(ConnectionManagerErr::AlreadyInitialized); + } + + let electrum_client = unwrap_or_return!(weak_client.upgrade(), Err(ConnectionManagerErr::NoClient)); + + // Store the (weak) electrum client. + *self.weak_client().write().unwrap() = Some(weak_client); + + // Use the client's spawner to spawn the connection manager's background task. 
+ electrum_client.weak_spawner().spawn(self.clone().background_task()); + + if self.config().spawn_ping { + // Use the client's spawner to spawn the connection manager's ping task. + electrum_client.weak_spawner().spawn(self.clone().ping_task()); + } + + Ok(()) + } + + /// Returns all the server addresses. + pub fn get_all_server_addresses(&self) -> Vec { self.read_connections().keys().cloned().collect() } + + /// Returns all the connections. + pub fn get_all_connections(&self) -> Vec> { + self.read_connections() + .values() + .map(|conn_ctx| conn_ctx.connection.clone()) + .collect() + } + + /// Retrieve a specific electrum connection by its address. + /// The connection will be forcibly established if it's disconnected. + pub async fn get_connection_by_address( + &self, + server_address: &str, + force_connect: bool, + ) -> Result, ConnectionManagerErr> { + let connection = self + .get_connection(server_address) + .ok_or(ConnectionManagerErr::UnknownAddress)?; + + if force_connect { + let client = unwrap_or_return!(self.get_client(), Err(ConnectionManagerErr::NoClient)); + // Make sure the connection is connected. + connection + .establish_connection_loop(client) + .await + .map_err(ConnectionManagerErr::ConnectingError)?; + } + + Ok(connection) + } + + /// Returns a list of active/maintained connections. + pub fn get_active_connections(&self) -> Vec> { + self.read_maintained_connections() + .iter() + .filter_map(|(_id, address)| self.get_connection(address)) + .collect() + } + + /// Returns a boolean `true` if the connection pool is empty, `false` otherwise. + pub fn is_connections_pool_empty(&self) -> bool { self.read_connections().is_empty() } + + /// Subscribe the list of addresses to our active connections. + /// + /// There is a bit of indirection here. We register the abandoned addresses on `on_disconnected` with + /// the client to queue them for `utxo_balance_events` which in turn calls this method back to re-subscribe + /// the abandoned addresses. 
We could have instead directly re-subscribed the addresses here in the connection + /// manager without sending them to `utxo_balance_events`. However, we don't do that so that `utxo_balance_events` + /// knows about all the added addresses. If it doesn't know about them, it won't be able to retrieve the triggered + /// address when its script hash is notified. + pub async fn add_subscriptions(&self, addresses: &HashMap) { + for (scripthash, address) in addresses.iter() { + // For a single address/scripthash, keep trying to subscribe it until we succeed. + 'single_address_sub: loop { + let client = unwrap_or_return!(self.get_client()); + let connections = self.get_active_connections(); + if connections.is_empty() { + // If there are no active connections, wait for a connection to be established. + Timer::sleep(1.).await; + continue; + } + // Try to subscribe the address to any connection we have. + for connection in connections { + if client + .blockchain_scripthash_subscribe_using(connection.address(), scripthash.clone()) + .compat() + .await + .is_ok() + { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_continue!(all_connections.get(connection.address())); + connection_ctx.add_sub(address.clone()); + break 'single_address_sub; + } + } + } + } + } + + /// Handles the connection event. + pub fn on_connected(&self, server_address: &str) { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + + // Reset the suspend time & disconnection time. + connection_ctx.connected(); + } + + /// Handles the disconnection event from an Electrum server. 
+ pub fn on_disconnected(&self, server_address: &str) { + debug!("Electrum server disconnected: {}", server_address); + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + + self.unmaintain(connection_ctx.id); + + let abandoned_subs = connection_ctx.disconnected(); + // Re-subscribe the abandoned addresses using the client. + let client = unwrap_or_return!(self.get_client()); + client.subscribe_addresses(abandoned_subs).ok(); + } + + /// A method that should be called after using a specific server for some request. + /// + /// Instead of disconnecting the connection right away, this method will only disconnect it + /// if it's not in the maintained connections set. + pub fn not_needed(&self, server_address: &str) { + let (id, connection) = { + let all_connections = self.read_connections(); + let connection_ctx = unwrap_or_return!(all_connections.get(server_address)); + (connection_ctx.id, connection_ctx.connection.clone()) + }; + if !self.read_maintained_connections().contains_key(&id) { + connection.disconnect(Some(ElectrumConnectionErr::Temporary("Not needed anymore".to_string()))); + self.on_disconnected(connection.address()); + } + } + + /// Remove a connection from the connection manager by its address. + // TODO(feat): Add the ability to add a connection during runtime. + pub fn remove_connection(&self, server_address: &str) -> Result, ConnectionManagerErr> { + let connection = self + .get_connection(server_address) + .ok_or(ConnectionManagerErr::UnknownAddress)?; + // Make sure this connection is disconnected. + connection.disconnect(Some(ElectrumConnectionErr::Irrecoverable( + "Forcefully disconnected & removed".to_string(), + ))); + // Run the on-disconnection hook, this will also make sure the connection is removed from the maintained set. + self.on_disconnected(connection.address()); + // Remove the connection from the manager. 
+ self.write_connections().remove(server_address); + Ok(connection) + } +} + +// Background tasks. +impl ConnectionManager { + /// A forever-lived task that pings active/maintained connections periodically. + async fn ping_task(self) { + loop { + let client = unwrap_or_return!(self.get_client()); + // This will ping all the active/maintained connections, which will keep these connections alive. + client.server_ping().compat().await.ok(); + Timer::sleep(PING_INTERVAL).await; + } + } + + /// A forever-lived task that does the house keeping tasks of the connection manager: + /// - Maintaining the right number of active connections. + /// - Establishing new connections if needed. + /// - Replacing low priority connections with high priority ones periodically. + /// - etc... + async fn background_task(self) { + // Take out the min_connected notifiee from the manager. + let mut min_connected_notification = unwrap_or_return!(self.extract_below_min_connected_notifiee()); + // A flag to indicate whether to log connection establishment errors or not. We should not log them if we + // are in panic mode (i.e. we are below the `min_connected` threshold) as this will flood the error log. + let mut log_errors = true; + loop { + // Get the candidate connections that we will consider maintaining. + let (candidate_connections, will_never_get_min_connected) = self.get_candidate_connections(); + // Establish the connections to the selected candidates and alter the maintained connections set accordingly. + self.establish_best_connections(candidate_connections, log_errors).await; + // Only sleep if we successfully acquired the minimum number of connections, + // or if we know we can never maintain `min_connected` connections; there is no point of infinite non-wait looping then. 
+ if self.read_maintained_connections().len() >= self.config().min_connected || will_never_get_min_connected { + // Wait for a timeout or a below `min_connected` notification before doing another round of house keeping. + futures::select! { + _ = Timer::sleep(BACKGROUND_TASK_WAIT_TIMEOUT).fuse() => (), + _ = min_connected_notification.wait().fuse() => (), + } + log_errors = true; + } else { + // Never sleeping can result in busy waiting, which is problematic as it might not + // give a chance to other tasks to make progress, especially in single threaded environments. + // Yield the execution to the executor to give a chance to other tasks to run. + // TODO: `yield` keyword is not supported in the current rust version, using a short sleep for now. + Timer::sleep(1.).await; + log_errors = false; + } + } + } + + /// Returns a list of candidate connections that aren't maintained and could be considered for maintaining. + /// + /// Also returns a flag indicating whether covering `min_connected` connections is even possible: not possible when + /// `min_connected` is greater than the number of connections we have. + fn get_candidate_connections(&self) -> (Vec<(Arc, u32)>, bool) { + let all_connections = self.read_connections(); + let maintained_connections = self.read_maintained_connections(); + // The number of connections we need to add as maintained to reach the `min_connected` threshold. + let connections_needed = self.config().min_connected.saturating_sub(maintained_connections.len()); + // The connections that we can consider (all connections - candidate connections). + let all_candidate_connections: Vec<_> = all_connections + .iter() + .filter_map(|(_, conn_ctx)| { + (!maintained_connections.contains_key(&conn_ctx.id)).then(|| (conn_ctx.connection.clone(), conn_ctx.id)) + }) + .collect(); + // The candidate connections from above, but further filtered by whether they are suspended or not. 
+ let non_suspended_candidate_connections: Vec<_> = all_candidate_connections + .iter() + .filter(|(connection, _)| { + all_connections + .get(connection.address()) + .map_or(false, |conn_ctx| now_ms() > conn_ctx.suspended_till()) + }) + .cloned() + .collect(); + // Decide which candidate connections to consider (all or only non-suspended). + if connections_needed > non_suspended_candidate_connections.len() { + if connections_needed > all_candidate_connections.len() { + // Not enough connections to cover the `min_connected` threshold. + // This means we will never be able to maintain `min_connected` active connections. + (all_candidate_connections, true) + } else { + // If we consider all candidate connection (but some are suspended), we can cover the needed connections. + // We will consider the suspended ones since if we don't we will stay below `min_connected` threshold. + (all_candidate_connections, false) + } + } else { + // Non suspended candidates are enough to cover the needed connections. + (non_suspended_candidate_connections, false) + } + } + + /// Establishes the best connections (based on priority) using the candidate connections + /// till we can't establish no more (hit the `max_connected` threshold). + async fn establish_best_connections( + &self, + mut candidate_connections: Vec<(Arc, u32)>, + log_errors: bool, + ) { + let client = unwrap_or_return!(self.get_client()); + // Sort the candidate connections by their priority/ID. 
+ candidate_connections.sort_by_key(|(_, priority)| *priority); + for (connection, connection_id) in candidate_connections { + let address = connection.address().to_string(); + let (maintained_connections_size, lowest_priority_connection_id) = { + let maintained_connections = self.read_maintained_connections(); + let maintained_connections_size = maintained_connections.len(); + let lowest_priority_connection_id = *maintained_connections.keys().next_back().unwrap_or(&u32::MAX); + (maintained_connections_size, lowest_priority_connection_id) + }; + + // We can only try to add the connection if: + // 1- We haven't reached the `max_connected` threshold. + // 2- We have reached the `max_connected` threshold but the connection has a higher priority than the lowest priority connection. + if maintained_connections_size < self.config().max_connected + || connection_id < lowest_priority_connection_id + { + // Now that we know the connection is good to be inserted, try to establish it. + if let Err(e) = connection.establish_connection_loop(client.clone()).await { + if log_errors { + error!("Failed to establish connection to {address} due to error: {e:?}"); + } + // Remove the connection if it's not recoverable. + if !e.is_recoverable() { + self.remove_connection(&address).ok(); + } + continue; + } + self.maintain(connection_id, address); + } else { + // If any of the two conditions on the `if` statement above are not met, there is nothing to do. + // At this point we have already collected `max_connected` connections and also the current connection + // in the candidate list has a lower priority than the lowest priority maintained connection, and the next + // candidate connections as well since they are sorted by priority. + break; + } + } + } +} + +// Abstractions over the accesses of the inner fields of the connection manager. 
+impl ConnectionManager { + #[inline] + pub fn config(&self) -> &ManagerConfig { &self.0.config } + + #[inline] + fn read_connections(&self) -> RwLockReadGuard> { + self.0.connections.read().unwrap() + } + + #[inline] + fn write_connections(&self) -> RwLockWriteGuard> { + self.0.connections.write().unwrap() + } + + #[inline] + fn get_connection(&self, server_address: &str) -> Option> { + self.read_connections() + .get(server_address) + .map(|connection_ctx| connection_ctx.connection.clone()) + } + + #[inline] + fn read_maintained_connections(&self) -> RwLockReadGuard> { + self.0.maintained_connections.read().unwrap() + } + + #[inline] + fn maintain(&self, id: ID, server_address: String) { + let mut maintained_connections = self.0.maintained_connections.write().unwrap(); + maintained_connections.insert(id, server_address); + // If we have reached the `max_connected` threshold then remove the lowest priority connection. + if maintained_connections.len() > self.config().max_connected { + let lowest_priority_connection_id = *maintained_connections.keys().next_back().unwrap_or(&u32::MAX); + maintained_connections.remove(&lowest_priority_connection_id); + } + } + + #[inline] + fn unmaintain(&self, id: ID) { + // To avoid write locking the maintained connections, just make sure the connection is actually maintained first. + let is_maintained = self.read_maintained_connections().contains_key(&id); + if is_maintained { + // If the connection was maintained, remove it from the maintained connections. + let mut maintained_connections = self.0.maintained_connections.write().unwrap(); + maintained_connections.remove(&id); + // And notify the background task if we fell below the `min_connected` threshold. 
+ if maintained_connections.len() < self.config().min_connected { + self.notify_below_min_connected() + } + } + } + + #[inline] + fn notify_below_min_connected(&self) { self.0.below_min_connected_notifier.notify().ok(); } + + #[inline] + fn extract_below_min_connected_notifiee(&self) -> Option { + self.0.below_min_connected_notifiee.lock().unwrap().take() + } + + #[inline] + fn weak_client(&self) -> &RwLock>> { &self.0.electrum_client } + + #[inline] + fn get_client(&self) -> Option { + self.weak_client() + .read() + .unwrap() + .as_ref() // None here = client was never initialized. + .and_then(|weak| weak.upgrade().map(ElectrumClient)) // None here = client was dropped. + } +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs new file mode 100644 index 0000000000..b301ad3186 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/connection_manager/mod.rs @@ -0,0 +1,4 @@ +mod connection_context; +mod manager; + +pub use manager::ConnectionManager; diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs new file mode 100644 index 0000000000..f4d1a0efa5 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/constants.rs @@ -0,0 +1,32 @@ +/// The timeout for the electrum server to respond to a request. +pub const ELECTRUM_REQUEST_TIMEOUT: f64 = 20.; +/// The default (can be overridden) maximum timeout to establish a connection with the electrum server. +/// This included connecting to the server and querying the server version. +pub const DEFAULT_CONNECTION_ESTABLISHMENT_TIMEOUT: f64 = 20.; +/// Wait this long before pinging again. +pub const PING_INTERVAL: f64 = 30.; +/// Used to cutoff the server connection after not receiving any response for that long. +/// This only makes sense if we have sent a request to the server. 
So we need to keep `PING_INTERVAL` +/// lower than this value, otherwise we might disconnect servers that are perfectly responsive but just +/// haven't received any requests from us for a while. +pub const CUTOFF_TIMEOUT: f64 = 60.; +/// Initial server suspension time. +pub const FIRST_SUSPEND_TIME: u64 = 10; +/// The timeout used by the background task of the connection manager to re-check the manager's health. +pub const BACKGROUND_TASK_WAIT_TIMEOUT: f64 = (5 * 60) as f64; +/// Electrum methods that should not be sent without forcing the connection to be established first. +pub const NO_FORCE_CONNECT_METHODS: &[&str] = &[ + // The server should already be connected if we are querying for its version, don't force connect. + "server.version", +]; +/// Electrum methods that should be sent to all connections even after receiving a response from a subset of them. +/// Note that this is only applicable to active/maintained connections. If an electrum request fails by all maintained +/// connections, a fallback using all connections will *NOT* be attempted. +pub const SEND_TO_ALL_METHODS: &[&str] = &[ + // A ping should be sent to all connections even if we got a response from one of them early. + "server.ping", +]; +/// Electrum RPC method for headers subscription. +pub const BLOCKCHAIN_HEADERS_SUB_ID: &str = "blockchain.headers.subscribe"; +/// Electrum RPC method for script/address subscription. 
+pub const BLOCKCHAIN_SCRIPTHASH_SUB_ID: &str = "blockchain.scripthash.subscribe"; diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs new file mode 100644 index 0000000000..27bd74b4d9 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/event_handlers.rs @@ -0,0 +1,74 @@ +use super::connection_manager::ConnectionManager; +use super::constants::BLOCKCHAIN_SCRIPTHASH_SUB_ID; + +use crate::utxo::ScripthashNotification; +use crate::RpcTransportEventHandler; +use common::jsonrpc_client::JsonRpcRequest; +use common::log::{error, warn}; + +use futures::channel::mpsc::UnboundedSender; +use serde_json::{self as json, Value as Json}; + +/// An `RpcTransportEventHandler` that forwards `ScripthashNotification`s to trigger balance updates. +/// +/// This handler hooks in `on_incoming_response` and looks for an electrum script hash notification to forward it. +pub struct ElectrumScriptHashNotificationBridge { + pub scripthash_notification_sender: UnboundedSender, +} + +impl RpcTransportEventHandler for ElectrumScriptHashNotificationBridge { + fn debug_info(&self) -> String { "ElectrumScriptHashNotificationBridge".into() } + + fn on_incoming_response(&self, data: &[u8]) { + if let Ok(raw_json) = json::from_slice::(data) { + // Try to parse the notification. A notification is sent as a JSON-RPC request. + if let Ok(notification) = json::from_value::(raw_json) { + // Only care about `BLOCKCHAIN_SCRIPTHASH_SUB_ID` notifications. + if notification.method.as_str() == BLOCKCHAIN_SCRIPTHASH_SUB_ID { + if let Some(scripthash) = notification.params.first().and_then(|s| s.as_str()) { + if let Err(e) = self + .scripthash_notification_sender + .unbounded_send(ScripthashNotification::Triggered(scripthash.to_string())) + { + error!("Failed sending script hash message. 
{e:?}"); + } + } else { + warn!("Notification must contain the script hash value, got: {notification:?}"); + } + }; + } + } + } + + fn on_connected(&self, _address: &str) -> Result<(), String> { Ok(()) } + + fn on_disconnected(&self, _address: &str) -> Result<(), String> { Ok(()) } + + fn on_outgoing_request(&self, _data: &[u8]) {} +} + +/// An `RpcTransportEventHandler` that notifies the `ConnectionManager` upon connections and disconnections. +/// +/// When a connection is connected or disconnected, this event handler will notify the `ConnectionManager` +/// to handle the event. +pub struct ElectrumConnectionManagerNotifier { + pub connection_manager: ConnectionManager, +} + +impl RpcTransportEventHandler for ElectrumConnectionManagerNotifier { + fn debug_info(&self) -> String { "ElectrumConnectionManagerNotifier".into() } + + fn on_connected(&self, address: &str) -> Result<(), String> { + self.connection_manager.on_connected(address); + Ok(()) + } + + fn on_disconnected(&self, address: &str) -> Result<(), String> { + self.connection_manager.on_disconnected(address); + Ok(()) + } + + fn on_incoming_response(&self, _data: &[u8]) {} + + fn on_outgoing_request(&self, _data: &[u8]) {} +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs new file mode 100644 index 0000000000..bf78308be2 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/mod.rs @@ -0,0 +1,20 @@ +use sha2::{Digest, Sha256}; + +mod client; +mod connection; +mod connection_manager; +mod constants; +mod event_handlers; +mod rpc_responses; +#[cfg(not(target_arch = "wasm32"))] mod tcp_stream; + +pub use client::{ElectrumClient, ElectrumClientImpl, ElectrumClientSettings}; +pub use connection::ElectrumConnectionSettings; +pub use rpc_responses::*; + +#[inline] +pub fn electrum_script_hash(script: &[u8]) -> Vec { + let mut sha = Sha256::new(); + sha.update(script); + sha.finalize().iter().rev().copied().collect() +} diff --git
a/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs new file mode 100644 index 0000000000..75daac6f35 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/rpc_responses.rs @@ -0,0 +1,168 @@ +use chain::{BlockHeader, BlockHeaderBits, BlockHeaderNonce, Transaction as UtxoTx}; +use mm2_number::{BigDecimal, BigInt}; +use rpc::v1::types::{Bytes as BytesJson, H256 as H256Json}; +use serialization::serialize; + +#[derive(Debug, Deserialize)] +pub struct ElectrumTxHistoryItem { + pub height: i64, + pub tx_hash: H256Json, + pub fee: Option, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumUnspent { + pub height: Option, + pub tx_hash: H256Json, + pub tx_pos: u32, + pub value: u64, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(untagged)] +pub enum ElectrumNonce { + Number(u64), + Hash(H256Json), +} + +#[allow(clippy::from_over_into)] +impl Into for ElectrumNonce { + fn into(self) -> BlockHeaderNonce { + match self { + ElectrumNonce::Number(n) => BlockHeaderNonce::U32(n as u32), + ElectrumNonce::Hash(h) => BlockHeaderNonce::H256(h.into()), + } + } +} + +#[derive(Debug, Deserialize)] +pub struct ElectrumBlockHeadersRes { + pub count: u64, + pub hex: BytesJson, + #[allow(dead_code)] + max: u64, +} + +/// The block header compatible with Electrum 1.2 +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBlockHeaderV12 { + pub bits: u64, + pub block_height: u64, + pub merkle_root: H256Json, + pub nonce: ElectrumNonce, + pub prev_block_hash: H256Json, + pub timestamp: u64, + pub version: u64, +} + +impl ElectrumBlockHeaderV12 { + fn as_block_header(&self) -> BlockHeader { + BlockHeader { + version: self.version as u32, + previous_header_hash: self.prev_block_hash.into(), + merkle_root_hash: self.merkle_root.into(), + claim_trie_root: None, + hash_final_sapling_root: None, + time: self.timestamp as u32, + bits: BlockHeaderBits::U32(self.bits as u32), + nonce: 
self.nonce.clone().into(), + solution: None, + aux_pow: None, + prog_pow: None, + mtp_pow: None, + is_verus: false, + hash_state_root: None, + hash_utxo_root: None, + prevout_stake: None, + vch_block_sig_dlgt: None, + n_height: None, + n_nonce_u64: None, + mix_hash: None, + } + } + + #[inline] + pub fn as_hex(&self) -> String { + let block_header = self.as_block_header(); + let serialized = serialize(&block_header); + hex::encode(serialized) + } + + #[inline] + pub fn hash(&self) -> H256Json { + let block_header = self.as_block_header(); + BlockHeader::hash(&block_header).into() + } +} + +/// The block header compatible with Electrum 1.4 +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBlockHeaderV14 { + pub height: u64, + pub hex: BytesJson, +} + +impl ElectrumBlockHeaderV14 { + pub fn hash(&self) -> H256Json { self.hex.clone().into_vec()[..].into() } +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(untagged)] +pub enum ElectrumBlockHeader { + V12(ElectrumBlockHeaderV12), + V14(ElectrumBlockHeaderV14), +} + +impl ElectrumBlockHeader { + pub fn block_height(&self) -> u64 { + match self { + ElectrumBlockHeader::V12(h) => h.block_height, + ElectrumBlockHeader::V14(h) => h.height, + } + } + + pub fn block_hash(&self) -> H256Json { + match self { + ElectrumBlockHeader::V12(h) => h.hash(), + ElectrumBlockHeader::V14(h) => h.hash(), + } + } +} + +/// The merkle branch of a confirmed transaction +#[derive(Clone, Debug, Deserialize)] +pub struct TxMerkleBranch { + pub merkle: Vec, + pub block_height: u64, + pub pos: usize, +} + +#[derive(Clone)] +pub struct ConfirmedTransactionInfo { + pub tx: UtxoTx, + pub header: BlockHeader, + pub index: u64, + pub height: u64, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct ElectrumBalance { + pub(crate) confirmed: i128, + pub(crate) unconfirmed: i128, +} + +impl ElectrumBalance { + #[inline] + pub fn to_big_decimal(&self, decimals: u8) -> BigDecimal { + let balance_sat = BigInt::from(self.confirmed) + 
BigInt::from(self.unconfirmed); + BigDecimal::from(balance_sat) / BigDecimal::from(10u64.pow(decimals as u32)) + } +} + +#[derive(Debug, Deserialize, Serialize)] +/// Deserializable Electrum protocol version representation for RPC +/// https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-methods.html#server.version +pub struct ElectrumProtocolVersion { + pub server_software_version: String, + pub protocol_version: String, +} diff --git a/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs b/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs new file mode 100644 index 0000000000..b50b7f5c85 --- /dev/null +++ b/mm2src/coins/utxo/rpc_clients/electrum_rpc/tcp_stream.rs @@ -0,0 +1,105 @@ +use std::io; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::SystemTime; + +use futures::io::Error; +use rustls::client::ServerCertVerified; +use rustls::{Certificate, ClientConfig, OwnedTrustAnchor, RootCertStore, ServerName}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::net::TcpStream; +use tokio_rustls::client::TlsStream; +use webpki_roots::TLS_SERVER_ROOTS; + +/// The enum wrapping possible variants of underlying Streams +#[allow(clippy::large_enum_variant)] +pub enum ElectrumStream { + Tcp(TcpStream), + Tls(TlsStream), +} + +impl AsRef for ElectrumStream { + fn as_ref(&self) -> &TcpStream { + match self { + ElectrumStream::Tcp(stream) => stream, + ElectrumStream::Tls(stream) => stream.get_ref().0, + } + } +} + +impl AsyncRead for ElectrumStream { + fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), + ElectrumStream::Tls(stream) => AsyncRead::poll_read(Pin::new(stream), cx, buf), + } + } +} + +impl AsyncWrite for ElectrumStream { + fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + match self.get_mut() { + 
ElectrumStream::Tcp(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), + ElectrumStream::Tls(stream) => AsyncWrite::poll_write(Pin::new(stream), cx, buf), + } + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), + ElectrumStream::Tls(stream) => AsyncWrite::poll_flush(Pin::new(stream), cx), + } + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.get_mut() { + ElectrumStream::Tcp(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), + ElectrumStream::Tls(stream) => AsyncWrite::poll_shutdown(Pin::new(stream), cx), + } + } +} + +/// Skips the server certificate verification on TLS connection +pub struct NoCertificateVerification {} + +impl rustls::client::ServerCertVerifier for NoCertificateVerification { + fn verify_server_cert( + &self, + _: &Certificate, + _: &[Certificate], + _: &ServerName, + _: &mut dyn Iterator, + _: &[u8], + _: SystemTime, + ) -> Result { + Ok(rustls::client::ServerCertVerified::assertion()) + } +} + +fn rustls_client_config(unsafe_conf: bool) -> Arc { + let mut cert_store = RootCertStore::empty(); + + cert_store.add_trust_anchors( + TLS_SERVER_ROOTS + .iter() + .map(|ta| OwnedTrustAnchor::from_subject_spki_name_constraints(ta.subject, ta.spki, ta.name_constraints)), + ); + + let mut tls_config = rustls::ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(cert_store) + .with_no_client_auth(); + + if unsafe_conf { + tls_config + .dangerous() + .set_certificate_verifier(Arc::new(NoCertificateVerification {})); + } + Arc::new(tls_config) +} + +lazy_static! 
{ + pub static ref SAFE_TLS_CONFIG: Arc = rustls_client_config(false); + pub static ref UNSAFE_TLS_CONFIG: Arc = rustls_client_config(true); +} diff --git a/mm2src/coins/utxo/slp.rs b/mm2src/coins/utxo/slp.rs index d75855a750..6667123ed5 100644 --- a/mm2src/coins/utxo/slp.rs +++ b/mm2src/coins/utxo/slp.rs @@ -55,6 +55,7 @@ use serde_json::Value as Json; use serialization::{deserialize, serialize, Deserializable, Error as SerError, Reader}; use serialization_derive::Deserializable; use std::convert::TryInto; +use std::num::TryFromIntError; use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; use std::sync::Arc; use utxo_signer::with_key_pair::{p2pkh_spend, p2sh_spend, sign_tx, UtxoSignWithKeyPairError}; @@ -1216,63 +1217,50 @@ impl MarketCoinOps for SlpToken { #[async_trait] impl SwapOps for SlpToken { - fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - let coin = self.clone(); - let fee_pubkey = try_tx_fus!(Public::from_slice(self.dex_pubkey())); + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + let fee_pubkey = try_tx_s!(Public::from_slice(self.dex_pubkey())); let script_pubkey = ScriptBuilder::build_p2pkh(&fee_pubkey.address_hash().into()).into(); - let amount = try_tx_fus!(dex_fee.fee_amount_as_u64(self.decimals())); + let amount = try_tx_s!(dex_fee.fee_amount_as_u64(self.decimals())); + let slp_out = SlpOutput { amount, script_pubkey }; + let (preimage, recently_spent) = try_tx_s!(self.generate_slp_tx_preimage(vec![slp_out]).await); - let fut = async move { - let slp_out = SlpOutput { amount, script_pubkey }; - let (preimage, recently_spent) = try_tx_s!(coin.generate_slp_tx_preimage(vec![slp_out]).await); - generate_and_send_tx( - &coin, - preimage.available_bch_inputs, - Some(preimage.slp_inputs.into_iter().map(|slp| slp.bch_unspent).collect()), - FeePolicy::SendExact, - recently_spent, - preimage.outputs, - ) - .await - }; - 
Box::new(fut.boxed().compat().map(|tx| tx.into())) + generate_and_send_tx( + self, + preimage.available_bch_inputs, + Some(preimage.slp_inputs.into_iter().map(|slp| slp.bch_unspent).collect()), + FeePolicy::SendExact, + recently_spent, + preimage.outputs, + ) + .await + .map(|tx| tx.into()) } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { - let taker_pub = try_tx_fus!(Public::from_slice(maker_payment_args.other_pubkey)); - let amount = try_tx_fus!(sat_from_big_decimal(&maker_payment_args.amount, self.decimals())); + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + let taker_pub = try_tx_s!(Public::from_slice(maker_payment_args.other_pubkey)); + let amount = try_tx_s!(sat_from_big_decimal(&maker_payment_args.amount, self.decimals())); let secret_hash = maker_payment_args.secret_hash.to_owned(); let maker_htlc_keypair = self.derive_htlc_key_pair(maker_payment_args.swap_unique_data); - let time_lock = try_tx_fus!(maker_payment_args.time_lock.try_into()); + let time_lock = try_tx_s!(maker_payment_args.time_lock.try_into()); - let coin = self.clone(); - let fut = async move { - let tx = try_tx_s!( - coin.send_htlc(maker_htlc_keypair.public(), &taker_pub, time_lock, &secret_hash, amount) - .await - ); - Ok(tx.into()) - }; - Box::new(fut.boxed().compat()) + let tx = try_tx_s!( + self.send_htlc(maker_htlc_keypair.public(), &taker_pub, time_lock, &secret_hash, amount) + .await + ); + Ok(tx.into()) } - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { - let maker_pub = try_tx_fus!(Public::from_slice(taker_payment_args.other_pubkey)); - let amount = try_tx_fus!(sat_from_big_decimal(&taker_payment_args.amount, self.decimals())); + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { + let maker_pub = try_tx_s!(Public::from_slice(taker_payment_args.other_pubkey)); + let amount = 
try_tx_s!(sat_from_big_decimal(&taker_payment_args.amount, self.decimals())); let secret_hash = taker_payment_args.secret_hash.to_owned(); let taker_htlc_keypair = self.derive_htlc_key_pair(taker_payment_args.swap_unique_data); - let time_lock = try_tx_fus!(taker_payment_args.time_lock.try_into()); + let time_lock = try_tx_s!(taker_payment_args.time_lock.try_into()); - let coin = self.clone(); - let fut = async move { - let tx = try_tx_s!( - coin.send_htlc(taker_htlc_keypair.public(), &maker_pub, time_lock, &secret_hash, amount) - .await - ); - Ok(tx.into()) - }; - Box::new(fut.boxed().compat()) + self.send_htlc(taker_htlc_keypair.public(), &maker_pub, time_lock, &secret_hash, amount) + .await + .map(|tx| tx.into()) } async fn send_maker_spends_taker_payment( @@ -1343,23 +1331,26 @@ impl SwapOps for SlpToken { Ok(tx.into()) } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let tx = match validate_fee_args.fee_tx { TransactionEnum::UtxoTx(tx) => tx.clone(), - _ => panic!(), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; - let coin = self.clone(); - let expected_sender = validate_fee_args.expected_sender.to_owned(); - let amount = validate_fee_args.dex_fee.fee_amount(); - let min_block_number = validate_fee_args.min_block_number; - let fut = async move { - coin.validate_dex_fee(tx, &expected_sender, amount.into(), min_block_number) - .await - .map_err(|e| MmError::new(ValidatePaymentError::WrongPaymentTx(e.into_inner().to_string())))?; - Ok(()) - }; - Box::new(fut.boxed().compat()) + let amount = validate_fee_args.dex_fee.fee_amount(); + self.validate_dex_fee( + tx, + validate_fee_args.expected_sender, + amount.into(), + validate_fee_args.min_block_number, + ) + .await + .map_err(|e| MmError::new(ValidatePaymentError::WrongPaymentTx(e.into_inner().to_string()))) } async fn validate_maker_payment(&self, input: ValidatePaymentInput) -> ValidatePaymentResult<()> { @@ -1371,17 +1362,23 @@ impl SwapOps for SlpToken { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; utxo_common::check_if_my_payment_sent( self.platform_coin.clone(), - try_fus!(if_my_payment_sent_args.time_lock.try_into()), + time_lock, if_my_payment_sent_args.other_pub, if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.swap_unique_data, ) + .compat() + .await } #[inline] diff --git a/mm2src/coins/utxo/utxo_balance_events.rs b/mm2src/coins/utxo/utxo_balance_events.rs index 2d97ef5cc9..ec1de7aa40 100644 --- a/mm2src/coins/utxo/utxo_balance_events.rs +++ b/mm2src/coins/utxo/utxo_balance_events.rs @@ -1,20 +1,20 @@ +use super::utxo_standard::UtxoStandardCoin; +use crate::utxo::rpc_clients::UtxoRpcClientEnum; +use crate::{utxo::{output_script, + 
rpc_clients::electrum_script_hash, + utxo_common::{address_balance, address_to_scripthash}, + ScripthashNotification, UtxoCoinFields}, + CoinWithDerivationMethod, MarketCoinOps, MmCoin}; use async_trait::async_trait; -use common::{executor::{AbortSettings, SpawnAbortable, Timer}, - log, Future01CompatExt}; +use common::{executor::{AbortSettings, SpawnAbortable}, + log}; use futures::channel::oneshot::{self, Receiver, Sender}; use futures_util::StreamExt; use keys::Address; use mm2_core::mm_ctx::MmArc; use mm2_event_stream::{behaviour::{EventBehaviour, EventInitStatus}, ErrorEventName, Event, EventName, EventStreamConfiguration}; -use std::collections::{BTreeMap, HashSet}; - -use super::utxo_standard::UtxoStandardCoin; -use crate::{utxo::{output_script, - rpc_clients::electrum_script_hash, - utxo_common::{address_balance, address_to_scripthash}, - ScripthashNotification, UtxoCoinFields}, - CoinWithDerivationMethod, MarketCoinOps, MmCoin}; +use std::collections::{BTreeMap, HashMap, HashSet}; macro_rules! try_or_continue { ($exp:expr) => { @@ -41,37 +41,29 @@ impl EventBehaviour for UtxoStandardCoin { utxo: &UtxoCoinFields, addresses: HashSet
, ) -> Result, String> { - const LOOP_INTERVAL: f64 = 0.5; - - let mut scripthash_to_address_map: BTreeMap = BTreeMap::new(); - for address in addresses { - let scripthash = address_to_scripthash(&address).map_err(|e| e.to_string())?; - - scripthash_to_address_map.insert(scripthash.clone(), address); - - let mut attempt = 0; - while let Err(e) = utxo - .rpc_client - .blockchain_scripthash_subscribe(scripthash.clone()) - .compat() - .await - { - if attempt == 5 { - return Err(e.to_string()); - } - - log::error!( - "Failed to subscribe {} scripthash ({attempt}/5 attempt). Error: {}", - scripthash, - e.to_string() - ); - - attempt += 1; - Timer::sleep(LOOP_INTERVAL).await; - } + match utxo.rpc_client.clone() { + UtxoRpcClientEnum::Electrum(client) => { + // Collect the scripthash for every address into a map. + let scripthash_to_address_map = addresses + .into_iter() + .map(|address| { + let scripthash = address_to_scripthash(&address).map_err(|e| e.to_string())?; + Ok((scripthash, address)) + }) + .collect::, String>>()?; + // Add these subscriptions to the connection manager. It will choose whatever connections + // it sees fit to subscribe each of these addresses to. + client + .connection_manager + .add_subscriptions(&scripthash_to_address_map) + .await; + // Convert the hashmap back to btreemap.
+ Ok(scripthash_to_address_map.into_iter().map(|(k, v)| (k, v)).collect()) + }, + UtxoRpcClientEnum::Native(_) => { + Err("Balance streaming is currently not supported for native client.".to_owned()) + }, } - - Ok(scripthash_to_address_map) } let ctx = match MmArc::from_weak(&self.as_ref().ctx) { @@ -115,24 +107,6 @@ impl EventBehaviour for UtxoStandardCoin { }, }; - continue; - }, - ScripthashNotification::RefreshSubscriptions => { - let my_addresses = try_or_continue!(self.all_addresses().await); - match subscribe_to_addresses(self.as_ref(), my_addresses).await { - Ok(map) => scripthash_to_address_map = map, - Err(e) => { - log::error!("{e}"); - - ctx.stream_channel_controller - .broadcast(Event::new( - format!("{}:{}", Self::error_event_name(), self.ticker()), - json!({ "error": e }).to_string(), - )) - .await; - }, - }; - continue; }, }; diff --git a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs index 60b4d75ff0..f8e16a6089 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_arc_builder.rs @@ -260,14 +260,14 @@ pub(crate) async fn block_header_utxo_loop( ) { macro_rules! 
remove_server_and_break_if_no_servers_left { ($client:expr, $server_address:expr, $ticker:expr, $sync_status_loop_handle:expr) => { - if let Err(e) = $client.remove_server($server_address).await { + if let Err(e) = $client.remove_server($server_address) { let msg = format!("Error {} on removing server {}!", e, $server_address); // Todo: Permanent error notification should lead to deactivation of coin after applying some fail-safe measures if there are on-going swaps $sync_status_loop_handle.notify_on_permanent_error(msg); break; } - if $client.is_connections_pool_empty().await { + if $client.is_connections_pool_empty() { // Todo: Permanent error notification should lead to deactivation of coin after applying some fail-safe measures if there are on-going swaps let msg = format!("All servers are removed for {}!", $ticker); $sync_status_loop_handle.notify_on_permanent_error(msg); @@ -294,14 +294,14 @@ pub(crate) async fn block_header_utxo_loop( }; let mut args = BlockHeaderUtxoLoopExtraArgs::default(); while let Some(client) = weak.upgrade() { - let client = &ElectrumClient(client); + let client = ElectrumClient(client); let ticker = client.coin_name(); let storage = client.block_headers_storage(); let last_height_in_storage = match storage.get_last_block_height().await { Ok(Some(height)) => height, Ok(None) => { - if let Err(err) = validate_and_store_starting_header(client, ticker, storage, &spv_conf).await { + if let Err(err) = validate_and_store_starting_header(&client, ticker, storage, &spv_conf).await { sync_status_loop_handle.notify_on_permanent_error(err); break; } @@ -372,7 +372,7 @@ pub(crate) async fn block_header_utxo_loop( }; let (block_registry, block_headers) = match try_to_retrieve_headers_until_success( &mut args, - client, + &client, server_address, last_height_in_storage + 1, retrieve_to, @@ -411,7 +411,7 @@ pub(crate) async fn block_header_utxo_loop( } = &err { match resolve_possible_chain_reorg( - client, + &client, server_address, &mut args, 
last_height_in_storage, diff --git a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs index 446cadf2bb..15a699c2f1 100644 --- a/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs +++ b/mm2src/coins/utxo/utxo_builder/utxo_coin_builder.rs @@ -1,41 +1,37 @@ use crate::hd_wallet::{load_hd_accounts_from_storage, HDAccountsMutex, HDWallet, HDWalletCoinStorage, HDWalletStorageError, DEFAULT_GAP_LIMIT}; -use crate::utxo::rpc_clients::{ElectrumClient, ElectrumClientImpl, ElectrumRpcRequest, EstimateFeeMethod, +use crate::utxo::rpc_clients::{ElectrumClient, ElectrumClientSettings, ElectrumConnectionSettings, EstimateFeeMethod, UtxoRpcClientEnum}; use crate::utxo::tx_cache::{UtxoVerboseCacheOps, UtxoVerboseCacheShared}; use crate::utxo::utxo_block_header_storage::BlockHeaderStorage; use crate::utxo::utxo_builder::utxo_conf_builder::{UtxoConfBuilder, UtxoConfError}; -use crate::utxo::{output_script, ElectrumBuilderArgs, ElectrumProtoVerifier, ElectrumProtoVerifierEvent, - RecentlySpentOutPoints, ScripthashNotification, ScripthashNotificationSender, TxFee, UtxoCoinConf, - UtxoCoinFields, UtxoHDWallet, UtxoRpcMode, UtxoSyncStatus, UtxoSyncStatusLoopHandle, - UTXO_DUST_AMOUNT}; +use crate::utxo::{output_script, ElectrumBuilderArgs, RecentlySpentOutPoints, ScripthashNotification, + ScripthashNotificationSender, TxFee, UtxoCoinConf, UtxoCoinFields, UtxoHDWallet, UtxoRpcMode, + UtxoSyncStatus, UtxoSyncStatusLoopHandle, UTXO_DUST_AMOUNT}; use crate::{BlockchainNetwork, CoinTransportMetrics, DerivationMethod, HistorySyncState, IguanaPrivKey, - PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, UtxoActivationParams}; + PrivKeyBuildPolicy, PrivKeyPolicy, PrivKeyPolicyNotAllowed, RpcClientType, + SharableRpcTransportEventHandler, UtxoActivationParams}; use async_trait::async_trait; use chain::TxHashAlgo; -use common::custom_futures::repeatable::{Ready, Retry}; -use 
common::executor::{abortable_queue::AbortableQueue, AbortSettings, AbortableSystem, AbortedError, SpawnAbortable, - Timer}; -use common::log::{error, info, LogOnError}; -use common::{now_sec, small_rng}; +use common::executor::{abortable_queue::AbortableQueue, AbortableSystem, AbortedError}; +use common::now_sec; use crypto::{Bip32DerPathError, CryptoCtx, CryptoCtxError, GlobalHDAccountArc, HwWalletType, StandardHDPathError}; use derive_more::Display; -use futures::channel::mpsc::{channel, unbounded, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; +use futures::channel::mpsc::{channel, Receiver as AsyncReceiver, UnboundedReceiver, UnboundedSender}; use futures::compat::Future01CompatExt; use futures::lock::Mutex as AsyncMutex; -use futures::StreamExt; use keys::bytes::Bytes; pub use keys::{Address, AddressBuilder, AddressFormat as UtxoAddressFormat, AddressHashEnum, AddressScriptType, KeyPair, Private, Public, Secret}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; use primitives::hash::H160; -use rand::seq::SliceRandom; use serde_json::{self as json, Value as Json}; use spv_validation::conf::SPVConf; use spv_validation::helpers_validation::SPVError; use spv_validation::storage::{BlockHeaderStorageError, BlockHeaderStorageOps}; -use std::sync::{Arc, Mutex, Weak}; +use std::sync::Arc; +use std::sync::Mutex; cfg_native! 
{ use crate::utxo::coin_daemon_data_dir; @@ -60,16 +56,6 @@ pub enum UtxoCoinBuildError { ErrorDetectingFeeMethod(String), ErrorDetectingDecimals(String), InvalidBlockchainNetwork(String), - #[display( - fmt = "Failed to connect to at least 1 of {:?} in {} seconds.", - electrum_servers, - seconds - )] - FailedToConnectToElectrums { - electrum_servers: Vec, - seconds: u64, - }, - ElectrumProtocolVersionCheckError(String), #[display(fmt = "Can not detect the user home directory")] CantDetectUserHome, #[display(fmt = "Private key policy is not allowed: {}", _0)] @@ -82,8 +68,6 @@ pub enum UtxoCoinBuildError { )] CoinDoesntSupportTrezor, BlockHeaderStorageError(BlockHeaderStorageError), - #[display(fmt = "Error {} on getting the height of the latest block from rpc!", _0)] - CantGetBlockCount(String), #[display(fmt = "Internal error: {}", _0)] Internal(String), #[display(fmt = "SPV params verificaiton failed. Error: {_0}")] @@ -252,10 +236,7 @@ fn get_scripthash_notification_handlers( Arc>>, )> { if ctx.event_stream_configuration.is_some() { - let (sender, receiver): ( - UnboundedSender, - UnboundedReceiver, - ) = futures::channel::mpsc::unbounded(); + let (sender, receiver) = futures::channel::mpsc::unbounded(); Some((sender, Arc::new(AsyncMutex::new(receiver)))) } else { None @@ -565,12 +546,17 @@ pub trait UtxoCoinBuilderCommonOps { Ok(UtxoRpcClientEnum::Native(native)) } }, - UtxoRpcMode::Electrum { servers } => { + UtxoRpcMode::Electrum { + servers, + min_connected, + max_connected, + } => { let electrum = self .electrum_client( abortable_system, ElectrumBuilderArgs::default(), servers, + (min_connected, max_connected), scripthash_notification_sender, ) .await?; @@ -585,21 +571,19 @@ pub trait UtxoCoinBuilderCommonOps { &self, abortable_system: AbortableQueue, args: ElectrumBuilderArgs, - mut servers: Vec, + servers: Vec, + (min_connected, max_connected): (Option, Option), scripthash_notification_sender: ScripthashNotificationSender, ) -> UtxoCoinBuildResult { - 
let (on_event_tx, on_event_rx) = unbounded(); - let ticker = self.ticker().to_owned(); + let coin_ticker = self.ticker().to_owned(); let ctx = self.ctx(); - let mut event_handlers = vec![]; + let mut event_handlers: Vec> = vec![]; if args.collect_metrics { - event_handlers.push( - CoinTransportMetrics::new(ctx.metrics.weak(), ticker.clone(), RpcClientType::Electrum).into_shared(), - ); - } - - if args.negotiate_version { - event_handlers.push(ElectrumProtoVerifier { on_event_tx }.into_shared()); + event_handlers.push(Box::new(CoinTransportMetrics::new( + ctx.metrics.weak(), + coin_ticker.clone(), + RpcClientType::Electrum, + ))); } let storage_ticker = self.ticker().replace('-', "_"); @@ -609,56 +593,27 @@ pub trait UtxoCoinBuilderCommonOps { block_headers_storage.init().await?; } - let mut rng = small_rng(); - servers.as_mut_slice().shuffle(&mut rng); + let gui = ctx.gui().unwrap_or("UNKNOWN").to_string(); + let mm_version = ctx.mm_version().to_string(); + let (min_connected, max_connected) = (min_connected.unwrap_or(1), max_connected.unwrap_or(servers.len())); + let client_settings = ElectrumClientSettings { + client_name: format!("{} GUI/MM2 {}", gui, mm_version), + servers: servers.clone(), + coin_ticker, + spawn_ping: args.spawn_ping, + negotiate_version: args.negotiate_version, + min_connected, + max_connected, + }; - let client = ElectrumClientImpl::new( - ticker, + ElectrumClient::try_new( + client_settings, event_handlers, block_headers_storage, abortable_system, - args.negotiate_version, scripthash_notification_sender, - ); - for server in servers.iter() { - match client.add_server(server).await { - Ok(_) => (), - Err(e) => error!("Error {:?} connecting to {:?}. 
Address won't be used", e, server), - }; - } - - let mut attempts = 0i32; - while !client.is_connected().await { - if attempts >= 10 { - return MmError::err(UtxoCoinBuildError::FailedToConnectToElectrums { - electrum_servers: servers.clone(), - seconds: 5, - }); - } - - Timer::sleep(0.5).await; - attempts += 1; - } - - let client = Arc::new(client); - - let spawner = client.spawner(); - if args.negotiate_version { - let weak_client = Arc::downgrade(&client); - let client_name = format!("{} GUI/MM2 {}", ctx.gui().unwrap_or("UNKNOWN"), ctx.mm_version()); - spawn_electrum_version_loop(&spawner, weak_client, on_event_rx, client_name); - - wait_for_protocol_version_checked(&client) - .await - .map_to_mm(UtxoCoinBuildError::ElectrumProtocolVersionCheckError)?; - } - - if args.spawn_ping { - let weak_client = Arc::downgrade(&client); - spawn_electrum_ping_loop(&spawner, weak_client, servers); - } - - Ok(ElectrumClient(client)) + ) + .map_to_mm(UtxoCoinBuildError::Internal) } #[cfg(not(target_arch = "wasm32"))] @@ -869,140 +824,3 @@ fn read_native_mode_conf( ))); Ok((rpc_port, rpc_user.clone(), rpc_password.clone())) } - -/// Ping the electrum servers every 30 seconds to prevent them from disconnecting us. -/// According to docs server can do it if there are no messages in ~10 minutes. -/// https://electrumx.readthedocs.io/en/latest/protocol-methods.html?highlight=keep#server-ping -/// Weak reference will allow to stop the thread if client is dropped. 
-fn spawn_electrum_ping_loop( - spawner: &Spawner, - weak_client: Weak, - servers: Vec, -) { - let msg_on_stopped = format!("Electrum servers {servers:?} ping loop stopped"); - let fut = async move { - loop { - if let Some(client) = weak_client.upgrade() { - if let Err(e) = ElectrumClient(client).server_ping().compat().await { - error!("Electrum servers {:?} ping error: {}", servers, e); - } - } else { - break; - } - Timer::sleep(30.).await - } - }; - - let settings = AbortSettings::info_on_any_stop(msg_on_stopped); - spawner.spawn_with_settings(fut, settings); -} - -/// Follow the `on_connect_rx` stream and verify the protocol version of each connected electrum server. -/// https://electrumx.readthedocs.io/en/latest/protocol-methods.html?highlight=keep#server-version -/// Weak reference will allow to stop the thread if client is dropped. -fn spawn_electrum_version_loop( - spawner: &Spawner, - weak_client: Weak, - mut on_event_rx: UnboundedReceiver, - client_name: String, -) { - let fut = async move { - while let Some(event) = on_event_rx.next().await { - match event { - ElectrumProtoVerifierEvent::Connected(electrum_addr) => { - check_electrum_server_version(weak_client.clone(), client_name.clone(), electrum_addr).await - }, - ElectrumProtoVerifierEvent::Disconnected(electrum_addr) => { - if let Some(client) = weak_client.upgrade() { - client.reset_protocol_version(&electrum_addr).await.error_log(); - } - }, - } - } - }; - let settings = AbortSettings::info_on_any_stop("Electrum server.version loop stopped".to_string()); - spawner.spawn_with_settings(fut, settings); -} - -async fn check_electrum_server_version( - weak_client: Weak, - client_name: String, - electrum_addr: String, -) { - // client.remove_server() is called too often - async fn remove_server(client: ElectrumClient, electrum_addr: &str) { - if let Err(e) = client.remove_server(electrum_addr).await { - error!("Error on remove server: {}", e); - } - } - - if let Some(c) = weak_client.upgrade() { - let 
client = ElectrumClient(c); - let available_protocols = client.protocol_version(); - let version = match client - .server_version(&electrum_addr, &client_name, available_protocols) - .compat() - .await - { - Ok(version) => version, - Err(e) => { - error!("Electrum {} server.version error: {:?}", electrum_addr, e); - if !e.error.is_transport() { - remove_server(client, &electrum_addr).await; - }; - return; - }, - }; - - // check if the version is allowed - let actual_version = match version.protocol_version.parse::() { - Ok(v) => v, - Err(e) => { - error!("Error on parse protocol_version: {:?}", e); - remove_server(client, &electrum_addr).await; - return; - }, - }; - - if !available_protocols.contains(&actual_version) { - error!( - "Received unsupported protocol version {:?} from {:?}. Remove the connection", - actual_version, electrum_addr - ); - remove_server(client, &electrum_addr).await; - return; - } - - match client.set_protocol_version(&electrum_addr, actual_version).await { - Ok(()) => info!( - "Use protocol version {:?} for Electrum {:?}", - actual_version, electrum_addr - ), - Err(e) => error!("Error on set protocol_version: {}", e), - }; - } -} - -/// Wait until the protocol version of at least one client's Electrum is checked. 
-async fn wait_for_protocol_version_checked(client: &ElectrumClientImpl) -> Result<(), String> { - repeatable!(async { - if client.count_connections().await == 0 { - // All of the connections were removed because of server.version checking - return Ready(ERR!( - "There are no Electrums with the required protocol version {:?}", - client.protocol_version() - )); - } - - if client.is_protocol_version_checked().await { - return Ready(Ok(())); - } - Retry(()) - }) - .repeat_every_secs(0.5) - .attempts(10) - .await - .map_err(|_exceed| ERRL!("Failed protocol version verifying of at least 1 of Electrums in 5 seconds.")) - // Flatten `Result< Result<(), String>, String >` - .flatten() -} diff --git a/mm2src/coins/utxo/utxo_common.rs b/mm2src/coins/utxo/utxo_common.rs index 0b4b8b8ad6..89e41ff820 100644 --- a/mm2src/coins/utxo/utxo_common.rs +++ b/mm2src/coins/utxo/utxo_common.rs @@ -5117,11 +5117,7 @@ where valid_addresses.insert(valid_address); } if let UtxoRpcClientEnum::Electrum(electrum_client) = &coin.as_ref().rpc_client { - if let Some(sender) = &electrum_client.scripthash_notification_sender { - sender - .unbounded_send(ScripthashNotification::SubscribeToAddresses(valid_addresses)) - .map_err(|e| ERRL!("Failed sending scripthash message. 
{}", e))?; - } + electrum_client.subscribe_addresses(valid_addresses)?; }; Ok(()) diff --git a/mm2src/coins/utxo/utxo_standard.rs b/mm2src/coins/utxo/utxo_standard.rs index d12df889c1..3914bc5d58 100644 --- a/mm2src/coins/utxo/utxo_standard.rs +++ b/mm2src/coins/utxo/utxo_standard.rs @@ -304,18 +304,22 @@ impl UtxoStandardOps for UtxoStandardCoin { #[cfg_attr(test, mockable)] impl SwapOps for UtxoStandardCoin { #[inline] - fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionFut { - utxo_common::send_taker_fee(self.clone(), dex_fee) + async fn send_taker_fee(&self, dex_fee: DexFee, _uuid: &[u8], _expire_at: u64) -> TransactionResult { + utxo_common::send_taker_fee(self.clone(), dex_fee).compat().await } #[inline] - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_maker_payment(self.clone(), maker_payment_args) + .compat() + .await } #[inline] - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs) -> TransactionFut { + async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { utxo_common::send_taker_payment(self.clone(), taker_payment_args) + .compat() + .await } #[inline] @@ -344,10 +348,15 @@ impl SwapOps for UtxoStandardCoin { utxo_common::send_maker_refunds_payment(self.clone(), maker_refunds_payment_args).await } - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let tx = match validate_fee_args.fee_tx { TransactionEnum::UtxoTx(tx) => tx.clone(), - _ => panic!(), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; utxo_common::validate_fee( self.clone(), @@ -357,6 +366,8 @@ impl SwapOps for UtxoStandardCoin { validate_fee_args.dex_fee.clone(), validate_fee_args.min_block_number, ) + .compat() + .await } #[inline] @@ -370,17 +381,23 @@ impl SwapOps for UtxoStandardCoin { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, - if_my_payment_sent_args: CheckIfMyPaymentSentArgs, - ) -> Box, Error = String> + Send> { + if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; utxo_common::check_if_my_payment_sent( self.clone(), - try_fus!(if_my_payment_sent_args.time_lock.try_into()), + time_lock, if_my_payment_sent_args.other_pub, if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.swap_unique_data, ) + .compat() + .await } #[inline] @@ -415,13 +432,10 @@ impl SwapOps for UtxoStandardCoin { } #[inline] - fn can_refund_htlc(&self, locktime: u64) -> Box + Send + '_> { - Box::new( - utxo_common::can_refund_htlc(self, locktime) - .boxed() - .map_err(|e| ERRL!("{}", e)) - .compat(), - ) + async fn can_refund_htlc(&self, locktime: u64) -> Result { + utxo_common::can_refund_htlc(self, locktime) + .await + .map_err(|e| ERRL!("{}", e)) } fn is_auto_refundable(&self) -> bool { false } diff --git a/mm2src/coins/utxo/utxo_tests.rs b/mm2src/coins/utxo/utxo_tests.rs index a01b222268..e1fe8dbb07 100644 --- a/mm2src/coins/utxo/utxo_tests.rs +++ b/mm2src/coins/utxo/utxo_tests.rs @@ -13,9 +13,9 @@ use crate::rpc_command::init_scan_for_new_addresses::{InitScanAddressesRpcOps, S use crate::utxo::qtum::{qtum_coin_with_priv_key, QtumCoin, QtumDelegationOps, QtumDelegationRequest}; #[cfg(not(target_arch = "wasm32"))] use crate::utxo::rpc_clients::{BlockHashOrHeight, NativeUnspent}; -use crate::utxo::rpc_clients::{ElectrumBalance, ElectrumClient, ElectrumClientImpl, GetAddressInfoRes, - 
ListSinceBlockRes, NativeClient, NativeClientImpl, NetworkInfo, UtxoRpcClientOps, - ValidateAddressRes, VerboseBlock}; +use crate::utxo::rpc_clients::{ElectrumBalance, ElectrumClient, ElectrumClientImpl, ElectrumClientSettings, + GetAddressInfoRes, ListSinceBlockRes, NativeClient, NativeClientImpl, NetworkInfo, + UtxoRpcClientOps, ValidateAddressRes, VerboseBlock}; use crate::utxo::spv::SimplePaymentVerification; #[cfg(not(target_arch = "wasm32"))] use crate::utxo::utxo_block_header_storage::{BlockHeaderStorage, SqliteBlockHeadersStorage}; @@ -86,7 +86,7 @@ pub fn electrum_client_for_test(servers: &[&str]) -> ElectrumClient { let servers = servers.into_iter().map(|s| json::from_value(s).unwrap()).collect(); let abortable_system = AbortableQueue::default(); - block_on(builder.electrum_client(abortable_system, args, servers, None)).unwrap() + block_on(builder.electrum_client(abortable_system, args, servers, (None, None), None)).unwrap() } /// Returned client won't work by default, requires some mocks to be usable @@ -469,15 +469,24 @@ fn test_wait_for_payment_spend_timeout_electrum() { }; let abortable_system = AbortableQueue::default(); - let client = ElectrumClientImpl::new( - TEST_COIN_NAME.into(), + let client_settings = ElectrumClientSettings { + client_name: "test".to_string(), + servers: vec![], + coin_ticker: TEST_COIN_NAME.into(), + spawn_ping: true, + negotiate_version: true, + min_connected: 1, + max_connected: 1, + }; + let client = ElectrumClient::try_new( + client_settings, Default::default(), block_headers_storage, abortable_system, - true, None, - ); - let client = UtxoRpcClientEnum::Electrum(ElectrumClient(Arc::new(client))); + ) + .expect("Expected electrum_client_impl constructed without a problem"); + let client = UtxoRpcClientEnum::Electrum(client); let coin = utxo_coin_for_test(client, None, false); let transaction = 
hex::decode("01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000") .unwrap(); @@ -1090,7 +1099,7 @@ fn test_electrum_rpc_client_error() { // use the static string instead because the actual error message cannot be obtain // by serde_json serialization - let expected = r#"JsonRpcError { client_info: "coin: DOC", request: JsonRpcRequest { jsonrpc: "2.0", id: "1", method: "blockchain.transaction.get", params: [String("0000000000000000000000000000000000000000000000000000000000000000"), Bool(true)] }, error: Response(electrum1.cipig.net:10060, Object({"code": Number(2), "message": String("daemon error: DaemonError({'code': -5, 'message': 'No such mempool or blockchain transaction. Use gettransaction for wallet transactions.'})")})) }"#; + let expected = r#"method: "blockchain.transaction.get", params: [String("0000000000000000000000000000000000000000000000000000000000000000"), Bool(true)] }, error: Response(electrum1.cipig.net:10060, Object({"code": Number(2), "message": String("daemon error: DaemonError({'code': -5, 'message': 'No such mempool or blockchain transaction. 
Use gettransaction for wallet transactions.'})")})) }"#; let actual = format!("{}", err); assert!(actual.contains(expected)); @@ -1534,33 +1543,44 @@ fn test_network_info_negative_time_offset() { #[test] fn test_unavailable_electrum_proto_version() { - ElectrumClientImpl::new.mock_safe( - |coin_ticker, event_handlers, block_headers_storage, abortable_system, _, _| { + ElectrumClientImpl::try_new_arc.mock_safe( + |client_settings, block_headers_storage, abortable_system, event_handlers, scripthash_notification_sender| { MockResult::Return(ElectrumClientImpl::with_protocol_version( - coin_ticker, - event_handlers, - OrdRange::new(1.8, 1.9).unwrap(), + client_settings, block_headers_storage, abortable_system, - None, + event_handlers, + scripthash_notification_sender, + OrdRange::new(1.8, 1.9).unwrap(), )) }, ); let conf = json!({"coin":"RICK","asset":"RICK","rpcport":8923}); + let servers = ["electrum1.cipig.net:10020"]; let req = json!({ "method": "electrum", - "servers": [{"url":"electrum1.cipig.net:10020"}], + "servers": servers.iter().map(|server| json!({"url": server})).collect::>(), }); let ctx = MmCtxBuilder::new().into_mm_arc(); let params = UtxoActivationParams::from_legacy_req(&req).unwrap(); let priv_key = Secp256k1Secret::from([1; 32]); - let error = block_on(utxo_standard_coin_with_priv_key(&ctx, "RICK", &conf, ¶ms, priv_key)) - .err() - .unwrap(); - log!("Error: {}", error); - assert!(error.contains("There are no Electrums with the required protocol version")); + let coin = block_on(utxo_standard_coin_with_priv_key(&ctx, "RICK", &conf, ¶ms, priv_key)).unwrap(); + // Wait a little bit to make sure the servers are removed due to version mismatch. 
+ block_on(Timer::sleep(2.)); + if let UtxoRpcClientEnum::Electrum(ref electrum_client) = coin.as_ref().rpc_client { + for server in servers { + let error = block_on(electrum_client.get_block_count_from(server).compat()) + .err() + .unwrap() + .to_string(); + log!("{}", error); + assert!(error.contains("Unknown server address")); + } + } else { + panic!("Expected Electrum client"); + } } #[test] @@ -1603,18 +1623,29 @@ fn test_spam_rick() { #[test] fn test_one_unavailable_electrum_proto_version() { + // Patch the electurm client construct to require protocol version 1.4 only. + ElectrumClientImpl::try_new_arc.mock_safe( + |client_settings, block_headers_storage, abortable_system, event_handlers, scripthash_notification_sender| { + MockResult::Return(ElectrumClientImpl::with_protocol_version( + client_settings, + block_headers_storage, + abortable_system, + event_handlers, + scripthash_notification_sender, + OrdRange::new(1.4, 1.4).unwrap(), + )) + }, + ); // check if the electrum-mona.bitbank.cc:50001 doesn't support the protocol version 1.4 let client = electrum_client_for_test(&["electrum-mona.bitbank.cc:50001"]); - let result = block_on_f01(client.server_version( - "electrum-mona.bitbank.cc:50001", - "AtomicDEX", - &OrdRange::new(1.4, 1.4).unwrap(), - )); - assert!(result - .err() - .unwrap() - .to_string() - .contains("unsupported protocol version")); + // When an electrum server doesn't support our protocol version range, it gets removed by the client, + // wait a little bit to make sure this is the case. 
+ block_on(Timer::sleep(2.)); + let error = block_on_f01(client.get_block_count_from("electrum-mona.bitbank.cc:50001")) + .unwrap_err() + .to_string(); + log!("{}", error); + assert!(error.contains("Unknown server address")); drop(client); log!("Run BTC coin to test the server.version loop"); @@ -2662,7 +2693,7 @@ fn test_validate_old_fee_tx() { min_block_number: 0, uuid: &[], }; - let result = block_on_f01(coin.validate_fee(validate_fee_args)); + let result = block_on(coin.validate_fee(validate_fee_args)); log!("result: {:?}", result); assert!(result.is_ok()); } @@ -2682,9 +2713,7 @@ fn test_validate_fee_wrong_sender() { min_block_number: 0, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); log!("error: {:?}", error); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains(INVALID_SENDER_ERR_LOG)), @@ -2708,9 +2737,7 @@ fn test_validate_fee_min_block() { min_block_number: 278455, uuid: &[], }; - let error = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let error = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match error { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("confirmed before min_block")), _ => panic!("Expected `WrongPaymentTx` early confirmation, found {:?}", error), @@ -2738,7 +2765,7 @@ fn test_validate_fee_bch_70_bytes_signature() { min_block_number: 0, uuid: &[], }; - block_on_f01(coin.validate_fee(validate_fee_args)).unwrap(); + block_on(coin.validate_fee(validate_fee_args)).unwrap(); } #[test] diff --git a/mm2src/coins/utxo/utxo_wasm_tests.rs b/mm2src/coins/utxo/utxo_wasm_tests.rs index a33e1ba039..bd059c8627 100644 --- a/mm2src/coins/utxo/utxo_wasm_tests.rs +++ b/mm2src/coins/utxo/utxo_wasm_tests.rs @@ -42,7 +42,7 @@ pub async fn electrum_client_for_test(servers: &[&str]) -> ElectrumClient 
{ let servers = servers.into_iter().map(|s| json::from_value(s).unwrap()).collect(); let abortable_system = AbortableQueue::default(); builder - .electrum_client(abortable_system, args, servers, None) + .electrum_client(abortable_system, args, servers, (None, None), None) .await .unwrap() } diff --git a/mm2src/coins/z_coin.rs b/mm2src/coins/z_coin.rs index 9ed6a76c21..3bc858043c 100644 --- a/mm2src/coins/z_coin.rs +++ b/mm2src/coins/z_coin.rs @@ -11,7 +11,7 @@ use crate::coin_errors::{MyAddressError, ValidatePaymentResult}; use crate::hd_wallet::HDPathAccountToAddressId; use crate::my_tx_history_v2::{MyTxHistoryErrorV2, MyTxHistoryRequestV2, MyTxHistoryResponseV2}; use crate::rpc_command::init_withdraw::{InitWithdrawCoin, WithdrawInProgressStatus, WithdrawTaskHandleShared}; -use crate::utxo::rpc_clients::{ElectrumRpcRequest, UnspentInfo, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, +use crate::utxo::rpc_clients::{ElectrumConnectionSettings, UnspentInfo, UtxoRpcClientEnum, UtxoRpcError, UtxoRpcFut, UtxoRpcResult}; use crate::utxo::utxo_builder::UtxoCoinBuildError; use crate::utxo::utxo_builder::{UtxoCoinBuilder, UtxoCoinBuilderCommonOps, UtxoFieldsWithGlobalHDBuilder, @@ -70,6 +70,7 @@ use serialization::CoinVariant; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use std::iter; +use std::num::TryFromIntError; use std::path::PathBuf; use std::sync::Arc; pub use z_coin_errors::*; @@ -672,6 +673,33 @@ impl ZCoin { Ok(()) } + + /// Validates dex fee output or burn output + /// Returns true if the output valid or error if not valid. 
Returns false if could not decrypt output (some other output) + fn validate_dex_fee_output( + &self, + shielded_out: &OutputDescription, + ovk: &OutgoingViewingKey, + expected_address: &PaymentAddress, + block_height: BlockHeight, + amount_sat: u64, + expected_memo: &MemoBytes, + ) -> Result { + if let Some((note, address, memo)) = + try_sapling_output_recovery(self.consensus_params_ref(), block_height, ovk, shielded_out) + { + if &address == expected_address { + if note.value != amount_sat { + return Err(format!("invalid amount {}, expected {}", note.value, amount_sat)); + } + if &memo != expected_memo { + return Err(format!("invalid memo {:?}, expected {:?}", memo, expected_memo)); + } + return Ok(true); + } + } + Ok(false) + } } impl AsRef for ZCoin { @@ -752,7 +780,12 @@ pub enum ZcoinRpcMode { #[serde(alias = "Electrum")] Light { #[serde(alias = "servers")] - electrum_servers: Vec, + /// The settings of each electrum server. + electrum_servers: Vec, + /// The minimum number of connections to electrum servers to keep alive/maintained at all times. + min_connected: Option, + /// The maximum number of connections to electrum servers to not exceed at any time. + max_connected: Option, light_wallet_d_servers: Vec, /// Specifies the parameters for synchronizing the wallet from a specific block. This overrides the /// `CheckPointBlockInfo` configuration in the coin settings. @@ -978,8 +1011,15 @@ impl<'a> ZCoinBuilder<'a> { let utxo_mode = match &z_coin_params.mode { #[cfg(not(target_arch = "wasm32"))] ZcoinRpcMode::Native => UtxoRpcMode::Native, - ZcoinRpcMode::Light { electrum_servers, .. } => UtxoRpcMode::Electrum { + ZcoinRpcMode::Light { + electrum_servers, + min_connected, + max_connected, + .. 
+ } => UtxoRpcMode::Electrum { servers: electrum_servers.clone(), + min_connected: *min_connected, + max_connected: *max_connected, }, }; let utxo_params = UtxoActivationParams { @@ -1217,62 +1257,50 @@ impl MarketCoinOps for ZCoin { #[async_trait] impl SwapOps for ZCoin { - fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], _expire_at: u64) -> TransactionFut { - let selfi = self.clone(); + async fn send_taker_fee(&self, dex_fee: DexFee, uuid: &[u8], _expire_at: u64) -> TransactionResult { let uuid = uuid.to_owned(); - let fut = async move { - let tx = try_tx_s!(z_send_dex_fee(&selfi, dex_fee, &uuid).await); - Ok(tx.into()) - }; - Box::new(fut.boxed().compat()) + let tx = try_tx_s!(z_send_dex_fee(self, dex_fee, &uuid).await); + Ok(tx.into()) } - fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionFut { - let selfi = self.clone(); + async fn send_maker_payment(&self, maker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { let maker_key_pair = self.derive_htlc_key_pair(maker_payment_args.swap_unique_data); - let taker_pub = try_tx_fus!(Public::from_slice(maker_payment_args.other_pubkey)); + let taker_pub = try_tx_s!(Public::from_slice(maker_payment_args.other_pubkey)); let secret_hash = maker_payment_args.secret_hash.to_vec(); - let time_lock = try_tx_fus!(maker_payment_args.time_lock.try_into()); + let time_lock = try_tx_s!(maker_payment_args.time_lock.try_into()); let amount = maker_payment_args.amount; - let fut = async move { - let utxo_tx = try_tx_s!( - z_send_htlc( - &selfi, - time_lock, - maker_key_pair.public(), - &taker_pub, - &secret_hash, - amount - ) - .await - ); - Ok(utxo_tx.into()) - }; - Box::new(fut.boxed().compat()) + let utxo_tx = try_tx_s!( + z_send_htlc( + self, + time_lock, + maker_key_pair.public(), + &taker_pub, + &secret_hash, + amount + ) + .await + ); + Ok(utxo_tx.into()) } - fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionFut { - let selfi = self.clone(); + 
async fn send_taker_payment(&self, taker_payment_args: SendPaymentArgs<'_>) -> TransactionResult { let taker_keypair = self.derive_htlc_key_pair(taker_payment_args.swap_unique_data); - let maker_pub = try_tx_fus!(Public::from_slice(taker_payment_args.other_pubkey)); + let maker_pub = try_tx_s!(Public::from_slice(taker_payment_args.other_pubkey)); let secret_hash = taker_payment_args.secret_hash.to_vec(); - let time_lock = try_tx_fus!(taker_payment_args.time_lock.try_into()); + let time_lock = try_tx_s!(taker_payment_args.time_lock.try_into()); let amount = taker_payment_args.amount; - let fut = async move { - let utxo_tx = try_tx_s!( - z_send_htlc( - &selfi, - time_lock, - taker_keypair.public(), - &maker_pub, - &secret_hash, - amount - ) - .await - ); - Ok(utxo_tx.into()) - }; - Box::new(fut.boxed().compat()) + let utxo_tx = try_tx_s!( + z_send_htlc( + self, + time_lock, + taker_keypair.public(), + &maker_pub, + &secret_hash, + amount + ) + .await + ); + Ok(utxo_tx.into()) } async fn send_maker_spends_taker_payment( @@ -1388,57 +1416,59 @@ impl SwapOps for ZCoin { /// Currently validates both Standard and WithBurn options for DexFee /// TODO: when all mm2 nodes upgrade to support the burn account then disable validation of the Standard option - fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentFut<()> { + async fn validate_fee(&self, validate_fee_args: ValidateFeeArgs<'_>) -> ValidatePaymentResult<()> { let z_tx = match validate_fee_args.fee_tx { TransactionEnum::ZTransaction(t) => t.clone(), - _ => panic!("Unexpected tx {:?}", validate_fee_args.fee_tx), + fee_tx => { + return MmError::err(ValidatePaymentError::InternalError(format!( + "Invalid fee tx type. 
fee tx: {:?}", + fee_tx + ))) + }, }; - let fee_amount_sat = try_f!(validate_fee_args.dex_fee.fee_amount_as_u64(self.utxo_arc.decimals)); - let burn_amount_sat = try_f!(validate_fee_args.dex_fee.burn_amount_as_u64(self.utxo_arc.decimals)); + let fee_amount_sat = validate_fee_args.dex_fee.fee_amount_as_u64(self.utxo_arc.decimals)?; + let burn_amount_sat = validate_fee_args.dex_fee.burn_amount_as_u64(self.utxo_arc.decimals)?; let expected_memo = MemoBytes::from_bytes(validate_fee_args.uuid).expect("Uuid length < 512"); - let min_block_number = validate_fee_args.min_block_number; - let coin = self.clone(); - let fut = async move { - let tx_hash = H256::from(z_tx.txid().0).reversed(); - let tx_from_rpc = coin - .utxo_rpc_client() - .get_verbose_transaction(&tx_hash.into()) - .compat() - .await - .map_err(|e| MmError::new(ValidatePaymentError::InvalidRpcResponse(e.into_inner().to_string())))?; - - let mut encoded = Vec::with_capacity(1024); - z_tx.write(&mut encoded).expect("Writing should not fail"); - if encoded != tx_from_rpc.hex.0 { - return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( - "Encoded transaction {:?} does not match the tx {:?} from RPC", - encoded, tx_from_rpc - ))); - } + let tx_hash = H256::from(z_tx.txid().0).reversed(); + let tx_from_rpc = self + .utxo_rpc_client() + .get_verbose_transaction(&tx_hash.into()) + .compat() + .await + .map_err(|e| MmError::new(ValidatePaymentError::InvalidRpcResponse(e.into_inner().to_string())))?; + + let mut encoded = Vec::with_capacity(1024); + z_tx.write(&mut encoded).expect("Writing should not fail"); + if encoded != tx_from_rpc.hex.0 { + return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( + "Encoded transaction {:?} does not match the tx {:?} from RPC", + encoded, tx_from_rpc + ))); + } + + let block_height = match tx_from_rpc.height { + Some(h) => { + if h < validate_fee_args.min_block_number { + return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( + "Dex fee tx {:?} 
confirmed before min block {}", + z_tx, validate_fee_args.min_block_number + ))); + } else { + BlockHeight::from_u32(h as u32) + } + }, + None => H0, + }; - let block_height = match tx_from_rpc.height { - Some(h) => { - if h < min_block_number { - return MmError::err(ValidatePaymentError::WrongPaymentTx(format!( - "Dex fee tx {:?} confirmed before min block {}", - z_tx, min_block_number - ))); - } else { - BlockHeight::from_u32(h as u32) - } - }, - None => H0, - }; - - let mut fee_output_valid = false; - let mut burn_output_valid = false; - for shielded_out in z_tx.shielded_outputs.iter() { - if validate_dex_fee_output( - &coin, + let mut fee_output_valid = false; + let mut burn_output_valid = false; + for shielded_out in z_tx.shielded_outputs.iter() { + if self + .validate_dex_fee_output( shielded_out, &DEX_FEE_OVK, - &coin.z_fields.dex_fee_addr, + &self.z_fields.dex_fee_addr, block_height, fee_amount_sat, &expected_memo, @@ -1448,15 +1478,16 @@ impl SwapOps for ZCoin { "Bad dex fee output: {}", err ))) - })? { - fee_output_valid = true; - } - if let Some(burn_amount_sat) = burn_amount_sat { - if validate_dex_fee_output( - &coin, + })? + { + fee_output_valid = true; + } + if let Some(burn_amount_sat) = burn_amount_sat { + if self + .validate_dex_fee_output( shielded_out, &DEX_FEE_OVK, - &coin.z_fields.dex_burn_addr, + &self.z_fields.dex_burn_addr, block_height, burn_amount_sat, &expected_memo, @@ -1466,23 +1497,21 @@ impl SwapOps for ZCoin { "Bad burn output: {}", err ))) - })? { - burn_output_valid = true; - } + })? 
+ { + burn_output_valid = true; } } + } - if fee_output_valid && (burn_amount_sat.is_none() || burn_output_valid) { - return Ok(()); - } - - MmError::err(ValidatePaymentError::WrongPaymentTx(format!( - "The dex fee tx {:?} has no shielded outputs or outputs decryption failed", - z_tx - ))) - }; + if fee_output_valid && (burn_amount_sat.is_none() || burn_output_valid) { + return Ok(()); + } - Box::new(fut.boxed().compat()) + MmError::err(ValidatePaymentError::WrongPaymentTx(format!( + "The dex fee tx {:?} has no shielded outputs or outputs decryption failed", + z_tx + ))) } #[inline] @@ -1496,17 +1525,23 @@ impl SwapOps for ZCoin { } #[inline] - fn check_if_my_payment_sent( + async fn check_if_my_payment_sent( &self, if_my_payment_sent_args: CheckIfMyPaymentSentArgs<'_>, - ) -> Box, Error = String> + Send> { + ) -> Result, String> { + let time_lock = if_my_payment_sent_args + .time_lock + .try_into() + .map_err(|e: TryFromIntError| e.to_string())?; utxo_common::check_if_my_payment_sent( self.clone(), - try_fus!(if_my_payment_sent_args.time_lock.try_into()), + time_lock, if_my_payment_sent_args.other_pub, if_my_payment_sent_args.secret_hash, if_my_payment_sent_args.swap_unique_data, ) + .compat() + .await } #[inline] @@ -2112,33 +2147,6 @@ fn extended_spending_key_from_global_hd_account( Ok(spending_key) } -/// Validates dex fee output or burn output -/// Returns true if the output valid or error if not valid. 
Returns false if could not decrypt output (some other output) -fn validate_dex_fee_output( - coin: &ZCoin, - shielded_out: &OutputDescription, - ovk: &OutgoingViewingKey, - expected_address: &PaymentAddress, - block_height: BlockHeight, - amount_sat: u64, - expected_memo: &MemoBytes, -) -> Result { - if let Some((note, address, memo)) = - try_sapling_output_recovery(coin.consensus_params_ref(), block_height, ovk, shielded_out) - { - if &address == expected_address { - if note.value != amount_sat { - return Err(format!("invalid amount {}, expected {}", note.value, amount_sat)); - } - if &memo != expected_memo { - return Err(format!("invalid memo {:?}, expected {:?}", memo, expected_memo)); - } - return Ok(true); - } - } - Ok(false) -} - #[test] fn derive_z_key_from_mm_seed() { use crypto::privkey::key_pair_from_seed; diff --git a/mm2src/coins/z_coin/z_coin_native_tests.rs b/mm2src/coins/z_coin/z_coin_native_tests.rs index e78bf2c7df..ca2ddaddbc 100644 --- a/mm2src/coins/z_coin/z_coin_native_tests.rs +++ b/mm2src/coins/z_coin/z_coin_native_tests.rs @@ -1,5 +1,5 @@ use bitcrypto::dhash160; -use common::{block_on, block_on_f01, now_sec, one_thousand_u32}; +use common::{block_on, now_sec, one_thousand_u32}; use mm2_core::mm_ctx::MmCtxBuilder; use mm2_test_helpers::for_tests::zombie_conf; use std::path::PathBuf; @@ -60,7 +60,7 @@ async fn zombie_coin_send_and_refund_maker_payment() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(args)).unwrap(); + let tx = block_on(coin.send_maker_payment(args)).unwrap(); log!("swap tx {}", hex::encode(tx.tx_hash_as_bytes().0)); let refund_args = RefundPaymentArgs { @@ -130,7 +130,7 @@ async fn zombie_coin_send_and_spend_maker_payment() { wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(maker_payment_args)).unwrap(); + let tx = block_on(coin.send_maker_payment(maker_payment_args)).unwrap(); log!("swap tx {}", 
hex::encode(tx.tx_hash_as_bytes().0)); let spends_payment_args = SpendPaymentArgs { @@ -266,9 +266,7 @@ async fn zombie_coin_validate_dex_fee() { uuid: &[1; 16], }; // Invalid amount should return an error - let err = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let err = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match err { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("invalid amount")), _ => panic!("Expected `WrongPaymentTx`: {:?}", err), @@ -282,9 +280,7 @@ async fn zombie_coin_validate_dex_fee() { min_block_number: 12000, uuid: &[2; 16], }; - let err = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let err = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match err { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("invalid memo")), _ => panic!("Expected `WrongPaymentTx`: {:?}", err), @@ -300,9 +296,7 @@ async fn zombie_coin_validate_dex_fee() { min_block_number: , uuid: &[1; 16], }; - let err = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let err = block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match err { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("confirmed before min block")), _ => panic!("Expected `WrongPaymentTx`: {:?}", err), @@ -316,7 +310,7 @@ async fn zombie_coin_validate_dex_fee() { min_block_number: 12000, uuid: &[1; 16], }; - block_on_f01(coin.validate_fee(validate_fee_args)).unwrap(); + block_on(coin.validate_fee(validate_fee_args)).unwrap(); // Test old standard dex fee with no burn output // TODO: disable when the upgrade transition period ends @@ -335,9 +329,7 @@ async fn zombie_coin_validate_dex_fee() { min_block_number: 12000, uuid: &[1; 16], }; - let err = block_on_f01(coin.validate_fee(validate_fee_args)) - .unwrap_err() - .into_inner(); + let err = 
block_on(coin.validate_fee(validate_fee_args)).unwrap_err().into_inner(); match err { ValidatePaymentError::WrongPaymentTx(err) => assert!(err.contains("invalid amount")), _ => panic!("Expected `WrongPaymentTx`: {:?}", err), @@ -352,7 +344,7 @@ async fn zombie_coin_validate_dex_fee() { min_block_number: 12000, uuid: &[1; 16], }; - block_on_f01(coin.validate_fee(validate_fee_args)).unwrap(); + block_on(coin.validate_fee(validate_fee_args)).unwrap(); } fn default_zcoin_activation_params() -> ZcoinActivationParams { diff --git a/mm2src/coins_activation/src/eth_with_token_activation.rs b/mm2src/coins_activation/src/eth_with_token_activation.rs index 8cee5094f2..62e8fe4c4c 100644 --- a/mm2src/coins_activation/src/eth_with_token_activation.rs +++ b/mm2src/coins_activation/src/eth_with_token_activation.rs @@ -12,7 +12,7 @@ use coins::coin_balance::{CoinBalanceReport, EnableCoinBalanceOps}; use coins::eth::v2_activation::{eth_coin_from_conf_and_request_v2, Erc20Protocol, Erc20TokenActivationRequest, EthActivationV2Error, EthActivationV2Request, EthPrivKeyActivationPolicy}; use coins::eth::v2_activation::{EthTokenActivationError, NftActivationRequest, NftProviderEnum}; -use coins::eth::{Erc20TokenInfo, EthCoin, EthCoinType, EthPrivKeyBuildPolicy}; +use coins::eth::{display_eth_address, Erc20TokenInfo, EthCoin, EthCoinType, EthPrivKeyBuildPolicy}; use coins::hd_wallet::RpcTaskXPubExtractor; use coins::my_tx_history_v2::TxHistoryStorage; use coins::nft::nft_structs::NftInfo; @@ -360,8 +360,11 @@ impl PlatformCoinWithTokensActivationOps for EthCoin { return Ok(EthWithTokensActivationResult::Iguana( IguanaEthWithTokensActivationResult { current_block, - eth_addresses_infos: HashMap::from([(my_address.to_string(), eth_address_info)]), - erc20_addresses_infos: HashMap::from([(my_address.to_string(), erc20_address_info)]), + eth_addresses_infos: HashMap::from([(display_eth_address(my_address), eth_address_info)]), + erc20_addresses_infos: HashMap::from([( + 
display_eth_address(my_address), + erc20_address_info, + )]), nfts_infos: nfts_map, }, )); @@ -385,8 +388,8 @@ impl PlatformCoinWithTokensActivationOps for EthCoin { Ok(EthWithTokensActivationResult::Iguana( IguanaEthWithTokensActivationResult { current_block, - eth_addresses_infos: HashMap::from([(my_address.to_string(), eth_address_info)]), - erc20_addresses_infos: HashMap::from([(my_address.to_string(), erc20_address_info)]), + eth_addresses_infos: HashMap::from([(display_eth_address(my_address), eth_address_info)]), + erc20_addresses_infos: HashMap::from([(display_eth_address(my_address), erc20_address_info)]), nfts_infos: nfts_map, }, )) diff --git a/mm2src/common/common.rs b/mm2src/common/common.rs index 0093899b98..4146416bc6 100644 --- a/mm2src/common/common.rs +++ b/mm2src/common/common.rs @@ -129,6 +129,7 @@ pub mod custom_futures; pub mod custom_iter; #[path = "executor/mod.rs"] pub mod executor; pub mod expirable_map; +pub mod notifier; pub mod number_type_casting; pub mod password_policy; pub mod seri; diff --git a/mm2src/common/executor/abortable_system/abortable_queue.rs b/mm2src/common/executor/abortable_system/abortable_queue.rs index 99ffc70ca3..89781bcfa4 100644 --- a/mm2src/common/executor/abortable_system/abortable_queue.rs +++ b/mm2src/common/executor/abortable_system/abortable_queue.rs @@ -40,9 +40,7 @@ impl From> for AbortableQueue { impl AbortableSystem for AbortableQueue { type Inner = QueueInnerState; - /// Aborts all spawned futures and initiates aborting of critical futures - /// after the specified [`AbortSettings::critical_timeout_s`]. 
- fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_handle(subsystem_abort_tx).map(|_| ()) @@ -98,12 +96,15 @@ impl WeakSpawner { match select(abortable_fut.boxed(), wait_till_abort.boxed()).await { // The future has finished normally. - Either::Left(_) => { + Either::Left((_, wait_till_abort_fut)) => { if let Some(on_finish) = settings.on_finish { log::log!(on_finish.level, "{}", on_finish.msg); } if let Some(queue_inner) = inner_weak.upgrade() { + // Drop the `wait_till_abort_fut` so to render the corresponding `abort_tx` sender canceled. + // This way we can query the `abort_tx` sender to check if it's canceled, thus safe to mark as finished. + drop(wait_till_abort_fut); queue_inner.lock().on_future_finished(future_id); } }, @@ -203,8 +204,18 @@ impl QueueInnerState { /// Releases the `finished_future_id` so it can be reused later on [`QueueInnerState::insert_handle`]. fn on_future_finished(&mut self, finished_future_id: FutureId) { - if let QueueInnerState::Ready { finished_futures, .. } = self { - finished_futures.push(finished_future_id); + if let QueueInnerState::Ready { + finished_futures, + abort_handlers, + } = self + { + // Only mark this ID as finished if a future existed for it and is canceled. We can get false + // `on_future_finished` signals from futures that aren't in the `abort_handlers` anymore (abortable queue was reset). 
+ if let Some(handle) = abort_handlers.get(finished_future_id) { + if handle.is_canceled() { + finished_futures.push(finished_future_id); + } + } } } @@ -234,6 +245,8 @@ impl SystemInner for QueueInnerState { *self = QueueInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, QueueInnerState::Aborted) } } #[cfg(test)] diff --git a/mm2src/common/executor/abortable_system/graceful_shutdown.rs b/mm2src/common/executor/abortable_system/graceful_shutdown.rs index 3feee076b2..6a902faab7 100644 --- a/mm2src/common/executor/abortable_system/graceful_shutdown.rs +++ b/mm2src/common/executor/abortable_system/graceful_shutdown.rs @@ -32,7 +32,7 @@ impl From> for GracefulShutdownRegistry { impl AbortableSystem for GracefulShutdownRegistry { type Inner = ShutdownInnerState; - fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_handle(subsystem_abort_tx) @@ -73,4 +73,6 @@ impl SystemInner for ShutdownInnerState { *self = ShutdownInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, ShutdownInnerState::Aborted) } } diff --git a/mm2src/common/executor/abortable_system/mod.rs b/mm2src/common/executor/abortable_system/mod.rs index b5399ad6dd..82ef564278 100644 --- a/mm2src/common/executor/abortable_system/mod.rs +++ b/mm2src/common/executor/abortable_system/mod.rs @@ -24,7 +24,23 @@ pub trait AbortableSystem: From> { /// Aborts all spawned futures and subsystems if they present. /// The abortable system is considered not to be - fn abort_all(&self) -> Result<(), AbortedError>; + fn abort_all(&self) -> Result<(), AbortedError> { self.__inner().lock().abort_all() } + + /// Aborts all the spawned futures & subsystems if present, and resets the system + /// to the initial state for further use. 
+ fn abort_all_and_reset(&self) -> Result<(), AbortedError> { + let inner = self.__inner(); + let mut inner_locked = inner.lock(); + // Don't allow resetting the system state if the system is already aborted. If the system is + // aborted this is because its parent was aborted as well. Resetting it will leave the system + // dangling with no parent to abort it (could still be aborted manually of course). + if inner_locked.is_aborted() { + return Err(AbortedError); + } + let mut previous_inner = std::mem::take(&mut *inner_locked); + previous_inner.abort_all().ok(); + Ok(()) + } /// Creates a new subsystem `S` linked to `Self` the way that /// if `Self` is aborted, the futures spawned by the subsystem will be aborted as well. @@ -56,12 +72,17 @@ pub trait AbortableSystem: From> { Ok(S::from(inner_shared)) } + fn __inner(&self) -> InnerShared; + fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError>; } pub trait SystemInner: Default + Send + 'static { /// Aborts all spawned futures and subsystems if they present. fn abort_all(&mut self) -> Result<(), AbortedError>; + + /// Returns whether the system has already been aborted. 
+ fn is_aborted(&self) -> bool; } #[cfg(test)] diff --git a/mm2src/common/executor/abortable_system/simple_map.rs b/mm2src/common/executor/abortable_system/simple_map.rs index c7cd9fc6cd..d759d53a04 100644 --- a/mm2src/common/executor/abortable_system/simple_map.rs +++ b/mm2src/common/executor/abortable_system/simple_map.rs @@ -35,7 +35,7 @@ impl AbortableSimpleMap { impl AbortableSystem for AbortableSimpleMap { type Inner = SimpleMapInnerState; - fn abort_all(&self) -> Result<(), AbortedError> { self.inner.lock().abort_all() } + fn __inner(&self) -> InnerShared { self.inner.clone() } fn __push_subsystem_abort_tx(&self, subsystem_abort_tx: oneshot::Sender<()>) -> Result<(), AbortedError> { self.inner.lock().insert_subsystem(subsystem_abort_tx) @@ -81,6 +81,8 @@ impl SystemInner for SimpleMapInnerState { *self = SimpleMapInnerState::Aborted; Ok(()) } + + fn is_aborted(&self) -> bool { matches!(self, SimpleMapInnerState::Aborted) } } impl SimpleMapInnerState { diff --git a/mm2src/common/expirable_map.rs b/mm2src/common/expirable_map.rs index 996e2edfae..0b3110c066 100644 --- a/mm2src/common/expirable_map.rs +++ b/mm2src/common/expirable_map.rs @@ -93,6 +93,12 @@ impl ExpirableMap { self.map.insert(k, entry).map(|v| v.value) } + /// Clears the map. + pub fn clear(&mut self) { + self.map.clear(); + self.expiries.clear(); + } + /// Removes expired entries from the map. /// /// Iterates through the `expiries` in order, removing entries that have expired. diff --git a/mm2src/common/jsonrpc_client.rs b/mm2src/common/jsonrpc_client.rs index 94a1ca809b..3f9e4cf6f6 100644 --- a/mm2src/common/jsonrpc_client.rs +++ b/mm2src/common/jsonrpc_client.rs @@ -2,7 +2,7 @@ use futures01::Future; use itertools::Itertools; use serde::de::DeserializeOwned; use serde_json::{self as json, Value as Json}; -use std::collections::{BTreeSet, HashMap}; +use std::collections::HashMap; use std::fmt; /// Macro generating functions for RPC requests. 
@@ -69,10 +69,10 @@ impl From for JsonRpcRemoteAddr { /// The identifier is designed to uniquely match outgoing requests and incoming responses. /// Even if the batch response is sorted in a different order, `BTreeSet` allows it to be matched to the request. -#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub enum JsonRpcId { - Single(String), - Batch(BTreeSet), + Single(u64), + Batch(u64), } /// Serializable RPC request that is either single or batch. @@ -114,19 +114,15 @@ impl fmt::Debug for JsonRpcRequestEnum { pub struct JsonRpcRequest { pub jsonrpc: String, #[serde(default)] - pub id: String, + pub id: u64, pub method: String, pub params: Vec, } impl JsonRpcRequest { - // Returns [`JsonRpcRequest::id`]. - #[inline] - pub fn get_id(&self) -> &str { &self.id } - /// Returns a `JsonRpcId` identifier of the request. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id.clone()) } + pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id) } } impl From for JsonRpcRequestEnum { @@ -140,7 +136,12 @@ pub struct JsonRpcBatchRequest(Vec); impl JsonRpcBatchRequest { /// Returns a `JsonRpcId` identifier of the request. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Batch(self.orig_sequence_ids().collect()) } + pub fn rpc_id(&self) -> JsonRpcId { + // This shouldn't be called on an empty batch, but let's + // simply set the batch ID to maximum if the batch is empty. + let batch_id = self.0.iter().map(|res| res.id).max().unwrap_or(u64::MAX); + JsonRpcId::Batch(batch_id) + } /// Returns the number of the requests in the batch. #[inline] @@ -153,7 +154,7 @@ impl JsonRpcBatchRequest { /// Returns original sequence of identifiers. /// The method is used to process batch responses in the same order in which the requests were sent. 
#[inline] - fn orig_sequence_ids(&self) -> impl Iterator + '_ { self.0.iter().map(|req| req.id.clone()) } + fn orig_sequence_ids(&self) -> impl Iterator + '_ { self.0.iter().map(|req| req.id) } } impl From for JsonRpcRequestEnum { @@ -185,7 +186,7 @@ pub struct JsonRpcResponse { #[serde(default)] pub jsonrpc: String, #[serde(default)] - pub id: String, + pub id: u64, #[serde(default)] pub result: Json, #[serde(default)] @@ -195,7 +196,7 @@ pub struct JsonRpcResponse { impl JsonRpcResponse { /// Returns a `JsonRpcId` identifier of the response. #[inline] - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id.clone()) } + pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Single(self.id) } } /// Deserializable RPC batch response. @@ -204,7 +205,12 @@ pub struct JsonRpcBatchResponse(Vec); impl JsonRpcBatchResponse { /// Returns a `JsonRpcId` identifier of the response. - pub fn rpc_id(&self) -> JsonRpcId { JsonRpcId::Batch(self.0.iter().map(|res| res.id.clone()).collect()) } + pub fn rpc_id(&self) -> JsonRpcId { + // This shouldn't be called on an empty batch, but let's + // simply set the batch ID to maximum if the batch is empty. + let batch_id = self.0.iter().map(|res| res.id).max().unwrap_or(u64::MAX); + JsonRpcId::Batch(batch_id) + } /// Returns the number of the requests in the batch. #[inline] @@ -272,8 +278,8 @@ pub trait JsonRpcClient { /// Returns a stringified version of the JSON-RPC protocol. fn version(&self) -> &'static str; - /// Returns a stringified identifier of the next request. - fn next_id(&self) -> String; + /// Returns a unique identifier for the next request. + fn next_id(&self) -> u64; /// Get info that is used in particular to supplement the error info fn client_info(&self) -> String; @@ -395,8 +401,7 @@ fn process_transport_batch_result( }; // Turn the vector of responses into a hashmap by their IDs to get quick access to the content of the responses. 
- let mut response_map: HashMap = - batch.into_iter().map(|res| (res.id.clone(), res)).collect(); + let mut response_map: HashMap<_, _> = batch.into_iter().map(|res| (res.id, res)).collect(); if response_map.len() != orig_ids.len() { return Err(JsonRpcErrorType::Parse( remote_addr, diff --git a/mm2src/common/notifier.rs b/mm2src/common/notifier.rs new file mode 100644 index 0000000000..a82253b537 --- /dev/null +++ b/mm2src/common/notifier.rs @@ -0,0 +1,53 @@ +//! A simple notification system based on mpsc channels. +//! +//! Since this is based on mpsc, multiple notifiers (senders) are allowed while only a single +//! notifiee (receiver) listens for notifications. +//! +//! NOTE: This implementation memory leaks (in contrast to tokio's, but not used here to avoid tokio dependency on wasm). +//! This is because with each `clone()` of the sender we have a new slot in the channel (this is how `futures-rs` does mpsc). +//! These are removed when the receiver calls `wait()`, which calls `clear()`. But if the receiver never `wait()`s for any reason, +//! and there is a thread that doesn't stop `notify()`ing, the channel will keep growing unbounded. +//! +//! So one must make sure that either `wait()` is called after some time or the receiver is dropped when it's no longer needed. +use futures::{channel::mpsc, StreamExt}; + +#[derive(Clone, Debug)] +pub struct Notifier(mpsc::Sender<()>); + +#[derive(Debug)] +pub struct Notifiee(mpsc::Receiver<()>); + +impl Notifier { + /// Create a new notifier and notifiee pair. + pub fn new() -> (Notifier, Notifiee) { + let (sender, receiver) = mpsc::channel(0); + (Notifier(sender), Notifiee(receiver)) + } + + /// Notify the receiver. + /// + /// This will error if the receiver has been dropped (disconnected). 
+ pub fn notify(&self) -> Result<(), &'static str> { + if let Err(e) = self.0.clone().try_send(()) { + if e.is_disconnected() { + return Err("Notification receiver has been dropped."); + } + } + Ok(()) + } +} + +impl Notifiee { + /// Wait for a notification from any notifier. + /// + /// This will error if all notifiers have been dropped (disconnected). + pub async fn wait(&mut self) -> Result<(), &'static str> { + let result = self.0.next().await.ok_or("All notifiers have been dropped."); + // Clear pending notifications if there are any, since we have already been notified. + self.clear(); + result + } + + /// Clears the pending notifications if there are any. + fn clear(&mut self) { while let Ok(Some(_)) = self.0.try_next() {} } +} diff --git a/mm2src/floodsub/CHANGELOG.md b/mm2src/floodsub/CHANGELOG.md deleted file mode 100644 index b361796860..0000000000 --- a/mm2src/floodsub/CHANGELOG.md +++ /dev/null @@ -1,15 +0,0 @@ -# 0.22.0 [2020-09-09] - -- Update `libp2p-swarm` and `libp2p-core`. - -# 0.21.0 [2020-08-18] - -- Bump `libp2p-core` and `libp2p-swarm` dependency. - -# 0.20.0 [2020-07-01] - -- Updated dependencies. - -# 0.19.1 [2020-06-22] - -- Updated dependencies. 
diff --git a/mm2src/floodsub/Cargo.toml b/mm2src/floodsub/Cargo.toml deleted file mode 100644 index b54b60448c..0000000000 --- a/mm2src/floodsub/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "libp2p-floodsub" -edition = "2018" -description = "Floodsub protocol for libp2p" -version = "0.22.0" -authors = ["Parity Technologies "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[lib] -doctest = false - -[dependencies] -cuckoofilter = "0.3.2" -futures = "0.3.1" -libp2p-core = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -libp2p-swarm = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -prost = "0.10" -rand = "0.7" -smallvec = "1.0" - -[build-dependencies] -prost-build = { version = "0.10.4", default-features = false } diff --git a/mm2src/floodsub/build.rs b/mm2src/floodsub/build.rs deleted file mode 100644 index 9671983699..0000000000 --- a/mm2src/floodsub/build.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -fn main() { prost_build::compile_protos(&["src/rpc.proto"], &["src"]).unwrap(); } diff --git a/mm2src/floodsub/src/layer.rs b/mm2src/floodsub/src/layer.rs deleted file mode 100644 index 7151948a95..0000000000 --- a/mm2src/floodsub/src/layer.rs +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::protocol::{FloodsubMessage, FloodsubProtocol, FloodsubRpc, FloodsubSubscription, FloodsubSubscriptionAction}; -use crate::topic::Topic; -use crate::FloodsubConfig; -use cuckoofilter::CuckooFilter; -use libp2p_core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}; -use libp2p_swarm::{IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, - PollParameters}; -use smallvec::SmallVec; -use std::collections::hash_map::{DefaultHasher, HashMap}; -use std::task::{Context, Poll}; -use std::{collections::VecDeque, iter}; - -/// Network behaviour that handles the floodsub protocol. -pub struct Floodsub { - /// Events that need to be yielded to the outside when polling. - events: - VecDeque>>, - - config: FloodsubConfig, - - /// List of peers the network is connected to, and the topics that they're subscribed to. - // TODO: filter out peers that don't support floodsub, so that we avoid hammering them with - // opened substreams - connected_peers: HashMap>, - - // List of topics we're subscribed to. Necessary to filter out messages that we receive - // erroneously. - subscribed_topics: SmallVec<[Topic; 16]>, - - // We keep track of the messages we received (in the format `hash(source ID, seq_no)`) so that - // we don't dispatch the same message twice if we receive it twice on the network. - received: CuckooFilter, -} - -impl Floodsub { - /// Creates a `Floodsub` with default configuration. - pub fn new(local_peer_id: PeerId, forward_messages: bool) -> Self { - Self::from_config(FloodsubConfig::new(local_peer_id, forward_messages)) - } - - /// Creates a `Floodsub` with the given configuration. - pub fn from_config(config: FloodsubConfig) -> Self { - Floodsub { - events: VecDeque::new(), - config, - connected_peers: HashMap::new(), - subscribed_topics: SmallVec::new(), - received: CuckooFilter::new(), - } - } - - /// Subscribes to a topic. - /// - /// Returns true if the subscription worked. 
Returns false if we were already subscribed. - pub fn subscribe(&mut self, topic: Topic) -> bool { - if self.subscribed_topics.iter().any(|t| t.id() == topic.id()) { - return false; - } - - for peer in self.connected_peers.keys() { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: *peer, - handler: NotifyHandler::Any, - event: FloodsubRpc { - messages: Vec::new(), - subscriptions: vec![FloodsubSubscription { - topic: topic.clone(), - action: FloodsubSubscriptionAction::Subscribe, - }], - }, - }); - } - - self.subscribed_topics.push(topic); - true - } - - /// Unsubscribes from a topic. - /// - /// Note that this only requires the topic name. - /// - /// Returns true if we were subscribed to this topic. - pub fn unsubscribe(&mut self, topic: Topic) -> bool { - let pos = match self.subscribed_topics.iter().position(|t| *t == topic) { - Some(pos) => pos, - None => return false, - }; - - self.subscribed_topics.remove(pos); - - for peer in self.connected_peers.keys() { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: *peer, - handler: NotifyHandler::Any, - event: FloodsubRpc { - messages: Vec::new(), - subscriptions: vec![FloodsubSubscription { - topic: topic.clone(), - action: FloodsubSubscriptionAction::Unsubscribe, - }], - }, - }); - } - - true - } - - /// Publishes a message to the network, if we're subscribed to the topic only. - pub fn publish(&mut self, topic: impl Into, data: impl Into>) { - self.publish_many(iter::once(topic), data) - } - - /// Publishes a message to the network, even if we're not subscribed to the topic. - pub fn publish_any(&mut self, topic: impl Into, data: impl Into>) { - self.publish_many_any(iter::once(topic), data) - } - - /// Publishes a message with multiple topics to the network. - /// - /// - /// > **Note**: Doesn't do anything if we're not subscribed to any of the topics. 
- pub fn publish_many(&mut self, topic: impl IntoIterator>, data: impl Into>) { - self.publish_many_inner(topic, data, true) - } - - /// Publishes a message with multiple topics to the network, even if we're not subscribed to any of the topics. - pub fn publish_many_any(&mut self, topic: impl IntoIterator>, data: impl Into>) { - self.publish_many_inner(topic, data, false) - } - - fn publish_many_inner( - &mut self, - topic: impl IntoIterator>, - data: impl Into>, - check_self_subscriptions: bool, - ) { - let message = FloodsubMessage { - source: self.config.local_peer_id, - data: data.into(), - // If the sequence numbers are predictable, then an attacker could flood the network - // with packets with the predetermined sequence numbers and absorb our legitimate - // messages. We therefore use a random number. - sequence_number: rand::random::<[u8; 20]>().to_vec(), - topics: topic.into_iter().map(Into::into).collect(), - }; - - let self_subscribed = self - .subscribed_topics - .iter() - .any(|t| message.topics.iter().any(|u| t == u)); - if self_subscribed { - self.received.add(&message); - if self.config.subscribe_local_messages { - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Message( - message.clone(), - ))); - } - } - // Don't publish the message if we have to check subscriptions - // and we're not subscribed ourselves to any of the topics. - if check_self_subscriptions && !self_subscribed { - return; - } - - // Send to peers we know are subscribed to the topic. 
- for (peer_id, sub_topic) in self.connected_peers.iter() { - if !sub_topic.iter().any(|t| message.topics.iter().any(|u| t == u)) { - continue; - } - - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: *peer_id, - handler: NotifyHandler::Any, - event: FloodsubRpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - }, - }); - } - } -} - -impl NetworkBehaviour for Floodsub { - type ConnectionHandler = OneShotHandler; - type OutEvent = FloodsubEvent; - - fn new_handler(&mut self) -> Self::ConnectionHandler { Default::default() } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { Vec::new() } - - fn inject_connection_established( - &mut self, - id: &PeerId, - _: &ConnectionId, - _: &ConnectedPoint, - _: Option<&Vec>, - other_established: usize, - ) { - if other_established > 0 { - // We only care about the first time a peer connects. - return; - } - - // We need to send our subscriptions to the newly-connected node. - for topic in self.subscribed_topics.iter().cloned() { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id: *id, - handler: NotifyHandler::Any, - event: FloodsubRpc { - messages: Vec::new(), - subscriptions: vec![FloodsubSubscription { - topic, - action: FloodsubSubscriptionAction::Subscribe, - }], - }, - }); - } - - self.connected_peers.insert(*id, SmallVec::new()); - } - - fn inject_connection_closed( - &mut self, - id: &PeerId, - _: &ConnectionId, - _: &ConnectedPoint, - _: ::Handler, - remaining_established: usize, - ) { - if remaining_established > 0 { - // we only care about peer disconnections - return; - } - - let was_in = self.connected_peers.remove(id); - debug_assert!(was_in.is_some()); - } - - fn inject_event(&mut self, propagation_source: PeerId, _connection: ConnectionId, event: InnerMessage) { - // We ignore successful sends or timeouts. 
- let event = match event { - InnerMessage::Rx(event) => event, - InnerMessage::Sent => return, - }; - - // Update connected peers topics - for subscription in event.subscriptions { - let remote_peer_topics = self.connected_peers - .get_mut(&propagation_source) - .expect("connected_peers is kept in sync with the peers we are connected to; we are guaranteed to only receive events from connected peers; QED"); - match subscription.action { - FloodsubSubscriptionAction::Subscribe => { - if !remote_peer_topics.contains(&subscription.topic) { - remote_peer_topics.push(subscription.topic.clone()); - } - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Subscribed { - peer_id: propagation_source, - topic: subscription.topic, - })); - }, - FloodsubSubscriptionAction::Unsubscribe => { - if let Some(pos) = remote_peer_topics.iter().position(|t| t == &subscription.topic) { - remote_peer_topics.remove(pos); - } - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Unsubscribed { - peer_id: propagation_source, - topic: subscription.topic, - })); - }, - } - } - - // List of messages we're going to propagate on the network. - let mut rpcs_to_dispatch: Vec<(PeerId, FloodsubRpc)> = Vec::new(); - - for message in event.messages { - // Use `self.received` to skip the messages that we have already received in the past. - // Note that this can false positive. - if !self.received.test_and_add(&message) { - continue; - } - - // Add the message to be dispatched to the user. - if self - .subscribed_topics - .iter() - .any(|t| message.topics.iter().any(|u| t == u)) - { - let event = FloodsubEvent::Message(message.clone()); - self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); - } - - if self.config.forward_messages { - // Propagate the message to everyone else who is subscribed to any of the topics. 
- for (peer_id, subscr_topics) in self.connected_peers.iter() { - if peer_id == &propagation_source { - continue; - } - - if !subscr_topics.iter().any(|t| message.topics.iter().any(|u| t == u)) { - continue; - } - - if let Some(pos) = rpcs_to_dispatch.iter().position(|(p, _)| p == peer_id) { - rpcs_to_dispatch[pos].1.messages.push(message.clone()); - } else { - rpcs_to_dispatch.push((*peer_id, FloodsubRpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - })); - } - } - } - } - - for (peer_id, rpc) in rpcs_to_dispatch { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: rpc, - }); - } - } - - fn poll( - &mut self, - _: &mut Context<'_>, - _: &mut impl PollParameters, - ) -> Poll> { - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - Poll::Pending - } -} - -/// Transmission between the `OneShotHandler` and the `FloodsubHandler`. -#[derive(Debug)] -pub enum InnerMessage { - /// We received an RPC from a remote. - Rx(FloodsubRpc), - /// We successfully sent an RPC request. - Sent, -} - -impl From for InnerMessage { - #[inline] - fn from(rpc: FloodsubRpc) -> InnerMessage { InnerMessage::Rx(rpc) } -} - -impl From<()> for InnerMessage { - #[inline] - fn from(_: ()) -> InnerMessage { InnerMessage::Sent } -} - -/// Event that can happen on the floodsub behaviour. -#[derive(Debug)] -pub enum FloodsubEvent { - /// A message has been received. - Message(FloodsubMessage), - - /// A remote subscribed to a topic. - Subscribed { - /// Remote that has subscribed. - peer_id: PeerId, - /// The topic it has subscribed to. - topic: Topic, - }, - - /// A remote unsubscribed from a topic. - Unsubscribed { - /// Remote that has unsubscribed. - peer_id: PeerId, - /// The topic it has subscribed from. 
- topic: Topic, - }, -} diff --git a/mm2src/floodsub/src/lib.rs b/mm2src/floodsub/src/lib.rs deleted file mode 100644 index 90592a9fe5..0000000000 --- a/mm2src/floodsub/src/lib.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Implements the floodsub protocol, see also the: -//! [spec](https://github.com/libp2p/specs/tree/master/pubsub). -//! The implementation is customized for AtomicDEX purposes, all peers are considered as "target_peers" -//! So "target_peers" are removed from Floodsub behaviour - -use libp2p_core::PeerId; - -pub mod protocol; - -mod layer; -mod topic; - -mod rpc_proto { - include!(concat!(env!("OUT_DIR"), "/floodsub.pb.rs")); -} - -pub use self::layer::{Floodsub, FloodsubEvent}; -pub use self::protocol::{FloodsubMessage, FloodsubRpc}; -pub use self::topic::Topic; - -/// Configuration options for the Floodsub protocol. 
-pub struct FloodsubConfig { - /// Peer id of the local node. Used for the source of the messages that we publish. - pub local_peer_id: PeerId, - - /// `true` if messages published by local node should be propagated as messages received from - /// the network, `false` by default. - pub subscribe_local_messages: bool, - - pub forward_messages: bool, -} - -impl FloodsubConfig { - pub fn new(local_peer_id: PeerId, forward_messages: bool) -> Self { - Self { - local_peer_id, - subscribe_local_messages: false, - forward_messages, - } - } -} diff --git a/mm2src/floodsub/src/protocol.rs b/mm2src/floodsub/src/protocol.rs deleted file mode 100644 index bf24053ffb..0000000000 --- a/mm2src/floodsub/src/protocol.rs +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::rpc_proto; -use crate::topic::Topic; -use futures::{io::{AsyncRead, AsyncWrite}, - Future}; -use libp2p_core::{upgrade, InboundUpgrade, OutboundUpgrade, PeerId, UpgradeInfo}; -use prost::Message; -use std::{error, fmt, io, iter, pin::Pin}; - -/// Implementation of `ConnectionUpgrade` for the floodsub protocol. -#[derive(Debug, Clone, Default)] -pub struct FloodsubProtocol {} - -impl FloodsubProtocol { - /// Builds a new `FloodsubProtocol`. - pub fn new() -> FloodsubProtocol { FloodsubProtocol {} } -} - -impl UpgradeInfo for FloodsubProtocol { - type Info = &'static [u8]; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/floodsub/1.0.0") } -} - -type PinBoxTryFut = Pin> + Send>>; - -impl InboundUpgrade for FloodsubProtocol -where - TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, -{ - type Output = FloodsubRpc; - type Error = FloodsubDecodeError; - type Future = PinBoxTryFut; - - fn upgrade_inbound(self, mut socket: TSocket, _: Self::Info) -> Self::Future { - Box::pin(async move { - let packet = upgrade::read_length_prefixed(&mut socket, 2048).await?; - let rpc = rpc_proto::Rpc::decode(&packet[..])?; - - let mut messages = Vec::with_capacity(rpc.publish.len()); - for publish in rpc.publish.into_iter() { - messages.push(FloodsubMessage { - source: PeerId::from_bytes(&publish.from.unwrap_or_default()) - .map_err(|_| FloodsubDecodeError::InvalidPeerId)?, - data: publish.data.unwrap_or_default(), - sequence_number: publish.seqno.unwrap_or_default(), - topics: publish.topic_ids.into_iter().map(Topic::new).collect(), - }); - } - - Ok(FloodsubRpc { - messages, - subscriptions: rpc - .subscriptions - .into_iter() - .map(|sub| FloodsubSubscription { - action: if Some(true) == sub.subscribe { - FloodsubSubscriptionAction::Subscribe - } else { - FloodsubSubscriptionAction::Unsubscribe - }, - topic: Topic::new(sub.topic_id.unwrap_or_default()), - }) - .collect(), - }) - }) - } -} - -/// Reach attempt interrupt 
errors. -#[derive(Debug)] -pub enum FloodsubDecodeError { - /// Error when reading the packet from the socket. - ReadError(io::Error), - /// Error when decoding the raw buffer into a protobuf. - ProtobufError(prost::DecodeError), - /// Error when parsing the `PeerId` in the message. - InvalidPeerId, -} - -impl From for FloodsubDecodeError { - fn from(err: io::Error) -> Self { FloodsubDecodeError::ReadError(err) } -} - -impl From for FloodsubDecodeError { - fn from(err: prost::DecodeError) -> Self { FloodsubDecodeError::ProtobufError(err) } -} - -impl fmt::Display for FloodsubDecodeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - FloodsubDecodeError::ReadError(ref err) => write!(f, "Error while reading from socket: {}", err), - FloodsubDecodeError::ProtobufError(ref err) => write!(f, "Error while decoding protobuf: {}", err), - FloodsubDecodeError::InvalidPeerId => write!(f, "Error while decoding PeerId from message"), - } - } -} - -impl error::Error for FloodsubDecodeError { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match *self { - FloodsubDecodeError::ReadError(ref err) => Some(err), - FloodsubDecodeError::ProtobufError(ref err) => Some(err), - FloodsubDecodeError::InvalidPeerId => None, - } - } -} - -/// An RPC received by the floodsub system. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct FloodsubRpc { - /// List of messages that were part of this RPC query. - pub messages: Vec, - /// List of subscriptions. 
- pub subscriptions: Vec, -} - -impl UpgradeInfo for FloodsubRpc { - type Info = &'static [u8]; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/floodsub/1.0.0") } -} - -impl OutboundUpgrade for FloodsubRpc -where - TSocket: AsyncWrite + AsyncRead + Send + Unpin + 'static, -{ - type Output = (); - type Error = io::Error; - type Future = PinBoxTryFut; - - fn upgrade_outbound(self, mut socket: TSocket, _: Self::Info) -> Self::Future { - Box::pin(async move { - let bytes = self.into_bytes(); - upgrade::write_length_prefixed(&mut socket, bytes).await?; - Ok(()) - }) - } -} - -impl FloodsubRpc { - /// Turns this `FloodsubRpc` into a message that can be sent to a substream. - fn into_bytes(self) -> Vec { - let rpc = rpc_proto::Rpc { - publish: self - .messages - .into_iter() - .map(|msg| rpc_proto::Message { - from: Some(msg.source.to_bytes()), - data: Some(msg.data), - seqno: Some(msg.sequence_number), - topic_ids: msg.topics.into_iter().map(|topic| topic.into()).collect(), - }) - .collect(), - - subscriptions: self - .subscriptions - .into_iter() - .map(|topic| rpc_proto::rpc::SubOpts { - subscribe: Some(topic.action == FloodsubSubscriptionAction::Subscribe), - topic_id: Some(topic.topic.into()), - }) - .collect(), - }; - - let mut buf = Vec::with_capacity(rpc.encoded_len()); - rpc.encode(&mut buf).expect("Vec provides capacity as needed"); - buf - } -} - -/// A message received by the floodsub system. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct FloodsubMessage { - /// Id of the peer that published this message. - pub source: PeerId, - - /// Content of the message. Its meaning is out of scope of this library. - pub data: Vec, - - /// An incrementing sequence number. - pub sequence_number: Vec, - - /// List of topics this message belongs to. - /// - /// Each message can belong to multiple topics at once. - pub topics: Vec, -} - -/// A subscription received by the floodsub system. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct FloodsubSubscription { - /// Action to perform. - pub action: FloodsubSubscriptionAction, - /// The topic from which to subscribe or unsubscribe. - pub topic: Topic, -} - -/// Action that a subscription wants to perform. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum FloodsubSubscriptionAction { - /// The remote wants to subscribe to the given topic. - Subscribe, - /// The remote wants to unsubscribe from the given topic. - Unsubscribe, -} diff --git a/mm2src/floodsub/src/rpc.proto b/mm2src/floodsub/src/rpc.proto deleted file mode 100644 index 84f0ea5179..0000000000 --- a/mm2src/floodsub/src/rpc.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto2"; - -package floodsub.pb; - -message RPC { - repeated SubOpts subscriptions = 1; - repeated Message publish = 2; - - message SubOpts { - optional bool subscribe = 1; // subscribe or unsubcribe - optional string topic_id = 2; - } -} - -message Message { - optional bytes from = 1; - optional bytes data = 2; - optional bytes seqno = 3; - repeated string topic_ids = 4; -} diff --git a/mm2src/floodsub/src/topic.rs b/mm2src/floodsub/src/topic.rs deleted file mode 100644 index 41d2b253b8..0000000000 --- a/mm2src/floodsub/src/topic.rs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -/// Built topic. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Topic(String); - -impl Topic { - /// Returns the id of the topic. - #[inline] - pub fn id(&self) -> &str { &self.0 } - - pub fn new(name: S) -> Topic - where - S: Into, - { - Topic(name.into()) - } -} - -impl From for String { - fn from(topic: Topic) -> String { topic.0 } -} diff --git a/mm2src/gossipsub/CHANGELOG.md b/mm2src/gossipsub/CHANGELOG.md deleted file mode 100644 index d2a5c025a7..0000000000 --- a/mm2src/gossipsub/CHANGELOG.md +++ /dev/null @@ -1,11 +0,0 @@ -# 0.20.0 [2020-07-01] - -- Updated dependencies. - -# 0.19.3 [2020-06-23] - -- Maintenance release fixing linter warnings. - -# 0.19.2 [2020-06-22] - -- Updated dependencies. 
diff --git a/mm2src/gossipsub/Cargo.toml b/mm2src/gossipsub/Cargo.toml deleted file mode 100644 index b34f1911a3..0000000000 --- a/mm2src/gossipsub/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "atomicdex-gossipsub" -edition = "2018" -description = "Gossipsub protocol for AtomicDEX, based on libp2p gossipsub" -version = "0.20.0" -authors = ["Age Manning "] -license = "MIT" -repository = "https://github.com/libp2p/rust-libp2p" -keywords = ["peer-to-peer", "libp2p", "networking"] -categories = ["network-programming", "asynchronous"] - -[lib] -doctest = false - -[dependencies] -base64 = "0.21.2" -bytes = "0.5.4" -byteorder = "1.3.2" -common = { path = "../common" } -fnv = "1.0.6" -futures = "0.3.1" -futures_codec = "0.4.0" -libp2p-swarm = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -libp2p-core = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -log = "0.4.17" -prost = "0.10" -rand = "0.7" -sha2 = "0.10" -smallvec = "1.1.0" -unsigned-varint = { version = "0.4.0", features = ["futures-codec"] } -wasm-timer = "0.2.4" - -[dev-dependencies] -async-std = "1.6.2" -env_logger = "0.9.3" -libp2p-plaintext = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -libp2p-yamux = { git = "https://github.com/libp2p/rust-libp2p.git", tag ="v0.45.1" } -quickcheck= { version = "0.9.2", default-features = false } - -[build-dependencies] -prost-build = { version = "0.10.4", default-features = false } diff --git a/mm2src/gossipsub/build.rs b/mm2src/gossipsub/build.rs deleted file mode 100644 index 9671983699..0000000000 --- a/mm2src/gossipsub/build.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -fn main() { prost_build::compile_protos(&["src/rpc.proto"], &["src"]).unwrap(); } diff --git a/mm2src/gossipsub/src/behaviour.rs b/mm2src/gossipsub/src/behaviour.rs deleted file mode 100644 index 19eb7a2627..0000000000 --- a/mm2src/gossipsub/src/behaviour.rs +++ /dev/null @@ -1,1526 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use crate::config::GossipsubConfig; -use crate::handler::GossipsubHandler; -use crate::mcache::MessageCache; -use crate::protocol::{GossipsubControlAction, GossipsubMessage, GossipsubSubscription, GossipsubSubscriptionAction, - MessageId}; -use crate::topic::{Topic, TopicHash}; -use common::time_cache::{Entry as TimeCacheEntry, TimeCache}; -use futures::prelude::*; -use libp2p_core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}; -use libp2p_swarm::{IntoConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters}; -use log::{debug, error, info, trace, warn}; -use rand::seq::SliceRandom; -use smallvec::SmallVec; -use std::collections::hash_map::Entry; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::iter; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; -use wasm_timer::{Instant, Interval}; - -mod tests; - -/// Network behaviour that handles the gossipsub protocol. -pub struct Gossipsub { - /// Configuration providing gossipsub performance parameters. - config: GossipsubConfig, - - /// Events that need to be yielded to the outside when polling. - events: VecDeque>>, - - /// Pools non-urgent control messages between heartbeats. - control_pool: HashMap>, - - /// Peer id of the local node. Used for the source of the messages that we publish. - local_peer_id: PeerId, - - /// A map of all connected peers - A map of topic hash to a list of gossipsub peer Ids. - topic_peers: HashMap>, - - /// A map of all connected peers to their subscribed topics. - peer_topics: HashMap>, - - /// The peer ids of connected relay nodes - connected_relays: HashSet, - - /// relays to which we forward the messages. Also tracks the relay mesh size of nodes in mesh. - relays_mesh: HashMap, - - /// Peers included our node to their relays mesh - included_to_relays_mesh: HashSet, - - /// Overlay network of connected peers - Maps topics to connected gossipsub peers. 
- mesh: HashMap>, - - /// Map of topics to list of peers that we publish to, but don't subscribe to. - fanout: HashMap>, - - /// The last publish time for fanout topics. - fanout_last_pub: HashMap, - - /// Message cache for the last few heartbeats. - mcache: MessageCache, - - /// We keep track of the messages we received (in the format `string(source ID, seq_no)`) so that - /// we don't dispatch the same message twice if we receive it twice on the network. - /// Also store the peers from which message was received so we don't manually propagate already known message to them - received: TimeCache>, - - /// Heartbeat interval stream. - heartbeat: Interval, - - /// Relay mesh maintenance interval stream. - relay_mesh_maintenance_interval: Interval, - - peer_connections: HashMap>, - - connected_addresses: Vec, - - /// The relay list which are forcefully kept in relay mesh - explicit_relay_list: Vec, -} - -impl Gossipsub { - /// Creates a `Gossipsub` struct given a set of parameters specified by `gs_config`. 
- pub fn new(local_peer_id: PeerId, gs_config: GossipsubConfig) -> Self { - let local_peer_id = if gs_config.no_source_id { - PeerId::from_bytes(&crate::config::IDENTITY_SOURCE).expect("Valid peer id") - } else { - local_peer_id - }; - - Gossipsub { - config: gs_config.clone(), - events: VecDeque::new(), - control_pool: HashMap::new(), - local_peer_id, - topic_peers: HashMap::new(), - peer_topics: HashMap::new(), - mesh: HashMap::new(), - fanout: HashMap::new(), - fanout_last_pub: HashMap::new(), - mcache: MessageCache::new( - gs_config.history_gossip, - gs_config.history_length, - gs_config.message_id_fn, - ), - received: TimeCache::new(gs_config.duplicate_cache_time), - heartbeat: Interval::new_at( - Instant::now() + gs_config.heartbeat_initial_delay, - gs_config.heartbeat_interval, - ), - relay_mesh_maintenance_interval: Interval::new_at( - Instant::now() + Duration::from_secs(10), - Duration::from_secs(10), - ), - peer_connections: HashMap::new(), - connected_relays: HashSet::new(), - relays_mesh: HashMap::new(), - included_to_relays_mesh: HashSet::new(), - connected_addresses: Vec::new(), - explicit_relay_list: Vec::new(), - } - } - - pub fn add_explicit_relay(&mut self, peer_id: PeerId) { - info!("Adding peer {} to explicit relay list", peer_id); - self.explicit_relay_list.push(peer_id); - } - - /// Subscribe to a topic. - /// - /// Returns true if the subscription worked. Returns false if we were already subscribed. - pub fn subscribe(&mut self, topic: Topic) -> bool { - debug!("Subscribing to topic: {}", topic); - if self.config.i_am_relay { - debug!("Relay is subscribed to all topics by default. 
Subscribe has no effect."); - return false; - } - let topic_hash = self.topic_hash(topic.clone()); - if self.mesh.get(&topic_hash).is_some() { - debug!("Topic: {} is already in the mesh.", topic); - return false; - } - - let peers: Vec<_> = self.peer_topics.keys().copied().collect(); - for peer_id in peers { - let mut fixed_event = None; // initialise the event once if needed - if fixed_event.is_none() { - fixed_event = Some(Arc::new(GossipsubRpc { - messages: Vec::new(), - subscriptions: vec![GossipsubSubscription { - topic_hash: topic_hash.clone(), - action: GossipsubSubscriptionAction::Subscribe, - }], - control_msgs: Vec::new(), - })); - } - - let event = fixed_event.expect("event has been initialised"); - - debug!("Sending SUBSCRIBE to peer: {:?}", peer_id); - self.notify_primary(peer_id, event); - } - - // call JOIN(topic) - // this will add new peers to the mesh for the topic - self.join(&topic_hash); - info!("Subscribed to topic: {}", topic); - true - } - - pub fn is_subscribed(&self, topic_hash: &TopicHash) -> bool { self.mesh.contains_key(topic_hash) } - - /// Unsubscribes from a topic. - /// - /// Returns true if we were subscribed to this topic. - pub fn unsubscribe(&mut self, topic: Topic) -> bool { - debug!("Unsubscribing from topic: {}", topic); - if self.config.i_am_relay { - debug!("Relay is subscribed to all topics by default. 
Unsubscribe has no effect"); - return false; - } - let topic_hash = &self.topic_hash(topic); - - if self.mesh.get(topic_hash).is_none() { - debug!("Already unsubscribed from topic: {:?}", topic_hash); - // we are not subscribed - return false; - } - - // announce to all peers in the topic - let mut fixed_event = None; // initialise the event once if needed - if let Some(peer_list) = self.topic_peers.get(topic_hash) { - if fixed_event.is_none() { - fixed_event = Some(Arc::new(GossipsubRpc { - messages: Vec::new(), - subscriptions: vec![GossipsubSubscription { - topic_hash: topic_hash.clone(), - action: GossipsubSubscriptionAction::Unsubscribe, - }], - control_msgs: Vec::new(), - })); - } - - let event = fixed_event.expect("event has been initialised"); - - for peer in peer_list.clone() { - debug!("Sending UNSUBSCRIBE to peer: {:?}", peer); - self.notify_primary(peer, event.clone()); - } - } - - // call LEAVE(topic) - // this will remove the topic from the mesh - self.leave(topic_hash); - - info!("Unsubscribed from topic: {:?}", topic_hash); - true - } - - /// Publishes a message to the network. - pub fn publish(&mut self, topic: &Topic, data: impl Into>) { - self.publish_many_from(iter::once(topic.clone()), data, self.local_peer_id) - } - - /// Publishes a message with multiple topics to the network. - pub fn publish_many(&mut self, topic: impl IntoIterator, data: impl Into>) { - self.publish_many_from(topic, data, self.local_peer_id); - } - - /// Publishes a message with multiple topics to the network. - pub fn publish_many_from( - &mut self, - topic: impl IntoIterator, - data: impl Into>, - source: PeerId, - ) { - let message = GossipsubMessage { - source, - data: data.into(), - // To be interoperable with the go-implementation this is treated as a 64-bit - // big-endian uint. 
- sequence_number: rand::random(), - topics: topic.into_iter().map(|t| self.topic_hash(t)).collect(), - }; - - debug!("Publishing message: {:?}", (self.config.message_id_fn)(&message)); - - // forward the message to mesh peers - self.forward_msg(message.clone(), &source); - - let mut recipient_peers = HashSet::new(); - for topic_hash in &message.topics { - // if not subscribed to the topic, use fanout peers - if self.mesh.get(topic_hash).is_none() { - debug!("Topic: {:?} not in the mesh", topic_hash); - // build a list of peers to forward the message to - // if we have fanout peers add them to the map - if self.fanout.contains_key(topic_hash) { - for peer in self.fanout.get(topic_hash).expect("Topic must exist") { - recipient_peers.insert(*peer); - } - } else { - // we have no fanout peers, select mesh_n of them and add them to the fanout - let mesh_n = self.config.mesh_n; - let new_peers = Self::get_random_peers(&self.topic_peers, topic_hash, mesh_n, |_| true); - // add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - debug!("Peer added to fanout: {:?}", peer); - recipient_peers.insert(peer); - } - } - // we are publishing to fanout peers - update the time we published - self.fanout_last_pub.insert(topic_hash.clone(), Instant::now()); - } - } - - // add published message to our received caches - let msg_id = (self.config.message_id_fn)(&message); - self.mcache.put(message.clone()); - self.received.insert(msg_id.clone(), SmallVec::from_elem(source, 1)); - - debug!("Published message: {:?}", msg_id); - - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: vec![message], - control_msgs: Vec::new(), - }); - // Send to peers we know are subscribed to the topic. 
- for peer_id in recipient_peers.iter() { - debug!("Sending message to peer: {:?}", peer_id); - self.notify_primary(*peer_id, event.clone()); - } - } - - /// This function should be called when `config.manual_propagation` is `true` in order to - /// propagate messages. Messages are stored in the ['Memcache'] and validation is expected to be - /// fast enough that the messages should still exist in the cache. - /// - /// Calling this function will propagate a message stored in the cache, if it still exists. - /// If the message still exists in the cache, it will be forwarded and this function will return true, - /// otherwise it will return false. - pub fn propagate_message(&mut self, message_id: &MessageId, propagation_source: &PeerId) -> bool { - let message = match self.mcache.get(message_id) { - Some(message) => message.clone(), - None => { - warn!( - "Message not in cache. Ignoring forwarding. Message Id: {}", - message_id.0 - ); - return false; - }, - }; - self.forward_msg(message, propagation_source); - true - } - - /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. - fn join(&mut self, topic_hash: &TopicHash) { - debug!("Running JOIN for topic: {:?}", topic_hash); - - // if we are already in the mesh, return - if self.mesh.contains_key(topic_hash) { - info!("JOIN: The topic is already in the mesh, ignoring JOIN"); - return; - } - - let mut added_peers = vec![]; - - // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, - // removing the fanout entry. 
- if let Some((_, peers)) = self.fanout.remove_entry(topic_hash) { - debug!("JOIN: Removing peers from the fanout for topic: {:?}", topic_hash); - // add up to mesh_n of them them to the mesh - // Note: These aren't randomly added, currently FIFO - let add_peers = std::cmp::min(peers.len(), self.config.mesh_n); - debug!( - "JOIN: Adding {:?} peers from the fanout for topic: {:?}", - add_peers, topic_hash - ); - added_peers.extend_from_slice(&peers[..add_peers]); - self.mesh.insert(topic_hash.clone(), peers[..add_peers].to_vec()); - // remove the last published time - self.fanout_last_pub.remove(topic_hash); - } - - // check if we need to get more peers, which we randomly select - if added_peers.len() < self.config.mesh_n { - // get the peers - let new_peers = Self::get_random_peers( - &self.topic_peers, - topic_hash, - self.config.mesh_n - added_peers.len(), - |_| true, - ); - added_peers.extend_from_slice(&new_peers); - // add them to the mesh - debug!("JOIN: Inserting {:?} random peers into the mesh", new_peers.len()); - let mesh_peers = self.mesh.entry(topic_hash.clone()).or_insert_with(Vec::new); - mesh_peers.extend_from_slice(&new_peers); - } - - for peer_id in added_peers { - // Send a GRAFT control message - info!("JOIN: Sending Graft message to peer: {:?}", peer_id); - Self::control_pool_add(&mut self.control_pool, peer_id, GossipsubControlAction::Graft { - topic_hash: topic_hash.clone(), - }); - } - debug!("Completed JOIN for topic: {:?}", topic_hash); - } - - /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. 
- fn leave(&mut self, topic_hash: &TopicHash) { - debug!("Running LEAVE for topic {:?}", topic_hash); - - // if our mesh contains the topic, send prune to peers and delete it from the mesh - if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { - for peer in peers { - // Send a PRUNE control message - info!("LEAVE: Sending PRUNE to peer: {:?}", peer); - Self::control_pool_add(&mut self.control_pool, peer, GossipsubControlAction::Prune { - topic_hash: topic_hash.clone(), - }); - } - } - debug!("Completed LEAVE for topic: {:?}", topic_hash); - } - - /// Handles an IHAVE control message. Checks our cache of messages. If the message is unknown, - /// requests it with an IWANT control message. - fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec)>) { - debug!("Handling IHAVE for peer: {:?}", peer_id); - // use a hashset to avoid duplicates efficiently - let mut iwant_ids = HashSet::new(); - - for (topic, ids) in ihave_msgs { - // only process the message if we are subscribed - if !self.mesh.contains_key(&topic) { - debug!("IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?}", topic); - continue; - } - - for id in ids { - if !self.received.contains_key(&id) { - // have not seen this message, request it - iwant_ids.insert(id); - } - } - } - - if !iwant_ids.is_empty() { - // Send the list of IWANT control messages - debug!("IHAVE: Sending IWANT message"); - Self::control_pool_add(&mut self.control_pool, *peer_id, GossipsubControlAction::IWant { - message_ids: iwant_ids.iter().cloned().collect(), - }); - } - debug!("Completed IHAVE handling for peer: {:?}", peer_id); - } - - /// Handles an IWANT control message. Checks our cache of messages. If the message exists it is - /// forwarded to the requesting peer. 
- fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec) { - debug!("Handling IWANT for peer: {:?}", peer_id); - // build a hashmap of available messages - let mut cached_messages = HashMap::new(); - - for id in iwant_msgs { - // if we have it, add it do the cached_messages mapping - if let Some(msg) = self.mcache.get(&id) { - cached_messages.insert(id.clone(), msg.clone()); - } - } - - if !cached_messages.is_empty() { - debug!("IWANT: Sending cached messages to peer: {:?}", peer_id); - // Send the messages to the peer - let message_list = cached_messages.into_iter().map(|entry| entry.1).collect(); - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: message_list, - control_msgs: Vec::new(), - }); - self.notify_primary(*peer_id, event); - } - debug!("Completed IWANT handling for peer: {:?}", peer_id); - } - - /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not, - /// responds with PRUNE messages. - fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling GRAFT message for peer: {:?}", peer_id); - - let mut to_prune_topics = HashSet::new(); - for topic_hash in topics { - if let Some(peers) = self.mesh.get_mut(&topic_hash) { - // if we are subscribed, add peer to the mesh, if not already added - info!( - "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", - peer_id, topic_hash - ); - // ensure peer is not already added - if !peers.contains(peer_id) { - peers.push(*peer_id); - } - } else { - to_prune_topics.insert(topic_hash.clone()); - } - } - - if !to_prune_topics.is_empty() { - // build the prune messages to send - let prune_messages = to_prune_topics - .iter() - .map(|t| GossipsubControlAction::Prune { topic_hash: t.clone() }) - .collect(); - // Send the prune messages to the peer - info!( - "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {:?}", - peer_id - ); - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - 
control_msgs: prune_messages, - }); - self.notify_primary(*peer_id, event); - } - debug!("Completed GRAFT handling for peer: {:?}", peer_id); - } - - /// Handles PRUNE control messages. Removes peer from the mesh. - fn handle_prune(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling PRUNE message for peer: {:?}", peer_id); - for topic_hash in topics { - if let Some(peers) = self.mesh.get_mut(&topic_hash) { - // remove the peer if it exists in the mesh - info!( - "PRUNE: Removing peer: {:?} from the mesh for topic: {:?}", - peer_id, topic_hash - ); - peers.retain(|p| p != peer_id); - } - } - debug!("Completed PRUNE handling for peer: {:?}", peer_id); - } - - /// Handles IAmrelay control message, does nothing if remote peer already subscribed to some topic - fn handle_i_am_relay(&mut self, peer_id: &PeerId, is_relay: bool) { - debug!("Handling IAmrelay message for peer: {:?}", peer_id); - if self.peer_topics.entry(*peer_id).or_insert_with(Vec::new).is_empty() && is_relay { - info!("IAmrelay: Adding peer: {:?} to the relays list", peer_id); - self.connected_relays.insert(*peer_id); - if self.relays_mesh.len() < self.config.mesh_n_low { - info!("IAmrelay: Adding peer: {:?} to the relay mesh", peer_id); - self.add_peers_to_relays_mesh(vec![*peer_id]); - } - } - debug!("Completed IAmrelay handling for peer: {:?}", peer_id); - } - - /// Handles IncludedTorelaysMesh message - fn handle_included_to_relays_mesh(&mut self, peer_id: &PeerId, is_included: bool, other_mesh_size: usize) { - if self.is_relay() { - debug!( - "Handling IncludedTorelaysMesh message for peer: {:?}, is_included: {}", - peer_id, is_included - ); - if is_included { - if self.connected_relays.contains(peer_id) { - if self.relays_mesh.len() > self.config.mesh_n_high { - self.notify_excluded_from_relay_mesh(*peer_id); - } else { - debug!("Adding peer {:?} to relays_mesh", peer_id); - self.relays_mesh.insert(*peer_id, other_mesh_size); - } - } else { - debug!("Adding peer {:?} to 
included_to_relays_mesh", peer_id); - self.included_to_relays_mesh.insert(*peer_id); - } - } else { - debug!( - "Removing peer {:?} from included_to_relays_mesh and relays mesh", - peer_id - ); - self.included_to_relays_mesh.remove(peer_id); - self.relays_mesh.remove(peer_id); - } - } else { - debug!( - "Ignoring IncludedTorelaysMesh message for peer: {:?}, is_included: {}", - peer_id, is_included - ); - } - } - - /// Handles a newly received GossipsubMessage. - /// Forwards the message to all peers in the mesh. - fn handle_received_message(&mut self, msg: GossipsubMessage, propagation_source: &PeerId) { - let msg_id = (self.config.message_id_fn)(&msg); - debug!("Handling message: {:?} from peer: {:?}", msg_id, propagation_source); - match self.received.entry(msg_id.clone()) { - TimeCacheEntry::Occupied(entry) => { - debug!("Message already received, ignoring. Message: {:?}", msg_id); - entry.into_mut().push(*propagation_source); - return; - }, - TimeCacheEntry::Vacant(entry) => { - entry.insert(SmallVec::from_elem(*propagation_source, 1)); - }, - } - // add to the memcache - self.mcache.put(msg.clone()); - - // dispatch the message to the user - debug!("Sending received message to user"); - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Message( - *propagation_source, - msg_id, - msg.clone(), - ))); - - // forward the message to mesh peers, if no validation is required - if !self.config.manual_propagation { - let message_id = (self.config.message_id_fn)(&msg); - self.forward_msg(msg, propagation_source); - debug!("Completed message handling for message: {:?}", message_id); - } - } - - /// Handles received subscriptions. 
- fn handle_received_subscriptions(&mut self, subscriptions: &[GossipsubSubscription], propagation_source: &PeerId) { - debug!( - "Handling subscriptions: {:?}, from source: {:?}", - subscriptions, propagation_source - ); - let subscribed_topics = match self.peer_topics.get_mut(propagation_source) { - Some(topics) => topics, - None => { - error!("Subscription by unknown peer: {:?}", &propagation_source); - return; - }, - }; - - for subscription in subscriptions { - // get the peers from the mapping, or insert empty lists if topic doesn't exist - let peer_list = self - .topic_peers - .entry(subscription.topic_hash.clone()) - .or_insert_with(Vec::new); - - match subscription.action { - GossipsubSubscriptionAction::Subscribe => { - if !peer_list.contains(propagation_source) { - debug!( - "SUBSCRIPTION: topic_peer: Adding gossip peer: {:?} to topic: {:?}", - propagation_source, subscription.topic_hash - ); - peer_list.push(*propagation_source); - } - - // add to the peer_topics mapping - if !subscribed_topics.contains(&subscription.topic_hash) { - info!( - "SUBSCRIPTION: Adding peer: {:?} to topic: {:?}", - propagation_source, subscription.topic_hash - ); - subscribed_topics.push(subscription.topic_hash.clone()); - } - - // if the mesh needs peers add the peer to the mesh - if let Some(peers) = self.mesh.get_mut(&subscription.topic_hash) { - if peers.len() < self.config.mesh_n_low { - debug!("SUBSCRIPTION: Adding peer {:?} to the mesh", propagation_source,); - } - peers.push(*propagation_source); - } - // generates a subscription event to be polled - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Subscribed { - peer_id: *propagation_source, - topic: subscription.topic_hash.clone(), - })); - }, - GossipsubSubscriptionAction::Unsubscribe => { - if let Some(pos) = peer_list.iter().position(|p| p == propagation_source) { - info!( - "SUBSCRIPTION: Removing gossip peer: {:?} from topic: {:?}", - propagation_source, subscription.topic_hash - ); 
- peer_list.remove(pos); - } - // remove topic from the peer_topics mapping - if let Some(pos) = subscribed_topics.iter().position(|t| t == &subscription.topic_hash) { - subscribed_topics.remove(pos); - } - // remove the peer from the mesh if it exists - if let Some(peers) = self.mesh.get_mut(&subscription.topic_hash) { - peers.retain(|peer| peer != propagation_source); - } - - // generate an unsubscribe event to be polled - self.events - .push_back(NetworkBehaviourAction::GenerateEvent(GossipsubEvent::Unsubscribed { - peer_id: *propagation_source, - topic: subscription.topic_hash.clone(), - })); - }, - } - } - trace!("Completed handling subscriptions from source: {:?}", propagation_source); - } - - /// Heartbeat function which shifts the memcache and updates the mesh. - fn heartbeat(&mut self) { - debug!("Starting heartbeat"); - - let mut to_graft = HashMap::new(); - let mut to_prune = HashMap::new(); - - // maintain the mesh for each topic - for (topic_hash, peers) in self.mesh.iter_mut() { - // too little peers - add some - if peers.len() < self.config.mesh_n_low { - debug!( - "HEARTBEAT: Mesh low. Topic: {:?} Contains: {:?} needs: {:?}", - topic_hash.clone().into_string(), - peers.len(), - self.config.mesh_n_low - ); - // not enough peers - get mesh_n - current_length more - let desired_peers = self.config.mesh_n - peers.len(); - let peer_list = Self::get_random_peers(&self.topic_peers, topic_hash, desired_peers, { - |peer| !peers.contains(peer) - }); - for peer in &peer_list { - let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); - current_topic.push(topic_hash.clone()); - } - // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); - peers.extend(peer_list); - } - - // too many peers - remove some - if peers.len() > self.config.mesh_n_high { - debug!( - "HEARTBEAT: Mesh high. 
Topic: {:?} Contains: {:?} needs: {:?}", - topic_hash, - peers.len(), - self.config.mesh_n_high - ); - let excess_peer_no = peers.len() - self.config.mesh_n; - // shuffle the peers - let mut rng = rand::thread_rng(); - peers.shuffle(&mut rng); - // remove the first excess_peer_no peers adding them to to_prune - for _ in 0..excess_peer_no { - let peer = peers.pop().expect("There should always be enough peers to remove"); - let current_topic = to_prune.entry(peer).or_insert_with(Vec::new); - current_topic.push(topic_hash.clone()); - } - } - } - - // remove expired fanout topics - { - let fanout = &mut self.fanout; // help the borrow checker - let fanout_ttl = self.config.fanout_ttl; - self.fanout_last_pub.retain(|topic_hash, last_pub_time| { - if *last_pub_time + fanout_ttl < Instant::now() { - debug!( - "HEARTBEAT: Fanout topic removed due to timeout. Topic: {:?}", - topic_hash - ); - fanout.remove(topic_hash); - return false; - } - true - }); - } - - // maintain fanout - // check if our peers are still a part of the topic - for (topic_hash, peers) in self.fanout.iter_mut() { - let mut to_remove_peers = Vec::new(); - for peer in peers.iter() { - // is the peer still subscribed to the topic? - match self.peer_topics.get(peer) { - Some(topics) => { - if !topics.contains(topic_hash) { - debug!("HEARTBEAT: Peer removed from fanout for topic: {:?}", topic_hash); - to_remove_peers.push(*peer); - } - }, - None => { - // remove if the peer has disconnected - to_remove_peers.push(*peer); - }, - } - } - peers.retain(|peer| !to_remove_peers.contains(peer)); - - // not enough peers - if peers.len() < self.config.mesh_n { - debug!( - "HEARTBEAT: Fanout low. 
Contains: {:?} needs: {:?}", - peers.len(), - self.config.mesh_n - ); - let needed_peers = self.config.mesh_n - peers.len(); - let new_peers = Self::get_random_peers(&self.topic_peers, topic_hash, needed_peers, |peer| { - !peers.contains(peer) - }); - peers.extend(new_peers); - } - } - - self.emit_gossip(); - - // send graft/prunes - if !to_graft.is_empty() | !to_prune.is_empty() { - self.send_graft_prune(to_graft, to_prune); - } - - // piggyback pooled control messages - self.flush_control_pool(); - - // shift the memcache - self.mcache.shift(); - debug!("Completed Heartbeat"); - } - - /// Emits gossip - Send IHAVE messages to a random set of gossip peers. This is applied to mesh - /// and fanout peers - fn emit_gossip(&mut self) { - debug!("Started gossip"); - for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) { - let message_ids = self.mcache.get_gossip_ids(topic_hash); - if message_ids.is_empty() { - return; - } - - // get gossip_lazy random peers - let to_msg_peers = Self::get_random_peers(&self.topic_peers, topic_hash, self.config.gossip_lazy, |peer| { - !peers.contains(peer) - }); - for peer in to_msg_peers { - // send an IHAVE message - Self::control_pool_add(&mut self.control_pool, peer, GossipsubControlAction::IHave { - topic_hash: topic_hash.clone(), - message_ids: message_ids.clone(), - }); - } - } - debug!("Completed gossip"); - } - - /// Handles multiple GRAFT/PRUNE messages and coalesces them into chunked gossip control - /// messages. 
- fn send_graft_prune( - &mut self, - to_graft: HashMap>, - mut to_prune: HashMap>, - ) { - // handle the grafts and overlapping prunes - for (peer, topics) in to_graft.iter() { - let mut grafts: Vec = topics - .iter() - .map(|topic_hash| GossipsubControlAction::Graft { - topic_hash: topic_hash.clone(), - }) - .collect(); - let mut prunes: Vec = to_prune - .remove(peer) - .unwrap_or_default() - .iter() - .map(|topic_hash| GossipsubControlAction::Prune { - topic_hash: topic_hash.clone(), - }) - .collect(); - grafts.append(&mut prunes); - - // send the control messages - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: grafts, - }); - self.notify_primary(*peer, event); - } - - // handle the remaining prunes - for (peer, topics) in to_prune.iter() { - let remaining_prunes = topics - .iter() - .map(|topic_hash| GossipsubControlAction::Prune { - topic_hash: topic_hash.clone(), - }) - .collect(); - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: remaining_prunes, - }); - self.notify_primary(*peer, event); - } - } - - /// Helper function which forwards a message to mesh\[topic\] peers. 
- fn forward_msg(&mut self, message: GossipsubMessage, source: &PeerId) { - let msg_id = (self.config.message_id_fn)(&message); - debug!("Forwarding message: {:?}", msg_id); - let mut recipient_peers = HashSet::new(); - - if self.config.i_am_relay { - // relay simply forwards the message to topic peers that included the relay to their relays mesh - for topic in &message.topics { - if let Some(topic_peers) = self.topic_peers.get(topic) { - for peer_id in topic_peers { - if peer_id != source - && peer_id != &message.source - && self.included_to_relays_mesh.contains(peer_id) - { - recipient_peers.insert(*peer_id); - } - } - } - } - } else { - // add mesh peers if the node is not relay - for topic in &message.topics { - if let Some(mesh_peers) = self.mesh.get(topic) { - for peer_id in mesh_peers { - if peer_id != source && peer_id != &message.source { - recipient_peers.insert(*peer_id); - } - } - } - } - } - - // forward the message to peers - if !recipient_peers.is_empty() { - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - control_msgs: Vec::new(), - }); - - for peer in recipient_peers.iter() { - if let Some(received_from_peers) = self.received.get(&msg_id) { - if received_from_peers.contains(peer) { - continue; - } - } - - debug!("Sending message: {:?} to peer {:?}", msg_id, peer); - self.notify_primary(*peer, event.clone()); - } - } - - if !self.relays_mesh.is_empty() { - debug!("Forwarding message to relays: {:?}", msg_id); - let message_source = message.source; - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: vec![message], - control_msgs: Vec::new(), - }); - - let relays: Vec<_> = self.relays_mesh.keys().copied().collect(); - for relay in relays { - if let Some(received_from_peers) = self.received.get(&msg_id) { - if received_from_peers.contains(&relay) { - continue; - } - } - - if relay != *source && relay != message_source { - debug!("Sending message: {:?} to relay {:?}", 
msg_id, relay); - self.notify_primary(relay, event.clone()); - } - } - debug!("Completed forwarding message to relays"); - } - debug!("Completed forwarding message"); - } - - /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` - /// filtered by the function `f`. - fn get_random_peers( - topic_peers: &HashMap>, - topic_hash: &TopicHash, - n: usize, - mut f: impl FnMut(&PeerId) -> bool, - ) -> Vec { - let mut gossip_peers = match topic_peers.get(topic_hash) { - // if they exist, filter the peers by `f` - Some(peer_list) => peer_list.iter().cloned().filter(|p| f(p)).collect(), - None => Vec::new(), - }; - - // if we have less than needed, return them - if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); - return gossip_peers.to_vec(); - } - - // we have more peers than needed, shuffle them and return n of them - let mut rng = rand::thread_rng(); - gossip_peers.partial_shuffle(&mut rng, n); - - debug!("RANDOM PEERS: Got {:?} peers", n); - - gossip_peers[..n].to_vec() - } - - /// The helper function to get a set of `n` random peers from the `relays` hashset - /// filtered by the function `f`. 
- fn get_random_relays(relays: &HashSet, n: usize, mut f: impl FnMut(&PeerId) -> bool) -> Vec { - let mut relays: Vec<_> = relays.iter().cloned().filter(|p| f(p)).collect(); - - // if we have less than needed, return them - if relays.len() <= n { - debug!("RANDOM RELAYS: Got {:?} peers", relays.len()); - return relays; - } - - // we have more peers than needed, shuffle them and return n of them - let mut rng = rand::thread_rng(); - relays.partial_shuffle(&mut rng, n); - debug!("RANDOM RELAYS: Got {:?} peers", n); - - relays[..n].to_vec() - } - - // adds a control action to control_pool - fn control_pool_add( - control_pool: &mut HashMap>, - peer: PeerId, - control: GossipsubControlAction, - ) { - control_pool.entry(peer).or_insert_with(Vec::new).push(control); - } - - /// Produces a `TopicHash` for a topic given the gossipsub configuration. - fn topic_hash(&self, topic: Topic) -> TopicHash { - if self.config.hash_topics { - topic.sha256_hash() - } else { - topic.no_hash() - } - } - - /// Takes each control action mapping and turns it into a message - fn flush_control_pool(&mut self) { - let control_pool: Vec<_> = self.control_pool.drain().collect(); - for (peer, controls) in control_pool { - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: controls, - }); - self.notify_primary(peer, event); - } - } - - pub fn get_mesh_peers(&self, topic: &TopicHash) -> Vec { self.mesh.get(topic).cloned().unwrap_or_default() } - - pub fn get_topic_peers(&self, topic: &TopicHash) -> Vec { - self.topic_peers.get(topic).cloned().unwrap_or_default() - } - - pub fn get_num_peers(&self) -> usize { self.peer_topics.len() } - - pub fn get_peers_connections(&self) -> HashMap> { - self.peer_connections.clone() - } - - pub fn get_mesh(&self) -> &HashMap> { &self.mesh } - - pub fn get_relay_mesh(&self) -> Vec { self.relays_mesh.keys().cloned().collect() } - - pub fn relay_mesh_len(&self) -> usize { self.relays_mesh.len() } - - pub fn 
get_all_topic_peers(&self) -> &HashMap> { &self.topic_peers } - - pub fn get_all_peer_topics(&self) -> &HashMap> { &self.peer_topics } - - /// Get count of received messages in the [`GossipsubConfig::duplicate_cache_time`] period. - pub fn get_received_messages_in_period(&self) -> (Duration, usize) { (self.received.ttl(), self.received.len()) } - - pub fn get_config(&self) -> &GossipsubConfig { &self.config } - - /// Adds peers to relays mesh and notifies them they are added - fn add_peers_to_relays_mesh(&mut self, peers: Vec) { - for peer in &peers { - // other mesh size is unknown at this point - self.relays_mesh.insert(*peer, 0); - } - for peer in peers { - self.notify_included_to_relay_mesh(peer); - } - } - - #[allow(dead_code)] - fn remove_peer_from_relay_mesh(&mut self, peer: &PeerId) { - if self.relays_mesh.remove(peer).is_some() { - self.notify_excluded_from_relay_mesh(*peer) - } - } - - /// Cleans up relays mesh so it remains mesh_n peers - fn clean_up_relays_mesh(&mut self) { - let mesh_n = self.config.mesh_n; - let mut removed = Vec::with_capacity(self.relays_mesh.len() - mesh_n); - let explicit_relay_list = self.explicit_relay_list.clone(); - // perform 2 filter iterations to not keep excessive number of explicit peers in mesh - self.relays_mesh = self - .relays_mesh - .drain() - .enumerate() - .filter_map(|(i, peer)| { - if i < mesh_n || explicit_relay_list.contains(&peer.0) { - Some(peer) - } else { - removed.push(peer); - None - } - }) - .collect(); - - self.relays_mesh = self - .relays_mesh - .drain() - .enumerate() - .filter_map(|(i, peer)| { - if i < mesh_n { - Some(peer) - } else { - removed.push(peer); - None - } - }) - .collect(); - - for (peer, _) in removed { - self.notify_excluded_from_relay_mesh(peer) - } - } - - fn notify_included_to_relay_mesh(&mut self, peer: PeerId) { - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: vec![GossipsubControlAction::IncludedToRelaysMesh { - included: 
true, - mesh_size: self.relay_mesh_len(), - }], - }); - self.notify_primary(peer, event); - } - - fn notify_excluded_from_relay_mesh(&mut self, peer: PeerId) { - let event = Arc::new(GossipsubRpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: vec![GossipsubControlAction::IncludedToRelaysMesh { - included: false, - mesh_size: self.relay_mesh_len(), - }], - }); - self.notify_primary(peer, event); - } - - /// Notify the primary connection (the first connected point) of the peer. - /// Since `NotifyHandler::All` has been removed, the original `libp2p_gossipsub` notifies the connected peers using their primary connections. - /// See an example: https://github.com/libp2p/rust-libp2p/blob/v0.38.0/protocols/gossipsub/src/behaviour.rs#L3013 - fn notify_primary(&mut self, peer_id: PeerId, event: Arc) { - if let Some(points) = self.peer_connections.get(&peer_id) { - if !points.is_empty() { - let conn_id = points[0].0; - return self.notify_one(peer_id, conn_id, event); - } - } - warn!("Expected at least one connection of the peer '{}'", peer_id); - self.notify_any(peer_id, event); - } - - #[allow(dead_code)] - fn notify_all(&mut self, peer_id: PeerId, event: Arc) { - match self.peer_connections.get(&peer_id) { - Some(connected_points) => { - let connections: Vec<_> = connected_points.iter().map(|(conn_id, _point)| *conn_id).collect(); - for conn_id in connections { - self.notify_one(peer_id, conn_id, event.clone()); - } - }, - None => { - warn!( - "An attempt to notify a peer '{:?}' that is not in 'Gossipsub::peer_connections'", - peer_id - ) - }, - } - } - - fn notify_any(&mut self, peer_id: PeerId, event: Arc) { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event, - }); - } - - fn notify_one(&mut self, peer_id: PeerId, conn_id: ConnectionId, event: Arc) { - self.events.push_back(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::One(conn_id), - event, - }); - } - - 
fn maintain_relays_mesh(&mut self) { - if self.relays_mesh.len() < self.config.mesh_n_low { - info!( - "HEARTBEAT: relays low. Contains: {:?} needs: {:?}", - self.relays_mesh.len(), - self.config.mesh_n_low, - ); - // add peers 1 by 1 to avoid overloading peaks when node connects to several other nodes at once - let required = 1; - // get `n` relays that are not in the `relays_mesh` - let to_add = - Self::get_random_relays(&self.connected_relays, required, |p| !self.relays_mesh.contains_key(p)); - self.add_peers_to_relays_mesh(to_add); - } - - if self.relays_mesh.len() > self.config.mesh_n_high { - info!( - "HEARTBEAT: relays high. Contains: {:?} needs: {:?}", - self.relays_mesh.len(), - self.config.mesh_n, - ); - self.clean_up_relays_mesh(); - } - - let size = self.relays_mesh.len(); - for relay in self.relays_mesh.keys() { - Self::control_pool_add(&mut self.control_pool, *relay, GossipsubControlAction::MeshSize(size)); - } - } - - pub fn is_relay(&self) -> bool { self.config.i_am_relay } - - pub fn connected_relays(&self) -> Vec { self.connected_relays.iter().cloned().collect() } - - pub fn connected_relays_len(&self) -> usize { self.connected_relays.len() } - - pub fn is_connected_to_addr(&self, addr: &Multiaddr) -> bool { self.connected_addresses.contains(addr) } -} - -impl NetworkBehaviour for Gossipsub { - type ConnectionHandler = GossipsubHandler; - type OutEvent = GossipsubEvent; - - fn new_handler(&mut self) -> Self::ConnectionHandler { - GossipsubHandler::new(self.config.protocol_id.clone(), self.config.max_transmit_size) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { Vec::new() } - - fn inject_connection_established( - &mut self, - id: &PeerId, - conn_id: &ConnectionId, - point: &ConnectedPoint, - _: Option<&Vec>, - other_established: usize, - ) { - self.peer_connections - .entry(*id) - .or_insert_with(Default::default) - .push((*conn_id, point.clone())); - self.connected_addresses.push(point.get_remote_address().clone()); - - if 
other_established > 0 { - // For other actions, we only care about the first time a peer connects. - return; - } - - info!("New peer connected: {:?}", id); - // We need to send our subscriptions to the newly-connected node if we are not relay. - // Notify peer that we act as relay otherwise - if self.config.i_am_relay { - debug!("Sending IAmRelay to peer {:?}", id); - let event = Arc::new(GossipsubRpc { - messages: Vec::new(), - subscriptions: Vec::new(), - control_msgs: vec![GossipsubControlAction::IAmRelay(true)], - }); - self.notify_primary(*id, event); - } else { - let mut subscriptions = vec![]; - for topic_hash in self.mesh.keys() { - subscriptions.push(GossipsubSubscription { - topic_hash: topic_hash.clone(), - action: GossipsubSubscriptionAction::Subscribe, - }); - } - - if !subscriptions.is_empty() { - // send our subscriptions to the peer - let event = Arc::new(GossipsubRpc { - messages: Vec::new(), - subscriptions, - control_msgs: Vec::new(), - }); - self.notify_primary(*id, event); - } - } - // For the time being assume all gossipsub peers - self.peer_topics.insert(*id, Vec::new()); - } - - fn inject_connection_closed( - &mut self, - peer_id: &PeerId, - disconnected_conn_id: &ConnectionId, - disconnected_point: &ConnectedPoint, - _: ::Handler, - remaining_established: usize, - ) { - if let Entry::Occupied(mut o) = self.peer_connections.entry(*peer_id) { - let connected_points = o.get_mut(); - connected_points.retain(|(conn_id, _point)| conn_id != disconnected_conn_id); - if connected_points.is_empty() { - o.remove_entry(); - } - } - - self.connected_addresses - .retain(|addr| addr != disconnected_point.get_remote_address()); - - if remaining_established > 0 { - return; - } - - // remove from mesh, topic_peers, peer_topic and fanout - debug!("Peer disconnected: {:?}", peer_id); - { - let topics = match self.peer_topics.get(peer_id) { - Some(topics) => topics, - None => { - warn!("Disconnected node, not in connected nodes"); - return; - }, - }; - - // 
remove peer from all mappings - for topic in topics { - // check the mesh for the topic - if let Some(mesh_peers) = self.mesh.get_mut(topic) { - // check if the peer is in the mesh and remove it - if let Some(pos) = mesh_peers.iter().position(|p| p == peer_id) { - mesh_peers.remove(pos); - } - } - - // remove from topic_peers - if let Some(peer_list) = self.topic_peers.get_mut(topic) { - if let Some(pos) = peer_list.iter().position(|p| p == peer_id) { - peer_list.remove(pos); - } - // debugging purposes - else { - warn!("Disconnected node: {:?} not in topic_peers peer list", peer_id); - } - } else { - warn!( - "Disconnected node: {:?} with topic: {:?} not in topic_peers", - &peer_id, &topic - ); - } - - // remove from fanout - if let Some(peers) = self.fanout.get_mut(topic) { - peers.retain(|p| p != peer_id) - } - } - } - - self.relays_mesh.remove(peer_id); - self.connected_relays.remove(peer_id); - self.included_to_relays_mesh.remove(peer_id); - self.peer_connections.remove(peer_id); - // remove peer from peer_topics - let was_in = self.peer_topics.remove(peer_id); - debug_assert!(was_in.is_some()); - } - - fn inject_event(&mut self, propagation_source: PeerId, _: ConnectionId, event: GossipsubRpc) { - // Handle subscriptions - // Update connected peers topics - debug!("Event injected {:?}, source {:?}", event, propagation_source); - self.handle_received_subscriptions(&event.subscriptions, &propagation_source); - - // Handle messages - for message in event.messages { - self.handle_received_message(message, &propagation_source); - } - - // Handle control messages - // group some control messages, this minimises SendEvents (code is simplified to handle each event at a time however) - let mut ihave_msgs = vec![]; - let mut graft_msgs = vec![]; - let mut prune_msgs = vec![]; - for control_msg in event.control_msgs { - match control_msg { - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => { - ihave_msgs.push((topic_hash, message_ids)); - }, - 
GossipsubControlAction::IWant { message_ids } => self.handle_iwant(&propagation_source, message_ids), - GossipsubControlAction::Graft { topic_hash } => graft_msgs.push(topic_hash), - GossipsubControlAction::Prune { topic_hash } => prune_msgs.push(topic_hash), - GossipsubControlAction::IAmRelay(is_relay) => self.handle_i_am_relay(&propagation_source, is_relay), - GossipsubControlAction::IncludedToRelaysMesh { included, mesh_size } => { - self.handle_included_to_relays_mesh(&propagation_source, included, mesh_size) - }, - GossipsubControlAction::MeshSize(size) => { - if let Some(old_size) = self.relays_mesh.get_mut(&propagation_source) { - *old_size = size; - } - }, - } - } - if !ihave_msgs.is_empty() { - self.handle_ihave(&propagation_source, ihave_msgs); - } - if !graft_msgs.is_empty() { - self.handle_graft(&propagation_source, graft_msgs); - } - if !prune_msgs.is_empty() { - self.handle_prune(&propagation_source, prune_msgs); - } - } - - fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll> { - if let Some(event) = self.events.pop_front() { - // clone send event reference if others references are present - match event { - NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event: send_event, - } => match Arc::try_unwrap(send_event) { - Ok(event) => { - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - event, - handler, - }); - }, - Err(event) => { - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - event: (*event).clone(), - handler, - }); - }, - }, - NetworkBehaviourAction::GenerateEvent(e) => { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(e)); - }, - NetworkBehaviourAction::Dial { opts, handler } => { - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }); - }, - NetworkBehaviourAction::ReportObservedAddr { address, score } => { - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }); - }, - 
NetworkBehaviourAction::CloseConnection { peer_id, connection } => { - return Poll::Ready(NetworkBehaviourAction::CloseConnection { peer_id, connection }); - }, - } - } - - while let Poll::Ready(Some(())) = self.heartbeat.poll_next_unpin(cx) { - self.heartbeat(); - } - - while let Poll::Ready(Some(())) = self.relay_mesh_maintenance_interval.poll_next_unpin(cx) { - self.maintain_relays_mesh(); - } - - Poll::Pending - } -} - -/// An RPC received/sent. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct GossipsubRpc { - /// List of messages that were part of this RPC query. - pub messages: Vec, - /// List of subscriptions. - pub subscriptions: Vec, - /// List of Gossipsub control messages. - pub control_msgs: Vec, -} - -/// Event that can happen on the gossipsub behaviour. -#[derive(Debug)] -pub enum GossipsubEvent { - /// A message has been received. This contains the PeerId that we received the message from, - /// the message id (used if the application layer needs to propagate the message) and the - /// message itself. - Message(PeerId, MessageId, GossipsubMessage), - - /// A remote subscribed to a topic. - Subscribed { - /// Remote that has subscribed. - peer_id: PeerId, - /// The topic it has subscribed to. - topic: TopicHash, - }, - - /// A remote unsubscribed from a topic. - Unsubscribed { - /// Remote that has unsubscribed. - peer_id: PeerId, - /// The topic it has subscribed from. - topic: TopicHash, - }, -} diff --git a/mm2src/gossipsub/src/behaviour/tests.rs b/mm2src/gossipsub/src/behaviour/tests.rs deleted file mode 100644 index 1c1923df0b..0000000000 --- a/mm2src/gossipsub/src/behaviour/tests.rs +++ /dev/null @@ -1,900 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -// collection of tests for the gossipsub network behaviour - -#[cfg(test)] -#[allow(clippy::module_inception)] -mod tests { - use super::super::*; - use crate::GossipsubConfigBuilder; - use libp2p_core::Endpoint; - use std::net::IpAddr; - use std::str::FromStr; - - // helper functions for testing - - // This function generates `peer_no` random PeerId's, subscribes to `topics` and subscribes the - // injected nodes to all topics if `to_subscribe` is set. All nodes are considered gossipsub nodes. 
- fn build_and_inject_nodes( - peer_no: usize, - topics: Vec, - gs_config: GossipsubConfig, - to_subscribe: bool, - ) -> (Gossipsub, Vec, Vec) { - // create a gossipsub struct - let mut gs: Gossipsub = Gossipsub::new(PeerId::random(), gs_config); - - let mut topic_hashes = vec![]; - - // subscribe to the topics - for t in topics { - let topic = Topic::new(t); - gs.subscribe(topic.clone()); - topic_hashes.push(topic.no_hash().clone()); - } - - // build and connect peer_no random peers - let mut peers = vec![]; - - for i in 0..peer_no { - let peer = PeerId::random(); - peers.push(peer); - ::inject_connection_established( - &mut gs, - &peer, - &ConnectionId::new(i), - &ConnectedPoint::Dialer { - address: Multiaddr::from(IpAddr::from_str("127.0.0.1").unwrap()), - role_override: Endpoint::Dialer, - }, - None, - 0, - ); - if to_subscribe { - gs.handle_received_subscriptions( - &topic_hashes - .iter() - .cloned() - .map(|t| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: t, - }) - .collect::>(), - &peer, - ); - }; - } - - (gs, peers, topic_hashes) - } - - #[test] - /// Test local node subscribing to a topic - fn test_subscribe() { - // The node should: - // - Create an empty vector in mesh[topic] - // - Send subscription request to all peers - // - run JOIN(topic) - - let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, topic_hashes) = build_and_inject_nodes(20, subscribe_topic, GossipsubConfig::default(), true); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); - - // collect all the subscriptions - let subscriptions = gs.events.iter().fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - for s in &event.subscriptions { - if s.action == GossipsubSubscriptionAction::Subscribe { - collected_subscriptions.push(s.clone()) - } - } - collected_subscriptions - }, - _ => collected_subscriptions, - }); - - // we sent a subscribe to all known peers - assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" - ); - } - - #[test] - /// Test unsubscribe. - fn test_unsubscribe() { - // Unsubscribe should: - // - Remove the mesh entry for topic - // - Send UNSUBSCRIBE to all known peers - // - Call Leave - - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings - .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); - - // subscribe to topic_strings - let (mut gs, _, topic_hashes) = build_and_inject_nodes(20, topic_strings, GossipsubConfig::default(), true); - - for topic_hash in &topic_hashes { - assert!( - gs.topic_peers.get(topic_hash).is_some(), - "Topic_peers contain a topic entry" - ); - assert!(gs.mesh.get(topic_hash).is_some(), "mesh should contain a topic entry"); - } - - // unsubscribe from both topics - assert!( - gs.unsubscribe(topics[0].clone()), - "should be able to unsubscribe successfully from each topic", - ); - assert!( - gs.unsubscribe(topics[1].clone()), - "should be able to unsubscribe successfully from each topic", - ); - - let subscriptions = gs.events.iter().fold(vec![], |mut collected_subscriptions, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - for s in &event.subscriptions { - if s.action == GossipsubSubscriptionAction::Unsubscribe { - collected_subscriptions.push(s.clone()) - } - } - collected_subscriptions - }, - _ => collected_subscriptions, - }); - - // we sent a unsubscribe to all known peers, for two topics - assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" - ); - - // check we clean up internal structures - for topic_hash in &topic_hashes { - assert!( - gs.mesh.get(topic_hash).is_none(), - "All topics should have been removed from the mesh" - ); - } - } - - #[test] - /// Test JOIN(topic) functionality. - fn test_join() { - // The Join function should: - // - Remove peers from fanout[topic] - // - Add any fanout[topic] peers to the mesh (up to mesh_n) - // - Fill up to mesh_n peers from known gossipsub peers in the topic - // - Send GRAFT messages to all nodes added to the mesh - - // This test is not an isolated unit test, rather it uses higher level, - // subscribe/unsubscribe to perform the test. - - let topic_strings = vec![String::from("topic1"), String::from("topic2")]; - let topics = topic_strings - .iter() - .map(|t| Topic::new(t.clone())) - .collect::>(); - - let (mut gs, _, topic_hashes) = build_and_inject_nodes(20, topic_strings, GossipsubConfig::default(), true); - - // unsubscribe, then call join to invoke functionality - assert!( - gs.unsubscribe(topics[0].clone()), - "should be able to unsubscribe successfully" - ); - assert!( - gs.unsubscribe(topics[1].clone()), - "should be able to unsubscribe successfully" - ); - - // re-subscribe - there should be peers associated with the topic - assert!( - gs.subscribe(topics[0].clone()), - "should be able to subscribe successfully" - ); - - // should have added mesh_n nodes to the mesh - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" - ); - - // there should be mesh_n GRAFT messages. 
- let graft_messages = gs - .control_pool - .iter() - .fold(vec![], |mut collected_grafts, (_, controls)| { - for c in controls.iter() { - if let GossipsubControlAction::Graft { topic_hash: _ } = c { - collected_grafts.push(c.clone()) - } - } - collected_grafts - }); - - assert_eq!( - graft_messages.len(), - 6, - "There should be 6 grafts messages sent to peers" - ); - - // verify fanout nodes - // add 3 random peers to the fanout[topic1] - gs.fanout.insert(topic_hashes[1].clone(), vec![]); - let new_peers = vec![]; - for _ in 0..3 { - let fanout_peers = gs.fanout.get_mut(&topic_hashes[1]).unwrap(); - fanout_peers.push(PeerId::random()); - } - - // subscribe to topic1 - gs.subscribe(topics[1].clone()); - - // the three new peers should have been added, along with 3 more from the pool. - assert!( - gs.mesh.get(&topic_hashes[1]).unwrap().len() == 6, - "Should have added 6 nodes to the mesh" - ); - let mesh_peers = gs.mesh.get(&topic_hashes[1]).unwrap(); - for new_peer in new_peers { - assert!( - mesh_peers.contains(new_peer), - "Fanout peer should be included in the mesh" - ); - } - - // there should now be 12 graft messages to be sent - let graft_messages = gs - .control_pool - .iter() - .fold(vec![], |mut collected_grafts, (_, controls)| { - for c in controls.iter() { - if let GossipsubControlAction::Graft { topic_hash: _ } = c { - collected_grafts.push(c.clone()) - } - } - collected_grafts - }); - - assert!( - graft_messages.len() == 12, - "There should be 12 grafts messages sent to peers" - ); - } - - /// Test local node publish to subscribed topic - #[test] - fn test_publish() { - // node should: - // - Send publish message to all peers - // - Insert message into gs.mcache and gs.received - - let publish_topic = String::from("test_publish"); - let (mut gs, _, topic_hashes) = - build_and_inject_nodes(20, vec![publish_topic.clone()], GossipsubConfig::default(), true); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to 
the mesh[topic] hashmap" - ); - - // publish on topic - let publish_data = vec![0; 42]; - gs.publish(&Topic::new(publish_topic), publish_data); - - // Collect all publish messages - let publishes = gs.events.iter().fold(vec![], |mut collected_publish, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish - }, - _ => collected_publish, - }); - - let msg_id = (gs.config.message_id_fn)(publishes.first().expect("Should contain > 0 entries")); - - assert!( - publishes.len() == 20, - "Should send a publish message to all known peers" - ); - - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - assert!( - gs.received.get(&msg_id).is_some(), - "Received cache should contain published message" - ); - } - - /// Test local node publish to unsubscribed topic - #[test] - fn test_fanout() { - // node should: - // - Populate fanout peers - // - Send publish message to fanout peers - // - Insert message into gs.mcache and gs.received - let fanout_topic = String::from("test_fanout"); - let (mut gs, _, topic_hashes) = - build_and_inject_nodes(20, vec![fanout_topic.clone()], GossipsubConfig::default(), true); - - assert!( - gs.mesh.get(&topic_hashes[0]).is_some(), - "Subscribe should add a new entry to the mesh[topic] hashmap" - ); - // Unsubscribe from topic - assert!( - gs.unsubscribe(Topic::new(fanout_topic.clone())), - "should be able to unsubscribe successfully from topic" - ); - - // Publish on unsubscribed topic - let publish_data = vec![0; 42]; - gs.publish(&Topic::new(fanout_topic.clone()), publish_data); - - assert_eq!( - gs.fanout.get(&TopicHash::from_raw(fanout_topic)).unwrap().len(), - gs.config.mesh_n, - "Fanout should contain `mesh_n` peers for fanout topic" - ); - - // Collect all publish messages - let publishes = gs.events.iter().fold(vec![], |mut collected_publish, e| match e { - 
NetworkBehaviourAction::NotifyHandler { event, .. } => { - for s in &event.messages { - collected_publish.push(s.clone()); - } - collected_publish - }, - _ => collected_publish, - }); - - let msg_id = (gs.config.message_id_fn)(publishes.first().expect("Should contain > 0 entries")); - - assert_eq!( - publishes.len(), - gs.config.mesh_n, - "Should send a publish message to `mesh_n` fanout peers" - ); - - assert!( - gs.mcache.get(&msg_id).is_some(), - "Message cache should contain published message" - ); - assert!( - gs.received.get(&msg_id).is_some(), - "Received cache should contain published message" - ); - } - - #[test] - /// Test the gossipsub NetworkBehaviour peer connection logic. - fn test_inject_connected() { - let (gs, peers, topic_hashes) = build_and_inject_nodes( - 20, - vec![String::from("topic1"), String::from("topic2")], - GossipsubConfig::default(), - true, - ); - - // check that our subscriptions are sent to each of the peers - // collect all the SendEvents - let send_events: Vec<&NetworkBehaviourAction>> = gs - .events - .iter() - .filter(|e| matches!(e, NetworkBehaviourAction::NotifyHandler { .. })) - .collect(); - - // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - if let NetworkBehaviourAction::NotifyHandler { event, .. } = sevent { - assert!( - event.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." - ); - }; - } - - // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." 
- ); - - // should add the new peers to `peer_topics` with an empty vec as a gossipsub node - for peer in peers { - let known_topics = gs.peer_topics.get(&peer).unwrap(); - assert!( - known_topics == &topic_hashes, - "The topics for each node should all topics" - ); - } - } - - #[test] - /// Test subscription handling - fn test_handle_received_subscriptions() { - // For every subscription: - // SUBSCRIBE: - Add subscribed topic to peer_topics for peer. - // - Add peer to topics_peer. - // UNSUBSCRIBE - Remove topic from peer_topics for peer. - // - Remove peer from topic_peers. - - let topics = vec!["topic1", "topic2", "topic3", "topic4"] - .iter() - .map(|&t| String::from(t)) - .collect(); - let (mut gs, peers, topic_hashes) = build_and_inject_nodes(20, topics, GossipsubConfig::default(), false); - - // The first peer sends 3 subscriptions and 1 unsubscription - let mut subscriptions = topic_hashes[..3] - .iter() - .map(|topic_hash| GossipsubSubscription { - action: GossipsubSubscriptionAction::Subscribe, - topic_hash: topic_hash.clone(), - }) - .collect::>(); - - subscriptions.push(GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), - }); - - let unknown_peer = PeerId::random(); - // process the subscriptions - // first and second peers send subscriptions - gs.handle_received_subscriptions(&subscriptions, &peers[0]); - gs.handle_received_subscriptions(&subscriptions, &peers[1]); - // unknown peer sends the same subscriptions - gs.handle_received_subscriptions(&subscriptions, &unknown_peer); - - // verify the result - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes[..3].to_vec(), - "First peer should be subscribed to three topics" - ); - let peer_topics = gs.peer_topics.get(&peers[1]).unwrap().clone(); - assert!( - peer_topics == topic_hashes[..3].to_vec(), - "Second peer should be subscribed to three topics" - ); - - 
assert!( - gs.peer_topics.get(&unknown_peer).is_none(), - "Unknown peer should not have been added" - ); - - for topic_hash in topic_hashes[..3].iter() { - let topic_peers = gs.topic_peers.get(topic_hash).unwrap().clone(); - assert!( - topic_peers == peers[..2].to_vec(), - "Two peers should be added to the first three topics" - ); - } - - // Peer 0 unsubscribes from the first topic - - gs.handle_received_subscriptions( - &[GossipsubSubscription { - action: GossipsubSubscriptionAction::Unsubscribe, - topic_hash: topic_hashes[0].clone(), - }], - &peers[0], - ); - - let peer_topics = gs.peer_topics.get(&peers[0]).unwrap().clone(); - assert!( - peer_topics == topic_hashes[1..3].to_vec(), - "Peer should be subscribed to two topics" - ); - - let topic_peers = gs.topic_peers.get(&topic_hashes[0]).unwrap().clone(); // only gossipsub at the moment - assert!( - topic_peers == peers[1..2].to_vec(), - "Only the second peers should be in the first topic" - ); - } - - #[test] - /// Test Gossipsub.get_random_peers() function - fn test_get_random_peers() { - // generate a default GossipsubConfig - let gs_config = GossipsubConfig::default(); - // create a gossipsub struct - let mut gs: Gossipsub = Gossipsub::new(PeerId::random(), gs_config); - - // create a topic and fill it with some peers - let topic_hash = Topic::new("Test".into()).no_hash(); - let mut peers = vec![]; - for _ in 0..20 { - peers.push(PeerId::random()) - } - - gs.topic_peers.insert(topic_hash.clone(), peers.clone()); - - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| true); - assert!(random_peers.len() == 5, "Expected 5 peers to be returned"); - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 30, |_| true); - assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!(random_peers == peers, "Expected no shuffling"); - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 20, |_| true); - 
assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); - assert!(random_peers == peers, "Expected no shuffling"); - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 0, |_| true); - assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - // test the filter - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 5, |_| false); - assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - let random_peers = Gossipsub::get_random_peers(&gs.topic_peers, &topic_hash, 10, |peer| peers.contains(peer)); - assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); - } - - /// Tests that the correct message is sent when a peer asks for a message in our cache. - #[test] - fn test_handle_iwant_msg_cached() { - let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), GossipsubConfig::default(), true); - - let id = gs.config.message_id_fn; - - let message = GossipsubMessage { - source: peers[11], - data: vec![1, 2, 3, 4], - sequence_number: 1u64, - topics: Vec::new(), - }; - let msg_id = id(&message); - gs.mcache.put(message); - - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - - // the messages we are sending - let sent_messages = gs.events.iter().fold(vec![], |mut collected_messages, e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - for c in &event.messages { - collected_messages.push(c.clone()) - } - collected_messages - }, - _ => collected_messages, - }); - - assert!( - sent_messages.iter().any(|msg| id(msg) == msg_id), - "Expected the cached message to be sent to an IWANT peer" - ); - } - - /// Tests that messages are sent correctly depending on the shifting of the message cache. 
- #[test] - fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), GossipsubConfig::default(), true); - - let id = gs.config.message_id_fn; - // perform 10 memshifts and check that it leaves the cache - for shift in 1..10 { - let message = GossipsubMessage { - source: peers[11], - data: vec![1, 2, 3, 4], - sequence_number: shift, - topics: Vec::new(), - }; - let msg_id = id(&message); - gs.mcache.put(message.clone()); - for _ in 0..shift { - gs.mcache.shift(); - } - - gs.handle_iwant(&peers[7], vec![msg_id.clone()]); - - // is the message is being sent? - let message_exists = gs.events.iter().any(|e| match e { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - event.messages.iter().any(|msg| id(msg) == msg_id) - }, - _ => false, - }); - // default history_length is 5, expect no messages after shift > 5 - if shift < 5 { - assert!( - message_exists, - "Expected the cached message to be sent to an IWANT peer before 5 shifts" - ); - } else { - assert!( - !message_exists, - "Expected the cached message to not be sent to an IWANT peer after 5 shifts" - ); - } - } - } - - #[test] - // tests that an event is not created when a peers asks for a message not in our cache - fn test_handle_iwant_msg_not_cached() { - let (mut gs, peers, _) = build_and_inject_nodes(20, Vec::new(), GossipsubConfig::default(), true); - - let events_before = gs.events.len(); - gs.handle_iwant(&peers[7], vec![MessageId(String::from("unknown id"))]); - let events_after = gs.events.len(); - - assert_eq!(events_before, events_after, "Expected event count to stay the same"); - } - - #[test] - // tests that an event is created when a peer shares that it has a message we want - fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, topic_hashes) = - build_and_inject_nodes(20, vec![String::from("topic1")], GossipsubConfig::default(), true); - - gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![MessageId( - 
String::from("unknown id"), - )])]); - - // check that we sent an IWANT request for `unknown id` - let iwant_exists = match gs.control_pool.get(&peers[7]) { - Some(controls) => controls.iter().any(|c| match c { - GossipsubControlAction::IWant { message_ids } => - { - #[allow(clippy::cmp_owned)] - message_ids.iter().any(|m| *m.0 == String::from("unknown id")) - }, - _ => false, - }), - _ => false, - }; - - assert!( - iwant_exists, - "Expected to send an IWANT control message for unkown message id" - ); - } - - #[test] - // tests that an event is not created when a peer shares that it has a message that - // we already have - fn test_handle_ihave_subscribed_and_msg_cached() { - let (mut gs, peers, topic_hashes) = - build_and_inject_nodes(20, vec![String::from("topic1")], GossipsubConfig::default(), true); - - let msg_id = MessageId(String::from("known id")); - gs.received.insert(msg_id.clone(), SmallVec::new()); - - let events_before = gs.events.len(); - gs.handle_ihave(&peers[7], vec![(topic_hashes[0].clone(), vec![msg_id])]); - let events_after = gs.events.len(); - - assert_eq!(events_before, events_after, "Expected event count to stay the same") - } - - #[test] - // test that an event is not created when a peer shares that it has a message in - // a topic that we are not subscribed to - fn test_handle_ihave_not_subscribed() { - let (mut gs, peers, _) = build_and_inject_nodes(20, vec![], GossipsubConfig::default(), true); - - let events_before = gs.events.len(); - gs.handle_ihave(&peers[7], vec![( - TopicHash::from_raw(String::from("unsubscribed topic")), - vec![MessageId(String::from("irrelevant id"))], - )]); - let events_after = gs.events.len(); - - assert_eq!(events_before, events_after, "Expected event count to stay the same") - } - - #[test] - // tests that a peer is added to our mesh when we are both subscribed - // to the same topic - fn test_handle_graft_is_subscribed() { - let (mut gs, peers, topic_hashes) = - build_and_inject_nodes(20, 
vec![String::from("topic1")], GossipsubConfig::default(), true); - - gs.handle_graft(&peers[7], topic_hashes.clone()); - - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } - - #[test] - // tests that a peer is not added to our mesh when they are subscribed to - // a topic that we are not - fn test_handle_graft_is_not_subscribed() { - let (mut gs, peers, topic_hashes) = - build_and_inject_nodes(20, vec![String::from("topic1")], GossipsubConfig::default(), true); - - gs.handle_graft(&peers[7], vec![TopicHash::from_raw(String::from("unsubscribed topic"))]); - - assert!( - gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to have been added to mesh" - ); - } - - #[test] - // tests multiple topics in a single graft message - fn test_handle_graft_multiple_topics() { - let topics: Vec = vec!["topic1", "topic2", "topic3", "topic4"] - .iter() - .map(|&t| String::from(t)) - .collect(); - - let (mut gs, peers, topic_hashes) = build_and_inject_nodes(20, topics, GossipsubConfig::default(), true); - - let mut their_topics = topic_hashes.clone(); - // their_topics = [topic1, topic2, topic3] - // our_topics = [topic1, topic2, topic4] - their_topics.pop(); - gs.leave(&their_topics[2]); - - gs.handle_graft(&peers[7], their_topics.clone()); - - for item in topic_hashes.iter().take(2) { - assert!( - gs.mesh.get(item).unwrap().contains(&peers[7]), - "Expected peer to be in the mesh for the first 2 topics" - ); - } - - assert!( - gs.mesh.get(&topic_hashes[2]).is_none(), - "Expected the second topic to not be in the mesh" - ); - } - - #[test] - // tests that a peer is removed from our mesh - fn test_handle_prune_peer_in_mesh() { - let (mut gs, peers, topic_hashes) = - build_and_inject_nodes(20, vec![String::from("topic1")], GossipsubConfig::default(), true); - - // insert peer into our mesh for 'topic1' - gs.mesh.insert(topic_hashes[0].clone(), peers.clone()); - assert!( - 
gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be in mesh" - ); - - gs.handle_prune(&peers[7], topic_hashes.clone()); - assert!( - !gs.mesh.get(&topic_hashes[0]).unwrap().contains(&peers[7]), - "Expected peer to be removed from mesh" - ); - } - - #[test] - fn test_maintain_relays_mesh_n_high() { - let peer_no = 20; - let (mut gs, peers, _) = build_and_inject_nodes(peer_no, vec![], GossipsubConfig::default(), false); - for peer in peers { - gs.relays_mesh.insert(peer, 0); - } - assert_eq!(peer_no, gs.relays_mesh.len(), "relays mesh must contain 20 peers"); - gs.maintain_relays_mesh(); - assert_eq!( - gs.config.mesh_n, - gs.relays_mesh.len(), - "relays mesh must contain mesh_n peers after maintenance" - ); - assert_eq!(gs.events.len(), 14); - for event in gs.events { - match event { - NetworkBehaviourAction::NotifyHandler { event, .. } => { - assert_eq!(event.control_msgs, vec![GossipsubControlAction::IncludedToRelaysMesh { - included: false, - mesh_size: gs.relays_mesh.len(), - }]); - }, - _ => panic!("Invalid NetworkBehaviourAction variant"), - } - } - } - - #[test] - fn test_maintain_relays_mesh_n_low() { - let peer_no = 20; - let (mut gs, peers, _) = build_and_inject_nodes(peer_no, vec![], GossipsubConfig::default(), false); - for (i, peer) in peers.into_iter().enumerate() { - if i < 3 { - gs.relays_mesh.insert(peer, 0); - } else { - gs.connected_relays.insert(peer); - } - } - assert_eq!(3, gs.relays_mesh.len(), "relays mesh must contain 3 peers"); - gs.maintain_relays_mesh(); - assert_eq!( - 4, - gs.relays_mesh.len(), - "relays mesh must contain 1 more peer after maintenance (4 total)" - ); - assert_eq!(gs.events.len(), 1); - for event in gs.events { - match event { - NetworkBehaviourAction::NotifyHandler { event, .. 
} => { - assert_eq!(event.control_msgs, vec![GossipsubControlAction::IncludedToRelaysMesh { - included: true, - mesh_size: gs.relays_mesh.len(), - }]); - }, - _ => panic!("Invalid NetworkBehaviourAction variant"), - } - } - } - - #[test] - fn test_process_included_to_relays_mesh() { - let peer_no = 2; - let config = GossipsubConfigBuilder::default().i_am_relay(true).build(); - let (mut gs, peers, _) = build_and_inject_nodes(peer_no, vec![], config, false); - for peer in &peers { - gs.connected_relays.insert(*peer); - } - - gs.handle_included_to_relays_mesh(&peers[0], true, 1); - assert!(gs.relays_mesh.contains_key(&peers[0])); - - gs.handle_included_to_relays_mesh(&peers[0], false, 1); - assert!(!gs.relays_mesh.contains_key(&peers[0])); - } - - #[test] - fn test_process_included_to_relays_mesh_n_high_exceeded() { - let peer_no = 14; - let config = GossipsubConfigBuilder::default().i_am_relay(true).build(); - let (mut gs, peers, _) = build_and_inject_nodes(peer_no, vec![], config, false); - for (i, peer) in peers.iter().enumerate() { - gs.connected_relays.insert(*peer); - if i < 13 { - gs.relays_mesh.insert(*peer, 0); - } - } - - gs.handle_included_to_relays_mesh(&peers[13], true, 1); - assert!(!gs.relays_mesh.contains_key(&peers[13])); - - match gs.events.pop_back().unwrap() { - NetworkBehaviourAction::NotifyHandler { event, peer_id, .. } => { - assert_eq!(event.control_msgs, vec![GossipsubControlAction::IncludedToRelaysMesh { - included: false, - mesh_size: gs.relay_mesh_len(), - }]); - assert_eq!(peer_id, peers[13]); - }, - _ => panic!("Invalid NetworkBehaviourAction variant"), - } - } -} diff --git a/mm2src/gossipsub/src/config.rs b/mm2src/gossipsub/src/config.rs deleted file mode 100644 index f74343e21d..0000000000 --- a/mm2src/gossipsub/src/config.rs +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::protocol::{GossipsubMessage, MessageId}; -use std::borrow::Cow; -use std::time::Duration; - -/// If the `no_source_id` flag is set, the IDENTITY_SOURCE value is used as the source of the -/// packet. -pub const IDENTITY_SOURCE: [u8; 3] = [0, 1, 0]; - -/// Configuration parameters that define the performance of the gossipsub network. -#[derive(Clone)] -pub struct GossipsubConfig { - /// The protocol id to negotiate this protocol (default is `/meshsub/1.0.0`). - pub protocol_id: Cow<'static, [u8]>, - - // Overlay network parameters. - /// Number of heartbeats to keep in the `memcache` (default is 5). - pub history_length: usize, - - /// Number of past heartbeats to gossip about (default is 3). - pub history_gossip: usize, - - /// Target number of peers for the mesh network (D in the spec, default is 6). 
- pub mesh_n: usize, - - /// Minimum number of peers in mesh network before adding more (D_lo in the spec, default is 4). - pub mesh_n_low: usize, - - /// Maximum number of peers in mesh network before removing some (D_high in the spec, default - /// is 12). - pub mesh_n_high: usize, - - /// Number of peers to emit gossip to during a heartbeat (D_lazy in the spec, default is 6). - pub gossip_lazy: usize, - - /// Initial delay in each heartbeat (default is 5 seconds). - pub heartbeat_initial_delay: Duration, - - /// Time between each heartbeat (default is 1 second). - pub heartbeat_interval: Duration, - - /// Time to live for fanout peers (default is 60 seconds). - pub fanout_ttl: Duration, - - /// The maximum byte size for each gossip (default is 2048 bytes). - pub max_transmit_size: usize, - - /// Duplicates are prevented by storing message id's of known messages in an LRU time cache. - /// This settings sets the time period that messages are stored in the cache. Duplicates can be - /// received if duplicate messages are sent at a time greater than this setting apart. The - /// default is 1 minute. - pub duplicate_cache_time: Duration, - - /// Flag determining if gossipsub topics are hashed or sent as plain strings (default is false). - pub hash_topics: bool, - - /// When set, all published messages will have a 0 source `PeerId` (default is false). - pub no_source_id: bool, - - /// When set to `true`, prevents automatic forwarding of all received messages. This setting - /// allows a user to validate the messages before propagating them to their peers. If set to - /// true, the user must manually call `propagate_message()` on the behaviour to forward message - /// once validated (default is false). - pub manual_propagation: bool, - - /// A user-defined function allowing the user to specify the message id of a gossipsub message. - /// The default value is to concatenate the source peer id with a sequence number. 
Setting this - /// parameter allows the user to address packets arbitrarily. One example is content based - /// addressing, where this function may be set to `hash(message)`. This would prevent messages - /// of the same content from being duplicated. - /// - /// The function takes a `GossipsubMessage` as input and outputs a String to be interpreted as - /// the message id. - pub message_id_fn: fn(&GossipsubMessage) -> MessageId, - - pub i_am_relay: bool, -} - -impl Default for GossipsubConfig { - fn default() -> GossipsubConfig { - GossipsubConfig { - protocol_id: Cow::Borrowed(b"/meshsub/1.0.0"), - history_length: 5, - history_gossip: 3, - mesh_n: 6, - mesh_n_low: 4, - mesh_n_high: 12, - gossip_lazy: 6, // default to mesh_n - heartbeat_initial_delay: Duration::from_secs(5), - heartbeat_interval: Duration::from_secs(1), - fanout_ttl: Duration::from_secs(60), - max_transmit_size: 2048, - duplicate_cache_time: Duration::from_secs(60), - hash_topics: false, // default compatibility with floodsub - no_source_id: false, - manual_propagation: false, - message_id_fn: |message| { - // default message id is: source + sequence number - let mut source_string = message.source.to_base58(); - source_string.push_str(&message.sequence_number.to_string()); - MessageId(source_string) - }, - i_am_relay: false, - } - } -} - -#[derive(Default)] -pub struct GossipsubConfigBuilder { - config: GossipsubConfig, -} - -impl GossipsubConfigBuilder { - // set default values - pub fn new() -> GossipsubConfigBuilder { GossipsubConfigBuilder::default() } - - pub fn protocol_id(&mut self, protocol_id: impl Into>) -> &mut Self { - self.config.protocol_id = protocol_id.into(); - self - } - - pub fn history_length(&mut self, history_length: usize) -> &mut Self { - assert!( - history_length >= self.config.history_gossip, - "The history_length must be greater than or equal to the history_gossip length" - ); - self.config.history_length = history_length; - self - } - - pub fn history_gossip(&mut self, 
history_gossip: usize) -> &mut Self { - assert!( - self.config.history_length >= history_gossip, - "The history_length must be greater than or equal to the history_gossip length" - ); - self.config.history_gossip = history_gossip; - self - } - - pub fn mesh_n(&mut self, mesh_n: usize) -> &mut Self { - assert!( - self.config.mesh_n_low <= mesh_n && mesh_n <= self.config.mesh_n_high, - "The following equality doesn't hold mesh_n_low <= mesh_n <= mesh_n_high" - ); - self.config.mesh_n = mesh_n; - self - } - - pub fn mesh_n_low(&mut self, mesh_n_low: usize) -> &mut Self { - assert!( - mesh_n_low <= self.config.mesh_n && self.config.mesh_n <= self.config.mesh_n_high, - "The following equality doesn't hold mesh_n_low <= mesh_n <= mesh_n_high" - ); - self.config.mesh_n_low = mesh_n_low; - self - } - - pub fn mesh_n_high(&mut self, mesh_n_high: usize) -> &mut Self { - assert!( - self.config.mesh_n_low <= self.config.mesh_n && self.config.mesh_n <= mesh_n_high, - "The following equality doesn't hold mesh_n_low <= mesh_n <= mesh_n_high" - ); - self.config.mesh_n_high = mesh_n_high; - self - } - - pub fn gossip_lazy(&mut self, gossip_lazy: usize) -> &mut Self { - self.config.gossip_lazy = gossip_lazy; - self - } - - pub fn heartbeat_initial_delay(&mut self, heartbeat_initial_delay: Duration) -> &mut Self { - self.config.heartbeat_initial_delay = heartbeat_initial_delay; - self - } - pub fn heartbeat_interval(&mut self, heartbeat_interval: Duration) -> &mut Self { - self.config.heartbeat_interval = heartbeat_interval; - self - } - pub fn fanout_ttl(&mut self, fanout_ttl: Duration) -> &mut Self { - self.config.fanout_ttl = fanout_ttl; - self - } - pub fn max_transmit_size(&mut self, max_transmit_size: usize) -> &mut Self { - self.config.max_transmit_size = max_transmit_size; - self - } - - pub fn hash_topics(&mut self) -> &mut Self { - self.config.hash_topics = true; - self - } - - pub fn no_source_id(&mut self) -> &mut Self { - self.config.no_source_id = true; - self - } - - 
pub fn manual_propagation(&mut self) -> &mut Self { - self.config.manual_propagation = true; - self - } - - pub fn message_id_fn(&mut self, id_fn: fn(&GossipsubMessage) -> MessageId) -> &mut Self { - self.config.message_id_fn = id_fn; - self - } - - pub fn i_am_relay(&mut self, i_am_relay: bool) -> &mut Self { - self.config.i_am_relay = i_am_relay; - self - } - - pub fn build(&self) -> GossipsubConfig { self.config.clone() } -} - -impl std::fmt::Debug for GossipsubConfig { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut builder = f.debug_struct("GossipsubConfig"); - let _ = builder.field("protocol_id", &self.protocol_id); - let _ = builder.field("history_length", &self.history_length); - let _ = builder.field("history_gossip", &self.history_gossip); - let _ = builder.field("mesh_n", &self.mesh_n); - let _ = builder.field("mesh_n_low", &self.mesh_n_low); - let _ = builder.field("mesh_n_high", &self.mesh_n_high); - let _ = builder.field("gossip_lazy", &self.gossip_lazy); - let _ = builder.field("heartbeat_initial_delay", &self.heartbeat_initial_delay); - let _ = builder.field("heartbeat_interval", &self.heartbeat_interval); - let _ = builder.field("fanout_ttl", &self.fanout_ttl); - let _ = builder.field("max_transmit_size", &self.max_transmit_size); - let _ = builder.field("hash_topics", &self.hash_topics); - let _ = builder.field("no_source_id", &self.no_source_id); - let _ = builder.field("manual_propagation", &self.manual_propagation); - let _ = builder.field("i_am_relay", &self.i_am_relay); - builder.finish() - } -} diff --git a/mm2src/gossipsub/src/handler.rs b/mm2src/gossipsub/src/handler.rs deleted file mode 100644 index 2b71cf13dd..0000000000 --- a/mm2src/gossipsub/src/handler.rs +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::behaviour::GossipsubRpc; -use crate::protocol::{GossipsubCodec, ProtocolConfig}; -use futures::prelude::*; -use futures_codec::Framed; -use libp2p_core::upgrade::{InboundUpgrade, OutboundUpgrade}; -use libp2p_swarm::handler::{ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive, - SubstreamProtocol}; -use libp2p_swarm::NegotiatedSubstream; -use log::{debug, error, trace, warn}; -use smallvec::SmallVec; -use std::{borrow::Cow, - io, - pin::Pin, - task::{Context, Poll}}; - -/// Protocol Handler that manages a single long-lived substream with a peer. -pub struct GossipsubHandler { - /// Upgrade configuration for the gossipsub protocol. - listen_protocol: SubstreamProtocol, - - /// The single long-lived outbound substream. - outbound_substream: Option, - - /// The single long-lived inbound substream. 
- inbound_substream: Option, - - /// Queue of values that we want to send to the remote. - send_queue: SmallVec<[GossipsubRpc; 16]>, - - /// Flag indicating that an outbound substream is being established to prevent duplicate - /// requests. - outbound_substream_establishing: bool, - - /// Flag determining whether to maintain the connection to the peer. - keep_alive: KeepAlive, -} - -/// State of the inbound substream, opened either by us or by the remote. -#[allow(clippy::large_enum_variant)] -enum InboundSubstreamState { - /// Waiting for a message from the remote. The idle state for an inbound substream. - WaitingInput(Framed), - /// The substream is being closed. - Closing(Framed), - /// An error occurred during processing. - Poisoned, -} - -/// State of the outbound substream, opened either by us or by the remote. -#[allow(clippy::large_enum_variant)] -enum OutboundSubstreamState { - /// Waiting for the user to send a message. The idle state for an outbound substream. - WaitingOutput(Framed), - /// Waiting to send a message to the remote. - PendingSend(Framed, GossipsubRpc), - /// Waiting to flush the substream so that the data arrives to the remote. - PendingFlush(Framed), - /// The substream is being closed. Used by either substream. - _Closing(Framed), - /// An error occurred during processing. - Poisoned, -} - -impl GossipsubHandler { - /// Builds a new `GossipsubHandler`. 
- pub fn new(protocol_id: impl Into>, max_transmit_size: usize) -> Self { - GossipsubHandler { - listen_protocol: SubstreamProtocol::new(ProtocolConfig::new(protocol_id, max_transmit_size), ()), - inbound_substream: None, - outbound_substream: None, - send_queue: SmallVec::new(), - keep_alive: KeepAlive::Yes, - outbound_substream_establishing: false, - } - } -} - -impl Default for GossipsubHandler { - fn default() -> Self { - GossipsubHandler { - listen_protocol: SubstreamProtocol::new(ProtocolConfig::default(), ()), - inbound_substream: None, - outbound_substream: None, - send_queue: SmallVec::new(), - keep_alive: KeepAlive::Yes, - outbound_substream_establishing: false, - } - } -} - -impl ConnectionHandler for GossipsubHandler { - type InEvent = GossipsubRpc; - type OutEvent = GossipsubRpc; - type Error = io::Error; - type InboundProtocol = ProtocolConfig; - type OutboundProtocol = ProtocolConfig; - type OutboundOpenInfo = GossipsubRpc; - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - self.listen_protocol.clone() - } - - fn inject_fully_negotiated_inbound( - &mut self, - substream: >::Output, - _info: Self::InboundOpenInfo, - ) { - // new inbound substream. Replace the current one, if it exists. - trace!("New inbound substream request"); - self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); - } - - fn inject_fully_negotiated_outbound( - &mut self, - substream: >::Output, - message: Self::OutboundOpenInfo, - ) { - self.outbound_substream_establishing = false; - // Should never establish a new outbound substream if one already exists. - // If this happens, an outbound message is not sent. 
- if self.outbound_substream.is_some() { - warn!("Established an outbound substream with one already available"); - // Add the message back to the send queue - self.send_queue.push(message); - } else { - self.outbound_substream = Some(OutboundSubstreamState::PendingSend(substream, message)); - } - } - - fn inject_event(&mut self, message: GossipsubRpc) { self.send_queue.push(message); } - - fn inject_dial_upgrade_error( - &mut self, - _: Self::OutboundOpenInfo, - _: ConnectionHandlerUpgrErr<>::Error>, - ) { - self.outbound_substream_establishing = false; - // Ignore upgrade errors for now. - // If a peer doesn't support this protocol, this will just ignore them, but not disconnect - // them. - } - - fn connection_keep_alive(&self) -> KeepAlive { self.keep_alive } - - #[allow(clippy::type_complexity)] - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // determine if we need to create the stream - if !self.send_queue.is_empty() && self.outbound_substream.is_none() && !self.outbound_substream_establishing { - let message = self.send_queue.remove(0); - self.send_queue.shrink_to_fit(); - self.outbound_substream_establishing = true; - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: self.listen_protocol.clone().map_info(|()| message), - }); - } - - loop { - match std::mem::replace(&mut self.inbound_substream, Some(InboundSubstreamState::Poisoned)) { - // inbound idle state - Some(InboundSubstreamState::WaitingInput(mut substream)) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(message))) => { - self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); - return Poll::Ready(ConnectionHandlerEvent::Custom(message)); - }, - Poll::Ready(Some(Err(e))) => { - debug!("Inbound substream error while awaiting input: {:?}", e); - self.inbound_substream = Some(InboundSubstreamState::Closing(substream)); - }, - // peer closed the stream - Poll::Ready(None) => { - self.inbound_substream = 
Some(InboundSubstreamState::Closing(substream)); - }, - Poll::Pending => { - self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); - break; - }, - } - }, - Some(InboundSubstreamState::Closing(mut substream)) => { - match Sink::poll_close(Pin::new(&mut substream), cx) { - Poll::Ready(res) => { - if let Err(e) = res { - // Don't close the connection but just drop the inbound substream. - // In case the remote has more to send, they will open up a new - // substream. - debug!("Inbound substream error while closing: {:?}", e); - } - - self.inbound_substream = None; - if self.outbound_substream.is_none() { - self.keep_alive = KeepAlive::No; - } - break; - }, - Poll::Pending => { - self.inbound_substream = Some(InboundSubstreamState::Closing(substream)); - break; - }, - } - }, - None => { - self.inbound_substream = None; - break; - }, - Some(InboundSubstreamState::Poisoned) => panic!("Error occurred during inbound stream processing"), - } - } - - loop { - match std::mem::replace(&mut self.outbound_substream, Some(OutboundSubstreamState::Poisoned)) { - // outbound idle state - Some(OutboundSubstreamState::WaitingOutput(substream)) => { - if !self.send_queue.is_empty() { - let message = self.send_queue.remove(0); - self.send_queue.shrink_to_fit(); - self.outbound_substream = Some(OutboundSubstreamState::PendingSend(substream, message)); - } else { - self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream)); - break; - } - }, - Some(OutboundSubstreamState::PendingSend(mut substream, message)) => { - match Sink::poll_ready(Pin::new(&mut substream), cx) { - Poll::Ready(Ok(())) => match Sink::start_send(Pin::new(&mut substream), message) { - Ok(()) => self.outbound_substream = Some(OutboundSubstreamState::PendingFlush(substream)), - Err(e) => { - if let io::ErrorKind::PermissionDenied = e.kind() { - error!("Message over the maximum transmission limit was not sent."); - self.outbound_substream = 
Some(OutboundSubstreamState::WaitingOutput(substream)); - } else { - return Poll::Ready(ConnectionHandlerEvent::Close(e)); - } - }, - }, - Poll::Ready(Err(e)) => { - debug!("Outbound substream error while sending output: {:?}", e); - return Poll::Ready(ConnectionHandlerEvent::Close(e)); - }, - Poll::Pending => { - self.outbound_substream = Some(OutboundSubstreamState::PendingSend(substream, message)); - break; - }, - } - }, - Some(OutboundSubstreamState::PendingFlush(mut substream)) => { - match Sink::poll_flush(Pin::new(&mut substream), cx) { - Poll::Ready(Ok(())) => { - self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream)) - }, - Poll::Ready(Err(e)) => return Poll::Ready(ConnectionHandlerEvent::Close(e)), - Poll::Pending => { - self.outbound_substream = Some(OutboundSubstreamState::PendingFlush(substream)); - break; - }, - } - }, - // Currently never used - manual shutdown may implement this in the future - Some(OutboundSubstreamState::_Closing(mut substream)) => { - match Sink::poll_close(Pin::new(&mut substream), cx) { - Poll::Ready(Ok(())) => { - self.outbound_substream = None; - if self.inbound_substream.is_none() { - self.keep_alive = KeepAlive::No; - } - break; - }, - Poll::Ready(Err(e)) => { - debug!("Outbound substream error while closing: {:?}", e); - return Poll::Ready(ConnectionHandlerEvent::Close(io::Error::new( - io::ErrorKind::BrokenPipe, - "Failed to close outbound substream", - ))); - }, - Poll::Pending => { - self.outbound_substream = Some(OutboundSubstreamState::_Closing(substream)); - break; - }, - } - }, - None => { - self.outbound_substream = None; - break; - }, - Some(OutboundSubstreamState::Poisoned) => panic!("Error occurred during outbound stream processing"), - } - } - - Poll::Pending - } -} diff --git a/mm2src/gossipsub/src/lib.rs b/mm2src/gossipsub/src/lib.rs deleted file mode 100644 index e0efa95571..0000000000 --- a/mm2src/gossipsub/src/lib.rs +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2020 Sigma Prime 
Pty Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon -//! flooodsub and meshsub routing protocols. -//! -//! # Overview -//! -//! *Note: The gossipsub protocol specifications -//! (https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) provide an outline for the -//! routing protocol. They should be consulted for further detail.* -//! -//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded -//! degree and amplification factor with the meshsub construction and augments it using gossip -//! propagation of metadata with the randomsub technique. -//! -//! The router maintains an overlay mesh network of peers on which to efficiently send messages and -//! metadata. Peers use control messages to broadcast and request known messages and -//! subscribe/unsubscribe from topics in the mesh network. -//! 
-//! # Important Discrepancies -//! -//! This section outlines the current implementation's potential discrepancies from that of other -//! implementations, due to undefined elements in the current specification. -//! -//! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter. -//! Topics are of type `TopicHash`. The current go implementation uses raw utf-8 strings, and this -//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64 -//! encoded) by setting the `hash_topics` configuration parameter to true. -//! -//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source -//! `PeerId` and a nonce (sequence number) of the message. The sequence numbers in this -//! implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned -//! integers. They are chosen at random in this implementation of gossipsub, but are sequential in -//! the current go implementation. -//! -//! # Using Gossipsub -//! -//! ## GossipsubConfig -//! -//! The [`GossipsubConfig`] struct specifies various network performance/tuning configuration -//! parameters. Specifically it specifies: -//! -//! [`GossipsubConfig`]: struct.GossipsubConfig.html -//! -//! - `protocol_id` - The protocol id that this implementation will accept connections on. -//! - `history_length` - The number of heartbeats which past messages are kept in cache (default: 5). -//! - `history_gossip` - The number of past heartbeats that the node will send gossip metadata -//! about (default: 3). -//! - `mesh_n` - The target number of peers store in the local mesh network. -//! (default: 6). -//! - `mesh_n_low` - The minimum number of peers in the local mesh network before. -//! trying to add more peers to the mesh from the connected peer pool (default: 4). -//! - `mesh_n_high` - The maximum number of peers in the local mesh network before removing peers to -//! reach `mesh_n` peers (default: 12). 
-//! - `gossip_lazy` - The number of peers that the local node will gossip to during a heartbeat (default: `mesh_n` = 6). -//! - `heartbeat_initial_delay - The initial time delay before starting the first heartbeat (default: 5 seconds). -//! - `heartbeat_interval` - The time between each heartbeat (default: 1 second). -//! - `fanout_ttl` - The fanout time to live time period. The timeout required before removing peers from the fanout -//! for a given topic (default: 1 minute). -//! - `max_transmit_size` - This sets the maximum transmission size for total gossipsub messages on the network. -//! - `hash_topics` - Whether to hash the topics using base64(SHA256(topic)) or to leave as plain utf-8 strings. -//! - `manual_propagation` - Whether gossipsub should immediately forward received messages on the -//! network. For applications requiring message validation, this should be set to false, then the -//! application should call `propagate_message(message_id, propagation_source)` once validated, to -//! propagate the message to peers. -//! -//! This struct implements the `Default` trait and can be initialised via -//! `GossipsubConfig::default()`. -//! -//! -//! ## Gossipsub -//! -//! The [`Gossipsub`] struct implements the `NetworkBehaviour` trait allowing it to act as the -//! routing behaviour in a `Swarm`. This struct requires an instance of `PeerId` and -//! [`GossipsubConfig`]. -//! -//! [`Gossipsub`]: struct.Gossipsub.html - -//! ## Example -//! -//! An example of initialising a gossipsub compatible swarm: -//! -//! ```ignore -//! #extern crate libp2p; -//! #extern crate futures; -//! #extern crate tokio; -//! #use libp2p::gossipsub::GossipsubEvent; -//! #use libp2p::{gossipsub, secio, -//! # tokio_codec::{FramedRead, LinesCodec}, -//! #}; -//! let local_key = secio::SecioKeyPair::ed25519_generated().unwrap(); -//! let local_pub_key = local_key.to_public_key(); -//! -//! // Set up an encrypted TCP Transport over the Mplex and Yamux protocols -//! 
let transport = libp2p::build_development_transport(local_key); -//! -//! // Create a Floodsub/Gossipsub topic -//! let topic = libp2p::floodsub::TopicBuilder::new("example").build(); -//! -//! // Create a Swarm to manage peers and events -//! let mut swarm = { -//! // set default parameters for gossipsub -//! let gossipsub_config = gossipsub::GossipsubConfig::default(); -//! // build a gossipsub network behaviour -//! let mut gossipsub = -//! gossipsub::Gossipsub::new(local_pub_key.clone().into_peer_id(), gossipsub_config); -//! gossipsub.subscribe(topic.clone()); -//! libp2p::Swarm::new( -//! transport, -//! gossipsub, -//! libp2p::core::topology::MemoryTopology::empty(local_pub_key), -//! ) -//! }; -//! -//! // Listen on all interfaces and whatever port the OS assigns -//! let addr = libp2p::Swarm::listen_on(&mut swarm, "/ip4/0.0.0.0/tcp/0".parse().unwrap()).unwrap(); -//! println!("Listening on {:?}", addr); -//! ``` - -pub mod protocol; - -mod behaviour; -mod config; -mod handler; -mod mcache; -mod topic; - -mod rpc_proto { - include!(concat!(env!("OUT_DIR"), "/gossipsub.pb.rs")); -} - -pub use self::behaviour::{Gossipsub, GossipsubEvent, GossipsubRpc}; -pub use self::config::{GossipsubConfig, GossipsubConfigBuilder}; -pub use self::protocol::{GossipsubMessage, MessageId}; -pub use self::topic::{Topic, TopicHash}; diff --git a/mm2src/gossipsub/src/mcache.rs b/mm2src/gossipsub/src/mcache.rs deleted file mode 100644 index fe92cd3c93..0000000000 --- a/mm2src/gossipsub/src/mcache.rs +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -extern crate fnv; - -use crate::protocol::{GossipsubMessage, MessageId}; -use crate::topic::TopicHash; -use std::collections::HashMap; - -/// CacheEntry stored in the history. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct CacheEntry { - mid: MessageId, - topics: Vec, -} - -/// MessageCache struct holding history of messages. -#[derive(Clone)] -pub struct MessageCache { - msgs: HashMap, - history: Vec>, - gossip: usize, - msg_id: fn(&GossipsubMessage) -> MessageId, -} - -/// Implementation of the MessageCache. -impl MessageCache { - pub fn new(gossip: usize, history_capacity: usize, msg_id: fn(&GossipsubMessage) -> MessageId) -> MessageCache { - MessageCache { - gossip, - msgs: HashMap::default(), - history: vec![Vec::new(); history_capacity], - msg_id, - } - } - - /// Creates a `MessageCache` with a default message id function. 
- #[allow(dead_code)] - pub fn new_default(gossip: usize, history_capacity: usize) -> MessageCache { - let default_id = |message: &GossipsubMessage| { - // default message id is: source + sequence number - let mut source_string = message.source.to_base58(); - source_string.push_str(&message.sequence_number.to_string()); - MessageId(source_string) - }; - MessageCache { - gossip, - msgs: HashMap::default(), - history: vec![Vec::new(); history_capacity], - msg_id: default_id, - } - } - - /// Put a message into the memory cache - pub fn put(&mut self, msg: GossipsubMessage) { - let message_id = (self.msg_id)(&msg); - let cache_entry = CacheEntry { - mid: message_id.clone(), - topics: msg.topics.clone(), - }; - - self.msgs.insert(message_id, msg); - - self.history[0].push(cache_entry); - } - - /// Get a message with `message_id` - pub fn get(&self, message_id: &MessageId) -> Option<&GossipsubMessage> { self.msgs.get(message_id) } - - /// Get a list of GossipIds for a given topic - pub fn get_gossip_ids(&self, topic: &TopicHash) -> Vec { - self.history[..self.gossip] - .iter() - .fold(vec![], |mut current_entries, entries| { - // search for entries with desired topic - let mut found_entries: Vec = entries - .iter() - .filter_map(|entry| { - if entry.topics.iter().any(|t| t == topic) { - Some(entry.mid.clone()) - } else { - None - } - }) - .collect(); - - // generate the list - current_entries.append(&mut found_entries); - current_entries - }) - } - - /// Shift the history array down one and delete messages associated with the - /// last entry - pub fn shift(&mut self) { - for entry in self.history.pop().expect("history is always > 1") { - self.msgs.remove(&entry.mid); - } - - // Insert an empty vec in position 0 - self.history.insert(0, Vec::new()); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{Topic, TopicHash}; - use libp2p_core::PeerId; - - fn gen_testm(x: u64, topics: Vec) -> GossipsubMessage { - let u8x: u8 = x as u8; - let source = 
PeerId::random(); - let data: Vec = vec![u8x]; - let sequence_number = x; - - GossipsubMessage { - source, - data, - sequence_number, - topics, - } - } - - #[test] - /// Test that the message cache can be created. - fn test_new_cache() { - let default_id = |message: &GossipsubMessage| { - // default message id is: source + sequence number - let mut source_string = message.source.to_base58(); - source_string.push_str(&message.sequence_number.to_string()); - MessageId(source_string) - }; - let x: usize = 3; - let mc = MessageCache::new(x, 5, default_id); - - assert_eq!(mc.gossip, x); - } - - #[test] - /// Test you can put one message and get one. - fn test_put_get_one() { - let mut mc = MessageCache::new_default(10, 15); - - let topic1_hash = Topic::new("topic1".into()).no_hash(); - let topic2_hash = Topic::new("topic2".into()).no_hash(); - - let m = gen_testm(10, vec![topic1_hash, topic2_hash]); - - mc.put(m.clone()); - - assert!(mc.history[0].len() == 1); - - let fetched = mc.get(&(mc.msg_id)(&m)); - - assert!(fetched.is_some()); - - // Make sure it is the same fetched message - match fetched { - Some(x) => assert_eq!(*x, m), - _ => panic!("expected {:?}", m), - } - } - - #[test] - /// Test attempting to 'get' with a wrong id. - fn test_get_wrong() { - let mut mc = MessageCache::new_default(10, 15); - - let topic1_hash = Topic::new("topic1".into()).no_hash(); - let topic2_hash = Topic::new("topic2".into()).no_hash(); - - let m = gen_testm(10, vec![topic1_hash, topic2_hash]); - - mc.put(m); - - // Try to get an incorrect ID - let wrong_id = MessageId(String::from("wrongid")); - let fetched = mc.get(&wrong_id); - assert!(fetched.is_none()); - } - - #[test] - /// Test attempting to 'get' empty message cache. 
- fn test_get_empty() { - let mc = MessageCache::new_default(10, 15); - - // Try to get an incorrect ID - let wrong_string = MessageId(String::from("imempty")); - let fetched = mc.get(&wrong_string); - assert!(fetched.is_none()); - } - - #[test] - /// Test adding a message with no topics. - fn test_no_topic_put() { - let mut mc = MessageCache::new_default(3, 5); - - // Build the message - let m = gen_testm(1, vec![]); - mc.put(m.clone()); - - let fetched = mc.get(&(mc.msg_id)(&m)); - - // Make sure it is the same fetched message - match fetched { - Some(x) => assert_eq!(*x, m), - _ => panic!("expected {:?}", m), - } - } - - #[test] - /// Test shift mechanism. - fn test_shift() { - let mut mc = MessageCache::new_default(1, 5); - - let topic1_hash = Topic::new("topic1".into()).no_hash(); - let topic2_hash = Topic::new("topic2".into()).no_hash(); - - // Build the message - for i in 0..10 { - let m = gen_testm(i, vec![topic1_hash.clone(), topic2_hash.clone()]); - mc.put(m.clone()); - } - - mc.shift(); - - // Ensure the shift occurred - assert!(mc.history[0].is_empty()); - assert!(mc.history[1].len() == 10); - - // Make sure no messages deleted - assert!(mc.msgs.len() == 10); - } - - #[test] - /// Test Shift with no additions. - fn test_empty_shift() { - let mut mc = MessageCache::new_default(1, 5); - - let topic1_hash = Topic::new("topic1".into()).no_hash(); - let topic2_hash = Topic::new("topic2".into()).no_hash(); - // Build the message - for i in 0..10 { - let m = gen_testm(i, vec![topic1_hash.clone(), topic2_hash.clone()]); - mc.put(m.clone()); - } - - mc.shift(); - - // Ensure the shift occurred - assert!(mc.history[0].is_empty()); - assert!(mc.history[1].len() == 10); - - mc.shift(); - - assert!(mc.history[2].len() == 10); - assert!(mc.history[1].is_empty()); - assert!(mc.history[0].is_empty()); - } - - #[test] - /// Test shift to see if the last history messages are removed. 
- fn test_remove_last_from_shift() { - let mut mc = MessageCache::new_default(4, 5); - - let topic1_hash = Topic::new("topic1".into()).no_hash(); - let topic2_hash = Topic::new("topic2".into()).no_hash(); - // Build the message - for i in 0..10 { - let m = gen_testm(i, vec![topic1_hash.clone(), topic2_hash.clone()]); - mc.put(m.clone()); - } - - // Shift right until deleting messages - mc.shift(); - mc.shift(); - mc.shift(); - mc.shift(); - - assert_eq!(mc.history[mc.history.len() - 1].len(), 10); - - // Shift and delete the messages - mc.shift(); - assert_eq!(mc.history[mc.history.len() - 1].len(), 0); - assert_eq!(mc.history[0].len(), 0); - assert_eq!(mc.msgs.len(), 0); - } -} diff --git a/mm2src/gossipsub/src/protocol.rs b/mm2src/gossipsub/src/protocol.rs deleted file mode 100644 index 1173a89b22..0000000000 --- a/mm2src/gossipsub/src/protocol.rs +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::behaviour::GossipsubRpc; -use crate::rpc_proto; -use crate::topic::TopicHash; -use byteorder::{BigEndian, ByteOrder}; -use bytes::Bytes; -use bytes::BytesMut; -use common::some_or_return_ok_none; -use futures::future; -use futures::prelude::*; -use futures_codec::{Decoder, Encoder, Framed}; -use libp2p_core::{InboundUpgrade, OutboundUpgrade, PeerId, UpgradeInfo}; -use prost::Message as ProtobufMessage; -use std::{borrow::Cow, io, iter, pin::Pin}; -use unsigned_varint::codec; - -/// Implementation of the `ConnectionUpgrade` for the Gossipsub protocol. -#[derive(Debug, Clone)] -pub struct ProtocolConfig { - protocol_id: Cow<'static, [u8]>, - max_transmit_size: usize, -} - -impl Default for ProtocolConfig { - fn default() -> Self { - Self { - protocol_id: Cow::Borrowed(b"/meshsub/1.0.0"), - max_transmit_size: 2048, - } - } -} - -impl ProtocolConfig { - /// Builds a new `ProtocolConfig`. - /// Sets the maximum gossip transmission size. 
- pub fn new(protocol_id: impl Into>, max_transmit_size: usize) -> ProtocolConfig { - ProtocolConfig { - protocol_id: protocol_id.into(), - max_transmit_size, - } - } -} - -impl UpgradeInfo for ProtocolConfig { - type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { iter::once(self.protocol_id.clone()) } -} - -type PinBoxFut = Pin> + Send>>; - -impl InboundUpgrade for ProtocolConfig -where - TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static, -{ - type Output = Framed; - type Error = io::Error; - type Future = PinBoxFut; - - fn upgrade_inbound(self, socket: TSocket, _: Self::Info) -> Self::Future { - let mut length_codec = codec::UviBytes::default(); - length_codec.set_max_len(self.max_transmit_size); - Box::pin(future::ok(Framed::new(socket, GossipsubCodec { length_codec }))) - } -} - -impl OutboundUpgrade for ProtocolConfig -where - TSocket: AsyncWrite + AsyncRead + Unpin + Send + 'static, -{ - type Output = Framed; - type Error = io::Error; - type Future = PinBoxFut; - - fn upgrade_outbound(self, socket: TSocket, _: Self::Info) -> Self::Future { - let mut length_codec = codec::UviBytes::default(); - length_codec.set_max_len(self.max_transmit_size); - Box::pin(future::ok(Framed::new(socket, GossipsubCodec { length_codec }))) - } -} - -/* Gossip codec for the framing */ - -pub struct GossipsubCodec { - /// Codec to encode/decode the Unsigned varint length prefix of the frames. 
- length_codec: codec::UviBytes, -} - -impl Encoder for GossipsubCodec { - type Item = GossipsubRpc; - type Error = io::Error; - - fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { - // messages - let publish = item - .messages - .into_iter() - .map(|message| rpc_proto::Message { - from: Some(message.source.to_bytes()), - data: Some(message.data), - seqno: Some(message.sequence_number.to_be_bytes().to_vec()), - topic_ids: message.topics.into_iter().map(TopicHash::into_string).collect(), - }) - .collect::>(); - - // subscriptions - let subscriptions = item - .subscriptions - .into_iter() - .map(|sub| rpc_proto::rpc::SubOpts { - subscribe: Some(sub.action == GossipsubSubscriptionAction::Subscribe), - topic_id: Some(sub.topic_hash.into_string()), - }) - .collect::>(); - - // control messages - let mut control = rpc_proto::ControlMessage { - ihave: Vec::new(), - iwant: Vec::new(), - graft: Vec::new(), - prune: Vec::new(), - iamrelay: None, - included_to_relays_mesh: None, - mesh_size: None, - }; - - let empty_control_msg = item.control_msgs.is_empty(); - - for action in item.control_msgs { - match action { - // collect all ihave messages - GossipsubControlAction::IHave { - topic_hash, - message_ids, - } => { - let rpc_ihave = rpc_proto::ControlIHave { - topic_id: Some(topic_hash.into_string()), - message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), - }; - control.ihave.push(rpc_ihave); - }, - GossipsubControlAction::IWant { message_ids } => { - let rpc_iwant = rpc_proto::ControlIWant { - message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), - }; - control.iwant.push(rpc_iwant); - }, - GossipsubControlAction::Graft { topic_hash } => { - let rpc_graft = rpc_proto::ControlGraft { - topic_id: Some(topic_hash.into_string()), - }; - control.graft.push(rpc_graft); - }, - GossipsubControlAction::Prune { topic_hash } => { - let rpc_prune = rpc_proto::ControlPrune { - topic_id: 
Some(topic_hash.into_string()), - }; - control.prune.push(rpc_prune); - }, - GossipsubControlAction::IAmRelay(is_relay) => { - control.iamrelay = Some(is_relay); - }, - GossipsubControlAction::IncludedToRelaysMesh { included, mesh_size } => { - control.included_to_relays_mesh = Some(rpc_proto::IncludedToRelaysMesh { - included, - mesh_size: mesh_size as u32, - }); - }, - GossipsubControlAction::MeshSize(size) => { - control.mesh_size = Some(size as u32); - }, - } - } - - let rpc = rpc_proto::Rpc { - subscriptions, - publish, - control: if empty_control_msg { None } else { Some(control) }, - }; - - let mut buf = Vec::with_capacity(rpc.encoded_len()); - - rpc.encode(&mut buf).expect("Buffer has sufficient capacity"); - - // length prefix the protobuf message, ensuring the max limit is not hit - self.length_codec.encode(Bytes::from(buf), dst) - } -} - -impl Decoder for GossipsubCodec { - type Item = GossipsubRpc; - type Error = io::Error; - - fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let packet = some_or_return_ok_none!(self.length_codec.decode(src)?); - - let rpc = rpc_proto::Rpc::decode(&packet[..])?; - - let mut messages = Vec::with_capacity(rpc.publish.len()); - for publish in rpc.publish.into_iter() { - // ensure the sequence number is a u64 - let seq_no = publish - .seqno - .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "sequence number was not provided"))?; - if seq_no.len() != 8 { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "sequence number has an incorrect size", - )); - } - messages.push(GossipsubMessage { - source: PeerId::from_bytes(&publish.from.unwrap_or_default()) - .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "Invalid Peer Id"))?, - data: publish.data.unwrap_or_default(), - sequence_number: BigEndian::read_u64(&seq_no), - topics: publish.topic_ids.into_iter().map(TopicHash::from_raw).collect(), - }); - } - - let mut control_msgs = Vec::new(); - - if let Some(rpc_control) = rpc.control { 
- // Collect the gossipsub control messages - let ihave_msgs: Vec = rpc_control - .ihave - .into_iter() - .map(|ihave| GossipsubControlAction::IHave { - topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()), - message_ids: ihave.message_ids.into_iter().map(MessageId).collect::>(), - }) - .collect(); - - let iwant_msgs: Vec = rpc_control - .iwant - .into_iter() - .map(|iwant| GossipsubControlAction::IWant { - message_ids: iwant.message_ids.into_iter().map(MessageId).collect::>(), - }) - .collect(); - - let graft_msgs: Vec = rpc_control - .graft - .into_iter() - .map(|graft| GossipsubControlAction::Graft { - topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()), - }) - .collect(); - - let prune_msgs: Vec = rpc_control - .prune - .into_iter() - .map(|prune| GossipsubControlAction::Prune { - topic_hash: TopicHash::from_raw(prune.topic_id.unwrap_or_default()), - }) - .collect(); - - control_msgs.extend(ihave_msgs); - control_msgs.extend(iwant_msgs); - control_msgs.extend(graft_msgs); - control_msgs.extend(prune_msgs); - - if let Some(is_relay) = rpc_control.iamrelay { - control_msgs.extend(iter::once(GossipsubControlAction::IAmRelay(is_relay))); - } - - if let Some(mesh_size) = rpc_control.mesh_size { - control_msgs.extend(iter::once(GossipsubControlAction::MeshSize(mesh_size as usize))); - } - - if let Some(msg) = rpc_control.included_to_relays_mesh { - control_msgs.extend(iter::once(GossipsubControlAction::IncludedToRelaysMesh { - included: msg.included, - mesh_size: msg.mesh_size as usize, - })); - } - } - - Ok(Some(GossipsubRpc { - messages, - subscriptions: rpc - .subscriptions - .into_iter() - .map(|sub| GossipsubSubscription { - action: if Some(true) == sub.subscribe { - GossipsubSubscriptionAction::Subscribe - } else { - GossipsubSubscriptionAction::Unsubscribe - }, - topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), - }) - .collect(), - control_msgs, - })) - } -} - -/// A type for gossipsub message ids. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct MessageId(pub String); - -impl std::fmt::Display for MessageId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } -} - -impl From for String { - fn from(mid: MessageId) -> Self { mid.0 } -} - -/// A message received by the gossipsub system. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct GossipsubMessage { - /// Id of the peer that published this message. - pub source: PeerId, - - /// Content of the message. Its meaning is out of scope of this library. - pub data: Vec, - - /// A random sequence number. - pub sequence_number: u64, - - /// List of topics this message belongs to. - /// - /// Each message can belong to multiple topics at once. - pub topics: Vec, -} - -/// A subscription received by the gossipsub system. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct GossipsubSubscription { - /// Action to perform. - pub action: GossipsubSubscriptionAction, - /// The topic from which to subscribe or unsubscribe. - pub topic_hash: TopicHash, -} - -/// Action that a subscription wants to perform. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum GossipsubSubscriptionAction { - /// The remote wants to subscribe to the given topic. - Subscribe, - /// The remote wants to unsubscribe from the given topic. - Unsubscribe, -} - -/// A Control message received by the gossipsub system. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum GossipsubControlAction { - /// Node broadcasts known messages per topic - IHave control message. - IHave { - /// The topic of the messages. - topic_hash: TopicHash, - /// A list of known message ids (peer_id + sequence _number) as a string. - message_ids: Vec, - }, - /// The node requests specific message ids (peer_id + sequence _number) - IWant control message. - IWant { - /// A list of known message ids (peer_id + sequence _number) as a string. 
- message_ids: Vec, - }, - /// The node has been added to the mesh - Graft control message. - Graft { - /// The mesh topic the peer should be added to. - topic_hash: TopicHash, - }, - /// The node has been removed from the mesh - Prune control message. - Prune { - /// The mesh topic the peer should be removed from. - topic_hash: TopicHash, - }, - IAmRelay(bool), - /// Whether the node included or excluded from other node relays mesh - IncludedToRelaysMesh { - included: bool, - mesh_size: usize, - }, - MeshSize(usize), -} diff --git a/mm2src/gossipsub/src/rpc.proto b/mm2src/gossipsub/src/rpc.proto deleted file mode 100644 index 8a194011a9..0000000000 --- a/mm2src/gossipsub/src/rpc.proto +++ /dev/null @@ -1,87 +0,0 @@ -syntax = "proto2"; - -package gossipsub.pb; - -message RPC { - repeated SubOpts subscriptions = 1; - repeated Message publish = 2; - - message SubOpts { - optional bool subscribe = 1; // subscribe or unsubscribe - optional string topic_id = 2; - } - - optional ControlMessage control = 3; -} - -message Message { - optional bytes from = 1; - optional bytes data = 2; - optional bytes seqno = 3; - repeated string topic_ids = 4; -} - -message IncludedToRelaysMesh { - required bool included = 1; - required uint32 mesh_size = 2; -} - -message ControlMessage { - repeated ControlIHave ihave = 1; - repeated ControlIWant iwant = 2; - repeated ControlGraft graft = 3; - repeated ControlPrune prune = 4; - optional bool iamrelay = 5; - optional IncludedToRelaysMesh included_to_relays_mesh = 6; - optional uint32 mesh_size = 7; -} - -message ControlIHave { - optional string topic_id = 1; - repeated string message_ids = 2; -} - -message ControlIWant { - repeated string message_ids= 1; -} - -message ControlGraft { - optional string topic_id = 1; -} - -message ControlGraftRelay {} - -message ControlPrune { - optional string topic_id = 1; -} - -message ControlPruneRelay {} - -// topicID = hash(topicDescriptor); (not the topic.name) -message TopicDescriptor { - optional 
string name = 1; - optional AuthOpts auth = 2; - optional EncOpts enc = 3; - - message AuthOpts { - optional AuthMode mode = 1; - repeated bytes keys = 2; // root keys to trust - - enum AuthMode { - NONE = 0; // no authentication, anyone can publish - KEY = 1; // only messages signed by keys in the topic descriptor are accepted - WOT = 2; // web of trust, certificates can allow publisher set to grow - } - } - - message EncOpts { - optional EncMode mode = 1; - repeated bytes key_hashes = 2; // the hashes of the shared keys used (salted) - - enum EncMode { - NONE = 0; // no encryption, anyone can read - SHAREDKEY = 1; // messages are encrypted with shared key - WOT = 2; // web of trust, certificates can allow publisher set to grow - } - } -} diff --git a/mm2src/gossipsub/src/topic.rs b/mm2src/gossipsub/src/topic.rs deleted file mode 100644 index 970ea8947a..0000000000 --- a/mm2src/gossipsub/src/topic.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2020 Sigma Prime Pty Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -use crate::rpc_proto; -use base64::encode; -use prost::Message; -use sha2::{Digest, Sha256}; -use std::fmt; - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct TopicHash { - /// The topic hash. Stored as a string to align with the protobuf API. - hash: String, -} - -impl TopicHash { - pub fn from_raw(hash: impl Into) -> TopicHash { TopicHash { hash: hash.into() } } - - pub fn into_string(self) -> String { self.hash } - - pub fn as_str(&self) -> &str { &self.hash } -} - -/// A gossipsub topic. -#[derive(Debug, Clone)] -pub struct Topic { - topic: String, -} - -impl Topic { - pub fn new(topic: String) -> Self { Topic { topic } } - - /// Creates a `TopicHash` by SHA256 hashing the topic then base64 encoding the - /// hash. - pub fn sha256_hash(&self) -> TopicHash { - let topic_descripter = rpc_proto::TopicDescriptor { - name: Some(self.topic.clone()), - auth: None, - enc: None, - }; - let mut bytes = Vec::with_capacity(topic_descripter.encoded_len()); - topic_descripter.encode(&mut bytes).expect("buffer is large enough"); - let hash = encode(Sha256::digest(&bytes).as_slice()); - - TopicHash { hash } - } - - /// Creates a `TopicHash` as a raw string. 
- pub fn no_hash(&self) -> TopicHash { - TopicHash { - hash: self.topic.clone(), - } - } -} - -impl fmt::Display for Topic { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.topic) } -} - -impl fmt::Display for TopicHash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.hash) } -} diff --git a/mm2src/gossipsub/tests/smoke.rs b/mm2src/gossipsub/tests/smoke.rs deleted file mode 100644 index 44c11416fb..0000000000 --- a/mm2src/gossipsub/tests/smoke.rs +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
- -use futures::prelude::*; -use log::debug; -use quickcheck::{QuickCheck, TestResult}; -use rand::{random, seq::SliceRandom, SeedableRng}; -use std::{pin::Pin, - task::{Context, Poll}, - time::Duration}; - -use atomicdex_gossipsub::{Gossipsub, GossipsubConfigBuilder, GossipsubEvent, Topic}; -use futures::StreamExt; -use libp2p_core::{identity, multiaddr::Protocol, transport::MemoryTransport, upgrade, Multiaddr, Transport}; -use libp2p_plaintext::PlainText2Config; -use libp2p_swarm::{Swarm, SwarmEvent}; -use libp2p_yamux as yamux; - -struct Graph { - pub nodes: Vec<(Multiaddr, Swarm)>, -} - -impl Future for Graph { - type Output = (Multiaddr, GossipsubEvent); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - for (addr, node) in &mut self.nodes { - loop { - match node.poll_next_unpin(cx) { - Poll::Ready(Some(SwarmEvent::Behaviour(event))) => return Poll::Ready((addr.clone(), event)), - Poll::Ready(Some(_)) => {}, - Poll::Ready(None) => panic!("unexpected None when polling nodes"), - Poll::Pending => break, - } - } - } - - Poll::Pending - } -} - -impl Graph { - fn new_connected(num_nodes: usize, seed: u64) -> Graph { - if num_nodes == 0 { - panic!("expecting at least one node"); - } - - let mut rng = rand::rngs::StdRng::seed_from_u64(seed); - - let mut not_connected_nodes = std::iter::once(()) - .cycle() - .take(num_nodes) - .map(|_| build_node()) - .collect::)>>(); - - let mut connected_nodes = vec![not_connected_nodes.pop().unwrap()]; - - while !not_connected_nodes.is_empty() { - connected_nodes.shuffle(&mut rng); - not_connected_nodes.shuffle(&mut rng); - - let mut next = not_connected_nodes.pop().unwrap(); - let connected_addr = &connected_nodes[0].0; - - // Memory transport can not handle addresses with `/p2p` suffix. 
- let mut connected_addr_no_p2p = connected_addr.clone(); - let p2p_suffix_connected = connected_addr_no_p2p.pop(); - - debug!( - "Connect: {} -> {}", - next.0.clone().pop().unwrap(), - p2p_suffix_connected.unwrap() - ); - - Swarm::dial(&mut next.1, connected_addr_no_p2p).unwrap(); - - connected_nodes.push(next); - } - - Graph { nodes: connected_nodes } - } - - /// Polls the graph and passes each event into the provided FnMut until the closure returns - /// `true`. - /// - /// Returns [`true`] on success and [`false`] on timeout. - fn wait_for bool>(&mut self, mut f: F) -> bool { - let fut = futures::future::poll_fn(move |cx| match self.poll_unpin(cx) { - Poll::Ready((_addr, ev)) if f(&ev) => Poll::Ready(()), - _ => Poll::Pending, - }); - - let fut = async_std::future::timeout(Duration::from_secs(10), fut); - - futures::executor::block_on(fut).is_ok() - } - - /// Polls the graph until Poll::Pending is obtained, completing the underlying polls. - fn drain_poll(self) -> Self { - // The future below should return self. Given that it is a FnMut and not a FnOnce, one needs - // to wrap `self` in an Option, leaving a `None` behind after the final `Poll::Ready`. 
- let mut this = Some(self); - - let fut = futures::future::poll_fn(move |cx| match &mut this { - Some(graph) => loop { - match graph.poll_unpin(cx) { - Poll::Ready(_) => {}, - Poll::Pending => return Poll::Ready(this.take().unwrap()), - } - }, - None => panic!("future called after final return"), - }); - let fut = async_std::future::timeout(Duration::from_secs(10), fut); - futures::executor::block_on(fut).unwrap() - } -} - -fn build_node() -> (Multiaddr, Swarm) { - let key = identity::Keypair::generate_ed25519(); - let public_key = key.public(); - - let transport = MemoryTransport::default() - .upgrade(upgrade::Version::V1) - .authenticate(PlainText2Config { - local_public_key: public_key.clone(), - }) - .multiplex(yamux::YamuxConfig::default()) - .boxed(); - - let peer_id = public_key.to_peer_id(); - - // NOTE: The graph of created nodes can be disconnected from the mesh point of view as nodes - // can reach their d_lo value and not add other nodes to their mesh. To speed up this test, we - // reduce the default values of the heartbeat, so that all nodes will receive gossip in a - // timely fashion. 
- - let config = GossipsubConfigBuilder::default() - .heartbeat_initial_delay(Duration::from_millis(100)) - .heartbeat_interval(Duration::from_millis(200)) - .history_length(10) - .history_gossip(10) - .build(); - let behaviour = Gossipsub::new(peer_id, config); - let mut swarm = Swarm::new(transport, behaviour, peer_id); - - let port = 1 + random::(); - let mut addr: Multiaddr = Protocol::Memory(port).into(); - swarm.listen_on(addr.clone()).unwrap(); - - addr = addr.with(libp2p_core::multiaddr::Protocol::P2p(public_key.to_peer_id().into())); - - (addr, swarm) -} - -#[test] -fn multi_hop_propagation() { - let _ = env_logger::try_init(); - - fn prop(num_nodes: u8, seed: u64) -> TestResult { - if !(2..=50).contains(&num_nodes) { - return TestResult::discard(); - } - - debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); - - let mut graph = Graph::new_connected(num_nodes as usize, seed); - let number_nodes = graph.nodes.len(); - - // Subscribe each node to the same topic. - let topic = Topic::new("test-net".into()); - for (_addr, node) in &mut graph.nodes { - node.behaviour_mut().subscribe(topic.clone()); - } - - // Wait for all nodes to be subscribed. - let mut subscribed = 0; - let all_subscribed = graph.wait_for(move |ev| { - if let GossipsubEvent::Subscribed { .. } = ev { - subscribed += 1; - if subscribed == (number_nodes - 1) * 2 { - return true; - } - } - - false - }); - if !all_subscribed { - return TestResult::error(format!( - "Timed out waiting for all nodes to subscribe but only have {:?}/{:?}.", - subscribed, num_nodes, - )); - } - - // It can happen that the publish occurs before all grafts have completed causing this test - // to fail. We drain all the poll messages before publishing. - graph = graph.drain_poll(); - - // Publish a single message. - graph.nodes[0].1.behaviour_mut().publish(&topic, vec![1, 2, 3]); - - // Wait for all nodes to receive the published message. 
- let mut received_msgs = 0; - let all_received = graph.wait_for(move |ev| { - if let GossipsubEvent::Message { .. } = ev { - received_msgs += 1; - if received_msgs == number_nodes - 1 { - return true; - } - } - - false - }); - if !all_received { - return TestResult::error(format!( - "Timed out waiting for all nodes to receive the msg but only have {:?}/{:?}.", - received_msgs, num_nodes, - )); - } - - TestResult::passed() - } - - QuickCheck::new() - .max_tests(5) - .quickcheck(prop as fn(u8, u64) -> TestResult) -} diff --git a/mm2src/mm2_core/Cargo.toml b/mm2src/mm2_core/Cargo.toml index a9d308be9a..d0df9dbe7c 100644 --- a/mm2src/mm2_core/Cargo.toml +++ b/mm2src/mm2_core/Cargo.toml @@ -17,10 +17,10 @@ derive_more = "0.99" futures = { version = "0.3", package = "futures", features = ["compat", "async-await", "thread-pool"] } hex = "0.4.2" lazy_static = "1.4" +libp2p = { git = "https://github.com/KomodoPlatform/rust-libp2p.git", tag = "k-0.52.4", default-features = false, features = ["identify"] } mm2_err_handle = { path = "../mm2_err_handle" } mm2_event_stream = { path = "../mm2_event_stream" } mm2_metrics = { path = "../mm2_metrics" } -mm2_libp2p = { path = "../mm2_p2p", package = "mm2_p2p" } primitives = { path = "../mm2_bitcoin/primitives" } rand = { version = "0.7", features = ["std", "small_rng", "wasm-bindgen"] } serde = "1" diff --git a/mm2src/mm2_core/src/mm_ctx.rs b/mm2src/mm2_core/src/mm_ctx.rs index 3f56970f3c..8c417f2ce1 100644 --- a/mm2src/mm2_core/src/mm_ctx.rs +++ b/mm2src/mm2_core/src/mm_ctx.rs @@ -9,8 +9,8 @@ use futures::channel::oneshot; use futures::lock::Mutex as AsyncMutex; use gstuff::{try_s, Constructible, ERR, ERRL}; use lazy_static::lazy_static; +use libp2p::PeerId; use mm2_event_stream::{controller::Controller, Event, EventStreamConfiguration}; -use mm2_libp2p::PeerAddress; use mm2_metrics::{MetricsArc, MetricsOps}; use primitives::hash::H160; use rand::Rng; @@ -146,7 +146,7 @@ pub struct MmCtx { #[cfg(not(target_arch = "wasm32"))] pub 
async_sqlite_connection: Constructible>>, /// Links the RPC context to the P2P context to handle health check responses. - pub healthcheck_response_handler: AsyncMutex>>, + pub healthcheck_response_handler: AsyncMutex>>, } impl MmCtx { diff --git a/mm2src/mm2_libp2p/Cargo.toml b/mm2src/mm2_libp2p/Cargo.toml deleted file mode 100644 index bd01045799..0000000000 --- a/mm2src/mm2_libp2p/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "mm2-libp2p" -version = "0.1.0" -authors = ["Artem Pikulin "] -edition = "2018" - -[lib] -doctest = false - -[dependencies] -async-trait = "0.1" -atomicdex-gossipsub = { path = "../gossipsub" } -common = { path = "../common" } -derive_more = "0.99" -libp2p-floodsub = { path = "../floodsub" } -futures = { version = "0.3.1", package = "futures", features = ["compat", "async-await"] } -hex = "0.4.2" -lazy_static = "1.4" -secp256k1 = { version = "0.20", features = ["rand"] } -log = "0.4.17" -rand = { package = "rand", version = "0.7", features = ["std", "wasm-bindgen"] } -regex = "1" -rmp-serde = "0.14.3" -serde = { version = "1.0", features = ["derive"] } -serde_bytes = "0.11.5" -sha2 = "0.10" -void = "1.0" -wasm-timer = "0.2.4" - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -futures-rustls = { version = "0.24" } -tokio = { version = "1.20", features = ["rt-multi-thread", "macros"] } -libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", tag = "v0.45.1", default-features = false, features = ["dns-tokio", "floodsub", "mplex", "noise", "ping", "request-response", "secp256k1", "tcp-tokio", "websocket"] } - -[target.'cfg(target_arch = "wasm32")'.dependencies] -futures-rustls = { version = "0.22" } -libp2p = { git = "https://github.com/libp2p/rust-libp2p.git", tag = "v0.45.1", default-features = false, features = ["floodsub", "mplex", "noise", "ping", "request-response", "secp256k1", "wasm-ext", "wasm-ext-websocket"] } -wasm-bindgen-futures = "0.4.21" - -[dev-dependencies] -async-std = { version = "1.6.2", 
features = ["unstable"] } -env_logger = "0.9.3" -serde_json = { version = "1", features = ["preserve_order", "raw_value"] } diff --git a/mm2src/mm2_libp2p/src/adex_ping.rs b/mm2src/mm2_libp2p/src/adex_ping.rs deleted file mode 100644 index 73ffb94d83..0000000000 --- a/mm2src/mm2_libp2p/src/adex_ping.rs +++ /dev/null @@ -1,56 +0,0 @@ -use libp2p::swarm::NetworkBehaviour; -use libp2p::{ping::{Ping, PingConfig, PingEvent}, - swarm::{CloseConnection, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, - NetworkBehaviour}; -use log::error; -use std::{collections::VecDeque, - num::NonZeroU32, - task::{Context, Poll}}; -use void::Void; - -/// Wrapper around libp2p Ping behaviour that forcefully disconnects a peer using NetworkBehaviourAction::DisconnectPeer -/// event. -/// Libp2p has unclear ConnectionHandlers keep alive logic so in some cases even if Ping handler emits Close event the -/// connection is kept active which is undesirable. -#[derive(NetworkBehaviour)] -#[behaviour(out_event = "Void", event_process = true)] -#[behaviour(poll_method = "poll_event")] -pub struct AdexPing { - ping: Ping, - #[behaviour(ignore)] - events: VecDeque::ConnectionHandler>>, -} - -impl NetworkBehaviourEventProcess for AdexPing { - fn inject_event(&mut self, event: PingEvent) { - if let Err(e) = event.result { - error!("Ping error {}. 
Disconnecting peer {}", e, event.peer); - self.events.push_back(NetworkBehaviourAction::CloseConnection { - peer_id: event.peer, - connection: CloseConnection::All, - }); - } - } -} - -#[allow(clippy::new_without_default)] -impl AdexPing { - pub fn new() -> Self { - AdexPing { - ping: Ping::new(PingConfig::new().with_max_failures(unsafe { NonZeroU32::new_unchecked(2) })), - events: VecDeque::new(), - } - } - - fn poll_event( - &mut self, - _cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll::ConnectionHandler>> { - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - Poll::Pending - } -} diff --git a/mm2src/mm2_libp2p/src/atomicdex_behaviour.rs b/mm2src/mm2_libp2p/src/atomicdex_behaviour.rs deleted file mode 100644 index c0f2c47725..0000000000 --- a/mm2src/mm2_libp2p/src/atomicdex_behaviour.rs +++ /dev/null @@ -1,984 +0,0 @@ -use crate::{adex_ping::AdexPing, - network::{get_all_network_seednodes, NETID_8762}, - peers_exchange::{PeerAddresses, PeersExchange}, - request_response::{build_request_response_behaviour, PeerRequest, PeerResponse, RequestResponseBehaviour, - RequestResponseBehaviourEvent, RequestResponseSender}, - runtime::SwarmRuntime, - NetworkInfo, NetworkPorts, RelayAddress, RelayAddressError}; -use atomicdex_gossipsub::{Gossipsub, GossipsubConfigBuilder, GossipsubEvent, GossipsubMessage, MessageId, Topic, - TopicHash}; -use common::executor::SpawnFuture; -use derive_more::Display; -use futures::{channel::{mpsc::{channel, Receiver, Sender}, - oneshot}, - future::{join_all, poll_fn}, - Future, FutureExt, SinkExt, StreamExt}; -use futures_rustls::rustls; -use libp2p::core::transport::Boxed as BoxedTransport; -use libp2p::{core::{ConnectedPoint, Multiaddr, Transport}, - identity, - multiaddr::Protocol, - noise, - request_response::ResponseChannel, - swarm::{NetworkBehaviourEventProcess, Swarm}, - NetworkBehaviour, PeerId}; -use libp2p_floodsub::{Floodsub, FloodsubEvent, Topic as FloodsubTopic}; -use 
log::{debug, error, info}; -use rand::seq::SliceRandom; -use rand::Rng; -use std::{collections::{hash_map::DefaultHasher, BTreeMap}, - hash::{Hash, Hasher}, - iter, - net::IpAddr, - task::{Context, Poll}, - time::Duration}; -use void::Void; -use wasm_timer::{Instant, Interval}; - -pub type AdexCmdTx = Sender; -pub type AdexEventRx = Receiver; - -#[cfg(test)] mod tests; - -pub const PEERS_TOPIC: &str = "PEERS"; -const CONNECTED_RELAYS_CHECK_INTERVAL: Duration = Duration::from_secs(30); -const ANNOUNCE_INTERVAL: Duration = Duration::from_secs(600); -const ANNOUNCE_INITIAL_DELAY: Duration = Duration::from_secs(60); -const CHANNEL_BUF_SIZE: usize = 1024 * 8; - -/// Returns info about directly connected peers. -pub async fn get_directly_connected_peers(mut cmd_tx: AdexCmdTx) -> BTreeMap> { - let (result_tx, rx) = oneshot::channel(); - let cmd = AdexBehaviourCmd::GetPeersInfo { result_tx }; - cmd_tx.send(cmd).await.expect("Rx should be present"); - rx.await.expect("Tx should be present") -} - -/// Returns current gossipsub mesh state -pub async fn get_gossip_mesh(mut cmd_tx: AdexCmdTx) -> BTreeMap> { - let (result_tx, rx) = oneshot::channel(); - let cmd = AdexBehaviourCmd::GetGossipMesh { result_tx }; - cmd_tx.send(cmd).await.expect("Rx should be present"); - rx.await.expect("Tx should be present") -} - -pub async fn get_gossip_peer_topics(mut cmd_tx: AdexCmdTx) -> BTreeMap> { - let (result_tx, rx) = oneshot::channel(); - let cmd = AdexBehaviourCmd::GetGossipPeerTopics { result_tx }; - cmd_tx.send(cmd).await.expect("Rx should be present"); - rx.await.expect("Tx should be present") -} - -pub async fn get_gossip_topic_peers(mut cmd_tx: AdexCmdTx) -> BTreeMap> { - let (result_tx, rx) = oneshot::channel(); - let cmd = AdexBehaviourCmd::GetGossipTopicPeers { result_tx }; - cmd_tx.send(cmd).await.expect("Rx should be present"); - rx.await.expect("Tx should be present") -} - -pub async fn get_relay_mesh(mut cmd_tx: AdexCmdTx) -> Vec { - let (result_tx, rx) = oneshot::channel(); 
- let cmd = AdexBehaviourCmd::GetRelayMesh { result_tx }; - cmd_tx.send(cmd).await.expect("Rx should be present"); - rx.await.expect("Tx should be present") -} - -#[derive(Debug)] -pub struct AdexResponseChannel(ResponseChannel); - -impl From> for AdexResponseChannel { - fn from(res: ResponseChannel) -> Self { AdexResponseChannel(res) } -} - -impl From for ResponseChannel { - fn from(res: AdexResponseChannel) -> Self { res.0 } -} - -#[derive(Debug)] -pub enum AdexBehaviourCmd { - Subscribe { - /// Subscribe to this topic - topic: String, - }, - PublishMsg { - topics: Vec, - msg: Vec, - }, - PublishMsgFrom { - topics: Vec, - msg: Vec, - from: PeerId, - }, - /// Request relays sequential until a response is received. - RequestAnyRelay { - req: Vec, - response_tx: oneshot::Sender)>>, - }, - /// Request given peers and collect all their responses. - RequestPeers { - req: Vec, - peers: Vec, - response_tx: oneshot::Sender>, - }, - /// Request relays and collect all their responses. - RequestRelays { - req: Vec, - response_tx: oneshot::Sender>, - }, - /// Send a response using a `response_channel`. - SendResponse { - /// Response to a request. - res: AdexResponse, - /// Pass the same `response_channel` as that was obtained from [`AdexBehaviourEvent::PeerRequest`]. - response_channel: AdexResponseChannel, - }, - GetPeersInfo { - result_tx: oneshot::Sender>>, - }, - GetGossipMesh { - result_tx: oneshot::Sender>>, - }, - GetGossipPeerTopics { - result_tx: oneshot::Sender>>, - }, - GetGossipTopicPeers { - result_tx: oneshot::Sender>>, - }, - GetRelayMesh { - result_tx: oneshot::Sender>, - }, - /// Add a reserved peer to the peer exchange. - AddReservedPeer { - peer: PeerId, - addresses: PeerAddresses, - }, - PropagateMessage { - message_id: MessageId, - propagation_source: PeerId, - }, -} - -/// The structure is the same as `PeerResponse`, -/// but is used to prevent `PeerResponse` from being used outside the network implementation. 
-#[derive(Debug, Eq, PartialEq)] -pub enum AdexResponse { - Ok { response: Vec }, - None, - Err { error: String }, -} - -impl From for AdexResponse { - fn from(res: PeerResponse) -> Self { - match res { - PeerResponse::Ok { res } => AdexResponse::Ok { response: res }, - PeerResponse::None => AdexResponse::None, - PeerResponse::Err { err } => AdexResponse::Err { error: err }, - } - } -} - -impl From for PeerResponse { - fn from(res: AdexResponse) -> Self { - match res { - AdexResponse::Ok { response } => PeerResponse::Ok { res: response }, - AdexResponse::None => PeerResponse::None, - AdexResponse::Err { error } => PeerResponse::Err { err: error }, - } - } -} - -/// The structure consists of GossipsubEvent and RequestResponse events. -/// It is used to prevent the network events from being used outside the network implementation. -#[derive(Debug)] -pub enum AdexBehaviourEvent { - /// A message has been received. - /// Derived from GossipsubEvent. - Message(PeerId, MessageId, GossipsubMessage), - /// A remote subscribed to a topic. - Subscribed { - /// Remote that has subscribed. - peer_id: PeerId, - /// The topic it has subscribed to. - topic: TopicHash, - }, - /// A remote unsubscribed from a topic. - Unsubscribed { - /// Remote that has unsubscribed. - peer_id: PeerId, - /// The topic it has subscribed from. - topic: TopicHash, - }, - /// A remote peer sent a request and waits for a response. - PeerRequest { - /// Remote that sent this request. - peer_id: PeerId, - /// The serialized data. - request: Vec, - /// A channel for sending a response to this request. - /// The channel is used to identify the peer on the network that is waiting for an answer to this request. - /// See [`AdexBehaviourCmd::SendResponse`]. 
- response_channel: AdexResponseChannel, - }, -} - -impl From for AdexBehaviourEvent { - fn from(event: GossipsubEvent) -> Self { - match event { - GossipsubEvent::Message(peer_id, message_id, gossipsub_message) => { - AdexBehaviourEvent::Message(peer_id, message_id, gossipsub_message) - }, - GossipsubEvent::Subscribed { peer_id, topic } => AdexBehaviourEvent::Subscribed { peer_id, topic }, - GossipsubEvent::Unsubscribed { peer_id, topic } => AdexBehaviourEvent::Unsubscribed { peer_id, topic }, - } - } -} - -/// AtomicDEX libp2p Network behaviour implementation -#[derive(NetworkBehaviour)] -#[behaviour(event_process = true)] -pub struct AtomicDexBehaviour { - floodsub: Floodsub, - #[behaviour(ignore)] - event_tx: Sender, - #[behaviour(ignore)] - runtime: SwarmRuntime, - #[behaviour(ignore)] - cmd_rx: Receiver, - #[behaviour(ignore)] - netid: u16, - gossipsub: Gossipsub, - request_response: RequestResponseBehaviour, - peers_exchange: PeersExchange, - ping: AdexPing, -} - -impl AtomicDexBehaviour { - fn notify_on_adex_event(&mut self, event: AdexBehaviourEvent) { - if let Err(e) = self.event_tx.try_send(event) { - error!("notify_on_adex_event error {}", e); - } - } - - fn spawn(&self, fut: impl Future + Send + 'static) { self.runtime.spawn(fut) } - - fn process_cmd(&mut self, cmd: AdexBehaviourCmd) { - match cmd { - AdexBehaviourCmd::Subscribe { topic } => { - let topic = Topic::new(topic); - self.gossipsub.subscribe(topic); - }, - AdexBehaviourCmd::PublishMsg { topics, msg } => { - self.gossipsub.publish_many(topics.into_iter().map(Topic::new), msg); - }, - AdexBehaviourCmd::PublishMsgFrom { topics, msg, from } => { - self.gossipsub - .publish_many_from(topics.into_iter().map(Topic::new), msg, from); - }, - AdexBehaviourCmd::RequestAnyRelay { req, response_tx } => { - let relays = self.gossipsub.get_relay_mesh(); - // spawn the `request_any_peer` future - let future = request_any_peer(relays, req, self.request_response.sender(), response_tx); - self.spawn(future); - 
}, - AdexBehaviourCmd::RequestPeers { - req, - peers, - response_tx, - } => { - let peers = peers - .into_iter() - .filter_map(|peer| match peer.parse() { - Ok(p) => Some(p), - Err(e) => { - error!("Error on parse peer id {:?}: {:?}", peer, e); - None - }, - }) - .collect(); - let future = request_peers(peers, req, self.request_response.sender(), response_tx); - self.spawn(future); - }, - AdexBehaviourCmd::RequestRelays { req, response_tx } => { - let relays = self.gossipsub.get_relay_mesh(); - // spawn the `request_peers` future - let future = request_peers(relays, req, self.request_response.sender(), response_tx); - self.spawn(future); - }, - AdexBehaviourCmd::SendResponse { res, response_channel } => { - if let Err(response) = self.request_response.send_response(response_channel.into(), res.into()) { - error!("Error sending response: {:?}", response); - } - }, - AdexBehaviourCmd::GetPeersInfo { result_tx } => { - let result = self - .gossipsub - .get_peers_connections() - .into_iter() - .map(|(peer_id, connected_points)| { - let peer_id = peer_id.to_base58(); - let connected_points = connected_points - .into_iter() - .map(|(_conn_id, point)| match point { - ConnectedPoint::Dialer { address, .. } => address.to_string(), - ConnectedPoint::Listener { send_back_addr, .. 
} => send_back_addr.to_string(), - }) - .collect(); - (peer_id, connected_points) - }) - .collect(); - if result_tx.send(result).is_err() { - debug!("Result rx is dropped"); - } - }, - AdexBehaviourCmd::GetGossipMesh { result_tx } => { - let result = self - .gossipsub - .get_mesh() - .iter() - .map(|(topic, peers)| { - let topic = topic.to_string(); - let peers = peers.iter().map(|peer| peer.to_string()).collect(); - (topic, peers) - }) - .collect(); - if result_tx.send(result).is_err() { - debug!("Result rx is dropped"); - } - }, - AdexBehaviourCmd::GetGossipPeerTopics { result_tx } => { - let result = self - .gossipsub - .get_all_peer_topics() - .iter() - .map(|(peer, topics)| { - let peer = peer.to_string(); - let topics = topics.iter().map(|topic| topic.to_string()).collect(); - (peer, topics) - }) - .collect(); - if result_tx.send(result).is_err() { - error!("Result rx is dropped"); - } - }, - AdexBehaviourCmd::GetGossipTopicPeers { result_tx } => { - let result = self - .gossipsub - .get_all_topic_peers() - .iter() - .map(|(topic, peers)| { - let topic = topic.to_string(); - let peers = peers.iter().map(|peer| peer.to_string()).collect(); - (topic, peers) - }) - .collect(); - if result_tx.send(result).is_err() { - error!("Result rx is dropped"); - } - }, - AdexBehaviourCmd::GetRelayMesh { result_tx } => { - let result = self - .gossipsub - .get_relay_mesh() - .into_iter() - .map(|peer| peer.to_string()) - .collect(); - if result_tx.send(result).is_err() { - error!("Result rx is dropped"); - } - }, - AdexBehaviourCmd::AddReservedPeer { peer, addresses } => { - self.peers_exchange - .add_peer_addresses_to_reserved_peers(&peer, addresses); - }, - AdexBehaviourCmd::PropagateMessage { - message_id, - propagation_source, - } => { - self.gossipsub.propagate_message(&message_id, &propagation_source); - }, - } - } - - fn announce_listeners(&mut self, listeners: PeerAddresses) { - let serialized = rmp_serde::to_vec(&listeners).expect("PeerAddresses serialization should 
never fail"); - self.floodsub.publish(FloodsubTopic::new(PEERS_TOPIC), serialized); - } - - pub fn connected_relays_len(&self) -> usize { self.gossipsub.connected_relays_len() } - - pub fn relay_mesh_len(&self) -> usize { self.gossipsub.relay_mesh_len() } - - pub fn received_messages_in_period(&self) -> (Duration, usize) { self.gossipsub.get_received_messages_in_period() } - - pub fn connected_peers_len(&self) -> usize { self.gossipsub.get_num_peers() } -} - -impl NetworkBehaviourEventProcess for AtomicDexBehaviour { - fn inject_event(&mut self, event: GossipsubEvent) { self.notify_on_adex_event(event.into()); } -} - -impl NetworkBehaviourEventProcess for AtomicDexBehaviour { - fn inject_event(&mut self, event: FloodsubEvent) { - // do not process peer announce on 8762 temporary - if self.netid != NETID_8762 { - if let FloodsubEvent::Message(message) = &event { - for topic in &message.topics { - if topic == &FloodsubTopic::new(PEERS_TOPIC) { - let addresses: PeerAddresses = match rmp_serde::from_slice(&message.data) { - Ok(a) => a, - Err(_) => return, - }; - self.peers_exchange - .add_peer_addresses_to_known_peers(&message.source, addresses); - } - } - } - } - } -} - -impl NetworkBehaviourEventProcess for AtomicDexBehaviour { - fn inject_event(&mut self, _event: Void) {} -} - -impl NetworkBehaviourEventProcess<()> for AtomicDexBehaviour { - fn inject_event(&mut self, _event: ()) {} -} - -impl NetworkBehaviourEventProcess for AtomicDexBehaviour { - fn inject_event(&mut self, event: RequestResponseBehaviourEvent) { - match event { - RequestResponseBehaviourEvent::InboundRequest { - peer_id, - request, - response_channel, - } => { - let event = AdexBehaviourEvent::PeerRequest { - peer_id, - request: request.req, - response_channel: response_channel.into(), - }; - // forward the event to the AdexBehaviourCmd handler - self.notify_on_adex_event(event); - }, - } - } -} - -/// Custom types mapping the complex associated types of AtomicDexBehaviour to the ExpandedSwarm 
-type AtomicDexSwarm = Swarm; - -fn maintain_connection_to_relays(swarm: &mut AtomicDexSwarm, bootstrap_addresses: &[Multiaddr]) { - let behaviour = swarm.behaviour(); - let connected_relays = behaviour.gossipsub.connected_relays(); - let mesh_n_low = behaviour.gossipsub.get_config().mesh_n_low; - let mesh_n = behaviour.gossipsub.get_config().mesh_n; - // allow 2 * mesh_n_high connections to other nodes - let max_n = behaviour.gossipsub.get_config().mesh_n_high * 2; - - let mut rng = rand::thread_rng(); - if connected_relays.len() < mesh_n_low { - let to_connect_num = mesh_n - connected_relays.len(); - let to_connect = swarm - .behaviour_mut() - .peers_exchange - .get_random_peers(to_connect_num, |peer| !connected_relays.contains(peer)); - - // choose some random bootstrap addresses to connect if peers exchange returned not enough peers - if to_connect.len() < to_connect_num { - let connect_bootstrap_num = to_connect_num - to_connect.len(); - for addr in bootstrap_addresses - .iter() - .filter(|addr| !swarm.behaviour().gossipsub.is_connected_to_addr(addr)) - .collect::>() - .choose_multiple(&mut rng, connect_bootstrap_num) - { - if let Err(e) = libp2p::Swarm::dial(swarm, (*addr).clone()) { - error!("Bootstrap addr {} dial error {}", addr, e); - } - } - } - for (peer, addresses) in to_connect { - for addr in addresses { - if swarm.behaviour().gossipsub.is_connected_to_addr(&addr) { - continue; - } - if let Err(e) = libp2p::Swarm::dial(swarm, addr.clone()) { - error!("Peer {} address {} dial error {}", peer, addr, e); - } - } - } - } - - if connected_relays.len() > max_n { - let to_disconnect_num = connected_relays.len() - max_n; - let relays_mesh = swarm.behaviour().gossipsub.get_relay_mesh(); - let not_in_mesh: Vec<_> = connected_relays - .iter() - .filter(|peer| !relays_mesh.contains(peer)) - .collect(); - for peer in not_in_mesh.choose_multiple(&mut rng, to_disconnect_num) { - if !swarm.behaviour().peers_exchange.is_reserved_peer(peer) { - info!("Disconnecting 
peer {}", peer); - if Swarm::disconnect_peer_id(swarm, **peer).is_err() { - error!("Peer {} disconnect error", peer); - } - } - } - } - - for relay in connected_relays { - if !swarm.behaviour().peers_exchange.is_known_peer(&relay) { - swarm.behaviour_mut().peers_exchange.add_known_peer(relay); - } - } -} - -fn announce_my_addresses(swarm: &mut AtomicDexSwarm) { - let global_listeners: PeerAddresses = Swarm::listeners(swarm) - .filter(|listener| { - for protocol in listener.iter() { - if let Protocol::Ip4(ip) = protocol { - return ip.is_global(); - } - } - false - }) - .take(1) - .cloned() - .collect(); - if !global_listeners.is_empty() { - swarm.behaviour_mut().announce_listeners(global_listeners); - } -} - -#[derive(Debug, Display)] -pub enum AdexBehaviourError { - #[display(fmt = "{}", _0)] - ParsingRelayAddress(RelayAddressError), -} - -impl From for AdexBehaviourError { - fn from(e: RelayAddressError) -> Self { AdexBehaviourError::ParsingRelayAddress(e) } -} - -pub struct WssCerts { - pub server_priv_key: rustls::PrivateKey, - pub certs: Vec, -} - -pub enum NodeType { - Light { - network_ports: NetworkPorts, - }, - LightInMemory, - Relay { - ip: IpAddr, - network_ports: NetworkPorts, - wss_certs: Option, - }, - RelayInMemory { - port: u64, - }, -} - -impl NodeType { - pub fn to_network_info(&self) -> NetworkInfo { - match self { - NodeType::Light { network_ports } | NodeType::Relay { network_ports, .. } => NetworkInfo::Distributed { - network_ports: *network_ports, - }, - NodeType::LightInMemory | NodeType::RelayInMemory { .. } => NetworkInfo::InMemory, - } - } - - pub fn is_relay(&self) -> bool { matches!(self, NodeType::Relay { .. } | NodeType::RelayInMemory { .. }) } - - pub fn wss_certs(&self) -> Option<&WssCerts> { - match self { - NodeType::Relay { wss_certs, .. } => wss_certs.as_ref(), - _ => None, - } - } -} - -/// Creates and spawns new AdexBehaviour Swarm returning: -/// 1. tx to send control commands -/// 2. 
rx emitting gossip events to processing side -/// 3. our peer_id -/// 4. abort handle to stop the P2P processing fut. -pub async fn spawn_gossipsub( - netid: u16, - force_key: Option<[u8; 32]>, - runtime: SwarmRuntime, - to_dial: Vec, - node_type: NodeType, - on_poll: impl Fn(&AtomicDexSwarm) + Send + 'static, -) -> Result<(Sender, AdexEventRx, PeerId), AdexBehaviourError> { - let (result_tx, result_rx) = oneshot::channel(); - - let runtime_c = runtime.clone(); - let fut = async move { - let result = start_gossipsub(netid, force_key, runtime, to_dial, node_type, on_poll); - result_tx.send(result).unwrap(); - }; - - // `Libp2p` must be spawned on the tokio runtime - runtime_c.spawn(fut); - result_rx.await.expect("Fatal error on starting gossipsub") -} - -/// Creates and spawns new AdexBehaviour Swarm returning: -/// 1. tx to send control commands -/// 2. rx emitting gossip events to processing side -/// 3. our peer_id -/// 4. abort handle to stop the P2P processing fut -/// -/// Prefer using [`spawn_gossipsub`] to make sure the Swarm is initialized and spawned on the same runtime. -/// Otherwise, you can face the following error: -/// `panicked at 'there is no reactor running, must be called from the context of a Tokio 1.x runtime'`. 
-#[allow(clippy::too_many_arguments)] -fn start_gossipsub( - netid: u16, - force_key: Option<[u8; 32]>, - runtime: SwarmRuntime, - to_dial: Vec, - node_type: NodeType, - on_poll: impl Fn(&AtomicDexSwarm) + Send + 'static, -) -> Result<(Sender, AdexEventRx, PeerId), AdexBehaviourError> { - let i_am_relay = node_type.is_relay(); - let mut rng = rand::thread_rng(); - let local_key = generate_ed25519_keypair(&mut rng, force_key); - let local_peer_id = PeerId::from(local_key.public()); - info!("Local peer id: {:?}", local_peer_id); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&local_key) - .expect("Signing libp2p-noise static DH keypair failed."); - - let network_info = node_type.to_network_info(); - let transport = match network_info { - NetworkInfo::InMemory => build_memory_transport(noise_keys), - NetworkInfo::Distributed { .. } => build_dns_ws_transport(noise_keys, node_type.wss_certs()), - }; - - let (cmd_tx, cmd_rx) = channel(CHANNEL_BUF_SIZE); - let (event_tx, event_rx) = channel(CHANNEL_BUF_SIZE); - - let bootstrap = to_dial - .into_iter() - .map(|addr| addr.try_to_multiaddr(network_info)) - .collect::, _>>()?; - - let (mesh_n_low, mesh_n, mesh_n_high) = if i_am_relay { (4, 6, 12) } else { (2, 3, 4) }; - - // Create a Swarm to manage peers and events - let mut swarm = { - // to set default parameters for gossipsub use: - // let gossipsub_config = gossipsub::GossipsubConfig::default(); - - // To content-address message, we can take the hash of message and use it as an ID. 
- let message_id_fn = |message: &GossipsubMessage| { - let mut s = DefaultHasher::new(); - message.data.hash(&mut s); - message.sequence_number.hash(&mut s); - MessageId(s.finish().to_string()) - }; - - // set custom gossipsub - let gossipsub_config = GossipsubConfigBuilder::new() - .message_id_fn(message_id_fn) - .i_am_relay(i_am_relay) - .mesh_n_low(mesh_n_low) - .mesh_n(mesh_n) - .mesh_n_high(mesh_n_high) - .manual_propagation() - .max_transmit_size(1024 * 1024 - 100) - .build(); - // build a gossipsub network behaviour - let mut gossipsub = Gossipsub::new(local_peer_id, gossipsub_config); - - let floodsub = Floodsub::new(local_peer_id, netid != NETID_8762); - - let mut peers_exchange = PeersExchange::new(network_info); - if !network_info.in_memory() { - // Please note WASM nodes don't support `PeersExchange` currently, - // so `get_all_network_seednodes` returns an empty list. - for (peer_id, addr) in get_all_network_seednodes(netid) { - let multiaddr = addr.try_to_multiaddr(network_info)?; - peers_exchange.add_peer_addresses_to_known_peers(&peer_id, iter::once(multiaddr).collect()); - gossipsub.add_explicit_relay(peer_id); - } - } - - // build a request-response network behaviour - let request_response = build_request_response_behaviour(); - - // use default ping config with 15s interval, 20s timeout and 1 max failure - let ping = AdexPing::new(); - - let adex_behavior = AtomicDexBehaviour { - floodsub, - event_tx, - runtime: runtime.clone(), - cmd_rx, - netid, - gossipsub, - request_response, - peers_exchange, - ping, - }; - libp2p::swarm::SwarmBuilder::new(transport, adex_behavior, local_peer_id) - .executor(Box::new(runtime.clone())) - .build() - }; - swarm - .behaviour_mut() - .floodsub - .subscribe(FloodsubTopic::new(PEERS_TOPIC.to_owned())); - - match node_type { - NodeType::Relay { - ip, - network_ports, - wss_certs, - } => { - let dns_addr: Multiaddr = format!("/ip4/{}/tcp/{}", ip, network_ports.tcp).parse().unwrap(); - libp2p::Swarm::listen_on(&mut 
swarm, dns_addr).unwrap(); - if wss_certs.is_some() { - let wss_addr: Multiaddr = format!("/ip4/{}/tcp/{}/wss", ip, network_ports.wss).parse().unwrap(); - libp2p::Swarm::listen_on(&mut swarm, wss_addr).unwrap(); - } - }, - NodeType::RelayInMemory { port } => { - let memory_addr: Multiaddr = format!("/memory/{}", port).parse().unwrap(); - libp2p::Swarm::listen_on(&mut swarm, memory_addr).unwrap(); - }, - _ => (), - } - - for relay in bootstrap.choose_multiple(&mut rng, mesh_n) { - match libp2p::Swarm::dial(&mut swarm, relay.clone()) { - Ok(_) => info!("Dialed {}", relay), - Err(e) => error!("Dial {:?} failed: {:?}", relay, e), - } - } - - let mut check_connected_relays_interval = Interval::new_at( - Instant::now() + CONNECTED_RELAYS_CHECK_INTERVAL, - CONNECTED_RELAYS_CHECK_INTERVAL, - ); - let mut announce_interval = Interval::new_at(Instant::now() + ANNOUNCE_INITIAL_DELAY, ANNOUNCE_INTERVAL); - let mut listening = false; - let polling_fut = poll_fn(move |cx: &mut Context| { - loop { - match swarm.behaviour_mut().cmd_rx.poll_next_unpin(cx) { - Poll::Ready(Some(cmd)) => swarm.behaviour_mut().process_cmd(cmd), - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => break, - } - } - - loop { - match swarm.poll_next_unpin(cx) { - Poll::Ready(Some(event)) => debug!("Swarm event {:?}", event), - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => break, - } - } - - if swarm.behaviour().gossipsub.is_relay() { - while let Poll::Ready(Some(())) = announce_interval.poll_next_unpin(cx) { - announce_my_addresses(&mut swarm); - } - } - - while let Poll::Ready(Some(())) = check_connected_relays_interval.poll_next_unpin(cx) { - maintain_connection_to_relays(&mut swarm, &bootstrap); - } - - if !listening && i_am_relay { - for listener in Swarm::listeners(&swarm) { - info!("Listening on {}", listener); - listening = true; - } - } - on_poll(&swarm); - Poll::Pending - }); - - runtime.spawn(polling_fut.then(|_| futures::future::ready(()))); - Ok((cmd_tx, event_rx, 
local_peer_id)) -} - -#[cfg(target_arch = "wasm32")] -fn build_dns_ws_transport( - noise_keys: libp2p::noise::AuthenticKeypair, - _wss_certs: Option<&WssCerts>, -) -> BoxedTransport<(PeerId, libp2p::core::muxing::StreamMuxerBox)> { - let websocket = libp2p::wasm_ext::ffi::websocket_transport(); - let transport = libp2p::wasm_ext::ExtTransport::new(websocket); - upgrade_transport(transport, noise_keys) -} - -#[cfg(not(target_arch = "wasm32"))] -fn build_dns_ws_transport( - noise_keys: libp2p::noise::AuthenticKeypair, - wss_certs: Option<&WssCerts>, -) -> BoxedTransport<(PeerId, libp2p::core::muxing::StreamMuxerBox)> { - use libp2p::websocket::tls as libp2p_tls; - - let ws_tcp = libp2p::dns::TokioDnsConfig::custom( - libp2p::tcp::TokioTcpConfig::new().nodelay(true), - libp2p::dns::ResolverConfig::google(), - Default::default(), - ) - .unwrap(); - let mut ws_dns_tcp = libp2p::websocket::WsConfig::new(ws_tcp); - - if let Some(certs) = wss_certs { - let server_priv_key = libp2p_tls::PrivateKey::new(certs.server_priv_key.0.clone()); - let certs = certs - .certs - .iter() - .map(|cert| libp2p_tls::Certificate::new(cert.0.clone())); - let wss_config = libp2p_tls::Config::new(server_priv_key, certs).unwrap(); - ws_dns_tcp.set_tls_config(wss_config); - } - - // This is for preventing port reuse of dns/tcp instead of - // websocket ports. - let dns_tcp = libp2p::dns::TokioDnsConfig::custom( - libp2p::tcp::TokioTcpConfig::new().nodelay(true), - libp2p::dns::ResolverConfig::google(), - Default::default(), - ) - .unwrap(); - - let transport = dns_tcp.or_transport(ws_dns_tcp); - upgrade_transport(transport, noise_keys) -} - -fn build_memory_transport( - noise_keys: libp2p::noise::AuthenticKeypair, -) -> BoxedTransport<(PeerId, libp2p::core::muxing::StreamMuxerBox)> { - let transport = libp2p::core::transport::MemoryTransport::default(); - upgrade_transport(transport, noise_keys) -} - -/// Set up an encrypted Transport over the Mplex protocol. 
-fn upgrade_transport( - transport: T, - noise_keys: libp2p::noise::AuthenticKeypair, -) -> BoxedTransport<(PeerId, libp2p::core::muxing::StreamMuxerBox)> -where - T: Transport + Send + Sync + 'static, - T::Output: futures::AsyncRead + futures::AsyncWrite + Unpin + Send + 'static, - T::ListenerUpgrade: Send, - T::Listener: Send, - T::Dial: Send, - T::Error: Send + Sync + 'static, -{ - transport - .upgrade(libp2p::core::upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(libp2p::mplex::MplexConfig::default()) - .timeout(std::time::Duration::from_secs(20)) - .map(|(peer, muxer), _| (peer, libp2p::core::muxing::StreamMuxerBox::new(muxer))) - .boxed() -} - -fn generate_ed25519_keypair(rng: &mut R, force_key: Option<[u8; 32]>) -> identity::Keypair { - let mut raw_key = match force_key { - Some(key) => key, - None => { - let mut key = [0; 32]; - rng.fill_bytes(&mut key); - key - }, - }; - let secret = identity::ed25519::SecretKey::from_bytes(&mut raw_key).expect("Secret length is 32 bytes"); - let keypair = identity::ed25519::Keypair::from(secret); - identity::Keypair::Ed25519(keypair) -} - -/// Request the peers sequential until a `PeerResponse::Ok()` will not be received. -async fn request_any_peer( - peers: Vec, - request_data: Vec, - request_response_tx: RequestResponseSender, - response_tx: oneshot::Sender)>>, -) { - debug!("start request_any_peer loop: peers {}", peers.len()); - for peer in peers { - match request_one_peer(peer, request_data.clone(), request_response_tx.clone()).await { - PeerResponse::Ok { res } => { - debug!("Received a response from peer {:?}, stop the request loop", peer); - if response_tx.send(Some((peer, res))).is_err() { - error!("Response oneshot channel was closed"); - } - return; - }, - PeerResponse::None => { - debug!("Received None from peer {:?}, request next peer", peer); - }, - PeerResponse::Err { err } => { - error!("Error on request {:?} peer: {:?}. 
Request next peer", peer, err); - }, - }; - } - - debug!("None of the peers responded to the request"); - if response_tx.send(None).is_err() { - error!("Response oneshot channel was closed"); - }; -} - -/// Request the peers and collect all their responses. -async fn request_peers( - peers: Vec, - request_data: Vec, - request_response_tx: RequestResponseSender, - response_tx: oneshot::Sender>, -) { - debug!("start request_any_peer loop: peers {}", peers.len()); - let mut futures = Vec::with_capacity(peers.len()); - for peer in peers { - let request_data = request_data.clone(); - let request_response_tx = request_response_tx.clone(); - futures.push(async move { - let response = request_one_peer(peer, request_data, request_response_tx).await; - (peer, response) - }) - } - - let responses = join_all(futures) - .await - .into_iter() - .map(|(peer_id, res)| { - let res: AdexResponse = res.into(); - (peer_id, res) - }) - .collect(); - - if response_tx.send(responses).is_err() { - error!("Response oneshot channel was closed"); - }; -} - -async fn request_one_peer(peer: PeerId, req: Vec, mut request_response_tx: RequestResponseSender) -> PeerResponse { - // Use the internal receiver to receive a response to this request. - let (internal_response_tx, internal_response_rx) = oneshot::channel(); - let request = PeerRequest { req }; - request_response_tx - .send((peer, request, internal_response_tx)) - .await - .unwrap(); - - match internal_response_rx.await { - Ok(response) => response, - Err(e) => PeerResponse::Err { - err: format!("Error on request the peer {:?}: \"{:?}\". 
Request next peer", peer, e), - }, - } -} diff --git a/mm2src/mm2_libp2p/src/atomicdex_behaviour/tests.rs b/mm2src/mm2_libp2p/src/atomicdex_behaviour/tests.rs deleted file mode 100644 index 4dfdfaa2cb..0000000000 --- a/mm2src/mm2_libp2p/src/atomicdex_behaviour/tests.rs +++ /dev/null @@ -1,368 +0,0 @@ -use super::{spawn_gossipsub, AdexBehaviourCmd, AdexBehaviourEvent, AdexResponse, NodeType, RelayAddress, SwarmRuntime}; -use async_std::task::spawn; -use common::executor::abortable_queue::AbortableQueue; -use futures::channel::{mpsc, oneshot}; -use futures::{SinkExt, StreamExt}; -use libp2p::PeerId; -use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; -use std::sync::Arc; -#[cfg(not(windows))] use std::sync::Mutex; -use std::time::Duration; - -static TEST_LISTEN_PORT: AtomicU64 = AtomicU64::new(1); - -lazy_static! { - static ref SYSTEM: AbortableQueue = AbortableQueue::default(); -} - -fn next_port() -> u64 { TEST_LISTEN_PORT.fetch_add(1, Ordering::Relaxed) } - -struct Node { - peer_id: PeerId, - cmd_tx: mpsc::Sender, -} - -impl Node { - async fn spawn(port: u64, seednodes: Vec, on_event: F) -> Node - where - F: Fn(mpsc::Sender, AdexBehaviourEvent) + Send + 'static, - { - let spawner = SwarmRuntime::new(SYSTEM.weak_spawner()); - let node_type = NodeType::RelayInMemory { port }; - let seednodes = seednodes.into_iter().map(RelayAddress::Memory).collect(); - let (cmd_tx, mut event_rx, peer_id) = spawn_gossipsub(333, None, spawner, seednodes, node_type, |_| {}) - .await - .expect("Error spawning AdexBehaviour"); - - // spawn a response future - let cmd_tx_fut = cmd_tx.clone(); - spawn(async move { - loop { - let cmd_tx_fut = cmd_tx_fut.clone(); - match event_rx.next().await { - Some(r) => on_event(cmd_tx_fut, r), - _ => { - log!("Finish response future"); - break; - }, - } - } - }); - - Node { peer_id, cmd_tx } - } - - async fn send_cmd(&mut self, cmd: AdexBehaviourCmd) { self.cmd_tx.send(cmd).await.unwrap(); } - - async fn wait_peers(&mut self, number: usize) { - 
let mut attempts = 0; - loop { - let (tx, rx) = oneshot::channel(); - self.cmd_tx - .send(AdexBehaviourCmd::GetPeersInfo { result_tx: tx }) - .await - .unwrap(); - match rx.await { - Ok(map) => { - if map.len() >= number { - return; - } - async_std::task::sleep(Duration::from_millis(500)).await; - }, - Err(e) => panic!("{}", e), - } - attempts += 1; - if attempts >= 10 { - panic!("wait_peers {} attempts exceeded", attempts); - } - } - } -} - -#[tokio::test] -async fn test_request_response_ok() { - let _ = env_logger::try_init(); - - let request_received = Arc::new(AtomicBool::new(false)); - let request_received_cpy = request_received.clone(); - - let node1_port = next_port(); - let node1 = Node::spawn(node1_port, vec![], move |mut cmd_tx, event| { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. - } => (request, response_channel), - _ => return, - }; - - request_received_cpy.store(true, Ordering::Relaxed); - assert_eq!(request, b"test request"); - - let res = AdexResponse::Ok { - response: b"test response".to_vec(), - }; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - }) - .await; - - let mut node2 = Node::spawn(next_port(), vec![node1_port], |_, _| ()).await; - - node2.wait_peers(1).await; - - let (response_tx, response_rx) = oneshot::channel(); - node2 - .send_cmd(AdexBehaviourCmd::RequestAnyRelay { - req: b"test request".to_vec(), - response_tx, - }) - .await; - - let response = response_rx.await.unwrap(); - assert_eq!(response, Some((node1.peer_id, b"test response".to_vec()))); - - assert!(request_received.load(Ordering::Relaxed)); -} - -#[tokio::test] -#[cfg(not(windows))] // https://github.com/KomodoPlatform/atomicDEX-API/issues/1712 -async fn test_request_response_ok_three_peers() { - let _ = env_logger::try_init(); - - #[derive(Default)] - struct RequestHandler { - requests: u8, - } - - impl RequestHandler { - fn handle(&mut self, mut 
cmd_tx: mpsc::Sender, event: AdexBehaviourEvent) { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. - } => (request, response_channel), - _ => return, - }; - - self.requests += 1; - - assert_eq!(request, b"test request"); - - // the first time we should respond the none - if self.requests == 1 { - let res = AdexResponse::None; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - return; - } - - // the second time we should respond an error - if self.requests == 2 { - let res = AdexResponse::Err { - error: "test error".into(), - }; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - return; - } - - // the third time we should respond an ok - if self.requests == 3 { - let res = AdexResponse::Ok { - response: format!("success {} request", self.requests).as_bytes().to_vec(), - }; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - return; - } - - panic!("Request received more than 3 times"); - } - } - - let request_handler = Arc::new(Mutex::new(RequestHandler::default())); - - let mut receivers = Vec::new(); - for _ in 0..3 { - let handler = request_handler.clone(); - let receiver_port = next_port(); - let receiver = Node::spawn(receiver_port, vec![], move |cmd_tx, event| { - let mut handler = handler.lock().unwrap(); - handler.handle(cmd_tx, event) - }) - .await; - receivers.push((receiver_port, receiver)); - } - - let mut sender = Node::spawn( - next_port(), - receivers.iter().map(|(port, _)| *port).collect(), - |_, _| (), - ) - .await; - - sender.wait_peers(3).await; - - let (response_tx, response_rx) = oneshot::channel(); - sender - .send_cmd(AdexBehaviourCmd::RequestAnyRelay { - req: b"test request".to_vec(), - response_tx, - }) - .await; - - let (_peer_id, res) = response_rx.await.unwrap().unwrap(); - assert_eq!(res, b"success 3 request".to_vec()); -} - -#[tokio::test] 
-async fn test_request_response_none() { - let _ = env_logger::try_init(); - - let request_received = Arc::new(AtomicBool::new(false)); - let request_received_cpy = request_received.clone(); - - let node1_port = next_port(); - let _node1 = Node::spawn(node1_port, vec![], move |mut cmd_tx, event| { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. - } => (request, response_channel), - _ => return, - }; - - request_received_cpy.store(true, Ordering::Relaxed); - assert_eq!(request, b"test request"); - - let res = AdexResponse::None; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - }) - .await; - - let mut node2 = Node::spawn(next_port(), vec![node1_port], |_, _| ()).await; - - node2.wait_peers(1).await; - - let (response_tx, response_rx) = oneshot::channel(); - node2 - .send_cmd(AdexBehaviourCmd::RequestAnyRelay { - req: b"test request".to_vec(), - response_tx, - }) - .await; - - assert_eq!(response_rx.await.unwrap(), None); - assert!(request_received.load(Ordering::Relaxed)); -} - -#[tokio::test] -#[cfg(target_os = "linux")] // https://github.com/KomodoPlatform/atomicDEX-API/issues/1712 -async fn test_request_peers_ok_three_peers() { - let _ = env_logger::try_init(); - - let receiver1_port = next_port(); - let receiver1 = Node::spawn(receiver1_port, vec![], move |mut cmd_tx, event| { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. 
- } => (request, response_channel), - _ => return, - }; - - assert_eq!(request, b"test request"); - - let res = AdexResponse::None; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - }) - .await; - - let receiver2_port = next_port(); - let receiver2 = Node::spawn(receiver2_port, vec![], move |mut cmd_tx, event| { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. - } => (request, response_channel), - _ => return, - }; - - assert_eq!(request, b"test request"); - - let res = AdexResponse::Err { - error: "test error".into(), - }; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - }) - .await; - - let receiver3_port = next_port(); - let receiver3 = Node::spawn(receiver3_port, vec![], move |mut cmd_tx, event| { - let (request, response_channel) = match event { - AdexBehaviourEvent::PeerRequest { - request, - response_channel, - .. - } => (request, response_channel), - _ => return, - }; - - assert_eq!(request, b"test request"); - - let res = AdexResponse::Ok { - response: b"test response".to_vec(), - }; - cmd_tx - .try_send(AdexBehaviourCmd::SendResponse { res, response_channel }) - .unwrap(); - }) - .await; - let mut sender = Node::spawn( - next_port(), - vec![receiver1_port, receiver2_port, receiver3_port], - |_, _| (), - ) - .await; - - sender.wait_peers(3).await; - - let (response_tx, response_rx) = oneshot::channel(); - sender - .send_cmd(AdexBehaviourCmd::RequestRelays { - req: b"test request".to_vec(), - response_tx, - }) - .await; - - let mut expected = vec![ - (receiver1.peer_id, AdexResponse::None), - (receiver2.peer_id, AdexResponse::Err { - error: "test error".into(), - }), - (receiver3.peer_id, AdexResponse::Ok { - response: b"test response".to_vec(), - }), - ]; - expected.sort_by(|x, y| x.0.cmp(&y.0)); - - let mut responses = response_rx.await.unwrap(); - responses.sort_by(|x, y| x.0.cmp(&y.0)); - 
assert_eq!(responses, expected); -} diff --git a/mm2src/mm2_libp2p/src/lib.rs b/mm2src/mm2_libp2p/src/lib.rs deleted file mode 100644 index ae554ca311..0000000000 --- a/mm2src/mm2_libp2p/src/lib.rs +++ /dev/null @@ -1,177 +0,0 @@ -#![feature(ip)] - -#[macro_use] extern crate lazy_static; - -mod adex_ping; -pub mod atomicdex_behaviour; -mod network; -pub mod peers_exchange; -pub mod relay_address; -pub mod request_response; -mod runtime; - -use lazy_static::lazy_static; -use secp256k1::{Message as SecpMessage, PublicKey as Secp256k1Pubkey, Secp256k1, SecretKey, SignOnly, Signature, - VerifyOnly}; -use sha2::{Digest, Sha256}; - -pub use atomicdex_behaviour::{spawn_gossipsub, AdexBehaviourError, NodeType, WssCerts}; -pub use atomicdex_gossipsub::{GossipsubEvent, GossipsubMessage, MessageId, TopicHash}; -pub use libp2p::identity::error::DecodingError; -pub use libp2p::identity::secp256k1::PublicKey as Libp2pSecpPublic; -pub use libp2p::identity::PublicKey as Libp2pPublic; -pub use libp2p::{Multiaddr, PeerId}; -pub use peers_exchange::PeerAddresses; -pub use relay_address::{RelayAddress, RelayAddressError}; -pub use runtime::SwarmRuntime; -use serde::{de, Deserialize, Serialize, Serializer}; - -lazy_static! { - static ref SECP_VERIFY: Secp256k1 = Secp256k1::verification_only(); - static ref SECP_SIGN: Secp256k1 = Secp256k1::signing_only(); -} - -#[derive(Clone, Copy)] -pub enum NetworkInfo { - /// The in-memory network. - InMemory, - /// The distributed network (out of the app memory). 
- Distributed { network_ports: NetworkPorts }, -} - -impl NetworkInfo { - pub fn in_memory(&self) -> bool { matches!(self, NetworkInfo::InMemory) } -} - -#[derive(Clone, Copy)] -pub struct NetworkPorts { - pub tcp: u16, - pub wss: u16, -} - -pub fn encode_message(message: &T) -> Result, rmp_serde::encode::Error> { - rmp_serde::to_vec(message) -} - -#[inline] -pub fn decode_message<'de, T: de::Deserialize<'de>>(bytes: &'de [u8]) -> Result { - rmp_serde::from_slice(bytes) -} - -#[derive(Deserialize, Serialize)] -struct SignedMessageSerdeHelper<'a> { - pubkey: PublicKey, - #[serde(with = "serde_bytes")] - signature: &'a [u8], - #[serde(with = "serde_bytes")] - payload: &'a [u8], -} - -pub fn encode_and_sign(message: &T, secret: &[u8; 32]) -> Result, rmp_serde::encode::Error> { - let secret = SecretKey::from_slice(secret).unwrap(); - let encoded = encode_message(message)?; - let sig_hash = SecpMessage::from_slice(&sha256(&encoded)).expect("Message::from_slice should never fail"); - let sig = SECP_SIGN.sign(&sig_hash, &secret); - let serialized_sig = sig.serialize_compact(); - let pubkey = PublicKey::from(Secp256k1Pubkey::from_secret_key(&*SECP_SIGN, &secret)); - let msg = SignedMessageSerdeHelper { - pubkey, - signature: &serialized_sig, - payload: &encoded, - }; - encode_message(&msg) -} - -pub fn decode_signed<'de, T: de::Deserialize<'de>>( - encoded: &'de [u8], -) -> Result<(T, Signature, PublicKey), rmp_serde::decode::Error> { - let helper: SignedMessageSerdeHelper = decode_message(encoded)?; - let signature = Signature::from_compact(helper.signature) - .map_err(|e| rmp_serde::decode::Error::Syntax(format!("Failed to parse signature {}", e)))?; - let sig_hash = SecpMessage::from_slice(&sha256(helper.payload)).expect("Message::from_slice should never fail"); - match &helper.pubkey { - PublicKey::Secp256k1(serialized_pub) => { - if SECP_VERIFY.verify(&sig_hash, &signature, &serialized_pub.0).is_err() { - return Err(rmp_serde::decode::Error::Syntax("Invalid message 
signature".into())); - } - }, - } - - let payload: T = decode_message(helper.payload)?; - Ok((payload, signature, helper.pubkey)) -} - -fn sha256(input: impl AsRef<[u8]>) -> [u8; 32] { Sha256::new().chain(input).finalize().into() } - -#[derive(Debug, Eq, PartialEq)] -pub struct Secp256k1PubkeySerialize(Secp256k1Pubkey); - -impl Serialize for Secp256k1PubkeySerialize { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_bytes(&self.0.serialize()) - } -} - -impl<'de> de::Deserialize<'de> for Secp256k1PubkeySerialize { - fn deserialize(deserializer: D) -> Result>::Error> - where - D: de::Deserializer<'de>, - { - let slice: &[u8] = de::Deserialize::deserialize(deserializer)?; - let pubkey = - Secp256k1Pubkey::from_slice(slice).map_err(|e| de::Error::custom(format!("Error {} parsing pubkey", e)))?; - - Ok(Secp256k1PubkeySerialize(pubkey)) - } -} - -#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum PublicKey { - Secp256k1(Secp256k1PubkeySerialize), -} - -impl PublicKey { - pub fn to_bytes(&self) -> Vec { - match self { - PublicKey::Secp256k1(pubkey) => pubkey.0.serialize().to_vec(), - } - } - - pub fn to_hex(&self) -> String { - match self { - PublicKey::Secp256k1(pubkey) => hex::encode(pubkey.0.serialize().as_ref()), - } - } - - pub fn unprefixed(&self) -> [u8; 32] { - let mut res = [0; 32]; - match self { - PublicKey::Secp256k1(pubkey) => res.copy_from_slice(&pubkey.0.serialize()[1..33]), - } - res - } -} - -impl From for PublicKey { - fn from(pubkey: Secp256k1Pubkey) -> Self { PublicKey::Secp256k1(Secp256k1PubkeySerialize(pubkey)) } -} - -pub type TopicPrefix = &'static str; -pub const TOPIC_SEPARATOR: char = '/'; - -pub fn pub_sub_topic(prefix: TopicPrefix, topic: &str) -> String { - let mut res = prefix.to_owned(); - res.push(TOPIC_SEPARATOR); - res.push_str(topic); - res -} - -#[test] -fn signed_message_serde() { - let secret = [1u8; 32]; - let initial_msg = vec![0u8; 32]; - let signed_encoded = encode_and_sign(&initial_msg, 
&secret).unwrap(); - - let (decoded, ..) = decode_signed::>(&signed_encoded).unwrap(); - assert_eq!(decoded, initial_msg); -} diff --git a/mm2src/mm2_libp2p/src/network.rs b/mm2src/mm2_libp2p/src/network.rs deleted file mode 100644 index 0b09c444db..0000000000 --- a/mm2src/mm2_libp2p/src/network.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::RelayAddress; -use libp2p::PeerId; - -pub const NETID_8762: u16 = 8762; - -#[cfg_attr(target_arch = "wasm32", allow(dead_code))] -const ALL_NETID_8762_SEEDNODES: &[(&str, &str)] = &[ - ( - "12D3KooWHKkHiNhZtKceQehHhPqwU5W1jXpoVBgS1qst899GjvTm", - "168.119.236.251", - ), - ( - "12D3KooWAToxtunEBWCoAHjefSv74Nsmxranw8juy3eKEdrQyGRF", - "168.119.236.240", - ), - ( - "12D3KooWSmEi8ypaVzFA1AGde2RjxNW5Pvxw3qa2fVe48PjNs63R", - "168.119.236.239", - ), - ( - "12D3KooWJWBnkVsVNjiqUEPjLyHpiSmQVAJ5t6qt1Txv5ctJi9Xd", - "135.181.34.220", - ), - ( - "12D3KooWEsuiKcQaBaKEzuMtT6uFjs89P1E8MK3wGRZbeuCbCw6P", - "168.119.236.241", - ), - ( - "12D3KooWHBeCnJdzNk51G4mLnao9cDsjuqiMTEo5wMFXrd25bd1F", - "168.119.236.243", - ), - ( - "12D3KooWKxavLCJVrQ5Gk1kd9m6cohctGQBmiKPS9XQFoXEoyGmS", - "168.119.236.249", - ), - ( - "12D3KooW9soGyPfX6kcyh3uVXNHq1y2dPmQNt2veKgdLXkBiCVKq", - "168.119.236.246", - ), - ( - "12D3KooWL6yrrNACb7t7RPyTEPxKmq8jtrcbkcNd6H5G2hK7bXaL", - "168.119.236.233", - ), - ( - "12D3KooWMrjLmrv8hNgAoVf1RfumfjyPStzd4nv5XL47zN4ZKisb", - "168.119.237.8", - ), - ( - "12D3KooWPR2RoPi19vQtLugjCdvVmCcGLP2iXAzbDfP3tp81ZL4d", - "168.119.237.13", - ), - ( - "12D3KooWJDoV9vJdy6PnzwVETZ3fWGMhV41VhSbocR1h2geFqq9Y", - "65.108.90.210", - ), - ( - "12D3KooWEaZpH61H4yuQkaNG5AsyGdpBhKRppaLdAY52a774ab5u", - "46.4.78.11", - ), - ( - "12D3KooWAd5gPXwX7eDvKWwkr2FZGfoJceKDCA53SHmTFFVkrN7Q", - "46.4.87.18", - ), -]; - -#[cfg(target_arch = "wasm32")] -pub fn get_all_network_seednodes(_netid: u16) -> Vec<(PeerId, RelayAddress)> { Vec::new() } - -#[cfg(not(target_arch = "wasm32"))] -pub fn get_all_network_seednodes(netid: u16) -> Vec<(PeerId, RelayAddress)> { - use 
std::str::FromStr; - - if netid != NETID_8762 { - return Vec::new(); - } - ALL_NETID_8762_SEEDNODES - .iter() - .map(|(peer_id, ipv4)| { - let peer_id = PeerId::from_str(peer_id).expect("valid peer id"); - let address = RelayAddress::IPv4(ipv4.to_string()); - (peer_id, address) - }) - .collect() -} diff --git a/mm2src/mm2_libp2p/src/peers_exchange.rs b/mm2src/mm2_libp2p/src/peers_exchange.rs deleted file mode 100644 index 1721f73f8d..0000000000 --- a/mm2src/mm2_libp2p/src/peers_exchange.rs +++ /dev/null @@ -1,436 +0,0 @@ -use crate::request_response::Codec; -use crate::NetworkInfo; -use futures::StreamExt; -use libp2p::swarm::NetworkBehaviour; -use libp2p::{multiaddr::{Multiaddr, Protocol}, - request_response::{ProtocolName, ProtocolSupport, RequestResponse, RequestResponseConfig, - RequestResponseEvent, RequestResponseMessage}, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, - NetworkBehaviour, PeerId}; -use log::{error, info, warn}; -use rand::seq::SliceRandom; -use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize}; -use std::collections::HashSet; -use std::{collections::{HashMap, VecDeque}, - iter, - task::{Context, Poll}, - time::Duration}; -use wasm_timer::{Instant, Interval}; - -pub type PeerAddresses = HashSet; - -#[derive(Debug, Clone)] -pub enum PeersExchangeProtocol { - Version1, -} - -impl ProtocolName for PeersExchangeProtocol { - fn protocol_name(&self) -> &[u8] { - match self { - PeersExchangeProtocol::Version1 => b"/peers-exchange/1", - } - } -} - -type PeersExchangeCodec = Codec; - -const DEFAULT_PEERS_NUM: usize = 20; -const REQUEST_PEERS_INITIAL_DELAY: u64 = 20; -const REQUEST_PEERS_INTERVAL: u64 = 300; -const MAX_PEERS: usize = 100; - -#[derive(Debug, Clone, Eq, Hash, PartialEq)] -pub struct PeerIdSerde(PeerId); - -impl From for PeerIdSerde { - fn from(peer_id: PeerId) -> PeerIdSerde { PeerIdSerde(peer_id) } -} - -impl Serialize for PeerIdSerde { - fn serialize(&self, serializer: S) -> Result { - 
self.0.clone().to_bytes().serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for PeerIdSerde { - fn deserialize>(deserializer: D) -> Result { - let bytes: Vec = Deserialize::deserialize(deserializer)?; - let peer_id = PeerId::from_bytes(&bytes).map_err(|_| serde::de::Error::custom("PeerId::from_bytes error"))?; - Ok(PeerIdSerde(peer_id)) - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum PeersExchangeRequest { - GetKnownPeers { num: usize }, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum PeersExchangeResponse { - KnownPeers { peers: HashMap }, -} - -/// Behaviour that requests known peers list from other peers at random -#[derive(NetworkBehaviour)] -#[behaviour(poll_method = "poll", event_process = true)] -pub struct PeersExchange { - request_response: RequestResponse, - #[behaviour(ignore)] - known_peers: Vec, - #[behaviour(ignore)] - reserved_peers: Vec, - #[behaviour(ignore)] - events: VecDeque::ConnectionHandler>>, - #[behaviour(ignore)] - maintain_peers_interval: Interval, - #[behaviour(ignore)] - network_info: NetworkInfo, -} - -#[allow(clippy::new_without_default)] -impl PeersExchange { - pub fn new(network_info: NetworkInfo) -> Self { - let codec = Codec::default(); - let protocol = iter::once((PeersExchangeProtocol::Version1, ProtocolSupport::Full)); - let config = RequestResponseConfig::default(); - let request_response = RequestResponse::new(codec, protocol, config); - PeersExchange { - request_response, - known_peers: Vec::new(), - reserved_peers: Vec::new(), - events: VecDeque::new(), - maintain_peers_interval: Interval::new_at( - Instant::now() + Duration::from_secs(REQUEST_PEERS_INITIAL_DELAY), - Duration::from_secs(REQUEST_PEERS_INTERVAL), - ), - network_info, - } - } - - fn get_random_known_peers(&mut self, num: usize) -> HashMap { - let mut result = HashMap::with_capacity(num); - let mut rng = rand::thread_rng(); - let peer_ids = self - .known_peers - .clone() - .into_iter() - .filter(|peer| 
!self.request_response.addresses_of_peer(peer).is_empty()) - .collect::>(); - - let peer_ids = peer_ids.choose_multiple(&mut rng, num); - for peer_id in peer_ids { - let addresses = self.request_response.addresses_of_peer(peer_id).into_iter().collect(); - result.insert((*peer_id).into(), addresses); - } - result - } - - fn forget_peer(&mut self, peer: &PeerId) { - self.known_peers.retain(|known_peer| known_peer != peer); - self.forget_peer_addresses(peer); - } - - fn forget_peer_addresses(&mut self, peer: &PeerId) { - for address in self.request_response.addresses_of_peer(peer) { - if !self.is_reserved_peer(peer) { - self.request_response.remove_address(peer, &address); - } - } - } - - pub fn add_peer_addresses_to_known_peers(&mut self, peer: &PeerId, addresses: PeerAddresses) { - for address in addresses.iter() { - if !self.validate_global_multiaddr(address) { - warn!("Attempt adding a not valid address of the peer '{}': {}", peer, address); - return; - } - } - if !self.known_peers.contains(peer) && !addresses.is_empty() { - self.known_peers.push(*peer); - } - let already_known = self.request_response.addresses_of_peer(peer); - for address in addresses { - if !already_known.contains(&address) { - self.request_response.add_address(peer, address); - } - } - } - - pub fn add_peer_addresses_to_reserved_peers(&mut self, peer: &PeerId, addresses: PeerAddresses) { - for address in addresses.iter() { - if !self.validate_global_multiaddr(address) { - return; - } - } - - if !self.reserved_peers.contains(peer) && !addresses.is_empty() { - self.reserved_peers.push(*peer); - } - - let already_reserved = self.request_response.addresses_of_peer(peer); - for address in addresses { - if !already_reserved.contains(&address) { - self.request_response.add_address(peer, address); - } - } - } - - fn maintain_known_peers(&mut self) { - if self.known_peers.len() > MAX_PEERS { - let mut rng = rand::thread_rng(); - let to_remove_num = self.known_peers.len() - MAX_PEERS; - 
self.known_peers.shuffle(&mut rng); - let removed_peers: Vec<_> = self.known_peers.drain(..to_remove_num).collect(); - for peer in removed_peers { - self.forget_peer_addresses(&peer); - } - } - self.request_known_peers_from_random_peer(); - } - - fn request_known_peers_from_random_peer(&mut self) { - let mut rng = rand::thread_rng(); - if let Some(from_peer) = self.known_peers.choose(&mut rng) { - info!("Try to request {} peers from peer {}", DEFAULT_PEERS_NUM, from_peer); - let request = PeersExchangeRequest::GetKnownPeers { num: DEFAULT_PEERS_NUM }; - self.request_response.send_request(from_peer, request); - } - } - - pub fn get_random_peers( - &mut self, - num: usize, - mut filter: impl FnMut(&PeerId) -> bool, - ) -> HashMap { - let mut result = HashMap::with_capacity(num); - let mut rng = rand::thread_rng(); - let peer_ids = self.known_peers.iter().filter(|peer| filter(peer)).collect::>(); - for peer_id in peer_ids.choose_multiple(&mut rng, num) { - let addresses = self.request_response.addresses_of_peer(peer_id).into_iter().collect(); - result.insert(**peer_id, addresses); - } - result - } - - pub fn is_known_peer(&self, peer: &PeerId) -> bool { self.known_peers.contains(peer) } - - pub fn is_reserved_peer(&self, peer: &PeerId) -> bool { self.reserved_peers.contains(peer) } - - pub fn add_known_peer(&mut self, peer: PeerId) { - if !self.is_known_peer(&peer) { - self.known_peers.push(peer) - } - } - - fn validate_global_multiaddr(&self, address: &Multiaddr) -> bool { - let network_ports = match self.network_info { - NetworkInfo::Distributed { network_ports } => network_ports, - NetworkInfo::InMemory => panic!("PeersExchange must not be used with in-memory network"), - }; - - let mut components = address.iter(); - match components.next() { - Some(Protocol::Ip4(addr)) => { - if !addr.is_global() { - return false; - } - }, - _ => return false, - } - - match components.next() { - Some(Protocol::Tcp(port)) => { - // currently, `NetworkPorts::ws` is not supported by 
`PeersExchange` - if port != network_ports.tcp { - return false; - } - }, - _ => return false, - } - - true - } - - fn validate_get_known_peers_response(&self, response: &HashMap) -> bool { - if response.is_empty() { - return false; - } - - if response.len() > DEFAULT_PEERS_NUM { - return false; - } - - for addresses in response.values() { - if addresses.is_empty() { - return false; - } - - for address in addresses { - if !self.validate_global_multiaddr(address) { - warn!("Received a not valid address: {}", address); - return false; - } - } - } - true - } - - fn poll( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll::ConnectionHandler>> { - while let Poll::Ready(Some(())) = self.maintain_peers_interval.poll_next_unpin(cx) { - self.maintain_known_peers(); - } - - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); - } - - Poll::Pending - } -} - -impl NetworkBehaviourEventProcess> for PeersExchange { - fn inject_event(&mut self, event: RequestResponseEvent) { - match event { - RequestResponseEvent::Message { message, peer } => match message { - RequestResponseMessage::Request { request, channel, .. } => match request { - PeersExchangeRequest::GetKnownPeers { num } => { - // Should not send a response in such case - if num > DEFAULT_PEERS_NUM { - return; - } - let response = PeersExchangeResponse::KnownPeers { - peers: self.get_random_known_peers(num), - }; - if let Err(_response) = self.request_response.send_response(channel, response) { - warn!("Response channel has been closed already"); - } - }, - }, - RequestResponseMessage::Response { response, .. 
} => match response { - PeersExchangeResponse::KnownPeers { peers } => { - info!("Got peers {:?}", peers); - - if !self.validate_get_known_peers_response(&peers) { - // if peer provides invalid response forget it and try to request from other peer - self.forget_peer(&peer); - self.request_known_peers_from_random_peer(); - return; - } - - peers.into_iter().for_each(|(peer, addresses)| { - self.add_peer_addresses_to_known_peers(&peer.0, addresses); - }); - }, - }, - }, - RequestResponseEvent::OutboundFailure { - peer, - request_id, - error, - } => { - error!( - "Outbound failure {:?} while requesting {:?} to peer {}", - error, request_id, peer - ); - self.forget_peer(&peer); - self.request_known_peers_from_random_peer(); - }, - RequestResponseEvent::InboundFailure { peer, error, .. } => { - error!( - "Inbound failure {:?} while processing request from peer {}", - error, peer - ); - }, - RequestResponseEvent::ResponseSent { .. } => (), - } - } -} - -#[cfg(test)] -mod tests { - use super::{NetworkInfo, PeerIdSerde, PeersExchange}; - use crate::{NetworkPorts, PeerId}; - use libp2p::core::Multiaddr; - use std::collections::{HashMap, HashSet}; - use std::iter::FromIterator; - - #[test] - fn test_peer_id_serde() { - let peer_id = PeerIdSerde(PeerId::random()); - let serialized = rmp_serde::to_vec(&peer_id).unwrap(); - let deserialized: PeerIdSerde = rmp_serde::from_slice(&serialized).unwrap(); - assert_eq!(peer_id.0, deserialized.0); - } - - #[test] - fn test_validate_get_known_peers_response() { - let network_info = NetworkInfo::Distributed { - network_ports: NetworkPorts { tcp: 3000, wss: 3010 }, - }; - let behaviour = PeersExchange::new(network_info); - let response = HashMap::default(); - assert!(!behaviour.validate_get_known_peers_response(&response)); - - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::new())]); - assert!(!behaviour.validate_get_known_peers_response(&response)); - - let address: Multiaddr = 
"/ip4/127.0.0.1/tcp/3000".parse().unwrap(); - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::from_iter(vec![address]))]); - assert!(!behaviour.validate_get_known_peers_response(&response)); - - let address: Multiaddr = "/ip4/216.58.210.142/tcp/3000".parse().unwrap(); - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::from_iter(vec![address]))]); - assert!(behaviour.validate_get_known_peers_response(&response)); - - let address: Multiaddr = "/ip4/216.58.210.142/tcp/3001".parse().unwrap(); - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::from_iter(vec![address]))]); - assert!(!behaviour.validate_get_known_peers_response(&response)); - - let address: Multiaddr = "/ip4/216.58.210.142".parse().unwrap(); - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::from_iter(vec![address]))]); - assert!(!behaviour.validate_get_known_peers_response(&response)); - - let address: Multiaddr = - "/ip4/168.119.236.241/tcp/3000/p2p/12D3KooWEsuiKcQaBaKEzuMtT6uFjs89P1E8MK3wGRZbeuCbCw6P" - .parse() - .unwrap(); - let response = HashMap::from_iter(vec![(PeerIdSerde(PeerId::random()), HashSet::from_iter(vec![address]))]); - assert!(behaviour.validate_get_known_peers_response(&response)); - - let address1: Multiaddr = - "/ip4/168.119.236.241/tcp/3000/p2p/12D3KooWEsuiKcQaBaKEzuMtT6uFjs89P1E8MK3wGRZbeuCbCw6P" - .parse() - .unwrap(); - - let address2: Multiaddr = "/ip4/168.119.236.241/tcp/3000".parse().unwrap(); - let response = HashMap::from_iter(vec![( - PeerIdSerde(PeerId::random()), - HashSet::from_iter(vec![address1, address2]), - )]); - assert!(behaviour.validate_get_known_peers_response(&response)); - } - - #[test] - fn test_get_random_known_peers() { - let mut behaviour = PeersExchange::new(NetworkInfo::InMemory); - let peer_id = PeerId::random(); - behaviour.add_known_peer(peer_id); - - let result = behaviour.get_random_known_peers(1); - 
assert!(result.is_empty()); - - let address: Multiaddr = "/ip4/168.119.236.241/tcp/3000".parse().unwrap(); - behaviour.request_response.add_address(&peer_id, address.clone()); - - let result = behaviour.get_random_known_peers(1); - assert_eq!(result.len(), 1); - - let addresses = result.get(&peer_id.into()).unwrap(); - assert_eq!(addresses.len(), 1); - assert!(addresses.contains(&address)); - } -} diff --git a/mm2src/mm2_libp2p/src/relay_address.rs b/mm2src/mm2_libp2p/src/relay_address.rs deleted file mode 100644 index d23c419632..0000000000 --- a/mm2src/mm2_libp2p/src/relay_address.rs +++ /dev/null @@ -1,186 +0,0 @@ -use crate::{NetworkInfo, NetworkPorts}; -use derive_more::Display; -use libp2p::Multiaddr; -use serde::{de, Deserialize, Deserializer, Serialize}; -use std::str::FromStr; - -#[derive(Clone, Debug, Display, Serialize)] -pub enum RelayAddressError { - #[display( - fmt = "Error parsing 'RelayAddress' from {}: address has unknown protocol, expected either IPv4 or DNS or Memory address", - found - )] - FromStrError { found: String }, - #[display( - fmt = "Error converting '{:?}' to Multiaddr: unexpected IPv4/DNS address on a memory network", - self_str - )] - DistributedAddrOnMemoryNetwork { self_str: String }, - #[display( - fmt = "Error converting '{:?}' to Multiaddr: unexpected memory address on a distributed network", - self_str - )] - MemoryAddrOnDistributedNetwork { self_str: String }, -} - -impl std::error::Error for RelayAddressError {} - -impl RelayAddressError { - fn distributed_addr_on_memory_network(addr: &RelayAddress) -> RelayAddressError { - RelayAddressError::DistributedAddrOnMemoryNetwork { - self_str: format!("{:?}", addr), - } - } - - fn memory_addr_on_distributed_network(addr: &RelayAddress) -> RelayAddressError { - RelayAddressError::MemoryAddrOnDistributedNetwork { - self_str: format!("{:?}", addr), - } - } -} - -#[derive(Debug, Clone, PartialEq)] -pub enum RelayAddress { - IPv4(String), - Dns(String), - Memory(u64), -} - -impl 
FromStr for RelayAddress { - type Err = RelayAddressError; - - fn from_str(s: &str) -> Result { - // check if the string is IPv4 - if std::net::Ipv4Addr::from_str(s).is_ok() { - return Ok(RelayAddress::IPv4(s.to_string())); - } - // check if the string is a domain name - if validate_domain_name(s) { - return Ok(RelayAddress::Dns(s.to_owned())); - } - // check if the string is a `/memory/` address - if let Some(port_str) = s.strip_prefix("/memory/") { - if let Ok(port) = port_str.parse() { - return Ok(RelayAddress::Memory(port)); - } - } - Err(RelayAddressError::FromStrError { found: s.to_owned() }) - } -} - -impl<'de> Deserialize<'de> for RelayAddress { - fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, - { - let addr_str = String::deserialize(deserializer)?; - RelayAddress::from_str(&addr_str).map_err(de::Error::custom) - } -} - -impl RelayAddress { - /// Try to convert `RelayAddress` to `Multiaddr` using the given `network_info`. - pub fn try_to_multiaddr(&self, network_info: NetworkInfo) -> Result { - let network_ports = match network_info { - NetworkInfo::InMemory => match self { - RelayAddress::Memory(port) => return Ok(memory_multiaddr(*port)), - _ => return Err(RelayAddressError::distributed_addr_on_memory_network(self)), - }, - NetworkInfo::Distributed { network_ports } => network_ports, - }; - - match self { - RelayAddress::IPv4(ipv4) => Ok(ipv4_multiaddr(ipv4, network_ports)), - RelayAddress::Dns(dns) => Ok(dns_multiaddr(dns, network_ports)), - RelayAddress::Memory(_) => Err(RelayAddressError::memory_addr_on_distributed_network(self)), - } - } -} - -/// Use [this](https://regex101.com/r/94nCB5/1) regular expression to validate the domain name. -/// See examples at the linked resource above. -fn validate_domain_name(s: &str) -> bool { - use regex::Regex; - - lazy_static! 
{ - static ref DNS_REGEX: Regex = Regex::new(r#"^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$"#).unwrap(); - } - - DNS_REGEX.is_match(s) -} - -fn memory_multiaddr(port: u64) -> Multiaddr { format!("/memory/{}", port).parse().unwrap() } - -#[cfg(target_arch = "wasm32")] -fn ipv4_multiaddr(ipv4_addr: &str, ports: NetworkPorts) -> Multiaddr { - format!("/ip4/{}/tcp/{}/wss", ipv4_addr, ports.wss).parse().unwrap() -} - -#[cfg(not(target_arch = "wasm32"))] -fn ipv4_multiaddr(ipv4_addr: &str, ports: NetworkPorts) -> Multiaddr { - format!("/ip4/{}/tcp/{}", ipv4_addr, ports.tcp).parse().unwrap() -} - -#[cfg(target_arch = "wasm32")] -fn dns_multiaddr(dns_addr: &str, ports: NetworkPorts) -> Multiaddr { - format!("/dns/{}/tcp/{}/wss", dns_addr, ports.wss).parse().unwrap() -} - -#[cfg(not(target_arch = "wasm32"))] -fn dns_multiaddr(dns_addr: &str, ports: NetworkPorts) -> Multiaddr { - format!("/dns/{}/tcp/{}", dns_addr, ports.tcp).parse().unwrap() -} - -#[test] -fn test_relay_address_from_str() { - let valid_addresses = vec![ - ("127.0.0.1", RelayAddress::IPv4("127.0.0.1".to_owned())), - ("255.255.255.255", RelayAddress::IPv4("255.255.255.255".to_owned())), - ("google.com", RelayAddress::Dns("google.com".to_owned())), - ("www.google.com", RelayAddress::Dns("www.google.com".to_owned())), - ("g.co", RelayAddress::Dns("g.co".to_owned())), - ( - "stackoverflow.co.uk", - RelayAddress::Dns("stackoverflow.co.uk".to_owned()), - ), - ("1.2.3.4.com", RelayAddress::Dns("1.2.3.4.com".to_owned())), - ("/memory/123", RelayAddress::Memory(123)), - ("/memory/71428421981", RelayAddress::Memory(71428421981)), - ]; - for (s, expected) in valid_addresses { - let actual = RelayAddress::from_str(s).unwrap_or_else(|_| panic!("Error parsing '{}'", s)); - assert_eq!(actual, expected); - } - - let invalid_addresses = vec![ - "127.0.0", - "127.0.0.0.2", - "google.c", - "http://google.com", - "https://google.com/", - "google.com/", - "/memory/", - "/memory/9999999999999999999999999999999", - ]; - for s in 
invalid_addresses { - let _ = RelayAddress::from_str(s).expect_err("Expected an error"); - } -} - -#[test] -fn test_deserialize_relay_address() { - #[derive(Deserialize, PartialEq)] - struct Config { - addresses: Vec, - } - - let Config { addresses: actual } = - serde_json::from_str(r#"{"addresses": ["foo.bar.com", "127.0.0.2", "/memory/12345"]}"#) - .expect("Error deserializing a list of RelayAddress"); - let expected = vec![ - RelayAddress::Dns("foo.bar.com".to_owned()), - RelayAddress::IPv4("127.0.0.2".to_owned()), - RelayAddress::Memory(12345), - ]; - assert_eq!(actual, expected); -} diff --git a/mm2src/mm2_libp2p/src/request_response.rs b/mm2src/mm2_libp2p/src/request_response.rs deleted file mode 100644 index 25b87e46de..0000000000 --- a/mm2src/mm2_libp2p/src/request_response.rs +++ /dev/null @@ -1,320 +0,0 @@ -use crate::{decode_message, encode_message}; -use async_trait::async_trait; -use core::iter; -use futures::channel::{mpsc, oneshot}; -use futures::io::{AsyncRead, AsyncWrite}; -use futures::task::{Context, Poll}; -use futures::StreamExt; -use libp2p::core::upgrade::{read_length_prefixed, write_length_prefixed}; -use libp2p::request_response::{ProtocolName, ProtocolSupport, RequestId, RequestResponse, RequestResponseCodec, - RequestResponseConfig, RequestResponseEvent, RequestResponseMessage, ResponseChannel}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; -use libp2p::NetworkBehaviour; -use libp2p::PeerId; -use log::{debug, error, warn}; -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; -use std::collections::{HashMap, VecDeque}; -use std::io; -use std::time::Duration; -use wasm_timer::{Instant, Interval}; - -const MAX_BUFFER_SIZE: usize = 1024 * 1024 - 100; - -pub type RequestResponseReceiver = mpsc::UnboundedReceiver<(PeerId, PeerRequest, oneshot::Sender)>; -pub type RequestResponseSender = mpsc::UnboundedSender<(PeerId, PeerRequest, oneshot::Sender)>; - -/// Build a 
request-response network behaviour. -pub fn build_request_response_behaviour() -> RequestResponseBehaviour { - let config = RequestResponseConfig::default(); - let protocol = iter::once((Protocol::Version1, ProtocolSupport::Full)); - let inner = RequestResponse::new(Codec::default(), protocol, config); - - let (tx, rx) = mpsc::unbounded(); - let pending_requests = HashMap::new(); - let events = VecDeque::new(); - let timeout = Duration::from_secs(10); - let timeout_interval = Interval::new(Duration::from_secs(1)); - - RequestResponseBehaviour { - inner, - rx, - tx, - pending_requests, - events, - timeout, - timeout_interval, - } -} - -pub enum RequestResponseBehaviourEvent { - InboundRequest { - peer_id: PeerId, - request: PeerRequest, - response_channel: ResponseChannel, - }, -} - -struct PendingRequest { - tx: oneshot::Sender, - initiated_at: Instant, -} - -#[derive(NetworkBehaviour)] -#[behaviour(out_event = "RequestResponseBehaviourEvent", event_process = true)] -#[behaviour(poll_method = "poll_event")] -pub struct RequestResponseBehaviour { - /// The inner RequestResponse network behaviour. - inner: RequestResponse>, - #[behaviour(ignore)] - rx: RequestResponseReceiver, - #[behaviour(ignore)] - tx: RequestResponseSender, - #[behaviour(ignore)] - pending_requests: HashMap, - /// Events that need to be yielded to the outside when polling. 
- #[behaviour(ignore)] - events: VecDeque, - /// Timeout for pending requests - #[behaviour(ignore)] - timeout: Duration, - /// Interval for request timeout check - #[behaviour(ignore)] - timeout_interval: Interval, -} - -impl RequestResponseBehaviour { - pub fn sender(&self) -> RequestResponseSender { self.tx.clone() } - - pub fn send_response(&mut self, ch: ResponseChannel, rs: PeerResponse) -> Result<(), PeerResponse> { - self.inner.send_response(ch, rs) - } - - pub fn send_request( - &mut self, - peer_id: &PeerId, - request: PeerRequest, - response_tx: oneshot::Sender, - ) -> RequestId { - let request_id = self.inner.send_request(peer_id, request); - let pending_request = PendingRequest { - tx: response_tx, - initiated_at: Instant::now(), - }; - assert!(self.pending_requests.insert(request_id, pending_request).is_none()); - request_id - } - - fn poll_event( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll::ConnectionHandler>> - { - // poll the `rx` - match self.rx.poll_next_unpin(cx) { - // received a request, forward it through the network and put to the `pending_requests` - Poll::Ready(Some((peer_id, request, response_tx))) => { - let _request_id = self.send_request(&peer_id, request, response_tx); - }, - // the channel was closed - Poll::Ready(None) => panic!("request-response channel has been closed"), - Poll::Pending => (), - } - - if let Some(event) = self.events.pop_front() { - // forward a pending event to the top - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); - } - - while let Poll::Ready(Some(())) = self.timeout_interval.poll_next_unpin(cx) { - let now = Instant::now(); - let timeout = self.timeout; - self.pending_requests.retain(|request_id, pending_request| { - let retain = now.duration_since(pending_request.initiated_at) < timeout; - if !retain { - warn!("Request {} timed out", request_id); - } - retain - }); - } - - Poll::Pending - } - - fn process_request( - &mut self, - peer_id: PeerId, - 
request: PeerRequest, - response_channel: ResponseChannel, - ) { - self.events.push_back(RequestResponseBehaviourEvent::InboundRequest { - peer_id, - request, - response_channel, - }) - } - - fn process_response(&mut self, request_id: RequestId, response: PeerResponse) { - match self.pending_requests.remove(&request_id) { - Some(pending) => { - if let Err(e) = pending.tx.send(response) { - error!("{:?}. Request {:?} is not processed", e, request_id); - } - }, - _ => error!("Received unknown request {:?}", request_id), - } - } -} - -impl NetworkBehaviourEventProcess> for RequestResponseBehaviour { - fn inject_event(&mut self, event: RequestResponseEvent) { - let (peer_id, message) = match event { - RequestResponseEvent::Message { peer, message } => (peer, message), - RequestResponseEvent::InboundFailure { error, .. } => { - error!("Error on receive a request: {:?}", error); - return; - }, - RequestResponseEvent::OutboundFailure { - peer, - request_id, - error, - } => { - error!("Error on send request {:?} to peer {:?}: {:?}", request_id, peer, error); - let err_response = PeerResponse::Err { - err: format!("{:?}", error), - }; - self.process_response(request_id, err_response); - return; - }, - RequestResponseEvent::ResponseSent { .. } => return, - }; - - match message { - RequestResponseMessage::Request { request, channel, .. 
} => { - debug!("Received a request from {:?} peer", peer_id); - self.process_request(peer_id, request, channel) - }, - RequestResponseMessage::Response { request_id, response } => { - debug!( - "Received a response to the {:?} request from peer {:?}", - request_id, peer_id - ); - self.process_response(request_id, response) - }, - } - } -} - -#[derive(Clone)] -pub struct Codec { - phantom: std::marker::PhantomData<(Proto, Req, Res)>, -} - -impl Default for Codec { - fn default() -> Self { - Codec { - phantom: Default::default(), - } - } -} - -#[derive(Debug, Clone)] -pub enum Protocol { - Version1, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct PeerRequest { - pub req: Vec, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum PeerResponse { - Ok { res: Vec }, - None, - Err { err: String }, -} - -macro_rules! try_io { - ($e: expr) => { - match $e { - Ok(ok) => ok, - Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidData, err)), - } - }; -} - -impl ProtocolName for Protocol { - fn protocol_name(&self) -> &[u8] { - match self { - Protocol::Version1 => b"/request-response/1", - } - } -} - -#[async_trait] -impl< - Proto: Clone + ProtocolName + Send + Sync, - Req: DeserializeOwned + Serialize + Send + Sync, - Res: DeserializeOwned + Serialize + Send + Sync, - > RequestResponseCodec for Codec -{ - type Protocol = Proto; - type Request = Req; - type Response = Res; - - async fn read_request(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - read_to_end(io).await - } - - async fn read_response(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - read_to_end(io).await - } - - async fn write_request(&mut self, _protocol: &Self::Protocol, io: &mut T, req: Self::Request) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_all(io, &req).await - } - - async fn write_response(&mut self, _protocol: 
&Self::Protocol, io: &mut T, res: Self::Response) -> io::Result<()> - where - T: AsyncWrite + Unpin + Send, - { - write_all(io, &res).await - } -} - -async fn read_to_end(io: &mut T) -> io::Result -where - T: AsyncRead + Unpin + Send, - M: DeserializeOwned, -{ - match read_length_prefixed(io, MAX_BUFFER_SIZE).await { - Ok(data) => Ok(try_io!(decode_message(&data))), - Err(e) => Err(io::Error::new(io::ErrorKind::InvalidData, e)), - } -} - -async fn write_all(io: &mut T, msg: &M) -> io::Result<()> -where - T: AsyncWrite + Unpin + Send, - M: Serialize, -{ - let data = try_io!(encode_message(msg)); - if data.len() > MAX_BUFFER_SIZE { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "Try to send data size over maximum", - )); - } - write_length_prefixed(io, data).await -} diff --git a/mm2src/mm2_libp2p/src/runtime.rs b/mm2src/mm2_libp2p/src/runtime.rs deleted file mode 100644 index 016cf2b455..0000000000 --- a/mm2src/mm2_libp2p/src/runtime.rs +++ /dev/null @@ -1,33 +0,0 @@ -use common::executor::{BoxFutureSpawner, SpawnFuture}; -use futures::Future; -use std::pin::Pin; -use std::sync::Arc; - -#[derive(Clone)] -pub struct SwarmRuntime { - inner: Arc, -} - -impl SwarmRuntime { - pub fn new(spawner: S) -> SwarmRuntime - where - S: BoxFutureSpawner + Send + Sync + 'static, - { - SwarmRuntime { - inner: Arc::new(spawner), - } - } -} - -impl SpawnFuture for SwarmRuntime { - fn spawn(&self, f: F) - where - F: Future + Send + 'static, - { - self.inner.spawn_boxed(Box::new(Box::pin(f))) - } -} - -impl libp2p::core::Executor for SwarmRuntime { - fn exec(&self, future: Pin + Send>>) { self.inner.spawn_boxed(Box::new(future)) } -} diff --git a/mm2src/mm2_main/Cargo.toml b/mm2src/mm2_main/Cargo.toml index 0b394ed15a..3ddc5e0e09 100644 --- a/mm2src/mm2_main/Cargo.toml +++ b/mm2src/mm2_main/Cargo.toml @@ -63,9 +63,9 @@ mm2_err_handle = { path = "../mm2_err_handle" } mm2_event_stream = { path = "../mm2_event_stream" } mm2_gui_storage = { path = "../mm2_gui_storage" } mm2_io 
= { path = "../mm2_io" } -mm2_libp2p = { path = "../mm2_p2p", package = "mm2_p2p" } +mm2_libp2p = { path = "../mm2_p2p", package = "mm2_p2p", features = ["application"] } mm2_metrics = { path = "../mm2_metrics" } -mm2_net = { path = "../mm2_net", features = ["event-stream", "p2p"] } +mm2_net = { path = "../mm2_net" } mm2_number = { path = "../mm2_number" } mm2_rpc = { path = "../mm2_rpc", features = ["rpc_facilities"]} mm2_state_machine = { path = "../mm2_state_machine" } diff --git a/mm2src/mm2_main/src/lp_healthcheck.rs b/mm2src/mm2_main/src/lp_healthcheck.rs index 849f478d5f..5e9db51111 100644 --- a/mm2src/mm2_main/src/lp_healthcheck.rs +++ b/mm2src/mm2_main/src/lp_healthcheck.rs @@ -9,8 +9,8 @@ use instant::{Duration, Instant}; use lazy_static::lazy_static; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::MmError; +use mm2_libp2p::p2p_ctx::P2PContext; use mm2_libp2p::{decode_message, encode_message, pub_sub_topic, Libp2pPublic, PeerAddress, TopicPrefix}; -use mm2_net::p2p::P2PContext; use ser_error_derive::SerializeErrorType; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -265,7 +265,7 @@ pub async fn peer_connection_healthcheck_rpc( { let mut book = ctx.healthcheck_response_handler.lock().await; - book.insert(target_peer_address, tx, address_record_exp); + book.insert(target_peer_address.into(), tx, address_record_exp); } broadcast_p2p_msg( @@ -328,7 +328,7 @@ pub(crate) async fn process_p2p_healthcheck_message(ctx: &MmArc, message: mm2_li } else { // The requested peer is healthy; signal the response channel. 
let mut response_handler = ctx.healthcheck_response_handler.lock().await; - if let Some(tx) = response_handler.remove(&sender_peer) { + if let Some(tx) = response_handler.remove(&sender_peer.into()) { if tx.send(()).is_err() { log::error!("Result channel isn't present for peer '{sender_peer}'."); }; diff --git a/mm2src/mm2_main/src/lp_native_dex.rs b/mm2src/mm2_main/src/lp_native_dex.rs index cd055132a5..8e1e91ec13 100644 --- a/mm2src/mm2_main/src/lp_native_dex.rs +++ b/mm2src/mm2_main/src/lp_native_dex.rs @@ -29,12 +29,12 @@ use mm2_core::mm_ctx::{MmArc, MmCtx}; use mm2_err_handle::common_errors::InternalError; use mm2_err_handle::prelude::*; use mm2_event_stream::behaviour::{EventBehaviour, EventInitStatus}; +use mm2_libp2p::application::network_event::NetworkEvent; use mm2_libp2p::behaviours::atomicdex::{generate_ed25519_keypair, GossipsubConfig, DEPRECATED_NETID_LIST}; +use mm2_libp2p::p2p_ctx::P2PContext; use mm2_libp2p::{spawn_gossipsub, AdexBehaviourError, NodeType, RelayAddress, RelayAddressError, SeedNodeInfo, SwarmRuntime, WssCerts}; use mm2_metrics::mm_gauge; -use mm2_net::network_event::NetworkEvent; -use mm2_net::p2p::P2PContext; use rpc_task::RpcTaskError; use serde_json as json; use std::convert::TryInto; diff --git a/mm2src/mm2_main/src/lp_network.rs b/mm2src/mm2_main/src/lp_network.rs index b514c8c95e..9de49245f3 100644 --- a/mm2src/mm2_main/src/lp_network.rs +++ b/mm2src/mm2_main/src/lp_network.rs @@ -29,12 +29,13 @@ use instant::Instant; use keys::KeyPair; use mm2_core::mm_ctx::{MmArc, MmWeak}; use mm2_err_handle::prelude::*; +use mm2_libp2p::application::request_response::P2PRequest; +use mm2_libp2p::p2p_ctx::P2PContext; use mm2_libp2p::{decode_message, encode_message, DecodingError, GossipsubEvent, GossipsubMessage, Libp2pPublic, Libp2pSecpPublic, MessageId, NetworkPorts, PeerId, TOPIC_SEPARATOR}; use mm2_libp2p::{AdexBehaviourCmd, AdexBehaviourEvent, AdexEventRx, AdexResponse}; use mm2_libp2p::{PeerAddresses, RequestResponseBehaviourEvent}; 
use mm2_metrics::{mm_label, mm_timing}; -use mm2_net::p2p::P2PContext; use serde::de; use std::net::ToSocketAddrs; @@ -88,12 +89,6 @@ impl From for P2PRequestError { fn from(e: rmp_serde::decode::Error) -> Self { P2PRequestError::DecodeError(e.to_string()) } } -#[derive(Eq, Debug, Deserialize, PartialEq, Serialize)] -pub enum P2PRequest { - Ordermatch(lp_ordermatch::OrdermatchRequest), - NetworkInfo(lp_stats::NetworkInfoRequest), -} - pub async fn p2p_event_process_loop(ctx: MmWeak, mut rx: AdexEventRx, i_am_relay: bool) { loop { let adex_event = rx.next().await; diff --git a/mm2src/mm2_main/src/lp_ordermatch.rs b/mm2src/mm2_main/src/lp_ordermatch.rs index 38e2c5e265..2f634b5611 100644 --- a/mm2src/mm2_main/src/lp_ordermatch.rs +++ b/mm2src/mm2_main/src/lp_ordermatch.rs @@ -21,7 +21,6 @@ // use async_trait::async_trait; -use best_orders::BestOrdersAction; use blake2::digest::{Update, VariableOutput}; use blake2::Blake2bVar; use coins::utxo::{compressed_pub_key_from_priv_raw, ChecksumType, UtxoAddressFormat}; @@ -42,6 +41,8 @@ use http::Response; use keys::{AddressFormat, KeyPair}; use mm2_core::mm_ctx::{from_ctx, MmArc, MmWeak}; use mm2_err_handle::prelude::*; +use mm2_libp2p::application::request_response::ordermatch::OrdermatchRequest; +use mm2_libp2p::application::request_response::P2PRequest; use mm2_libp2p::{decode_signed, encode_and_sign, encode_message, pub_sub_topic, PublicKey, TopicHash, TopicPrefix, TOPIC_SEPARATOR}; use mm2_metrics::mm_gauge; @@ -69,8 +70,7 @@ use std::time::Duration; use trie_db::NodeCodec as NodeCodecT; use uuid::Uuid; -use crate::lp_network::{broadcast_p2p_msg, request_any_relay, request_one_peer, subscribe_to_topic, P2PRequest, - P2PRequestError}; +use crate::lp_network::{broadcast_p2p_msg, request_any_relay, request_one_peer, subscribe_to_topic, P2PRequestError}; use crate::lp_swap::maker_swap_v2::{self, MakerSwapStateMachine, MakerSwapStorage}; use crate::lp_swap::taker_swap_v2::{self, TakerSwapStateMachine, TakerSwapStorage}; use 
crate::lp_swap::{calc_max_maker_vol, check_balance_for_maker_swap, check_balance_for_taker_swap, @@ -600,34 +600,6 @@ pub async fn process_msg(ctx: MmArc, from_peer: String, msg: &[u8], i_am_relay: } } -#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum OrdermatchRequest { - /// Get an orderbook for the given pair. - GetOrderbook { - base: String, - rel: String, - }, - /// Sync specific pubkey orderbook state if our known Patricia trie state doesn't match the latest keep alive message - SyncPubkeyOrderbookState { - pubkey: String, - /// Request using this condition - trie_roots: HashMap, - }, - BestOrders { - coin: String, - action: BestOrdersAction, - volume: BigRational, - }, - OrderbookDepth { - pairs: Vec<(String, String)>, - }, - BestOrdersByNumber { - coin: String, - action: BestOrdersAction, - number: usize, - }, -} - #[derive(Debug)] struct TryFromBytesError(String); @@ -5165,6 +5137,7 @@ pub struct CancelOrderResponse { result: String, } +// TODO: This is a near copy of the function below, `cancel_order_rpc`. 
pub async fn cancel_order(ctx: MmArc, req: CancelOrderReq) -> Result> { let ordermatch_ctx = match OrdermatchContext::from_ctx(&ctx) { Ok(x) => x, diff --git a/mm2src/mm2_main/src/lp_ordermatch/best_orders.rs b/mm2src/mm2_main/src/lp_ordermatch/best_orders.rs index 121a444e29..3bf684b66c 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/best_orders.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/best_orders.rs @@ -4,6 +4,8 @@ use derive_more::Display; use http::{Response, StatusCode}; use mm2_core::mm_ctx::MmArc; use mm2_err_handle::prelude::*; +use mm2_libp2p::application::request_response::{ordermatch::{BestOrdersAction, OrdermatchRequest}, + P2PRequest}; use mm2_number::{BigRational, MmNumber}; use mm2_rpc::data::legacy::OrderConfirmationsSettings; use num_traits::Zero; @@ -12,15 +14,8 @@ use std::collections::{HashMap, HashSet}; use uuid::Uuid; use super::{addr_format_from_protocol_info, is_my_order, mm2_internal_pubkey_hex, orderbook_address, - BaseRelProtocolInfo, OrderbookP2PItemWithProof, OrdermatchContext, OrdermatchRequest, RpcOrderbookEntryV2}; -use crate::lp_network::{request_any_relay, P2PRequest}; - -#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum BestOrdersAction { - Buy, - Sell, -} + BaseRelProtocolInfo, OrderbookP2PItemWithProof, OrdermatchContext, RpcOrderbookEntryV2}; +use crate::lp_network::request_any_relay; #[derive(Debug, Deserialize)] pub struct BestOrdersRequest { diff --git a/mm2src/mm2_main/src/lp_ordermatch/orderbook_depth.rs b/mm2src/mm2_main/src/lp_ordermatch/orderbook_depth.rs index c335ac75ef..1772acbe61 100644 --- a/mm2src/mm2_main/src/lp_ordermatch/orderbook_depth.rs +++ b/mm2src/mm2_main/src/lp_ordermatch/orderbook_depth.rs @@ -1,9 +1,10 @@ -use super::{orderbook_topic_from_base_rel, OrdermatchContext, OrdermatchRequest}; -use crate::lp_network::{request_any_relay, P2PRequest}; +use super::{orderbook_topic_from_base_rel, OrdermatchContext}; +use 
crate::lp_network::request_any_relay; use coins::is_wallet_only_ticker; use common::log; use http::Response; use mm2_core::mm_ctx::MmArc; +use mm2_libp2p::application::request_response::{ordermatch::OrdermatchRequest, P2PRequest}; use serde_json::{self as json, Value as Json}; use std::collections::HashMap; diff --git a/mm2src/mm2_main/src/lp_stats.rs b/mm2src/mm2_main/src/lp_stats.rs index e86e742457..185996ecd1 100644 --- a/mm2src/mm2_main/src/lp_stats.rs +++ b/mm2src/mm2_main/src/lp_stats.rs @@ -7,13 +7,14 @@ use futures::lock::Mutex as AsyncMutex; use http::StatusCode; use mm2_core::mm_ctx::{from_ctx, MmArc}; use mm2_err_handle::prelude::*; +use mm2_libp2p::application::request_response::network_info::NetworkInfoRequest; use mm2_libp2p::{encode_message, NetworkInfo, PeerId, RelayAddress, RelayAddressError}; use serde_json::{self as json, Value as Json}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use crate::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, P2PRequest, - ParseAddressError, PeerDecodedResponse}; +use crate::lp_network::{add_reserved_peer_addresses, lp_network_ports, request_peers, NetIdError, ParseAddressError, + PeerDecodedResponse}; use std::str::FromStr; pub type NodeVersionResult = Result>; @@ -169,12 +170,6 @@ struct Mm2VersionRes { nodes: HashMap, } -#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] -pub enum NetworkInfoRequest { - /// Get MM2 version of nodes added to stats collection - GetMm2Version, -} - fn process_get_version_request(ctx: MmArc) -> Result>, String> { let response = ctx.mm_version().to_string(); let encoded = try_s!(encode_message(&response)); @@ -264,6 +259,7 @@ pub async fn start_version_stat_collection(ctx: MmArc, req: Json) -> NodeVersion #[cfg(not(target_arch = "wasm32"))] async fn stat_collection_loop(ctx: MmArc, interval: f64) { use common::now_sec; + use mm2_libp2p::application::request_response::P2PRequest; use 
crate::database::stats_nodes::select_peers_names; diff --git a/mm2src/mm2_main/src/lp_swap.rs b/mm2src/mm2_main/src/lp_swap.rs index dd1af80a98..48f0d09f2c 100644 --- a/mm2src/mm2_main/src/lp_swap.rs +++ b/mm2src/mm2_main/src/lp_swap.rs @@ -1885,7 +1885,7 @@ mod lp_swap_tests { use super::*; use crate::lp_native_dex::{fix_directories, init_p2p}; use coins::hd_wallet::HDPathAccountToAddressId; - use coins::utxo::rpc_clients::ElectrumRpcRequest; + use coins::utxo::rpc_clients::ElectrumConnectionSettings; use coins::utxo::utxo_standard::utxo_standard_coin_with_priv_key; use coins::utxo::{UtxoActivationParams, UtxoRpcMode}; use coins::PrivKeyActivationPolicy; @@ -2221,12 +2221,15 @@ mod lp_swap_tests { mode: UtxoRpcMode::Electrum { servers: electrums .iter() - .map(|url| ElectrumRpcRequest { + .map(|url| ElectrumConnectionSettings { url: url.to_string(), protocol: Default::default(), disable_cert_verification: false, + timeout_sec: None, }) .collect(), + min_connected: None, + max_connected: None, }, utxo_merge_params: None, tx_history: false, diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap.rs b/mm2src/mm2_main/src/lp_swap/maker_swap.rs index a873cd721a..4b3bd3d4ff 100644 --- a/mm2src/mm2_main/src/lp_swap/maker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap.rs @@ -853,7 +853,6 @@ impl MakerSwap { min_block_number: taker_coin_start_block, uuid: self.uuid.as_bytes(), }) - .compat() .await { Ok(_) => break, @@ -880,7 +879,8 @@ impl MakerSwap { } async fn maker_payment(&self) -> Result<(Option, Vec), String> { - let timeout = self.r().data.started_at + self.r().data.lock_duration / 3; + let lock_duration = self.r().data.lock_duration; + let timeout = self.r().data.started_at + lock_duration / 3; let now = now_sec(); if now > timeout { return Ok((Some(MakerSwapCommand::Finish), vec![ @@ -888,24 +888,24 @@ impl MakerSwap { ])); } + let maker_payment_lock = self.r().data.maker_payment_lock; + let other_maker_coin_htlc_pub = self.r().other_maker_coin_htlc_pub; let 
secret_hash = self.secret_hash(); + let maker_coin_swap_contract_address = self.r().data.maker_coin_swap_contract_address.clone(); let unique_data = self.unique_swap_data(); - let transaction_f = self - .maker_coin - .check_if_my_payment_sent(CheckIfMyPaymentSentArgs { - time_lock: self.r().data.maker_payment_lock, - other_pub: &*self.r().other_maker_coin_htlc_pub, - secret_hash: secret_hash.as_slice(), - search_from_block: self.r().data.maker_coin_start_block, - swap_contract_address: &self.r().data.maker_coin_swap_contract_address, - swap_unique_data: &unique_data, - amount: &self.maker_amount, - payment_instructions: &self.r().payment_instructions, - }) - .compat(); + let payment_instructions = self.r().payment_instructions.clone(); + let transaction_f = self.maker_coin.check_if_my_payment_sent(CheckIfMyPaymentSentArgs { + time_lock: maker_payment_lock, + other_pub: &*other_maker_coin_htlc_pub, + secret_hash: secret_hash.as_slice(), + search_from_block: self.r().data.maker_coin_start_block, + swap_contract_address: &maker_coin_swap_contract_address, + swap_unique_data: &unique_data, + amount: &self.maker_amount, + payment_instructions: &payment_instructions, + }); - let wait_maker_payment_until = - wait_for_maker_payment_conf_until(self.r().data.started_at, self.r().data.lock_duration); + let wait_maker_payment_until = wait_for_maker_payment_conf_until(self.r().data.started_at, lock_duration); let watcher_reward = if self.r().watcher_reward { match self .maker_coin @@ -927,20 +927,23 @@ impl MakerSwap { Ok(res) => match res { Some(tx) => tx, None => { - let payment_fut = self.maker_coin.send_maker_payment(SendPaymentArgs { - time_lock_duration: self.r().data.lock_duration, - time_lock: self.r().data.maker_payment_lock, - other_pubkey: &*self.r().other_maker_coin_htlc_pub, - secret_hash: secret_hash.as_slice(), - amount: self.maker_amount.clone(), - swap_contract_address: &self.r().data.maker_coin_swap_contract_address, - swap_unique_data: &unique_data, - 
payment_instructions: &self.r().payment_instructions, - watcher_reward, - wait_for_confirmation_until: wait_maker_payment_until, - }); - - match payment_fut.compat().await { + let payment = self + .maker_coin + .send_maker_payment(SendPaymentArgs { + time_lock_duration: lock_duration, + time_lock: maker_payment_lock, + other_pubkey: &*other_maker_coin_htlc_pub, + secret_hash: secret_hash.as_slice(), + amount: self.maker_amount.clone(), + swap_contract_address: &maker_coin_swap_contract_address, + swap_unique_data: &unique_data, + payment_instructions: &payment_instructions, + watcher_reward, + wait_for_confirmation_until: wait_maker_payment_until, + }) + .await; + + match payment { Ok(t) => t, Err(err) => { return Ok((Some(MakerSwapCommand::Finish), vec![ @@ -1226,12 +1229,10 @@ impl MakerSwap { } async fn confirm_taker_payment_spend(&self) -> Result<(Option, Vec), String> { - // We should wait for only one confirmation to make sure our spend transaction is not failed. - // However, we allow the user to use 0 confirmations if specified. 
let requires_nota = false; let confirm_taker_payment_spend_input = ConfirmPaymentInput { payment_tx: self.r().taker_payment_spend.clone().unwrap().tx_hex.0, - confirmations: std::cmp::min(1, self.r().data.taker_payment_confirmations), + confirmations: self.r().data.taker_payment_confirmations, requires_nota, wait_until: self.wait_refund_until(), check_every: WAIT_CONFIRM_INTERVAL_SEC, @@ -1295,7 +1296,7 @@ impl MakerSwap { } loop { - match self.maker_coin.can_refund_htlc(locktime).compat().await { + match self.maker_coin.can_refund_htlc(locktime).await { Ok(CanRefundHtlc::CanRefundNow) => break, Ok(CanRefundHtlc::HaveToWait(to_sleep)) => Timer::sleep(to_sleep as f64).await, Err(e) => { @@ -1565,7 +1566,6 @@ impl MakerSwap { amount: &self.maker_amount, payment_instructions: &payment_instructions, }) - .compat() .await ); match maybe_maker_payment { @@ -1608,7 +1608,7 @@ impl MakerSwap { return ERR!("Maker payment will be refunded automatically!"); } - let can_refund_htlc = try_s!(self.maker_coin.can_refund_htlc(maker_payment_lock).compat().await); + let can_refund_htlc = try_s!(self.maker_coin.can_refund_htlc(maker_payment_lock).await); if let CanRefundHtlc::HaveToWait(seconds_to_wait) = can_refund_htlc { return ERR!("Too early to refund, wait until {}", wait_until_sec(seconds_to_wait)); } @@ -2486,12 +2486,12 @@ mod maker_swap_tests { TestCoin::ticker.mock_safe(|_| MockResult::Return("ticker")); TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); TestCoin::can_refund_htlc - .mock_safe(|_, _| MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::CanRefundNow)))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::CanRefundNow)))); static mut MY_PAYMENT_SENT_CALLED: bool = false; TestCoin::check_if_my_payment_sent.mock_safe(|_, _| { unsafe { MY_PAYMENT_SENT_CALLED = true }; - MockResult::Return(Box::new(futures01::future::ok(Some(eth_tx_for_test().into())))) + 
MockResult::Return(Box::pin(futures::future::ok(Some(eth_tx_for_test().into())))) }); static mut MAKER_REFUND_CALLED: bool = false; @@ -2526,7 +2526,7 @@ mod maker_swap_tests { TestCoin::ticker.mock_safe(|_| MockResult::Return("ticker")); TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); TestCoin::can_refund_htlc - .mock_safe(|_, _| MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::CanRefundNow)))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::CanRefundNow)))); static mut MAKER_REFUND_CALLED: bool = false; TestCoin::send_maker_refunds_payment.mock_safe(|_, _| { @@ -2621,10 +2621,10 @@ mod maker_swap_tests { static mut MY_PAYMENT_SENT_CALLED: bool = false; TestCoin::check_if_my_payment_sent.mock_safe(|_, _| { unsafe { MY_PAYMENT_SENT_CALLED = true }; - MockResult::Return(Box::new(futures01::future::ok(Some(eth_tx_for_test().into())))) + MockResult::Return(Box::pin(futures::future::ok(Some(eth_tx_for_test().into())))) }); TestCoin::can_refund_htlc - .mock_safe(|_, _| MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::HaveToWait(1000))))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::HaveToWait(1000))))); TestCoin::search_for_swap_tx_spend_my .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ready(Ok(None))))); let maker_coin = MmCoinEnum::Test(TestCoin::default()); @@ -2650,7 +2650,7 @@ mod maker_swap_tests { static mut MY_PAYMENT_SENT_CALLED: bool = false; TestCoin::check_if_my_payment_sent.mock_safe(|_, _| { unsafe { MY_PAYMENT_SENT_CALLED = true }; - MockResult::Return(Box::new(futures01::future::ok(None))) + MockResult::Return(Box::pin(futures::future::ok(None))) }); let maker_coin = MmCoinEnum::Test(TestCoin::default()); let taker_coin = MmCoinEnum::Test(TestCoin::default()); diff --git a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs index 1a8ec0452b..3579e46d87 100644 --- 
a/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/maker_swap_v2.rs @@ -1502,7 +1502,6 @@ impl break, diff --git a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs index ff5dd2dc9e..6488e466de 100644 --- a/mm2src/mm2_main/src/lp_swap/swap_watcher.rs +++ b/mm2src/mm2_main/src/lp_swap/swap_watcher.rs @@ -444,7 +444,6 @@ impl State for RefundTakerPayment { match watcher_ctx .taker_coin .can_refund_htlc(watcher_ctx.taker_locktime()) - .compat() .await { Ok(CanRefundHtlc::CanRefundNow) => break, diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap.rs b/mm2src/mm2_main/src/lp_swap/taker_swap.rs index ac0c6f37f7..39240b8721 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap.rs @@ -1396,7 +1396,6 @@ impl TakerSwap { let fee_tx = self .taker_coin .send_taker_fee(dex_fee, self.uuid.as_bytes(), expire_at) - .compat() .await; let transaction = match fee_tx { Ok(t) => t, @@ -1611,20 +1610,26 @@ impl TakerSwap { ])); } + let taker_payment_lock = self.r().data.taker_payment_lock; + let other_taker_coin_htlc_pub = self.r().other_taker_coin_htlc_pub; + let secret_hash = self.r().secret_hash.clone(); + let taker_coin_swap_contract_address = self.r().data.taker_coin_swap_contract_address.clone(); let unique_data = self.unique_swap_data(); + let taker_amount_decimal = self.taker_amount.to_decimal(); + let payment_instructions = self.r().payment_instructions.clone(); let f = self.taker_coin.check_if_my_payment_sent(CheckIfMyPaymentSentArgs { - time_lock: self.r().data.taker_payment_lock, - other_pub: self.r().other_taker_coin_htlc_pub.as_slice(), - secret_hash: &self.r().secret_hash.0, + time_lock: taker_payment_lock, + other_pub: other_taker_coin_htlc_pub.as_slice(), + secret_hash: &secret_hash.0, search_from_block: self.r().data.taker_coin_start_block, - swap_contract_address: &self.r().data.taker_coin_swap_contract_address, + swap_contract_address: 
&taker_coin_swap_contract_address, swap_unique_data: &unique_data, - amount: &self.taker_amount.to_decimal(), - payment_instructions: &self.r().payment_instructions, + amount: &taker_amount_decimal, + payment_instructions: &payment_instructions, }); let reward_amount = self.r().reward_amount.clone(); - let wait_until = self.r().data.taker_payment_lock; + let wait_until = taker_payment_lock; let watcher_reward = if self.r().watcher_reward { match self .taker_coin @@ -1650,28 +1655,32 @@ impl TakerSwap { None }; - let transaction = match f.compat().await { + let transaction = match f.await { Ok(res) => match res { Some(tx) => tx, None => { let time_lock = match std::env::var("USE_TEST_LOCKTIME") { Ok(_) => self.r().data.started_at, - Err(_) => self.r().data.taker_payment_lock, + Err(_) => taker_payment_lock, }; - let payment_fut = self.taker_coin.send_taker_payment(SendPaymentArgs { - time_lock_duration: self.r().data.lock_duration, - time_lock, - other_pubkey: &*self.r().other_taker_coin_htlc_pub, - secret_hash: &self.r().secret_hash.0, - amount: self.taker_amount.to_decimal(), - swap_contract_address: &self.r().data.taker_coin_swap_contract_address, - swap_unique_data: &unique_data, - payment_instructions: &self.r().payment_instructions, - watcher_reward, - wait_for_confirmation_until: self.r().data.taker_payment_lock, - }); + let lock_duration = self.r().data.lock_duration; + let payment = self + .taker_coin + .send_taker_payment(SendPaymentArgs { + time_lock_duration: lock_duration, + time_lock, + other_pubkey: &*other_taker_coin_htlc_pub, + secret_hash: &secret_hash.0, + amount: taker_amount_decimal, + swap_contract_address: &taker_coin_swap_contract_address, + swap_unique_data: &unique_data, + payment_instructions: &payment_instructions, + watcher_reward, + wait_for_confirmation_until: taker_payment_lock, + }) + .await; - match payment_fut.compat().await { + match payment { Ok(t) => t, Err(err) => { return Ok((Some(TakerSwapCommand::Finish), vec![ @@ -1802,34 
+1811,14 @@ impl TakerSwap { self.p2p_privkey, ); - let confirm_taker_payment_input = ConfirmPaymentInput { - payment_tx: self.r().taker_payment.clone().unwrap().tx_hex.0, - confirmations: self.r().data.taker_payment_confirmations, - requires_nota: self.r().data.taker_payment_requires_nota.unwrap_or(false), - wait_until: self.r().data.taker_payment_lock, - check_every: WAIT_CONFIRM_INTERVAL_SEC, - }; - let wait_f = self - .taker_coin - .wait_for_confirmations(confirm_taker_payment_input) - .compat(); - if let Err(err) = wait_f.await { - return Ok((Some(TakerSwapCommand::PrepareForTakerPaymentRefund), vec![ - TakerSwapEvent::TakerPaymentWaitConfirmFailed( - ERRL!("!taker_coin.wait_for_confirmations: {}", err).into(), - ), - TakerSwapEvent::TakerPaymentWaitRefundStarted { - wait_until: self.wait_refund_until(), - }, - ])); - } - #[cfg(any(test, feature = "run-docker-tests"))] if self.fail_at == Some(FailAt::WaitForTakerPaymentSpendPanic) { + // Wait for 5 seconds before panicking to ensure the message is sent + Timer::sleep(5.).await; panic!("Taker panicked unexpectedly at wait for taker payment spend"); } - info!("Taker payment confirmed"); + info!("Waiting for maker to spend taker payment!"); let wait_until = match std::env::var("USE_TEST_LOCKTIME") { Ok(_) => self.r().data.started_at, @@ -1952,11 +1941,9 @@ impl TakerSwap { } async fn confirm_maker_payment_spend(&self) -> Result<(Option, Vec), String> { - // We should wait for only one confirmation to ensure our spend transaction does not fail. - // However, we allow the user to use 0 confirmations if specified. 
let confirm_maker_payment_spend_input = ConfirmPaymentInput { payment_tx: self.r().maker_payment_spend.clone().unwrap().tx_hex.0, - confirmations: std::cmp::min(1, self.r().data.maker_payment_confirmations), + confirmations: self.r().data.maker_payment_confirmations, requires_nota: false, wait_until: self.wait_refund_until(), check_every: WAIT_CONFIRM_INTERVAL_SEC, @@ -2019,7 +2006,7 @@ impl TakerSwap { } loop { - match self.taker_coin.can_refund_htlc(locktime).compat().await { + match self.taker_coin.can_refund_htlc(locktime).await { Ok(CanRefundHtlc::CanRefundNow) => break, Ok(CanRefundHtlc::HaveToWait(to_sleep)) => Timer::sleep(to_sleep as f64).await, Err(e) => { @@ -2203,8 +2190,8 @@ impl TakerSwap { return ERR!("Taker payment is refunded, swap is not recoverable"); } - if self.r().maker_payment_spend.is_some() { - return ERR!("Maker payment is spent, swap is not recoverable"); + if self.r().maker_payment_spend.is_some() && self.r().maker_payment_spend_confirmed { + return ERR!("Maker payment is spent and confirmed, swap is not recoverable"); } let maker_payment = match &self.r().maker_payment { @@ -2279,7 +2266,6 @@ impl TakerSwap { amount: &self.taker_amount.to_decimal(), payment_instructions: &payment_instructions, }) - .compat() .await ); match maybe_sent { @@ -2406,7 +2392,7 @@ impl TakerSwap { return ERR!("Taker payment will be refunded automatically!"); } - let can_refund = try_s!(self.taker_coin.can_refund_htlc(taker_payment_lock).compat().await); + let can_refund = try_s!(self.taker_coin.can_refund_htlc(taker_payment_lock).await); if let CanRefundHtlc::HaveToWait(seconds_to_wait) = can_refund { return ERR!("Too early to refund, wait until {}", wait_until_sec(seconds_to_wait)); } @@ -2915,12 +2901,12 @@ mod taker_swap_tests { TestCoin::ticker.mock_safe(|_| MockResult::Return("ticker")); TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); TestCoin::can_refund_htlc - .mock_safe(|_, _| 
MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::CanRefundNow)))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::CanRefundNow)))); static mut MY_PAYMENT_SENT_CALLED: bool = false; TestCoin::check_if_my_payment_sent.mock_safe(|_, _| { unsafe { MY_PAYMENT_SENT_CALLED = true }; - MockResult::Return(Box::new(futures01::future::ok(Some(eth_tx_for_test().into())))) + MockResult::Return(Box::pin(futures::future::ok(Some(eth_tx_for_test().into())))) }); static mut TX_SPEND_CALLED: bool = false; @@ -2969,7 +2955,7 @@ mod taker_swap_tests { static mut MY_PAYMENT_SENT_CALLED: bool = false; TestCoin::check_if_my_payment_sent.mock_safe(|_, _| { unsafe { MY_PAYMENT_SENT_CALLED = true }; - MockResult::Return(Box::new(futures01::future::ok(Some(eth_tx_for_test().into())))) + MockResult::Return(Box::pin(futures::future::ok(Some(eth_tx_for_test().into())))) }); static mut SEARCH_TX_SPEND_CALLED: bool = false; @@ -3018,7 +3004,7 @@ mod taker_swap_tests { TestCoin::ticker.mock_safe(|_| MockResult::Return("ticker")); TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); TestCoin::can_refund_htlc - .mock_safe(|_, _| MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::CanRefundNow)))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::CanRefundNow)))); static mut SEARCH_TX_SPEND_CALLED: bool = false; TestCoin::search_for_swap_tx_spend_my.mock_safe(|_, _| { @@ -3061,7 +3047,7 @@ mod taker_swap_tests { TestCoin::ticker.mock_safe(|_| MockResult::Return("ticker")); TestCoin::swap_contract_address.mock_safe(|_| MockResult::Return(None)); TestCoin::can_refund_htlc - .mock_safe(|_, _| MockResult::Return(Box::new(futures01::future::ok(CanRefundHtlc::HaveToWait(1000))))); + .mock_safe(|_, _| MockResult::Return(Box::pin(futures::future::ok(CanRefundHtlc::HaveToWait(1000))))); static mut SEARCH_TX_SPEND_CALLED: bool = false; TestCoin::search_for_swap_tx_spend_my.mock_safe(|_, _| { 
diff --git a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs index 0ff2a70ecd..574c3da564 100644 --- a/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs +++ b/mm2src/mm2_main/src/lp_swap/taker_swap_v2.rs @@ -1901,7 +1901,6 @@ impl break, diff --git a/mm2src/mm2_main/src/ordermatch_tests.rs b/mm2src/mm2_main/src/ordermatch_tests.rs index 38d17af4ed..1ac83697af 100644 --- a/mm2src/mm2_main/src/ordermatch_tests.rs +++ b/mm2src/mm2_main/src/ordermatch_tests.rs @@ -6,10 +6,12 @@ use crypto::privkey::key_pair_from_seed; use db_common::sqlite::rusqlite::Connection; use futures::{channel::mpsc, StreamExt}; use mm2_core::mm_ctx::{MmArc, MmCtx}; +use mm2_libp2p::application::request_response::ordermatch::OrdermatchRequest; +use mm2_libp2p::application::request_response::P2PRequest; use mm2_libp2p::behaviours::atomicdex::generate_ed25519_keypair; +use mm2_libp2p::p2p_ctx::P2PContext; use mm2_libp2p::AdexBehaviourCmd; use mm2_libp2p::{decode_message, PeerId}; -use mm2_net::p2p::P2PContext; use mm2_test_helpers::for_tests::mm_ctx_with_iguana; use mocktopus::mocking::*; use rand::{seq::SliceRandom, thread_rng, Rng}; diff --git a/mm2src/mm2_main/src/rpc.rs b/mm2src/mm2_main/src/rpc.rs index 1bef856e15..85b61db612 100644 --- a/mm2src/mm2_main/src/rpc.rs +++ b/mm2src/mm2_main/src/rpc.rs @@ -37,6 +37,7 @@ use std::net::SocketAddr; cfg_native! { use hyper::{self, Body, Server}; + use futures::channel::oneshot; use mm2_net::sse_handler::{handle_sse, SSE_ENDPOINT}; } @@ -333,6 +334,34 @@ pub extern "C" fn spawn_rpc(ctx_h: u32) { Ok((cert_chain, privkey)) } + // Handles incoming HTTP requests. + async fn handle_request( + req: Request, + remote_addr: SocketAddr, + ctx_h: u32, + is_event_stream_enabled: bool, + ) -> Result, Infallible> { + let (tx, rx) = oneshot::channel(); + // We execute the request in a separate task to avoid it being left uncompleted if the client disconnects. 
+ // So what's inside the spawn here will complete till completion (or panic). + common::executor::spawn(async move { + if is_event_stream_enabled && req.uri().path() == SSE_ENDPOINT { + tx.send(handle_sse(req, ctx_h).await).ok(); + } else { + tx.send(rpc_service(req, ctx_h, remote_addr).await).ok(); + } + }); + // On the other hand, this `.await` might be aborted if the client disconnects. + match rx.await { + Ok(res) => Ok(res), + Err(_) => { + let err = "The RPC service aborted without responding."; + error!("{}", err); + Ok(Response::builder().status(500).body(Body::from(err)).unwrap()) + }, + } + } + // NB: We need to manually handle the incoming connections in order to get the remote IP address, // cf. https://github.com/hyperium/hyper/issues/1410#issuecomment-419510220. // Although if the ability to access the remote IP address is solved by the Hyper in the future @@ -340,28 +369,19 @@ pub extern "C" fn spawn_rpc(ctx_h: u32) { // cf. https://github.com/hyperium/hyper/pull/1640. let ctx = MmArc::from_ffi_handle(ctx_h).expect("No context"); - let is_event_stream_enabled = ctx.event_stream_configuration.is_some(); - let make_svc_fut = move |remote_addr: SocketAddr| async move { - Ok::<_, Infallible>(service_fn(move |req: Request| async move { - if is_event_stream_enabled && req.uri().path() == SSE_ENDPOINT { - let res = handle_sse(req, ctx_h).await?; - return Ok::<_, Infallible>(res); - } - - let res = rpc_service(req, ctx_h, remote_addr).await; - Ok::<_, Infallible>(res) - })) - }; - //The `make_svc` macro creates a `make_service_fn` for a specified socket type. // `$socket_type`: The socket type with a `remote_addr` method that returns a `SocketAddr`. macro_rules! 
make_svc { ($socket_type:ty) => { make_service_fn(move |socket: &$socket_type| { let remote_addr = socket.remote_addr(); - make_svc_fut(remote_addr) + async move { + Ok::<_, Infallible>(service_fn(move |req: Request| { + handle_request(req, remote_addr, ctx_h, is_event_stream_enabled) + })) + } }) }; } diff --git a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs index 5ef386942c..59970c06bd 100644 --- a/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs +++ b/mm2src/mm2_main/src/rpc/lp_commands/lp_commands_legacy.rs @@ -20,13 +20,14 @@ // use coins::{lp_coinfind, lp_coinfind_any, lp_coininit, CoinsContext, MmCoinEnum}; +use common::custom_futures::timeout::FutureTimerExt; use common::executor::Timer; use common::{rpc_err_response, rpc_response, HyRes}; use futures::compat::Future01CompatExt; use http::Response; use mm2_core::mm_ctx::MmArc; +use mm2_libp2p::p2p_ctx::P2PContext; use mm2_metrics::MetricsOps; -use mm2_net::p2p::P2PContext; use mm2_number::construct_detailed; use mm2_rpc::data::legacy::{BalanceResponse, CoinInitResponse, Mm2RpcResult, MmVersionResponse, Status}; use serde_json::{self as json, Value as Json}; @@ -138,7 +139,16 @@ pub async fn disable_coin(ctx: MmArc, req: Json) -> Result>, St pub async fn electrum(ctx: MmArc, req: Json) -> Result>, String> { let ticker = try_s!(req["coin"].as_str().ok_or("No 'coin' field")).to_owned(); let coin: MmCoinEnum = try_s!(lp_coininit(&ctx, &ticker, &req).await); - let balance = try_s!(coin.my_balance().compat().await); + let balance = match coin.my_balance().compat().timeout_secs(5.).await { + Ok(Ok(balance)) => balance, + // If the coin was activated successfully but the balance query failed (most probably due to faulty + // electrum servers), remove the coin as the whole request is a failure now from the POV of the GUI. 
+ err => { + let coins_ctx = try_s!(CoinsContext::from_ctx(&ctx)); + coins_ctx.remove_coin(coin).await; + return Err(ERRL!("Deactivated coin due to error in balance querying: {:?}", err)); + }, + }; let res = CoinInitResponse { result: "success".into(), address: try_s!(coin.my_address()), diff --git a/mm2src/mm2_main/tests/docker_tests/docker_tests_inner.rs b/mm2src/mm2_main/tests/docker_tests/docker_tests_inner.rs index 5733090e80..5f194f321d 100644 --- a/mm2src/mm2_main/tests/docker_tests/docker_tests_inner.rs +++ b/mm2src/mm2_main/tests/docker_tests/docker_tests_inner.rs @@ -51,7 +51,7 @@ fn test_search_for_swap_tx_spend_native_was_refunded_taker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_taker_payment(taker_payment_args)).unwrap(); + let tx = block_on(coin.send_taker_payment(taker_payment_args)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -140,7 +140,7 @@ fn test_search_for_swap_tx_spend_native_was_refunded_maker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(maker_payment_args)).unwrap(); + let tx = block_on(coin.send_maker_payment(maker_payment_args)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -209,7 +209,7 @@ fn test_search_for_taker_swap_tx_spend_native_was_spent_by_maker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_taker_payment(taker_payment_args)).unwrap(); + let tx = block_on(coin.send_taker_payment(taker_payment_args)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -277,7 +277,7 @@ fn test_search_for_maker_swap_tx_spend_native_was_spent_by_taker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(maker_payment_args)).unwrap(); + let tx = block_on(coin.send_maker_payment(maker_payment_args)).unwrap(); let 
confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -348,7 +348,7 @@ fn test_one_hundred_maker_payments_in_a_row_native() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(maker_payment_args)).unwrap(); + let tx = block_on(coin.send_maker_payment(maker_payment_args)).unwrap(); if let TransactionEnum::UtxoTx(tx) = tx { unspents.push(UnspentInfo { outpoint: OutPoint { diff --git a/mm2src/mm2_main/tests/docker_tests/eth_docker_tests.rs b/mm2src/mm2_main/tests/docker_tests/eth_docker_tests.rs index 2658fde991..c0c93b22f1 100644 --- a/mm2src/mm2_main/tests/docker_tests/eth_docker_tests.rs +++ b/mm2src/mm2_main/tests/docker_tests/eth_docker_tests.rs @@ -539,7 +539,7 @@ fn send_and_refund_eth_maker_payment_impl(swap_txfee_policy: SwapTxFeePolicy) { watcher_reward: None, wait_for_confirmation_until: 0, }; - let eth_maker_payment = block_on_f01(eth_coin.send_maker_payment(send_payment_args)).unwrap(); + let eth_maker_payment = block_on(eth_coin.send_maker_payment(send_payment_args)).unwrap(); let confirm_input = ConfirmPaymentInput { payment_tx: eth_maker_payment.tx_hex(), @@ -625,7 +625,7 @@ fn send_and_spend_eth_maker_payment_impl(swap_txfee_policy: SwapTxFeePolicy) { watcher_reward: None, wait_for_confirmation_until: 0, }; - let eth_maker_payment = block_on_f01(maker_eth_coin.send_maker_payment(send_payment_args)).unwrap(); + let eth_maker_payment = block_on(maker_eth_coin.send_maker_payment(send_payment_args)).unwrap(); let confirm_input = ConfirmPaymentInput { payment_tx: eth_maker_payment.tx_hex(), @@ -708,7 +708,7 @@ fn send_and_refund_erc20_maker_payment_impl(swap_txfee_policy: SwapTxFeePolicy) watcher_reward: None, wait_for_confirmation_until: now_sec() + 60, }; - let eth_maker_payment = block_on_f01(erc20_coin.send_maker_payment(send_payment_args)).unwrap(); + let eth_maker_payment = block_on(erc20_coin.send_maker_payment(send_payment_args)).unwrap(); let confirm_input = 
ConfirmPaymentInput { payment_tx: eth_maker_payment.tx_hex(), @@ -797,7 +797,7 @@ fn send_and_spend_erc20_maker_payment_impl(swap_txfee_policy: SwapTxFeePolicy) { watcher_reward: None, wait_for_confirmation_until: now_sec() + 60, }; - let eth_maker_payment = block_on_f01(maker_erc20_coin.send_maker_payment(send_payment_args)).unwrap(); + let eth_maker_payment = block_on(maker_erc20_coin.send_maker_payment(send_payment_args)).unwrap(); let confirm_input = ConfirmPaymentInput { payment_tx: eth_maker_payment.tx_hex(), diff --git a/mm2src/mm2_main/tests/docker_tests/qrc20_tests.rs b/mm2src/mm2_main/tests/docker_tests/qrc20_tests.rs index dd172a74b9..bcb125ba26 100644 --- a/mm2src/mm2_main/tests/docker_tests/qrc20_tests.rs +++ b/mm2src/mm2_main/tests/docker_tests/qrc20_tests.rs @@ -184,7 +184,7 @@ fn test_taker_spends_maker_payment() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -274,7 +274,7 @@ fn test_maker_spends_taker_payment() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(taker_coin.send_taker_payment(taker_payment_args)).unwrap(); + let payment = block_on(taker_coin.send_taker_payment(taker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Taker payment: {:?}", payment_tx_hash); @@ -359,7 +359,7 @@ fn test_maker_refunds_payment() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(coin.send_maker_payment(maker_payment)).unwrap(); + let payment = block_on(coin.send_maker_payment(maker_payment)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); 
log!("Maker payment: {:?}", payment_tx_hash); @@ -431,7 +431,7 @@ fn test_taker_refunds_payment() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(coin.send_taker_payment(taker_payment_args)).unwrap(); + let payment = block_on(coin.send_taker_payment(taker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Taker payment: {:?}", payment_tx_hash); @@ -500,7 +500,7 @@ fn test_check_if_my_payment_sent() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -529,7 +529,7 @@ fn test_check_if_my_payment_sent() { amount: &amount, payment_instructions: &None, }; - let found = block_on_f01(coin.check_if_my_payment_sent(if_my_payment_sent_args)).unwrap(); + let found = block_on(coin.check_if_my_payment_sent(if_my_payment_sent_args)).unwrap(); assert_eq!(found, Some(payment)); } @@ -557,7 +557,7 @@ fn test_search_for_swap_tx_spend_taker_spent() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -636,7 +636,7 @@ fn test_search_for_swap_tx_spend_maker_refunded() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let 
payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -716,7 +716,7 @@ fn test_search_for_swap_tx_spend_not_spent() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -773,7 +773,7 @@ fn test_wait_for_tx_spend() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let payment = block_on_f01(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); + let payment = block_on(maker_coin.send_maker_payment(maker_payment_args)).unwrap(); let payment_tx_hash = payment.tx_hash_as_bytes(); let payment_tx_hex = payment.tx_hex(); log!("Maker payment: {:?}", payment_tx_hash); @@ -1069,7 +1069,7 @@ fn test_get_max_taker_vol_and_trade_with_dynamic_trade_fee(coin: QtumCoin, priv_ let secret_hash = &[0; 20]; let dex_fee = dex_fee_from_taker_coin(&coin, "MYCOIN", &expected_max_taker_vol, None, None); - let _taker_fee_tx = block_on_f01(coin.send_taker_fee(dex_fee, &[], timelock)).expect("!send_taker_fee"); + let _taker_fee_tx = block_on(coin.send_taker_fee(dex_fee, &[], timelock)).expect("!send_taker_fee"); let taker_payment_args = SendPaymentArgs { time_lock_duration: 0, time_lock: timelock, @@ -1083,7 +1083,7 @@ fn test_get_max_taker_vol_and_trade_with_dynamic_trade_fee(coin: QtumCoin, priv_ wait_for_confirmation_until: 0, }; - let _taker_payment_tx = block_on_f01(coin.send_taker_payment(taker_payment_args)).expect("!send_taker_payment"); + let _taker_payment_tx = block_on(coin.send_taker_payment(taker_payment_args)).expect("!send_taker_payment"); let my_balance = block_on_f01(coin.my_spendable_balance()).expect("!my_balance"); assert_eq!( @@ -1479,7 +1479,7 @@ fn 
test_search_for_segwit_swap_tx_spend_native_was_refunded_maker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_maker_payment(maker_payment)).unwrap(); + let tx = block_on(coin.send_maker_payment(maker_payment)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -1547,7 +1547,7 @@ fn test_search_for_segwit_swap_tx_spend_native_was_refunded_taker() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_taker_payment(taker_payment)).unwrap(); + let tx = block_on(coin.send_taker_payment(taker_payment)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), @@ -1701,11 +1701,10 @@ fn test_send_standard_taker_fee_qtum() { generate_segwit_qtum_coin_with_random_privkey("QTUM", BigDecimal::try_from(0.5).unwrap(), Some(0)); let amount = BigDecimal::from_str("0.01").unwrap(); - let tx = - block_on_f01(coin.send_taker_fee(DexFee::Standard(amount.clone().into()), &[], 0)).expect("!send_taker_fee"); + let tx = block_on(coin.send_taker_fee(DexFee::Standard(amount.clone().into()), &[], 0)).expect("!send_taker_fee"); assert!(matches!(tx, TransactionEnum::UtxoTx(_)), "Expected UtxoTx"); - block_on_f01(coin.validate_fee(ValidateFeeArgs { + block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: coin.my_public_key().unwrap(), dex_fee: &DexFee::Standard(amount.into()), @@ -1723,7 +1722,7 @@ fn test_send_taker_fee_with_burn_qtum() { let fee_amount = BigDecimal::from_str("0.0075").unwrap(); let burn_amount = BigDecimal::from_str("0.0025").unwrap(); - let tx = block_on_f01(coin.send_taker_fee( + let tx = block_on(coin.send_taker_fee( DexFee::WithBurn { fee_amount: fee_amount.clone().into(), burn_amount: burn_amount.clone().into(), @@ -1735,7 +1734,7 @@ fn test_send_taker_fee_with_burn_qtum() { .expect("!send_taker_fee"); assert!(matches!(tx, TransactionEnum::UtxoTx(_)), "Expected UtxoTx"); - 
block_on_f01(coin.validate_fee(ValidateFeeArgs { + block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: coin.my_public_key().unwrap(), dex_fee: &DexFee::WithBurn { @@ -1758,11 +1757,10 @@ fn test_send_taker_fee_qrc20() { ); let amount = BigDecimal::from_str("0.01").unwrap(); - let tx = - block_on_f01(coin.send_taker_fee(DexFee::Standard(amount.clone().into()), &[], 0)).expect("!send_taker_fee"); + let tx = block_on(coin.send_taker_fee(DexFee::Standard(amount.clone().into()), &[], 0)).expect("!send_taker_fee"); assert!(matches!(tx, TransactionEnum::UtxoTx(_)), "Expected UtxoTx"); - block_on_f01(coin.validate_fee(ValidateFeeArgs { + block_on(coin.validate_fee(ValidateFeeArgs { fee_tx: &tx, expected_sender: coin.my_public_key().unwrap(), dex_fee: &DexFee::Standard(amount.into()), diff --git a/mm2src/mm2_main/tests/docker_tests/swap_watcher_tests.rs b/mm2src/mm2_main/tests/docker_tests/swap_watcher_tests.rs index aee44e2d6a..102e8eacf5 100644 --- a/mm2src/mm2_main/tests/docker_tests/swap_watcher_tests.rs +++ b/mm2src/mm2_main/tests/docker_tests/swap_watcher_tests.rs @@ -688,11 +688,8 @@ fn test_taker_completes_swap_after_taker_payment_spent_while_offline() { // stop taker after taker payment sent let taker_payment_msg = "Taker payment tx hash "; block_on(mm_alice.wait_for_log(120., |log| log.contains(taker_payment_msg))).unwrap(); - let alice_log = mm_alice.log_as_utf8().unwrap(); - let tx_hash_start = alice_log.find(taker_payment_msg).unwrap() + taker_payment_msg.len(); - let payment_tx_hash = alice_log[tx_hash_start..tx_hash_start + 64].to_string(); // ensure p2p message is sent to the maker, this happens before this message: - block_on(mm_alice.wait_for_log(120., |log| log.contains(&format!("Waiting for tx {}", payment_tx_hash)))).unwrap(); + block_on(mm_alice.wait_for_log(120., |log| log.contains("Waiting for maker to spend taker payment!"))).unwrap(); alice_conf.conf["dbdir"] = mm_alice.folder.join("DB").to_str().unwrap().into(); 
block_on(mm_alice.stop()).unwrap(); @@ -1211,7 +1208,7 @@ fn test_watcher_validate_taker_fee_utxo() { let taker_amount = MmNumber::from((10, 1)); let dex_fee = dex_fee_from_taker_coin(&taker_coin, maker_coin.ticker(), &taker_amount, None, None); - let taker_fee = block_on_f01(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); + let taker_fee = block_on(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: taker_fee.tx_hex(), @@ -1318,7 +1315,7 @@ fn test_watcher_validate_taker_fee_eth() { let taker_amount = MmNumber::from((1, 1)); let dex_fee = dex_fee_from_taker_coin(&taker_coin, "ETH", &taker_amount, None, None); - let taker_fee = block_on_f01(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); + let taker_fee = block_on(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: taker_fee.tx_hex(), @@ -1409,7 +1406,7 @@ fn test_watcher_validate_taker_fee_erc20() { let taker_amount = MmNumber::from((1, 1)); let dex_fee = dex_fee_from_taker_coin(&taker_coin, "ETH", &taker_amount, None, None); - let taker_fee = block_on_f01(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); + let taker_fee = block_on(taker_coin.send_taker_fee(dex_fee, Uuid::new_v4().as_bytes(), lock_duration)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: taker_fee.tx_hex(), @@ -1504,7 +1501,7 @@ fn test_watcher_validate_taker_payment_utxo() { let secret_hash = dhash160(&generate_secret().unwrap()); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pubkey, @@ -1599,7 +1596,7 @@ fn test_watcher_validate_taker_payment_utxo() 
{ ), } - let taker_payment_wrong_secret = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment_wrong_secret = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pubkey, @@ -1712,7 +1709,7 @@ fn test_watcher_validate_taker_payment_eth() { .unwrap(), ); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -1776,7 +1773,7 @@ fn test_watcher_validate_taker_payment_eth() { ), } - let taker_payment_wrong_contract = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment_wrong_contract = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -1844,7 +1841,7 @@ fn test_watcher_validate_taker_payment_eth() { ), } - let taker_payment_wrong_secret = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment_wrong_secret = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -1948,7 +1945,7 @@ fn test_watcher_validate_taker_payment_erc20() { .unwrap(), ); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -2009,7 +2006,7 @@ fn test_watcher_validate_taker_payment_erc20() { ), } - let taker_payment_wrong_contract = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment_wrong_contract = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -2073,7 +2070,7 @@ fn test_watcher_validate_taker_payment_erc20() { ), } - let taker_payment_wrong_secret = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let 
taker_payment_wrong_secret = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -2158,7 +2155,7 @@ fn test_taker_validates_taker_payment_refund_utxo() { let secret_hash = dhash160(&generate_secret().unwrap()); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pubkey, @@ -2247,7 +2244,7 @@ fn test_taker_validates_taker_payment_refund_eth() { )) .unwrap(); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -2548,7 +2545,7 @@ fn test_taker_validates_taker_payment_refund_erc20() { .unwrap(), ); - let taker_payment = block_on_f01(taker_coin.send_taker_payment(SendPaymentArgs { + let taker_payment = block_on(taker_coin.send_taker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: maker_pub, @@ -2649,7 +2646,7 @@ fn test_taker_validates_maker_payment_spend_utxo() { let secret = generate_secret().unwrap(); let secret_hash = dhash160(&secret); - let maker_payment = block_on_f01(maker_coin.send_maker_payment(SendPaymentArgs { + let maker_payment = block_on(maker_coin.send_maker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: taker_pubkey, @@ -2735,7 +2732,7 @@ fn test_taker_validates_maker_payment_spend_eth() { .unwrap() .unwrap(); - let maker_payment = block_on_f01(maker_coin.send_maker_payment(SendPaymentArgs { + let maker_payment = block_on(maker_coin.send_maker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: taker_pub, @@ -3037,7 +3034,7 @@ fn test_taker_validates_maker_payment_spend_erc20() { )) .unwrap(); - let maker_payment = block_on_f01(maker_coin.send_maker_payment(SendPaymentArgs { + let maker_payment = 
block_on(maker_coin.send_maker_payment(SendPaymentArgs { time_lock_duration, time_lock, other_pubkey: taker_pub, @@ -3147,7 +3144,7 @@ fn test_send_taker_payment_refund_preimage_utxo() { watcher_reward: None, wait_for_confirmation_until: 0, }; - let tx = block_on_f01(coin.send_taker_payment(taker_payment_args)).unwrap(); + let tx = block_on(coin.send_taker_payment(taker_payment_args)).unwrap(); let confirm_payment_input = ConfirmPaymentInput { payment_tx: tx.tx_hex(), diff --git a/mm2src/mm2_net/Cargo.toml b/mm2src/mm2_net/Cargo.toml index 3962bf1136..a720327d96 100644 --- a/mm2src/mm2_net/Cargo.toml +++ b/mm2src/mm2_net/Cargo.toml @@ -7,11 +7,9 @@ edition = "2018" doctest = false [features] -event-stream = ["mm2_event_stream", "async-stream" , "p2p"] -p2p = ["mm2-libp2p", "parking_lot"] [dependencies] -async-stream = { version = "0.3", optional = true } +async-stream = { version = "0.3" } async-trait = "0.1" bytes = "1.1" cfg-if = "1.0" @@ -24,9 +22,7 @@ http = "0.2" lazy_static = "1.4" mm2_core = { path = "../mm2_core" } mm2_err_handle = { path = "../mm2_err_handle" } -mm2_event_stream = { path = "../mm2_event_stream", optional = true } -mm2-libp2p = { path = "../mm2_p2p", package = "mm2_p2p", optional = true } -parking_lot = { version = "0.12.0", features = ["nightly"], optional = true } +mm2_number = { path = "../mm2_number" } prost = "0.12" rand = { version = "0.7", features = ["std", "small_rng", "wasm-bindgen"] } serde = "1" diff --git a/mm2src/mm2_net/src/lib.rs b/mm2src/mm2_net/src/lib.rs index 954e25c5a0..4ae26ca182 100644 --- a/mm2src/mm2_net/src/lib.rs +++ b/mm2src/mm2_net/src/lib.rs @@ -1,13 +1,9 @@ pub mod grpc_web; -#[cfg(feature = "event-stream")] pub mod network_event; -#[cfg(feature = "p2p")] pub mod p2p; pub mod transport; #[cfg(not(target_arch = "wasm32"))] pub mod ip_addr; #[cfg(not(target_arch = "wasm32"))] pub mod native_http; #[cfg(not(target_arch = "wasm32"))] pub mod native_tls; -#[cfg(all(feature = "event-stream", not(target_arch = 
"wasm32")))] -pub mod sse_handler; +#[cfg(not(target_arch = "wasm32"))] pub mod sse_handler; #[cfg(target_arch = "wasm32")] pub mod wasm; -#[cfg(all(feature = "event-stream", target_arch = "wasm32"))] -pub mod wasm_event_stream; +#[cfg(target_arch = "wasm32")] pub mod wasm_event_stream; diff --git a/mm2src/mm2_net/src/sse_handler.rs b/mm2src/mm2_net/src/sse_handler.rs index 3b3afeee58..568bfc98c0 100644 --- a/mm2src/mm2_net/src/sse_handler.rs +++ b/mm2src/mm2_net/src/sse_handler.rs @@ -1,12 +1,11 @@ use hyper::{body::Bytes, Body, Request, Response}; use mm2_core::mm_ctx::MmArc; use serde_json::json; -use std::convert::Infallible; pub const SSE_ENDPOINT: &str = "/event-stream"; /// Handles broadcasted messages from `mm2_event_stream` continuously. -pub async fn handle_sse(request: Request, ctx_h: u32) -> Result, Infallible> { +pub async fn handle_sse(request: Request, ctx_h: u32) -> Response { // This is only called once for per client on the initialization, // meaning this is not a resource intensive computation. 
let ctx = match MmArc::from_ffi_handle(ctx_h) { @@ -62,17 +61,15 @@ pub async fn handle_sse(request: Request, ctx_h: u32) -> Result Ok(res), + Ok(res) => res, Err(err) => handle_internal_error(err.to_string()).await, } } /// Fallback function for handling errors in SSE connections -async fn handle_internal_error(message: String) -> Result, Infallible> { - let response = Response::builder() +async fn handle_internal_error(message: String) -> Response { + Response::builder() .status(500) .body(Body::from(message)) - .expect("Returning 500 should never fail."); - - Ok(response) + .expect("Returning 500 should never fail.") } diff --git a/mm2src/mm2_net/src/wasm/wasm_ws.rs b/mm2src/mm2_net/src/wasm/wasm_ws.rs index 7464dcf142..1d19d43a60 100644 --- a/mm2src/mm2_net/src/wasm/wasm_ws.rs +++ b/mm2src/mm2_net/src/wasm/wasm_ws.rs @@ -22,7 +22,7 @@ const NORMAL_CLOSURE_CODE: u16 = 1000; pub type ConnIdx = usize; -pub type WsOutgoingReceiver = mpsc::Receiver; +pub type WsOutgoingReceiver = mpsc::Receiver>; pub type WsIncomingSender = mpsc::Sender<(ConnIdx, WebSocketEvent)>; type WsTransportReceiver = mpsc::Receiver; @@ -69,14 +69,14 @@ impl InitWsError { } } -/// The `WsEventReceiver` wrapper that filters and maps the incoming `WebSocketEvent` events into `Result`. +/// The `WsEventReceiver` wrapper that filters and maps the incoming `WebSocketEvent` events into `Result, WebSocketError>`. pub struct WsIncomingReceiver { inner: WsEventReceiver, closed: bool, } impl Stream for WsIncomingReceiver { - type Item = Result; + type Item = Result, WebSocketError>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if self.closed { @@ -122,7 +122,7 @@ impl Stream for WsEventReceiver { #[derive(Debug, Clone)] pub struct WsOutgoingSender { - inner: mpsc::Sender, + inner: mpsc::Sender>, /// Is used to determine when all senders are dropped. 
#[allow(dead_code)] shutdown_tx: OutgoingShutdownTx, @@ -132,9 +132,9 @@ pub struct WsOutgoingSender { /// Please note `WsOutgoingSender` must not provide a way to close the [`WsOutgoingSender::inner`] channel, /// because the shutdown_tx wouldn't be closed properly. impl WsOutgoingSender { - pub async fn send(&mut self, msg: Json) -> Result<(), SendError> { self.inner.send(msg).await } + pub async fn send(&mut self, msg: Vec) -> Result<(), SendError> { self.inner.send(msg).await } - pub fn try_send(&mut self, msg: Json) -> Result<(), TrySendError> { self.inner.try_send(msg) } + pub fn try_send(&mut self, msg: Vec) -> Result<(), TrySendError>> { self.inner.try_send(msg) } } #[derive(Debug)] @@ -147,12 +147,12 @@ pub enum WebSocketEvent { /// Please note some of the errors lead to the connection close. Error(WebSocketError), /// A message has been received through a WebSocket connection. - Incoming(Json), + Incoming(Vec), } #[derive(Debug)] pub enum WebSocketError { - OutgoingError { reason: OutgoingError, outgoing: Json }, + OutgoingError { reason: OutgoingError, outgoing: Vec }, InvalidIncoming { description: String }, } @@ -212,6 +212,7 @@ fn spawn_ws_transport( ) -> InitWsResult<(WsOutgoingSender, WsEventReceiver)> { let (ws, ws_transport_rx) = WebSocketImpl::init(url)?; let (incoming_tx, incoming_rx, incoming_shutdown) = incoming_channel(1024); + let (outgoing_tx, outgoing_rx, outgoing_shutdown) = outgoing_channel(1024); let user_shutdown = into_one_shutdown(incoming_shutdown, outgoing_shutdown); @@ -353,17 +354,11 @@ impl WebSocketImpl { Ok((WebSocketImpl { ws, closures }, rx)) } - fn send_to_ws(&self, outgoing: Json) -> Result<(), WebSocketError> { - match json::to_string(&outgoing) { - Ok(req) => self.ws.send_with_str(&req).map_err(|error| { - let reason = OutgoingError::UnderlyingError(stringify_js_error(&error)); - WebSocketError::OutgoingError { reason, outgoing } - }), - Err(e) => { - let reason = OutgoingError::SerializingError(e.to_string()); - 
Err(WebSocketError::OutgoingError { reason, outgoing }) - }, - } + fn send_to_ws(&self, outgoing: Vec) -> Result<(), WebSocketError> { + self.ws.send_with_u8_array(&outgoing).map_err(|error| { + let reason = OutgoingError::UnderlyingError(stringify_js_error(&error)); + WebSocketError::OutgoingError { reason, outgoing } + }) } fn validate_websocket_url(url: &str) -> Result<(), MmError> { @@ -423,7 +418,7 @@ impl WsStateMachine { } } - fn send_unexpected_outgoing_back(&mut self, outgoing: Json, current_state: &str) { + fn send_unexpected_outgoing_back(&mut self, outgoing: Vec, current_state: &str) { error!( "Unexpected outgoing message while the socket idx={} state is {}", self.idx, current_state @@ -478,7 +473,7 @@ enum StateEvent { /// All instances of `WsOutgoingSender` and `WsIncomingReceiver` were dropped. UserSideClosed, /// Received an outgoing message. It should be forwarded to `WebSocket`. - OutgoingMessage(Json), + OutgoingMessage(Vec), /// Received a `WsTransportEvent` event. It might be an incoming message from `WebSocket` or something else. 
WsTransportEvent(WsTransportEvent), } @@ -491,7 +486,7 @@ enum WsTransportEvent { code: u16, }, Error(WsTransportError), - Incoming(Json), + Incoming(Vec), } #[derive(Debug)] @@ -565,8 +560,8 @@ impl State for ConnectingState { } }, StateEvent::WsTransportEvent(WsTransportEvent::Incoming(incoming)) => error!( - "Unexpected incoming message {} while the socket idx={} state is ConnectingState", - ctx.idx, incoming + "Unexpected incoming message {:?} while the socket idx={} state is ConnectingState", + incoming, ctx.idx ), } } @@ -647,11 +642,11 @@ impl ClosedState { } } -fn decode_incoming(incoming: MessageEvent) -> Result { +fn decode_incoming(incoming: MessageEvent) -> Result, String> { match incoming.data().dyn_into::() { Ok(txt) => { let txt = String::from(txt); - json::from_str(&txt).map_err(|e| format!("Error deserializing an incoming payload: {}", e)) + Ok(txt.into_bytes()) }, Err(e) => Err(format!("Unknown MessageEvent {:?}", e)), } @@ -724,10 +719,12 @@ mod tests { "method": "server.version", "params": ["1.2", "1.4"], }); + let get_version = json::to_vec(&get_version).expect("Vec serialization won't fail"); outgoing_tx.send(get_version).await.expect("!outgoing_tx.send"); match incoming_rx.next().timeout_secs(5.).await.unwrap_w() { Some((_conn_idx, WebSocketEvent::Incoming(response))) => { + let response: Json = json::from_slice(&response).expect("Failed to parse incoming message"); debug!("Response: {:?}", response); assert!(response.get("result").is_some()); }, diff --git a/mm2src/mm2_p2p/Cargo.toml b/mm2src/mm2_p2p/Cargo.toml index 7001ebeabf..6b7f43e7f4 100644 --- a/mm2src/mm2_p2p/Cargo.toml +++ b/mm2src/mm2_p2p/Cargo.toml @@ -3,6 +3,10 @@ name = "mm2_p2p" version = "0.1.0" edition = "2021" +[features] +default = [] +application = ["dep:mm2_number"] + [lib] doctest = false @@ -15,12 +19,17 @@ futures-ticker = "0.0.3" hex = "0.4.2" lazy_static = "1.4" log = "0.4" +mm2_core = { path = "../mm2_core" } +mm2_event_stream = { path = "../mm2_event_stream" } 
+mm2_number = { path = "../mm2_number", optional = true } +parking_lot = { version = "0.12.0", features = ["nightly"] } rand = { version = "0.7", default-features = false, features = ["wasm-bindgen"] } regex = "1" rmp-serde = "0.14.3" secp256k1 = { version = "0.20", features = ["rand"] } serde = { version = "1.0", default-features = false } serde_bytes = "0.11.5" +serde_json = { version = "1", features = ["preserve_order", "raw_value"] } sha2 = "0.10" smallvec = "1.6.1" syn = "2.0.18" diff --git a/mm2src/mm2_p2p/src/application/mod.rs b/mm2src/mm2_p2p/src/application/mod.rs new file mode 100644 index 0000000000..bccb70ac5c --- /dev/null +++ b/mm2src/mm2_p2p/src/application/mod.rs @@ -0,0 +1,5 @@ +//! This module contains KDF application logic related to P2P network gated +//! by the "application" feature. + +pub mod network_event; +pub mod request_response; diff --git a/mm2src/mm2_net/src/network_event.rs b/mm2src/mm2_p2p/src/application/network_event.rs similarity index 79% rename from mm2src/mm2_net/src/network_event.rs rename to mm2src/mm2_p2p/src/application/network_event.rs index 9ff7ae15f7..c3c0a0eb5c 100644 --- a/mm2src/mm2_net/src/network_event.rs +++ b/mm2src/mm2_p2p/src/application/network_event.rs @@ -1,12 +1,11 @@ -use crate::p2p::P2PContext; use async_trait::async_trait; use common::{executor::{SpawnFuture, Timer}, log::info}; use futures::channel::oneshot::{self, Receiver, Sender}; + use mm2_core::mm_ctx::MmArc; pub use mm2_event_stream::behaviour::EventBehaviour; use mm2_event_stream::{behaviour::EventInitStatus, Event, EventName, EventStreamConfiguration}; -use mm2_libp2p::behaviours::atomicdex; use serde_json::json; pub struct NetworkEvent { @@ -22,7 +21,7 @@ impl EventBehaviour for NetworkEvent { fn event_name() -> EventName { EventName::NETWORK } async fn handle(self, interval: f64, tx: oneshot::Sender) { - let p2p_ctx = P2PContext::fetch_from_mm_arc(&self.ctx); + let p2p_ctx = crate::p2p_ctx::P2PContext::fetch_from_mm_arc(&self.ctx); let mut 
previously_sent = json!({}); tx.send(EventInitStatus::Success).unwrap(); @@ -30,11 +29,11 @@ impl EventBehaviour for NetworkEvent { loop { let p2p_cmd_tx = p2p_ctx.cmd_tx.lock().clone(); - let directly_connected_peers = atomicdex::get_directly_connected_peers(p2p_cmd_tx.clone()).await; - let gossip_mesh = atomicdex::get_gossip_mesh(p2p_cmd_tx.clone()).await; - let gossip_peer_topics = atomicdex::get_gossip_peer_topics(p2p_cmd_tx.clone()).await; - let gossip_topic_peers = atomicdex::get_gossip_topic_peers(p2p_cmd_tx.clone()).await; - let relay_mesh = atomicdex::get_relay_mesh(p2p_cmd_tx).await; + let directly_connected_peers = crate::get_directly_connected_peers(p2p_cmd_tx.clone()).await; + let gossip_mesh = crate::get_gossip_mesh(p2p_cmd_tx.clone()).await; + let gossip_peer_topics = crate::get_gossip_peer_topics(p2p_cmd_tx.clone()).await; + let gossip_topic_peers = crate::get_gossip_topic_peers(p2p_cmd_tx.clone()).await; + let relay_mesh = crate::get_relay_mesh(p2p_cmd_tx).await; let event_data = json!({ "directly_connected_peers": directly_connected_peers, diff --git a/mm2src/mm2_p2p/src/application/request_response/mod.rs b/mm2src/mm2_p2p/src/application/request_response/mod.rs new file mode 100644 index 0000000000..28da482bdc --- /dev/null +++ b/mm2src/mm2_p2p/src/application/request_response/mod.rs @@ -0,0 +1,21 @@ +//! This module defines types exclusively for the request-response P2P protocol +//! which are separate from other request types such as RPC requests or Gossipsub +//! messages. + +pub mod network_info; +pub mod ordermatch; + +use serde::{Deserialize, Serialize}; + +/// Wrapper type for handling request-response P2P requests. +#[derive(Eq, Debug, Deserialize, PartialEq, Serialize)] +pub enum P2PRequest { + /// Request for order matching. + Ordermatch(ordermatch::OrdermatchRequest), + /// Request for network information from the target peer. + /// + /// TODO: This should be called `PeerInfoRequest` instead. 
However, renaming it + /// will introduce a breaking change in the network and is not worth it. Do this + /// renaming when there is already a breaking change in the release. + NetworkInfo(network_info::NetworkInfoRequest), +} diff --git a/mm2src/mm2_p2p/src/application/request_response/network_info.rs b/mm2src/mm2_p2p/src/application/request_response/network_info.rs new file mode 100644 index 0000000000..c8dece2ef5 --- /dev/null +++ b/mm2src/mm2_p2p/src/application/request_response/network_info.rs @@ -0,0 +1,9 @@ +use serde::{Deserialize, Serialize}; + +/// Wraps the different types of network information requests for the P2P request-response +/// protocol. +#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum NetworkInfoRequest { + /// Get MM2 version of nodes added to stats collection + GetMm2Version, +} diff --git a/mm2src/mm2_p2p/src/application/request_response/ordermatch.rs b/mm2src/mm2_p2p/src/application/request_response/ordermatch.rs new file mode 100644 index 0000000000..250758f594 --- /dev/null +++ b/mm2src/mm2_p2p/src/application/request_response/ordermatch.rs @@ -0,0 +1,46 @@ +use mm2_number::BigRational; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +type AlbOrderedOrderbookPair = String; +type H64 = [u8; 8]; + +#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum BestOrdersAction { + Buy, + Sell, +} + +/// Wraps the different types of order matching requests for the P2P request-response protocol. +/// +/// TODO: We should use fixed sizes for dynamic fields (such as strings and maps) +/// and prefer stricter types instead of accepting `String` for nearly everything. +/// See https://github.com/KomodoPlatform/komodo-defi-framework/issues/2236 for reference. +#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] +pub enum OrdermatchRequest { + /// Get an orderbook for the given pair. 
+ GetOrderbook { base: String, rel: String }, + /// Sync specific pubkey orderbook state if our known Patricia trie state doesn't match the latest keep alive message + SyncPubkeyOrderbookState { + pubkey: String, + /// Request using this condition + trie_roots: HashMap, + }, + /// Request best orders for a specific coin and action. + BestOrders { + coin: String, + action: BestOrdersAction, + volume: BigRational, + }, + /// Get orderbook depth for the specified pairs + OrderbookDepth { pairs: Vec<(String, String)> }, + /// Request best orders for a specific coin and action limited by the number of results. + /// + /// Q: Shouldn't we support pagination here? + BestOrdersByNumber { + coin: String, + action: BestOrdersAction, + number: usize, + }, +} diff --git a/mm2src/mm2_p2p/src/lib.rs b/mm2src/mm2_p2p/src/lib.rs index 27e26e8f06..b1d0283be0 100644 --- a/mm2src/mm2_p2p/src/lib.rs +++ b/mm2src/mm2_p2p/src/lib.rs @@ -6,6 +6,9 @@ mod network; mod relay_address; mod swarm_runtime; +#[cfg(feature = "application")] pub mod application; +pub mod p2p_ctx; + use derive_more::Display; use lazy_static::lazy_static; use secp256k1::{Message as SecpMessage, PublicKey as Secp256k1Pubkey, Secp256k1, SecretKey, SignOnly, Signature, diff --git a/mm2src/mm2_net/src/p2p.rs b/mm2src/mm2_p2p/src/p2p_ctx.rs similarity index 91% rename from mm2src/mm2_net/src/p2p.rs rename to mm2src/mm2_p2p/src/p2p_ctx.rs index 30625cbeb5..2d9d991298 100644 --- a/mm2src/mm2_net/src/p2p.rs +++ b/mm2src/mm2_p2p/src/p2p_ctx.rs @@ -1,10 +1,9 @@ +use libp2p::{identity::Keypair, PeerId}; use mm2_core::mm_ctx::MmArc; -use mm2_libp2p::behaviours::atomicdex::AdexCmdTx; -use mm2_libp2p::PeerId; use parking_lot::Mutex; use std::sync::Arc; -pub use mm2_libp2p::Keypair; +use crate::AdexCmdTx; pub struct P2PContext { /// Using Mutex helps to prevent cloning which can actually result to channel being unbounded in case of using 1 tx clone per 1 message.