From e246ae189910c47501a02239c429db31e2435446 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 20 Jun 2024 02:47:04 -0700 Subject: [PATCH 01/65] init commit --- sequencer/src/context.rs | 96 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 5 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index ab239c751b..1acf013a62 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -9,19 +9,19 @@ use futures::{ stream::{Stream, StreamExt}, }; use hotshot::{ - traits::election::static_committee::GeneralStaticCommittee, - types::{Event, SystemContextHandle}, + traits::{election::static_committee::GeneralStaticCommittee, BlockPayload}, + types::{Event, EventType, SystemContextHandle}, Memberships, Networks, SystemContext, }; -use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::client::{BenchResults, OrchestratorClient}; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, data::ViewNumber, - traits::{election::Membership, metrics::Metrics}, + traits::{block_contents::BlockHeader, election::Membership, metrics::Metrics}, HotShotConfig, }; -use std::fmt::Display; +use std::{fmt::Display, time::Instant}; use url::Url; use vbs::version::StaticVersionType; @@ -251,7 +251,93 @@ impl { + panic!("Error! Event stream completed before consensus ended."); + } + Some(Event { event, .. }) => { + match event { + EventType::Error { error } => { + tracing::error!("Error in consensus: {:?}", error); + } + EventType::Decide { + leaf_chain, + qc: _, + block_size, + } => { + if let Some(leaf_info) = leaf_chain.first() { + let leaf = &leaf_info.leaf; + tracing::info!("Decide event for leaf: {}", *leaf.view_number()); + + // iterate all the decided transactions + // Sishan TODO: to calculate latency + if let Some(block_payload) = &leaf.block_payload() { + for tx in + block_payload.transactions(leaf.block_header().metadata()) + { + println!("tx = {:?}", tx); + } + } + } + + if let Some(size) = block_size { + total_transactions_committed += size; + } + + num_successful_commits += leaf_chain.len(); + if num_successful_commits % 100 == 0 { + let bench_results: BenchResults; + println!("[{node_index}]: {num_successful_commits} rounds completed - Total transactions committed: {total_transactions_committed}"); + if num_successful_commits == 100 { + if total_transactions_committed != 0 { + let total_time_elapsed = start.elapsed(); // in seconds + let total_time_elapsed_sec = + std::cmp::max(total_time_elapsed.as_secs(), 1u64); + let throughput_bytes_per_sec = total_transactions_committed + * 8 //Sishan TODO: transaction_size_in_bytes + / total_time_elapsed_sec; + // Sishan TODO: for latency and failed view number + bench_results = BenchResults { + avg_latency_in_sec: 0, + num_latency: 1, + minimum_latency_in_sec: 0, + maximum_latency_in_sec: 0, + throughput_bytes_per_sec, + total_transactions_committed, + transaction_size_in_bytes: 8, //transaction_size_in_bytes, + total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + total_num_views: 100, + failed_num_views: 0, + }; + } else { + bench_results = BenchResults::default(); + } + if let Some(orchestrator_client) = &self.wait_for_orchestrator { + orchestrator_client.post_bench_results(bench_results).await; + } + } + } + if leaf_chain.len() > 1 { + tracing::warn!( + "Leaf chain is greater than 1 with len {}", + leaf_chain.len() + ); + } + // when we make progress, submit new events + } + _ => {} // mostly DA proposal + } + } + } + } } /// Spawn 
a background task attached to this context. From e8e5f40f143a42bf1a47e1e2f37c86e8d3fd4692 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 20 Jun 2024 17:23:43 -0700 Subject: [PATCH 02/65] get transaction_size from OrchestratorState --- sequencer/src/context.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 1acf013a62..34460d0967 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -13,7 +13,10 @@ use hotshot::{ types::{Event, EventType, SystemContextHandle}, Memberships, Networks, SystemContext, }; -use hotshot_orchestrator::client::{BenchResults, OrchestratorClient}; +use hotshot_orchestrator::{ + client::{BenchResults, OrchestratorClient}, + config::NetworkConfig, +}; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -244,11 +247,15 @@ impl = + orchestrator_client.get_config_after_collection().await; + transaction_size_in_bytes = network_config.transaction_size as u64; } tracing::warn!("starting consensus"); let start = Instant::now(); @@ -299,11 +306,9 @@ impl Date: Fri, 21 Jun 2024 15:29:02 -0700 Subject: [PATCH 03/65] format --- sequencer/src/context.rs | 72 +++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 30 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 34460d0967..6374ae80d0 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -21,7 +21,10 @@ use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, data::ViewNumber, - traits::{block_contents::BlockHeader, election::Membership, metrics::Metrics}, + traits::{ + block_contents::BlockHeader, election::Membership, metrics::Metrics, + node_implementation::ConsensusTime, + }, HotShotConfig, }; use std::{fmt::Display, time::Instant}; @@ -247,7 +250,8 @@ impl = orchestrator_client.get_config_after_collection().await; transaction_size_in_bytes = network_config.transaction_size as u64; + // rounds = network_config.rounds; // uncomment after ORCHESTRATOR_DEFAULT_NUM_ROUNDS is updated + } else { + transaction_size_in_bytes = 0; + tracing::error!("Cannot get transaction_size_in_bytes from orchestrator client"); } tracing::warn!("starting consensus"); let start = Instant::now(); @@ -300,43 +308,47 @@ impl 1 { tracing::warn!( "Leaf chain is greater than 1 with len {}", leaf_chain.len() ); } - // when we make progress, submit new events } _ => {} // mostly DA proposal } From 0fadacc4b871f29bc8b941536833186a038d455c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 21 Jun 2024 17:22:59 -0700 Subject: [PATCH 04/65] real throughput --- sequencer/src/context.rs | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 6374ae80d0..83f454a88d 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -13,10 +13,7 @@ use hotshot::{ types::{Event, EventType, SystemContextHandle}, Memberships, Networks, SystemContext, }; -use hotshot_orchestrator::{ - client::{BenchResults, OrchestratorClient}, - config::NetworkConfig, -}; +use hotshot_orchestrator::client::{BenchResults, OrchestratorClient}; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -250,20 +247,18 @@ impl = - orchestrator_client.get_config_after_collection().await; - transaction_size_in_bytes = network_config.transaction_size as u64; - // rounds = network_config.rounds; // 
uncomment after ORCHESTRATOR_DEFAULT_NUM_ROUNDS is updated + // Sishan: uncomment after ORCHESTRATOR_DEFAULT_NUM_ROUNDS in HotShot is updated to 100. + // let network_config: NetworkConfig = + // orchestrator_client.get_config_after_collection().await; + // rounds = network_config.rounds; } else { - transaction_size_in_bytes = 0; - tracing::error!("Cannot get transaction_size_in_bytes from orchestrator client"); + tracing::error!("Cannot get info from orchestrator client"); } tracing::warn!("starting consensus"); let start = Instant::now(); @@ -272,6 +267,7 @@ impl + let tx_sz = payload_length * std::mem::size_of::() + + std::mem::size_of::(); + total_throughput += tx_sz; } } } @@ -317,10 +317,8 @@ impl Date: Sun, 23 Jun 2024 16:47:30 -0700 Subject: [PATCH 05/65] add a warm up before calculating benchmark --- sequencer/src/bin/submit-transactions.rs | 1 + sequencer/src/context.rs | 54 +++++++++++++++--------- sequencer/src/transaction.rs | 12 ++++++ 3 files changed, 47 insertions(+), 20 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index ba2941d048..3288ecc709 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -295,6 +295,7 @@ async fn submit_transactions( } { tracing::error!("failed to submit batch of {txns_batch_count} transactions: {err}"); } else { + println!("submitted batch of {txns_batch_count} transactions"); tracing::info!("submitted batch of {txns_batch_count} transactions"); let submitted_at = Instant::now(); for hash in hashes.iter() { diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 83f454a88d..63c0cb36af 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -247,7 +247,6 @@ impl { @@ -287,29 +289,39 @@ impl - let tx_sz = payload_length * std::mem::size_of::() - + std::mem::size_of::(); - total_throughput += tx_sz; + num_successful_commits += leaf_chain.len(); + + // only count in the info after warm up + if num_successful_commits >= start_rounds { + // iterate all the decided transactions + // Sishan TODO: to calculate latency + if let Some(block_payload) = &leaf.block_payload() { + for tx in block_payload + .transactions(leaf.block_header().metadata()) + { + let payload_length = tx.into_payload().len(); + // Transaction = NamespaceId(u64) + payload(Vec) + has_timestamp(bool) + println!( + "Transaction Structure size = {:?}", + std::mem::size_of::() + ); + let tx_sz = payload_length * std::mem::size_of::() // size of payload + + std::mem::size_of::() // size of the namespace + + std::mem::size_of::() // size of has_timestamp + + std::mem::size_of::(); // size of the struct wrapper + total_throughput += tx_sz; + } } } } - if let Some(size) = block_size { - total_transactions_committed += size; + if num_successful_commits >= start_rounds { + if let Some(size) = block_size { + total_transactions_committed += size; + } } - num_successful_commits += leaf_chain.len(); - - if num_successful_commits == rounds { + if num_successful_commits >= end_rounds { let total_time_elapsed = start.elapsed(); // in seconds let consensus_lock = self.handle.read().await.hotshot.consensus(); let consensus = consensus_lock.read().await; @@ -327,7 +339,8 @@ impl 1 { diff --git a/sequencer/src/transaction.rs b/sequencer/src/transaction.rs index e4ac5f4ae1..54e5aeb4d4 100644 --- a/sequencer/src/transaction.rs +++ b/sequencer/src/transaction.rs @@ -107,13 +107,25 @@ pub struct Transaction { namespace: NamespaceId, #[serde(with = "base64_bytes")] 
payload: Vec, + #[cfg(feature = "benchmarking")] + has_timestamp: bool, } impl Transaction { + #[cfg(not(feature = "benchmarking"))] pub fn new(namespace: NamespaceId, payload: Vec) -> Self { Self { namespace, payload } } + #[cfg(feature = "benchmarking")] + pub fn new(namespace: NamespaceId, payload: Vec, has_timestamp: bool) -> Self { + Self { + namespace, + payload, + has_timestamp, + } + } + pub fn namespace(&self) -> NamespaceId { self.namespace } From 3f048caaaa457872ed8ef46af13865702b6b8aed Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 25 Jun 2024 00:02:46 -0700 Subject: [PATCH 06/65] add latency --- Cargo.lock | 1 + scripts/benchmarks_results/upload_results.csv | 9 +++ sequencer/Cargo.toml | 1 + sequencer/src/bin/submit-transactions.rs | 61 ++++++++++++++++++- sequencer/src/context.rs | 21 +++---- sequencer/src/transaction.rs | 12 ---- 6 files changed, 81 insertions(+), 24 deletions(-) create mode 100644 scripts/benchmarks_results/upload_results.csv diff --git a/Cargo.lock b/Cargo.lock index 6ecb064561..4e510265a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8618,6 +8618,7 @@ dependencies = [ "cld", "committable", "contract-bindings", + "csv", "derivative", "derive_more", "dotenvy", diff --git a/scripts/benchmarks_results/upload_results.csv b/scripts/benchmarks_results/upload_results.csv new file mode 100644 index 0000000000..2ce7857883 --- /dev/null +++ b/scripts/benchmarks_results/upload_results.csv @@ -0,0 +1,9 @@ +Note: actual rounds = 100 (usually calculate info between round 20 ~ 120), actual transactions_per_round is a distribution between 1~20, transaction_size is the avg size of all txs +commit_sha,total_nodes,da_committee_size,transactions_per_round,transaction_size,rounds,leader_election_type,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,throughput_bytes_per_sec,total_transactions_committed,total_time_elapsed_in_sec,total_num_views,failed_num_views +20-120-no-latency,5,5,10,542,10,static-leader-selection,0,0,0,2022,149,40,121,1 +commit_sha,total_nodes,da_committee_size,transactions_per_round,transaction_size,rounds,leader_election_type,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,throughput_bytes_per_sec,total_transactions_committed,total_time_elapsed_in_sec,total_num_views,failed_num_views +20-120,5,5,10,578,10,static-leader-selection,0,0,0,2826,303,62,121,1 +private_pool_avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec +3,0,7 +public_pool_avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec +2,0,7 \ No newline at end of file diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 8c8c5fe582..0065e4af4c 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -54,6 +54,7 @@ clap = { workspace = true } cld = { workspace = true } committable = "0.2" contract-bindings = { path = "../contract-bindings" } +csv = "1" derivative = "2.2" derive_more = { workspace = true } dotenvy = { workspace = true } diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 3288ecc709..ce069f6738 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -4,6 +4,7 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::{sleep, spawn}; use clap::Parser; use committable::{Commitment, Committable}; +use csv::Writer; use es_version::{SequencerVersion, SEQUENCER_VERSION}; use futures::{ channel::mpsc::{self, Sender}, @@ -20,6 +21,7 @@ use sequencer::{ }; use std::{ 
collections::HashMap, + fs::OpenOptions, time::{Duration, Instant}, }; use surf_disco::{Client, Url}; @@ -180,6 +182,12 @@ async fn main() { let mut pending = HashMap::new(); let mut total_latency = Duration::default(); let mut total_transactions = 0; + let mut num_successful_commits = 0; + let mut benchmark_total_latency = Duration::default(); + let mut benchmark_minimum_latency = Duration::default(); + let mut benchmark_maximum_latency = Duration::default(); + let mut benchmark_total_transactions = 0; + let mut benchmark_finish = false; while let Some(block) = blocks.next().await { let block: BlockQueryData = match block { Ok(block) => block, @@ -190,6 +198,7 @@ async fn main() { }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); + num_successful_commits += 1; // Get all transactions which were submitted before this block. while let Ok(Some(tx)) = receiver.try_next() { @@ -208,9 +217,60 @@ async fn main() { total_latency += latency; total_transactions += 1; tracing::info!("average latency: {:?}", total_latency / total_transactions); + + if !benchmark_finish && (20..=120).contains(&num_successful_commits) { + benchmark_minimum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::min(benchmark_minimum_latency, latency) + }; + benchmark_maximum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::max(benchmark_maximum_latency, latency) + }; + + benchmark_total_latency += latency; + benchmark_total_transactions += 1; + } } } + if !benchmark_finish && num_successful_commits > 120 { + let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; + // Open the CSV file in append mode + let results_csv_file = OpenOptions::new() + .create(true) + .append(true) // Open in append mode + .open("scripts/benchmarks_results/results.csv") + .unwrap(); + // Open a file for writing + let mut wtr = Writer::from_writer(results_csv_file); + if opt.use_public_mempool() { + let _ = wtr.write_record([ + "public_pool_avg_latency_in_sec", + "minimum_latency_in_sec", + "maximum_latency_in_sec", + ]); + } else { + let _ = wtr.write_record([ + "private_pool_avg_latency_in_sec", + "minimum_latency_in_sec", + "maximum_latency_in_sec", + ]); + } + let _ = wtr.write_record(&[ + benchmark_average_latency.as_secs().to_string(), + benchmark_minimum_latency.as_secs().to_string(), + benchmark_maximum_latency.as_secs().to_string(), + ]); + let _ = wtr.flush(); + println!( + "Latency results successfully saved in scripts/benchmarks_results/results.csv" + ); + benchmark_finish = true; + } + // If a lot of transactions are pending, it might indicate the sequencer is struggling to // finalize them. We should warn about this. 
if pending.len() >= opt.pending_transactions_warning_threshold { @@ -295,7 +355,6 @@ async fn submit_transactions( } { tracing::error!("failed to submit batch of {txns_batch_count} transactions: {err}"); } else { - println!("submitted batch of {txns_batch_count} transactions"); tracing::info!("submitted batch of {txns_batch_count} transactions"); let submitted_at = Instant::now(); for hash in hashes.iter() { diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 63c0cb36af..f5dce80c72 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -260,7 +260,6 @@ impl { @@ -293,21 +294,20 @@ impl= start_rounds { + if !has_started { + start = Instant::now(); + has_started = true; + } + // iterate all the decided transactions - // Sishan TODO: to calculate latency if let Some(block_payload) = &leaf.block_payload() { for tx in block_payload .transactions(leaf.block_header().metadata()) { let payload_length = tx.into_payload().len(); - // Transaction = NamespaceId(u64) + payload(Vec) + has_timestamp(bool) - println!( - "Transaction Structure size = {:?}", - std::mem::size_of::() - ); + // Transaction = NamespaceId(u64) + payload(Vec) let tx_sz = payload_length * std::mem::size_of::() // size of payload + std::mem::size_of::() // size of the namespace - + std::mem::size_of::() // size of has_timestamp + std::mem::size_of::(); // size of the struct wrapper total_throughput += tx_sz; } @@ -331,9 +331,8 @@ impl, - #[cfg(feature = "benchmarking")] - has_timestamp: bool, } impl Transaction { - #[cfg(not(feature = "benchmarking"))] pub fn new(namespace: NamespaceId, payload: Vec) -> Self { Self { namespace, payload } } - #[cfg(feature = "benchmarking")] - pub fn new(namespace: NamespaceId, payload: Vec, has_timestamp: bool) -> Self { - Self { - namespace, - payload, - has_timestamp, - } - } - pub fn namespace(&self) -> NamespaceId { self.namespace } From 59cb269a128a5afa710b3855253339f9326bf076 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 25 Jun 2024 16:06:43 -0700 Subject: [PATCH 07/65] add feature benchmarking --- justfile | 4 + sequencer/Cargo.toml | 1 + sequencer/src/bin/submit-transactions.rs | 54 ++++-- sequencer/src/context.rs | 216 ++++++++++++----------- 4 files changed, 157 insertions(+), 118 deletions(-) diff --git a/justfile b/justfile index 8a6596c8f7..5d9220a67f 100644 --- a/justfile +++ b/justfile @@ -11,6 +11,10 @@ demo-native: cargo build --release scripts/demo-native +demo-native-benchmark: + cargo build --release --features benchmarking + scripts/demo-native + down *args: docker compose down {{args}} diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 0065e4af4c..a878da4e83 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -8,6 +8,7 @@ edition = "2021" [features] testing = ["hotshot-testing"] libp2p = [] +benchmarking = [] [[bin]] name = "espresso-dev-node" diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index ce069f6738..34cbdf9d81 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -4,7 +4,6 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::{sleep, spawn}; use clap::Parser; use committable::{Commitment, Committable}; -use csv::Writer; use es_version::{SequencerVersion, SEQUENCER_VERSION}; use futures::{ channel::mpsc::{self, Sender}, @@ -21,13 +20,17 @@ use sequencer::{ }; use std::{ collections::HashMap, - fs::OpenOptions, time::{Duration, Instant}, }; use 
surf_disco::{Client, Url}; use tide_disco::{error::ServerError, App}; use vbs::version::StaticVersionType; +#[cfg(feature = "benchmarking")] +use csv::Writer; +#[cfg(feature = "benchmarking")] +use std::fs::OpenOptions; + /// Submit random transactions to an Espresso Sequencer. #[derive(Clone, Debug, Parser)] struct Options { @@ -182,12 +185,21 @@ async fn main() { let mut pending = HashMap::new(); let mut total_latency = Duration::default(); let mut total_transactions = 0; + + // Keep track of the latency after warm up for benchmarking + #[cfg(feature = "benchmarking")] let mut num_successful_commits = 0; + #[cfg(feature = "benchmarking")] let mut benchmark_total_latency = Duration::default(); + #[cfg(feature = "benchmarking")] let mut benchmark_minimum_latency = Duration::default(); + #[cfg(feature = "benchmarking")] let mut benchmark_maximum_latency = Duration::default(); + #[cfg(feature = "benchmarking")] let mut benchmark_total_transactions = 0; + #[cfg(feature = "benchmarking")] let mut benchmark_finish = false; + while let Some(block) = blocks.next().await { let block: BlockQueryData = match block { Ok(block) => block, @@ -198,7 +210,10 @@ async fn main() { }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); - num_successful_commits += 1; + #[cfg(feature = "benchmarking")] + { + num_successful_commits += 1; + } // Get all transactions which were submitted before this block. while let Ok(Some(tx)) = receiver.try_next() { @@ -217,25 +232,28 @@ async fn main() { total_latency += latency; total_transactions += 1; tracing::info!("average latency: {:?}", total_latency / total_transactions); - - if !benchmark_finish && (20..=120).contains(&num_successful_commits) { - benchmark_minimum_latency = if total_transactions == 0 { - latency - } else { - std::cmp::min(benchmark_minimum_latency, latency) - }; - benchmark_maximum_latency = if total_transactions == 0 { - latency - } else { - std::cmp::max(benchmark_maximum_latency, latency) - }; - - benchmark_total_latency += latency; - benchmark_total_transactions += 1; + #[cfg(feature = "benchmarking")] + { + if !benchmark_finish && (20..=120).contains(&num_successful_commits) { + benchmark_minimum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::min(benchmark_minimum_latency, latency) + }; + benchmark_maximum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::max(benchmark_maximum_latency, latency) + }; + + benchmark_total_latency += latency; + benchmark_total_transactions += 1; + } } } } + #[cfg(feature = "benchmarking")] if !benchmark_finish && num_successful_commits > 120 { let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; // Open the CSV file in append mode diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index f5dce80c72..6ff18cb3fd 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -9,25 +9,31 @@ use futures::{ stream::{Stream, StreamExt}, }; use hotshot::{ - traits::{election::static_committee::GeneralStaticCommittee, BlockPayload}, - types::{Event, EventType, SystemContextHandle}, + traits::election::static_committee::GeneralStaticCommittee, + types::{Event, SystemContextHandle}, Memberships, Networks, SystemContext, }; -use hotshot_orchestrator::client::{BenchResults, OrchestratorClient}; +use hotshot_orchestrator::client::OrchestratorClient; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, data::ViewNumber, - traits::{ - block_contents::BlockHeader, 
election::Membership, metrics::Metrics, - node_implementation::ConsensusTime, - }, + traits::{election::Membership, metrics::Metrics}, HotShotConfig, }; -use std::{fmt::Display, time::Instant}; +use std::fmt::Display; use url::Url; use vbs::version::StaticVersionType; +#[cfg(feature = "benchmarking")] +use hotshot::{traits::BlockPayload, types::EventType}; +#[cfg(feature = "benchmarking")] +use hotshot_orchestrator::{client::BenchResults, config::NetworkConfig}; +#[cfg(feature = "benchmarking")] +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; +#[cfg(feature = "benchmarking")] +use std::time::Instant; + use crate::{ network, persistence::SequencerPersistence, state_signature::StateSigner, static_stake_table_commitment, Node, NodeState, PubKey, SeqTypes, Transaction, ValidatedState, @@ -252,116 +258,126 @@ impl = - // orchestrator_client.get_config_after_collection().await; - // rounds = network_config.rounds; + #[cfg(feature = "benchmarking")] + { + let network_config: NetworkConfig = + orchestrator_client.get_config_after_collection().await; + // Sishan: change to useful parameter once Hotshot updated to 0.5.60 + let _rounds = network_config.rounds; + } } else { tracing::error!("Cannot get info from orchestrator client"); } tracing::warn!("starting consensus"); self.handle.read().await.hotshot.start_consensus().await; - // number of rounds for warm up, which will not be counted in for benchmarking phase - let start_rounds: usize = 20; - let end_rounds: usize = 120; - let mut event_stream = self.event_stream().await; - let mut num_successful_commits = 0; - let mut total_transactions_committed = 0; - let mut total_throughput = 0; - let node_index: u64 = self.node_state().node_id; - let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true - let mut has_started: bool = false; - loop { - match event_stream.next().await { - None => { - panic!("Error! Event stream completed before consensus ended."); - } - Some(Event { event, .. }) => { - match event { - EventType::Error { error } => { - tracing::error!("Error in consensus: {:?}", error); - } - EventType::Decide { - leaf_chain, - qc: _, - block_size, - } => { - if let Some(leaf_info) = leaf_chain.first() { - let leaf = &leaf_info.leaf; - tracing::info!("Decide event for leaf: {}", *leaf.view_number()); - num_successful_commits += leaf_chain.len(); - - // only count in the info after warm up - if num_successful_commits >= start_rounds { - if !has_started { - start = Instant::now(); - has_started = true; - } + #[cfg(feature = "benchmarking")] + { + // number of rounds for warm up, which will not be counted in for benchmarking phase + let start_rounds: usize = 20; + let end_rounds: usize = 120; + let mut event_stream = self.event_stream().await; + let mut num_successful_commits = 0; + let mut total_transactions_committed = 0; + let mut total_throughput = 0; + let node_index: u64 = self.node_state().node_id; + let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true + let mut has_started: bool = false; + loop { + match event_stream.next().await { + None => { + panic!("Error! Event stream completed before consensus ended."); + } + Some(Event { event, .. 
}) => { + match event { + EventType::Error { error } => { + tracing::error!("Error in consensus: {:?}", error); + } + EventType::Decide { + leaf_chain, + qc: _, + block_size, + } => { + if let Some(leaf_info) = leaf_chain.first() { + let leaf = &leaf_info.leaf; + tracing::info!( + "Decide event for leaf: {}", + *leaf.view_number() + ); + num_successful_commits += leaf_chain.len(); + + // only count in the info after warm up + if num_successful_commits >= start_rounds { + if !has_started { + start = Instant::now(); + has_started = true; + } - // iterate all the decided transactions - if let Some(block_payload) = &leaf.block_payload() { - for tx in block_payload - .transactions(leaf.block_header().metadata()) - { - let payload_length = tx.into_payload().len(); - // Transaction = NamespaceId(u64) + payload(Vec) - let tx_sz = payload_length * std::mem::size_of::() // size of payload - + std::mem::size_of::() // size of the namespace - + std::mem::size_of::(); // size of the struct wrapper - total_throughput += tx_sz; + // iterate all the decided transactions + if let Some(block_payload) = &leaf.block_payload() { + for tx in block_payload + .transactions(leaf.block_header().metadata()) + { + let payload_length = tx.into_payload().len(); + // Transaction = NamespaceId(u64) + payload(Vec) + let tx_sz = payload_length * std::mem::size_of::() // size of payload + + std::mem::size_of::() // size of the namespace + + std::mem::size_of::(); // size of the struct wrapper + total_throughput += tx_sz; + } } } } - } - if num_successful_commits >= start_rounds { - if let Some(size) = block_size { - total_transactions_committed += size; + if num_successful_commits >= start_rounds { + if let Some(size) = block_size { + total_transactions_committed += size; + } } - } - if num_successful_commits >= end_rounds { - let total_time_elapsed = start.elapsed(); // in seconds - let consensus_lock = self.handle.read().await.hotshot.consensus(); - let consensus = consensus_lock.read().await; - let total_num_views = - usize::try_from(consensus.locked_view().u64()).unwrap(); - let failed_num_views = total_num_views - num_successful_commits; - let bench_results = if total_transactions_committed != 0 { - let throughput_bytes_per_sec = (total_throughput as u64) - / std::cmp::max(total_time_elapsed.as_secs(), 1u64); - BenchResults { - avg_latency_in_sec: 0, // latency will be reported in another struct - num_latency: 1, - minimum_latency_in_sec: 0, - maximum_latency_in_sec: 0, - throughput_bytes_per_sec, - total_transactions_committed, - transaction_size_in_bytes: (total_throughput as u64) - / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size - total_time_elapsed_in_sec: total_time_elapsed.as_secs(), - total_num_views, - failed_num_views, + if num_successful_commits >= end_rounds { + let total_time_elapsed = start.elapsed(); // in seconds + let consensus_lock = + self.handle.read().await.hotshot.consensus(); + let consensus = consensus_lock.read().await; + let total_num_views = + usize::try_from(consensus.locked_view().u64()).unwrap(); + let failed_num_views = total_num_views - num_successful_commits; + let bench_results = if total_transactions_committed != 0 { + let throughput_bytes_per_sec = (total_throughput as u64) + / std::cmp::max(total_time_elapsed.as_secs(), 1u64); + BenchResults { + avg_latency_in_sec: 0, // latency will be reported in another struct + num_latency: 1, + minimum_latency_in_sec: 0, + maximum_latency_in_sec: 0, + throughput_bytes_per_sec, + 
total_transactions_committed, + transaction_size_in_bytes: (total_throughput as u64) + / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size + total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + total_num_views, + failed_num_views, + } + } else { + BenchResults::default() + }; + println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); + if let Some(orchestrator_client) = &self.wait_for_orchestrator { + orchestrator_client.post_bench_results(bench_results).await; } - } else { - BenchResults::default() - }; - println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); - if let Some(orchestrator_client) = &self.wait_for_orchestrator { - orchestrator_client.post_bench_results(bench_results).await; + break; } - break; - } - if leaf_chain.len() > 1 { - tracing::warn!( - "Leaf chain is greater than 1 with len {}", - leaf_chain.len() - ); + if leaf_chain.len() > 1 { + tracing::warn!( + "Leaf chain is greater than 1 with len {}", + leaf_chain.len() + ); + } } + _ => {} // mostly DA proposal } - _ => {} // mostly DA proposal } } } From a4b6d8f2334ba5a02956f92203c82d1515bb87c3 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 25 Jun 2024 16:40:07 -0700 Subject: [PATCH 08/65] prepare to sync main --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e510265a0..40e023e4d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7479,7 +7479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.66", @@ -10610,9 +10610,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna 0.5.0", From a3f893493cf93eacc198aff5fc6bffdaee567daf Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 25 Jun 2024 22:18:33 -0700 Subject: [PATCH 09/65] comment out my changes --- sequencer/src/bin/submit-transactions.rs | 146 ++++++------- sequencer/src/context.rs | 252 +++++++++++------------ 2 files changed, 199 insertions(+), 199 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 34cbdf9d81..785a9274da 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -26,10 +26,10 @@ use surf_disco::{Client, Url}; use tide_disco::{error::ServerError, App}; use vbs::version::StaticVersionType; -#[cfg(feature = "benchmarking")] -use csv::Writer; -#[cfg(feature = "benchmarking")] -use std::fs::OpenOptions; +// #[cfg(feature = "benchmarking")] +// use csv::Writer; +// #[cfg(feature = "benchmarking")] +// use std::fs::OpenOptions; /// Submit random transactions to an Espresso Sequencer. 
#[derive(Clone, Debug, Parser)] @@ -187,18 +187,18 @@ async fn main() { let mut total_transactions = 0; // Keep track of the latency after warm up for benchmarking - #[cfg(feature = "benchmarking")] - let mut num_successful_commits = 0; - #[cfg(feature = "benchmarking")] - let mut benchmark_total_latency = Duration::default(); - #[cfg(feature = "benchmarking")] - let mut benchmark_minimum_latency = Duration::default(); - #[cfg(feature = "benchmarking")] - let mut benchmark_maximum_latency = Duration::default(); - #[cfg(feature = "benchmarking")] - let mut benchmark_total_transactions = 0; - #[cfg(feature = "benchmarking")] - let mut benchmark_finish = false; + // #[cfg(feature = "benchmarking")] + // let mut num_successful_commits = 0; + // #[cfg(feature = "benchmarking")] + // let mut benchmark_total_latency = Duration::default(); + // #[cfg(feature = "benchmarking")] + // let mut benchmark_minimum_latency = Duration::default(); + // #[cfg(feature = "benchmarking")] + // let mut benchmark_maximum_latency = Duration::default(); + // #[cfg(feature = "benchmarking")] + // let mut benchmark_total_transactions = 0; + // #[cfg(feature = "benchmarking")] + // let mut benchmark_finish = false; while let Some(block) = blocks.next().await { let block: BlockQueryData = match block { @@ -210,10 +210,10 @@ async fn main() { }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); - #[cfg(feature = "benchmarking")] - { - num_successful_commits += 1; - } + // #[cfg(feature = "benchmarking")] + // { + // num_successful_commits += 1; + // } // Get all transactions which were submitted before this block. while let Ok(Some(tx)) = receiver.try_next() { @@ -232,62 +232,62 @@ async fn main() { total_latency += latency; total_transactions += 1; tracing::info!("average latency: {:?}", total_latency / total_transactions); - #[cfg(feature = "benchmarking")] - { - if !benchmark_finish && (20..=120).contains(&num_successful_commits) { - benchmark_minimum_latency = if total_transactions == 0 { - latency - } else { - std::cmp::min(benchmark_minimum_latency, latency) - }; - benchmark_maximum_latency = if total_transactions == 0 { - latency - } else { - std::cmp::max(benchmark_maximum_latency, latency) - }; - - benchmark_total_latency += latency; - benchmark_total_transactions += 1; - } - } + // #[cfg(feature = "benchmarking")] + // { + // if !benchmark_finish && (20..=120).contains(&num_successful_commits) { + // benchmark_minimum_latency = if total_transactions == 0 { + // latency + // } else { + // std::cmp::min(benchmark_minimum_latency, latency) + // }; + // benchmark_maximum_latency = if total_transactions == 0 { + // latency + // } else { + // std::cmp::max(benchmark_maximum_latency, latency) + // }; + + // benchmark_total_latency += latency; + // benchmark_total_transactions += 1; + // } + // } } } - #[cfg(feature = "benchmarking")] - if !benchmark_finish && num_successful_commits > 120 { - let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; - // Open the CSV file in append mode - let results_csv_file = OpenOptions::new() - .create(true) - .append(true) // Open in append mode - .open("scripts/benchmarks_results/results.csv") - .unwrap(); - // Open a file for writing - let mut wtr = Writer::from_writer(results_csv_file); - if opt.use_public_mempool() { - let _ = wtr.write_record([ - "public_pool_avg_latency_in_sec", - "minimum_latency_in_sec", - "maximum_latency_in_sec", - ]); - } else { - let _ = wtr.write_record([ - 
"private_pool_avg_latency_in_sec", - "minimum_latency_in_sec", - "maximum_latency_in_sec", - ]); - } - let _ = wtr.write_record(&[ - benchmark_average_latency.as_secs().to_string(), - benchmark_minimum_latency.as_secs().to_string(), - benchmark_maximum_latency.as_secs().to_string(), - ]); - let _ = wtr.flush(); - println!( - "Latency results successfully saved in scripts/benchmarks_results/results.csv" - ); - benchmark_finish = true; - } + // #[cfg(feature = "benchmarking")] + // if !benchmark_finish && num_successful_commits > 120 { + // let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; + // // Open the CSV file in append mode + // let results_csv_file = OpenOptions::new() + // .create(true) + // .append(true) // Open in append mode + // .open("scripts/benchmarks_results/results.csv") + // .unwrap(); + // // Open a file for writing + // let mut wtr = Writer::from_writer(results_csv_file); + // if opt.use_public_mempool() { + // let _ = wtr.write_record([ + // "public_pool_avg_latency_in_sec", + // "minimum_latency_in_sec", + // "maximum_latency_in_sec", + // ]); + // } else { + // let _ = wtr.write_record([ + // "private_pool_avg_latency_in_sec", + // "minimum_latency_in_sec", + // "maximum_latency_in_sec", + // ]); + // } + // let _ = wtr.write_record(&[ + // benchmark_average_latency.as_secs().to_string(), + // benchmark_minimum_latency.as_secs().to_string(), + // benchmark_maximum_latency.as_secs().to_string(), + // ]); + // let _ = wtr.flush(); + // println!( + // "Latency results successfully saved in scripts/benchmarks_results/results.csv" + // ); + // benchmark_finish = true; + // } // If a lot of transactions are pending, it might indicate the sequencer is struggling to // finalize them. We should warn about this. 
diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 6ff18cb3fd..a32e16b296 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -25,14 +25,14 @@ use std::fmt::Display; use url::Url; use vbs::version::StaticVersionType; -#[cfg(feature = "benchmarking")] -use hotshot::{traits::BlockPayload, types::EventType}; -#[cfg(feature = "benchmarking")] -use hotshot_orchestrator::{client::BenchResults, config::NetworkConfig}; -#[cfg(feature = "benchmarking")] -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; -#[cfg(feature = "benchmarking")] -use std::time::Instant; +// #[cfg(feature = "benchmarking")] +// use hotshot::{traits::BlockPayload, types::EventType}; +// #[cfg(feature = "benchmarking")] +// use hotshot_orchestrator::{client::BenchResults, config::NetworkConfig}; +// #[cfg(feature = "benchmarking")] +// use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; +// #[cfg(feature = "benchmarking")] +// use std::time::Instant; use crate::{ network, persistence::SequencerPersistence, state_signature::StateSigner, @@ -258,130 +258,130 @@ impl = - orchestrator_client.get_config_after_collection().await; - // Sishan: change to useful parameter once Hotshot updated to 0.5.60 - let _rounds = network_config.rounds; - } + // #[cfg(feature = "benchmarking")] + // { + // let network_config: NetworkConfig = + // orchestrator_client.get_config_after_collection().await; + // // Sishan: change to useful parameter once Hotshot updated to 0.5.60 + // let _rounds = network_config.rounds; + // } } else { tracing::error!("Cannot get info from orchestrator client"); } tracing::warn!("starting consensus"); self.handle.read().await.hotshot.start_consensus().await; - #[cfg(feature = "benchmarking")] - { - // number of rounds for warm up, which will not be counted in for benchmarking phase - let start_rounds: usize = 20; - let end_rounds: usize = 120; - let mut event_stream = self.event_stream().await; - let mut num_successful_commits = 0; - let mut total_transactions_committed = 0; - let mut total_throughput = 0; - let node_index: u64 = self.node_state().node_id; - let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true - let mut has_started: bool = false; - loop { - match event_stream.next().await { - None => { - panic!("Error! Event stream completed before consensus ended."); - } - Some(Event { event, .. 
}) => { - match event { - EventType::Error { error } => { - tracing::error!("Error in consensus: {:?}", error); - } - EventType::Decide { - leaf_chain, - qc: _, - block_size, - } => { - if let Some(leaf_info) = leaf_chain.first() { - let leaf = &leaf_info.leaf; - tracing::info!( - "Decide event for leaf: {}", - *leaf.view_number() - ); - num_successful_commits += leaf_chain.len(); - - // only count in the info after warm up - if num_successful_commits >= start_rounds { - if !has_started { - start = Instant::now(); - has_started = true; - } - - // iterate all the decided transactions - if let Some(block_payload) = &leaf.block_payload() { - for tx in block_payload - .transactions(leaf.block_header().metadata()) - { - let payload_length = tx.into_payload().len(); - // Transaction = NamespaceId(u64) + payload(Vec) - let tx_sz = payload_length * std::mem::size_of::() // size of payload - + std::mem::size_of::() // size of the namespace - + std::mem::size_of::(); // size of the struct wrapper - total_throughput += tx_sz; - } - } - } - } - - if num_successful_commits >= start_rounds { - if let Some(size) = block_size { - total_transactions_committed += size; - } - } - - if num_successful_commits >= end_rounds { - let total_time_elapsed = start.elapsed(); // in seconds - let consensus_lock = - self.handle.read().await.hotshot.consensus(); - let consensus = consensus_lock.read().await; - let total_num_views = - usize::try_from(consensus.locked_view().u64()).unwrap(); - let failed_num_views = total_num_views - num_successful_commits; - let bench_results = if total_transactions_committed != 0 { - let throughput_bytes_per_sec = (total_throughput as u64) - / std::cmp::max(total_time_elapsed.as_secs(), 1u64); - BenchResults { - avg_latency_in_sec: 0, // latency will be reported in another struct - num_latency: 1, - minimum_latency_in_sec: 0, - maximum_latency_in_sec: 0, - throughput_bytes_per_sec, - total_transactions_committed, - transaction_size_in_bytes: (total_throughput as u64) - / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size - total_time_elapsed_in_sec: total_time_elapsed.as_secs(), - total_num_views, - failed_num_views, - } - } else { - BenchResults::default() - }; - println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); - if let Some(orchestrator_client) = &self.wait_for_orchestrator { - orchestrator_client.post_bench_results(bench_results).await; - } - break; - } - - if leaf_chain.len() > 1 { - tracing::warn!( - "Leaf chain is greater than 1 with len {}", - leaf_chain.len() - ); - } - } - _ => {} // mostly DA proposal - } - } - } - } - } + // #[cfg(feature = "benchmarking")] + // { + // // number of rounds for warm up, which will not be counted in for benchmarking phase + // let start_rounds: usize = 20; + // let end_rounds: usize = 120; + // let mut event_stream = self.event_stream().await; + // let mut num_successful_commits = 0; + // let mut total_transactions_committed = 0; + // let mut total_throughput = 0; + // let node_index: u64 = self.node_state().node_id; + // let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true + // let mut has_started: bool = false; + // loop { + // match event_stream.next().await { + // None => { + // panic!("Error! Event stream completed before consensus ended."); + // } + // Some(Event { event, .. 
}) => { + // match event { + // EventType::Error { error } => { + // tracing::error!("Error in consensus: {:?}", error); + // } + // EventType::Decide { + // leaf_chain, + // qc: _, + // block_size, + // } => { + // if let Some(leaf_info) = leaf_chain.first() { + // let leaf = &leaf_info.leaf; + // tracing::info!( + // "Decide event for leaf: {}", + // *leaf.view_number() + // ); + // num_successful_commits += leaf_chain.len(); + + // // only count in the info after warm up + // if num_successful_commits >= start_rounds { + // if !has_started { + // start = Instant::now(); + // has_started = true; + // } + + // // iterate all the decided transactions + // if let Some(block_payload) = &leaf.block_payload() { + // for tx in block_payload + // .transactions(leaf.block_header().metadata()) + // { + // let payload_length = tx.into_payload().len(); + // // Transaction = NamespaceId(u64) + payload(Vec) + // let tx_sz = payload_length * std::mem::size_of::() // size of payload + // + std::mem::size_of::() // size of the namespace + // + std::mem::size_of::(); // size of the struct wrapper + // total_throughput += tx_sz; + // } + // } + // } + // } + + // if num_successful_commits >= start_rounds { + // if let Some(size) = block_size { + // total_transactions_committed += size; + // } + // } + + // if num_successful_commits >= end_rounds { + // let total_time_elapsed = start.elapsed(); // in seconds + // let consensus_lock = + // self.handle.read().await.hotshot.consensus(); + // let consensus = consensus_lock.read().await; + // let total_num_views = + // usize::try_from(consensus.locked_view().u64()).unwrap(); + // let failed_num_views = total_num_views - num_successful_commits; + // let bench_results = if total_transactions_committed != 0 { + // let throughput_bytes_per_sec = (total_throughput as u64) + // / std::cmp::max(total_time_elapsed.as_secs(), 1u64); + // BenchResults { + // avg_latency_in_sec: 0, // latency will be reported in another struct + // num_latency: 1, + // minimum_latency_in_sec: 0, + // maximum_latency_in_sec: 0, + // throughput_bytes_per_sec, + // total_transactions_committed, + // transaction_size_in_bytes: (total_throughput as u64) + // / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size + // total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + // total_num_views, + // failed_num_views, + // } + // } else { + // BenchResults::default() + // }; + // println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); + // if let Some(orchestrator_client) = &self.wait_for_orchestrator { + // orchestrator_client.post_bench_results(bench_results).await; + // } + // break; + // } + + // if leaf_chain.len() > 1 { + // tracing::warn!( + // "Leaf chain is greater than 1 with len {}", + // leaf_chain.len() + // ); + // } + // } + // _ => {} // mostly DA proposal + // } + // } + // } + // } + // } } /// Spawn a background task attached to this context. 
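The throughput arithmetic that PATCH 09 comments out above (and PATCH 10 below partially restores) estimates each transaction as its payload bytes plus the in-memory size of the fixed fields, accumulates that into `total_throughput`, and divides by elapsed seconds clamped to at least one. A self-contained sketch of that arithmetic follows, with the sequencer types stubbed to the two-field shape shown in sequencer/src/transaction.rs in these diffs (the stubs are illustrative, not the real types). One observation: `size_of::<Transaction>()` already includes the `namespace` field, so also adding `size_of::<NamespaceId>()`, as the patch does, seems to over-count by a few bytes per transaction.

use std::mem::size_of;
use std::time::Duration;

// Illustrative stand-ins for the sequencer types, shaped after the
// two-field struct in sequencer/src/transaction.rs; not the real types.
#[allow(dead_code)]
struct NamespaceId(u64);
#[allow(dead_code)]
struct Transaction {
    namespace: NamespaceId,
    payload: Vec<u8>,
}

// Per-transaction estimate used by the patch:
// payload bytes + namespace size + struct wrapper size.
fn approx_tx_size(payload_len: usize) -> usize {
    payload_len * size_of::<u8>() + size_of::<NamespaceId>() + size_of::<Transaction>()
}

// Same division-by-zero guard as the patch: clamp elapsed time to >= 1 s.
fn throughput_bytes_per_sec(total_bytes: u64, elapsed: Duration) -> u64 {
    total_bytes / std::cmp::max(elapsed.as_secs(), 1)
}

fn main() {
    // Rough replay of the second upload_results.csv row: 303 transactions of
    // ~578 estimated bytes over 62 s (2826 B/s was recorded there).
    let total_bytes = 303 * approx_tx_size(538) as u64; // 538 B payload ≈ 578 B with this stub layout
    println!("{} bytes/sec", throughput_bytes_per_sec(total_bytes, Duration::from_secs(62)));
}

With the figures logged in upload_results.csv (303 transactions averaging 578 bytes over 62 seconds), this formula lands close to the recorded 2826 bytes per second.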
From 32b92bf91c8c60fec504c1268508189a1d6c7c18 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Tue, 25 Jun 2024 23:56:20 -0700 Subject: [PATCH 10/65] uncomment latency part --- sequencer/src/bin/submit-transactions.rs | 146 +++++++++++------------ 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 785a9274da..34cbdf9d81 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -26,10 +26,10 @@ use surf_disco::{Client, Url}; use tide_disco::{error::ServerError, App}; use vbs::version::StaticVersionType; -// #[cfg(feature = "benchmarking")] -// use csv::Writer; -// #[cfg(feature = "benchmarking")] -// use std::fs::OpenOptions; +#[cfg(feature = "benchmarking")] +use csv::Writer; +#[cfg(feature = "benchmarking")] +use std::fs::OpenOptions; /// Submit random transactions to an Espresso Sequencer. #[derive(Clone, Debug, Parser)] @@ -187,18 +187,18 @@ async fn main() { let mut total_transactions = 0; // Keep track of the latency after warm up for benchmarking - // #[cfg(feature = "benchmarking")] - // let mut num_successful_commits = 0; - // #[cfg(feature = "benchmarking")] - // let mut benchmark_total_latency = Duration::default(); - // #[cfg(feature = "benchmarking")] - // let mut benchmark_minimum_latency = Duration::default(); - // #[cfg(feature = "benchmarking")] - // let mut benchmark_maximum_latency = Duration::default(); - // #[cfg(feature = "benchmarking")] - // let mut benchmark_total_transactions = 0; - // #[cfg(feature = "benchmarking")] - // let mut benchmark_finish = false; + #[cfg(feature = "benchmarking")] + let mut num_successful_commits = 0; + #[cfg(feature = "benchmarking")] + let mut benchmark_total_latency = Duration::default(); + #[cfg(feature = "benchmarking")] + let mut benchmark_minimum_latency = Duration::default(); + #[cfg(feature = "benchmarking")] + let mut benchmark_maximum_latency = Duration::default(); + #[cfg(feature = "benchmarking")] + let mut benchmark_total_transactions = 0; + #[cfg(feature = "benchmarking")] + let mut benchmark_finish = false; while let Some(block) = blocks.next().await { let block: BlockQueryData = match block { @@ -210,10 +210,10 @@ async fn main() { }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); - // #[cfg(feature = "benchmarking")] - // { - // num_successful_commits += 1; - // } + #[cfg(feature = "benchmarking")] + { + num_successful_commits += 1; + } // Get all transactions which were submitted before this block. 
while let Ok(Some(tx)) = receiver.try_next() { @@ -232,62 +232,62 @@ async fn main() { total_latency += latency; total_transactions += 1; tracing::info!("average latency: {:?}", total_latency / total_transactions); - // #[cfg(feature = "benchmarking")] - // { - // if !benchmark_finish && (20..=120).contains(&num_successful_commits) { - // benchmark_minimum_latency = if total_transactions == 0 { - // latency - // } else { - // std::cmp::min(benchmark_minimum_latency, latency) - // }; - // benchmark_maximum_latency = if total_transactions == 0 { - // latency - // } else { - // std::cmp::max(benchmark_maximum_latency, latency) - // }; - - // benchmark_total_latency += latency; - // benchmark_total_transactions += 1; - // } - // } + #[cfg(feature = "benchmarking")] + { + if !benchmark_finish && (20..=120).contains(&num_successful_commits) { + benchmark_minimum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::min(benchmark_minimum_latency, latency) + }; + benchmark_maximum_latency = if total_transactions == 0 { + latency + } else { + std::cmp::max(benchmark_maximum_latency, latency) + }; + + benchmark_total_latency += latency; + benchmark_total_transactions += 1; + } + } } } - // #[cfg(feature = "benchmarking")] - // if !benchmark_finish && num_successful_commits > 120 { - // let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; - // // Open the CSV file in append mode - // let results_csv_file = OpenOptions::new() - // .create(true) - // .append(true) // Open in append mode - // .open("scripts/benchmarks_results/results.csv") - // .unwrap(); - // // Open a file for writing - // let mut wtr = Writer::from_writer(results_csv_file); - // if opt.use_public_mempool() { - // let _ = wtr.write_record([ - // "public_pool_avg_latency_in_sec", - // "minimum_latency_in_sec", - // "maximum_latency_in_sec", - // ]); - // } else { - // let _ = wtr.write_record([ - // "private_pool_avg_latency_in_sec", - // "minimum_latency_in_sec", - // "maximum_latency_in_sec", - // ]); - // } - // let _ = wtr.write_record(&[ - // benchmark_average_latency.as_secs().to_string(), - // benchmark_minimum_latency.as_secs().to_string(), - // benchmark_maximum_latency.as_secs().to_string(), - // ]); - // let _ = wtr.flush(); - // println!( - // "Latency results successfully saved in scripts/benchmarks_results/results.csv" - // ); - // benchmark_finish = true; - // } + #[cfg(feature = "benchmarking")] + if !benchmark_finish && num_successful_commits > 120 { + let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; + // Open the CSV file in append mode + let results_csv_file = OpenOptions::new() + .create(true) + .append(true) // Open in append mode + .open("scripts/benchmarks_results/results.csv") + .unwrap(); + // Open a file for writing + let mut wtr = Writer::from_writer(results_csv_file); + if opt.use_public_mempool() { + let _ = wtr.write_record([ + "public_pool_avg_latency_in_sec", + "minimum_latency_in_sec", + "maximum_latency_in_sec", + ]); + } else { + let _ = wtr.write_record([ + "private_pool_avg_latency_in_sec", + "minimum_latency_in_sec", + "maximum_latency_in_sec", + ]); + } + let _ = wtr.write_record(&[ + benchmark_average_latency.as_secs().to_string(), + benchmark_minimum_latency.as_secs().to_string(), + benchmark_maximum_latency.as_secs().to_string(), + ]); + let _ = wtr.flush(); + println!( + "Latency results successfully saved in scripts/benchmarks_results/results.csv" + ); + benchmark_finish = true; + } // If a lot of 
transactions are pending, it might indicate the sequencer is struggling to // finalize them. We should warn about this. From 063f5f8a6dedd7a2c0afa6eeab51a78991d4287f Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 26 Jun 2024 11:09:35 -0700 Subject: [PATCH 11/65] uncomment partial codes for benchmarks --- sequencer/src/context.rs | 232 +++++++++++++++++++-------------------- 1 file changed, 114 insertions(+), 118 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index a32e16b296..c6d9749c90 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -25,6 +25,8 @@ use std::fmt::Display; use url::Url; use vbs::version::StaticVersionType; +#[cfg(feature = "benchmarking")] +use hotshot::types::EventType; // #[cfg(feature = "benchmarking")] // use hotshot::{traits::BlockPayload, types::EventType}; // #[cfg(feature = "benchmarking")] @@ -258,130 +260,124 @@ impl = - // orchestrator_client.get_config_after_collection().await; - // // Sishan: change to useful parameter once Hotshot updated to 0.5.60 - // let _rounds = network_config.rounds; - // } } else { tracing::error!("Cannot get info from orchestrator client"); } tracing::warn!("starting consensus"); self.handle.read().await.hotshot.start_consensus().await; - // #[cfg(feature = "benchmarking")] - // { - // // number of rounds for warm up, which will not be counted in for benchmarking phase - // let start_rounds: usize = 20; - // let end_rounds: usize = 120; - // let mut event_stream = self.event_stream().await; - // let mut num_successful_commits = 0; - // let mut total_transactions_committed = 0; - // let mut total_throughput = 0; - // let node_index: u64 = self.node_state().node_id; - // let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true - // let mut has_started: bool = false; - // loop { - // match event_stream.next().await { - // None => { - // panic!("Error! Event stream completed before consensus ended."); - // } - // Some(Event { event, .. 
}) => { - // match event { - // EventType::Error { error } => { - // tracing::error!("Error in consensus: {:?}", error); - // } - // EventType::Decide { - // leaf_chain, - // qc: _, - // block_size, - // } => { - // if let Some(leaf_info) = leaf_chain.first() { - // let leaf = &leaf_info.leaf; - // tracing::info!( - // "Decide event for leaf: {}", - // *leaf.view_number() - // ); - // num_successful_commits += leaf_chain.len(); - - // // only count in the info after warm up - // if num_successful_commits >= start_rounds { - // if !has_started { - // start = Instant::now(); - // has_started = true; - // } - - // // iterate all the decided transactions - // if let Some(block_payload) = &leaf.block_payload() { - // for tx in block_payload - // .transactions(leaf.block_header().metadata()) - // { - // let payload_length = tx.into_payload().len(); - // // Transaction = NamespaceId(u64) + payload(Vec) - // let tx_sz = payload_length * std::mem::size_of::() // size of payload - // + std::mem::size_of::() // size of the namespace - // + std::mem::size_of::(); // size of the struct wrapper - // total_throughput += tx_sz; - // } - // } - // } - // } - - // if num_successful_commits >= start_rounds { - // if let Some(size) = block_size { - // total_transactions_committed += size; - // } - // } - - // if num_successful_commits >= end_rounds { - // let total_time_elapsed = start.elapsed(); // in seconds - // let consensus_lock = - // self.handle.read().await.hotshot.consensus(); - // let consensus = consensus_lock.read().await; - // let total_num_views = - // usize::try_from(consensus.locked_view().u64()).unwrap(); - // let failed_num_views = total_num_views - num_successful_commits; - // let bench_results = if total_transactions_committed != 0 { - // let throughput_bytes_per_sec = (total_throughput as u64) - // / std::cmp::max(total_time_elapsed.as_secs(), 1u64); - // BenchResults { - // avg_latency_in_sec: 0, // latency will be reported in another struct - // num_latency: 1, - // minimum_latency_in_sec: 0, - // maximum_latency_in_sec: 0, - // throughput_bytes_per_sec, - // total_transactions_committed, - // transaction_size_in_bytes: (total_throughput as u64) - // / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size - // total_time_elapsed_in_sec: total_time_elapsed.as_secs(), - // total_num_views, - // failed_num_views, - // } - // } else { - // BenchResults::default() - // }; - // println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); - // if let Some(orchestrator_client) = &self.wait_for_orchestrator { - // orchestrator_client.post_bench_results(bench_results).await; - // } - // break; - // } - - // if leaf_chain.len() > 1 { - // tracing::warn!( - // "Leaf chain is greater than 1 with len {}", - // leaf_chain.len() - // ); - // } - // } - // _ => {} // mostly DA proposal - // } - // } - // } - // } - // } + #[cfg(feature = "benchmarking")] + #[allow(unused_variables)] + { + // number of rounds for warm up, which will not be counted in for benchmarking phase + // let start_rounds: usize = 20; + // let end_rounds: usize = 120; + let mut event_stream = self.event_stream().await; + // let mut num_successful_commits = 0; + // let mut total_transactions_committed = 0; + // let mut total_throughput = 0; + // let node_index: u64 = self.node_state().node_id; + // let mut start: Instant = Instant::now(); // will be re-assign once 
has_started turned to true + // let mut has_started: bool = false; + loop { + match event_stream.next().await { + None => { + // panic!("Error! Event stream completed before consensus ended."); + } + Some(Event { event, .. }) => { + match event { + EventType::Error { error } => { + tracing::error!("Error in consensus: {:?}", error); + } + EventType::Decide { + leaf_chain, + qc: _, + block_size, + } => { + // if let Some(leaf_info) = leaf_chain.first() { + // let leaf = &leaf_info.leaf; + // tracing::info!( + // "Decide event for leaf: {}", + // *leaf.view_number() + // ); + // num_successful_commits += leaf_chain.len(); + + // // only count in the info after warm up + // if num_successful_commits >= start_rounds { + // if !has_started { + // start = Instant::now(); + // has_started = true; + // } + + // // iterate all the decided transactions + // if let Some(block_payload) = &leaf.block_payload() { + // for tx in block_payload + // .transactions(leaf.block_header().metadata()) + // { + // let payload_length = tx.into_payload().len(); + // // Transaction = NamespaceId(u64) + payload(Vec<u8>) + // let tx_sz = payload_length * std::mem::size_of::<u8>() // size of payload + // + std::mem::size_of::<u64>() // size of the namespace + // + std::mem::size_of::<Transaction>(); // size of the struct wrapper + // total_throughput += tx_sz; + // } + // } + // } + // } + + // if num_successful_commits >= start_rounds { + // if let Some(size) = block_size { + // total_transactions_committed += size; + // } + // } + + // if num_successful_commits >= end_rounds { + // let total_time_elapsed = start.elapsed(); // in seconds + // let consensus_lock = + // self.handle.read().await.hotshot.consensus(); + // let consensus = consensus_lock.read().await; + // let total_num_views = + // usize::try_from(consensus.locked_view().u64()).unwrap(); + // let failed_num_views = total_num_views - num_successful_commits; + // let bench_results = if total_transactions_committed != 0 { + // let throughput_bytes_per_sec = (total_throughput as u64) + // / std::cmp::max(total_time_elapsed.as_secs(), 1u64); + // BenchResults { + // avg_latency_in_sec: 0, // latency will be reported in another struct + // num_latency: 1, + // minimum_latency_in_sec: 0, + // maximum_latency_in_sec: 0, + // throughput_bytes_per_sec, + // total_transactions_committed, + // transaction_size_in_bytes: (total_throughput as u64) + // / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size + // total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + // total_num_views, + // failed_num_views, + // } + // } else { + // BenchResults::default() + // }; + // println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); + // if let Some(orchestrator_client) = &self.wait_for_orchestrator { + // orchestrator_client.post_bench_results(bench_results).await; + // } + // break; + // } + + if leaf_chain.len() > 1 { + tracing::warn!( + "Leaf chain is greater than 1 with len {}", + leaf_chain.len() + ); + } + } + _ => {} // mostly DA proposal + } + } + } + } + } } /// Spawn a background task attached to this context.
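The benchmarking flow that patches 11 through 13 piece back together is hard to follow through the diffs, so the stand-alone sketch below shows the same idea end to end. It is a simplified reconstruction, not the sequencer code itself: `Tx` stands in for the real `Transaction` type, a plain counter loop stands in for the HotShot event stream, and the 64-byte payload is an arbitrary example value. The warm-up window (`start_rounds`/`end_rounds`), the per-transaction size estimate, and the throughput division do mirror the patch.

use std::time::Instant;

// Stand-in for the sequencer's `Transaction`: NamespaceId(u64) + payload(Vec<u8>).
struct Tx {
    _namespace: u64,
    payload: Vec<u8>,
}

// Per-transaction size estimate used by the patch: payload bytes, plus the
// u64 namespace, plus the size of the wrapping struct itself.
fn tx_size(tx: &Tx) -> usize {
    tx.payload.len() * std::mem::size_of::<u8>()
        + std::mem::size_of::<u64>()
        + std::mem::size_of::<Tx>()
}

fn main() {
    // Rounds before `start_rounds` are warm-up and are not measured;
    // the benchmark window closes after `end_rounds` commits.
    let (start_rounds, end_rounds) = (20usize, 120usize);
    let mut num_successful_commits = 0usize;
    let mut total_throughput = 0usize; // accumulated bytes, as in the patch
    let mut start = Instant::now(); // re-assigned when warm-up ends, mirroring the patch
    let mut has_started = false;

    // One synthetic "decide" event per iteration; the real code drains a
    // HotShot event stream and may commit several leaves per event.
    loop {
        num_successful_commits += 1;
        if num_successful_commits >= start_rounds {
            if !has_started {
                start = Instant::now();
                has_started = true;
            }
            let tx = Tx { _namespace: 0, payload: vec![0u8; 64] };
            total_throughput += tx_size(&tx);
        }
        if num_successful_commits >= end_rounds {
            let elapsed = start.elapsed();
            // max(..., 1) avoids dividing by zero on sub-second runs.
            let throughput_bytes_per_sec =
                (total_throughput as u64) / std::cmp::max(elapsed.as_secs(), 1);
            println!(
                "{num_successful_commits} commits, {total_throughput} bytes counted, ~{throughput_bytes_per_sec} bytes/sec"
            );
            break;
        }
    }
}

One caveat about the estimate itself: `size_of::<Tx>()` counts the in-memory struct (including the `Vec` header) on top of the payload bytes and the namespace, so this is a rough in-memory figure rather than an exact wire size; the patches accept that imprecision and later derive `transaction_size_in_bytes` as `total_throughput / total_transactions_committed`.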
From 9587e121c08bfff713726c195c90fe33059786bc Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 26 Jun 2024 12:07:22 -0700 Subject: [PATCH 12/65] uncomment partial codes for benchmarks again --- sequencer/src/context.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index c6d9749c90..0bdf8bb6ab 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -282,6 +282,7 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp None => { + break; // panic!("Error! Event stream completed before consensus ended."); } Some(Event { event, .. }) => { @@ -365,12 +366,12 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp - if leaf_chain.len() > 1 { - tracing::warn!( - "Leaf chain is greater than 1 with len {}", - leaf_chain.len() - ); - } + // if leaf_chain.len() > 1 { + // tracing::warn!( + // "Leaf chain is greater than 1 with len {}", + // leaf_chain.len() + // ); + // } } _ => {} // mostly DA proposal } From e27572e108e27ef097e1d90d4f5b67ff70b83734 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 26 Jun 2024 19:49:15 -0700 Subject: [PATCH 13/65] should pass ci --- sequencer/src/context.rs | 192 +++++++++++++++++++-------------------- 1 file changed, 95 insertions(+), 97 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 0bdf8bb6ab..3b105a198c 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -26,15 +26,13 @@ use url::Url; use vbs::version::StaticVersionType; #[cfg(feature = "benchmarking")] -use hotshot::types::EventType; -// #[cfg(feature = "benchmarking")] -// use hotshot::{traits::BlockPayload, types::EventType}; -// #[cfg(feature = "benchmarking")] -// use hotshot_orchestrator::{client::BenchResults, config::NetworkConfig}; -// #[cfg(feature = "benchmarking")] -// use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; -// #[cfg(feature = "benchmarking")] -// use std::time::Instant; +use hotshot::{traits::BlockPayload, types::EventType}; +#[cfg(feature = "benchmarking")] +use hotshot_orchestrator::client::BenchResults; +#[cfg(feature = "benchmarking")] +use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; +#[cfg(feature = "benchmarking")] +use std::time::Instant; use crate::{ network, persistence::SequencerPersistence, state_signature::StateSigner, @@ -267,21 +267,21 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp None => { - break; - // panic!("Error! Event stream completed before consensus ended."); + panic!("Error! Event stream completed before consensus ended."); } Some(Event { event, ..
}) => { match event { @@ -295,83 +293,83 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp } => { - // if let Some(leaf_info) = leaf_chain.first() { - // let leaf = &leaf_info.leaf; - // tracing::info!( - // "Decide event for leaf: {}", - // *leaf.view_number() - // ); - // num_successful_commits += leaf_chain.len(); - - // // only count in the info after warm up - // if num_successful_commits >= start_rounds { - // if !has_started { - // start = Instant::now(); - // has_started = true; - // } - - // // iterate all the decided transactions - // if let Some(block_payload) = &leaf.block_payload() { - // for tx in block_payload - // .transactions(leaf.block_header().metadata()) - // { - // let payload_length = tx.into_payload().len(); - // // Transaction = NamespaceId(u64) + payload(Vec<u8>) - // let tx_sz = payload_length * std::mem::size_of::<u8>() // size of payload - // + std::mem::size_of::<u64>() // size of the namespace - // + std::mem::size_of::<Transaction>(); // size of the struct wrapper - // total_throughput += tx_sz; - // } - // } - // } - // } - - // if num_successful_commits >= start_rounds { - // if let Some(size) = block_size { - // total_transactions_committed += size; - // } - // } - - // if num_successful_commits >= end_rounds { - // let total_time_elapsed = start.elapsed(); // in seconds - // let consensus_lock = - // self.handle.read().await.hotshot.consensus(); - // let consensus = consensus_lock.read().await; - // let total_num_views = - // usize::try_from(consensus.locked_view().u64()).unwrap(); - // let failed_num_views = total_num_views - num_successful_commits; - // let bench_results = if total_transactions_committed != 0 { - // let throughput_bytes_per_sec = (total_throughput as u64) - // / std::cmp::max(total_time_elapsed.as_secs(), 1u64); - // BenchResults { - // avg_latency_in_sec: 0, // latency will be reported in another struct - // num_latency: 1, - // minimum_latency_in_sec: 0, - // maximum_latency_in_sec: 0, - // throughput_bytes_per_sec, - // total_transactions_committed, - // transaction_size_in_bytes: (total_throughput as u64) - // / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size - // total_time_elapsed_in_sec: total_time_elapsed.as_secs(), - // total_num_views, - // failed_num_views, - // } - // } else { - // BenchResults::default() - // }; - // println!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); - // if let Some(orchestrator_client) = &self.wait_for_orchestrator { - // orchestrator_client.post_bench_results(bench_results).await; - // } - // break; - // } - - // if leaf_chain.len() > 1 { - // tracing::warn!( - // "Leaf chain is greater than 1 with len {}", - // leaf_chain.len() - // ); - // } + if let Some(leaf_info) = leaf_chain.first() { + let leaf = &leaf_info.leaf; + tracing::info!( + "Decide event for leaf: {}", + *leaf.view_number() + ); + num_successful_commits += leaf_chain.len(); + + // only count in the info after warm up + if num_successful_commits >= start_rounds { + if !has_started { + start = Instant::now(); + has_started = true; + } + + // iterate all the decided transactions + if let Some(block_payload) = &leaf.block_payload() { + for tx in block_payload + .transactions(leaf.block_header().metadata()) + { + let payload_length = tx.into_payload().len(); + // Transaction = NamespaceId(u64) + payload(Vec<u8>) + let tx_sz = payload_length * std::mem::size_of::<u8>() // size of payload + + std::mem::size_of::<u64>() // size of the namespace + 
std::mem::size_of::<Transaction>(); // size of the struct wrapper + total_throughput += tx_sz; + } + } + } + } + + if num_successful_commits >= start_rounds { + if let Some(size) = block_size { + total_transactions_committed += size; + } + } + + if num_successful_commits >= end_rounds { + let total_time_elapsed = start.elapsed(); // in seconds + let consensus_lock = + self.handle.read().await.hotshot.consensus(); + let consensus = consensus_lock.read().await; + let total_num_views = + usize::try_from(consensus.locked_view().u64()).unwrap(); + let failed_num_views = total_num_views - num_successful_commits; + let bench_results = if total_transactions_committed != 0 { + let throughput_bytes_per_sec = (total_throughput as u64) + / std::cmp::max(total_time_elapsed.as_secs(), 1u64); + BenchResults { + avg_latency_in_sec: 0, // latency will be reported in another struct + num_latency: 1, + minimum_latency_in_sec: 0, + maximum_latency_in_sec: 0, + throughput_bytes_per_sec, + total_transactions_committed, + transaction_size_in_bytes: (total_throughput as u64) + / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size + total_time_elapsed_in_sec: total_time_elapsed.as_secs(), + total_num_views, + failed_num_views, + } + } else { + BenchResults::default() + }; + tracing::info!("[{node_index}]: {total_transactions_committed} committed from round {start_rounds} to {end_rounds} in {total_time_elapsed:?}, total number of views = {total_num_views}."); + if let Some(orchestrator_client) = &self.wait_for_orchestrator { + orchestrator_client.post_bench_results(bench_results).await; + } + break; + } + + if leaf_chain.len() > 1 { + tracing::warn!( + "Leaf chain is greater than 1 with len {}", + leaf_chain.len() + ); + } } _ => {} // mostly DA proposal } From c4b385c79510bb2610fc935a7995182c672c06e7 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 26 Jun 2024 22:49:08 -0700 Subject: [PATCH 14/65] parameterize start_round and end_round in submit_tx --- sequencer/src/bin/submit-transactions.rs | 10 ++++++++-- sequencer/src/context.rs | 16 ++++++++-------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 34cbdf9d81..794732dcc5 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -190,6 +190,10 @@ async fn main() { #[cfg(feature = "benchmarking")] let mut num_successful_commits = 0; #[cfg(feature = "benchmarking")] + let start_round = 20; + #[cfg(feature = "benchmarking")] + let end_round = 120; + #[cfg(feature = "benchmarking")] let mut benchmark_total_latency = Duration::default(); #[cfg(feature = "benchmarking")] let mut benchmark_minimum_latency = Duration::default(); @@ -234,7 +238,9 @@ async fn main() { tracing::info!("average latency: {:?}", total_latency / total_transactions); #[cfg(feature = "benchmarking")] { - if !benchmark_finish && (20..=120).contains(&num_successful_commits) { + if !benchmark_finish + && (start_round..=end_round).contains(&num_successful_commits) + { benchmark_minimum_latency = if total_transactions == 0 { latency } else { @@ -254,7 +260,7 @@ async fn main() { } #[cfg(feature = "benchmarking")] - if !benchmark_finish && num_successful_commits > 120 { + if !benchmark_finish && num_successful_commits > end_round { let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; // Open the CSV file in append mode let results_csv_file = OpenOptions::new() diff --git
a/sequencer/src/context.rs b/sequencer/src/context.rs index 3b105a198c..5bd64c5e00 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -267,10 +267,10 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp - if num_successful_commits >= start_rounds { + if num_successful_commits >= start_round { if !has_started { start = Instant::now(); has_started = true; @@ -324,13 +324,13 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp - if num_successful_commits >= start_rounds { + if num_successful_commits >= start_round { if let Some(size) = block_size { total_transactions_committed += size; } } - if num_successful_commits >= end_rounds { + if num_successful_commits >= end_round { let total_time_elapsed = start.elapsed(); // in seconds let consensus_lock = self.handle.read().await.hotshot.consensus(); @@ -357,7 +357,7 @@ impl Date: Thu, 27 Jun 2024 13:08:17 -0700 Subject: [PATCH 15/65] no benchmark without orchestrator --- sequencer/src/context.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 5bd64c5e00..e0cc698988 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -253,11 +253,17 @@ impl Date: Tue, 2 Jul 2024 14:06:09 -0700 Subject: [PATCH 16/65] fix merge --- sequencer/src/context.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 10899f91e0..e816257f36 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -348,6 +348,7 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp let throughput_bytes_per_sec = (total_throughput as u64) / std::cmp::max(total_time_elapsed.as_secs(), 1u64); BenchResults { + partial_results: "Unset".to_string(), // latency will be reported in another struct avg_latency_in_sec: 0, num_latency: 1, From 885850e942d10a269558e113d2c789df31173a57 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 10 Jul 2024 10:05:56 -0700 Subject: [PATCH 17/65] merge throughput calc to submit-transactions.rs --- sequencer/src/bin/submit-transactions.rs | 25 ++++++++++++++++++++++++ sequencer/src/context.rs | 11 ++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 794732dcc5..fa3c366ac0 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -203,6 +203,12 @@ async fn main() { let mut benchmark_total_transactions = 0; #[cfg(feature = "benchmarking")] let mut benchmark_finish = false; + #[cfg(feature = "benchmarking")] + let mut total_throughput = 0; + #[cfg(feature = "benchmarking")] + let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true + #[cfg(feature = "benchmarking")] + let mut has_started: bool = false; while let Some(block) = blocks.next().await { let block: BlockQueryData<SeqTypes> = match block { @@ -217,6 +223,10 @@ async fn main() { #[cfg(feature = "benchmarking")] { num_successful_commits += 1; + if !has_started && num_successful_commits >= start_round { + has_started = true; + start = Instant::now(); + } } // Get all transactions which were submitted before this block.
@@ -254,6 +264,12 @@ async fn main() { benchmark_total_latency += latency; benchmark_total_transactions += 1; + // Transaction = NamespaceId(u64) + payload(Vec<u8>) + let payload_length = tx.into_payload().len(); + let tx_sz = payload_length * std::mem::size_of::<u8>() // size of payload + + std::mem::size_of::<u64>() // size of the namespace + + std::mem::size_of::<Transaction>(); // size of the struct wrapper + total_throughput += tx_sz; } } } @@ -262,6 +278,9 @@ async fn main() { #[cfg(feature = "benchmarking")] if !benchmark_finish && num_successful_commits > end_round { let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions; + let total_time_elapsed = start.elapsed(); // in seconds + let throughput_bytes_per_sec = + (total_throughput as u64) / std::cmp::max(total_time_elapsed.as_secs(), 1u64); // Open the CSV file in append mode let results_csv_file = OpenOptions::new() .create(true) @@ -275,18 +294,24 @@ async fn main() { "public_pool_avg_latency_in_sec", "minimum_latency_in_sec", "maximum_latency_in_sec", + "throughput_bytes_per_sec", + "total_time_elapsed", ]); } else { let _ = wtr.write_record([ "private_pool_avg_latency_in_sec", "minimum_latency_in_sec", "maximum_latency_in_sec", + "throughput_bytes_per_sec", + "total_time_elapsed", ]); } let _ = wtr.write_record(&[ benchmark_average_latency.as_secs().to_string(), benchmark_minimum_latency.as_secs().to_string(), benchmark_maximum_latency.as_secs().to_string(), + throughput_bytes_per_sec.to_string(), + total_time_elapsed.as_secs().to_string(), ]); let _ = wtr.flush(); println!( diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index e816257f36..65995c7da0 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -14,7 +14,7 @@ use hotshot::{ Memberships, SystemContext, }; use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider; -use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::{client::OrchestratorClient, config::NetworkConfig}; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, @@ -257,6 +257,8 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp pub async fn start_consensus(&self) { #[cfg(feature = "benchmarking")] let mut has_orchestrator_client = false; + #[cfg(feature = "benchmarking")] + let mut network_config: NetworkConfig<PubKey> = Default::default(); if let Some(orchestrator_client) = &self.wait_for_orchestrator { tracing::warn!("waiting for orchestrated start"); orchestrator_client @@ -265,6 +267,7 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp #[cfg(feature = "benchmarking")] { has_orchestrator_client = true; + network_config = orchestrator_client.get_config_after_collection().await; } } else { tracing::error!("Cannot get info from orchestrator client"); @@ -276,7 +279,7 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp if has_orchestrator_client { // start_round is the number of rounds for warm up, which will not be counted in for benchmarking phase let start_round: usize = 20; - let end_round: usize = 120; + let end_round: usize = start_round + network_config.rounds; let mut event_stream = self.event_stream().await; let mut num_successful_commits = 0; let mut total_transactions_committed = 0; @@ -287,7 +290,9 @@ impl<N: ConnectedNetwork<PubKey>, P: SequencerPersistence, Ver: StaticVersionTyp loop { match event_stream.next().await { None => { - panic!("Error! Event stream completed before consensus ended."); + tracing::error!( + "Error in Benchmarking! Event stream completed before consensus ended."
+ ); } Some(Event { event, .. }) => { match event { From 3d41c0e5f8ba3e5c227a5af53f68b84171679acb Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Wed, 10 Jul 2024 22:43:44 +0500 Subject: [PATCH 18/65] versioning (#1637) * Replace public header fields with getters This will make it easier to keep a somewhat consistent Rust API even as we add or change various fields in the underlying representation of the header. * WIP upgradable types * v2 * merge origin/main * cargo sort * block impls * remove unused imports * export types * group imports * v3 header * lint * remove v2 header definition * move crates to workspace * remove types from sequencer crate * group imports * serialization * fix test * fix deserialization * cargo sort * lint * fix payload::empty() * fix: visit_map deserialization * use serde_json map * cargo sort * fix ResolvableChainConfigOrVersion deserialization * move out traits and errors * fix imports * move errors into impls * remove ResolvableChainConfig and generic parameters from EitherOrVersion * remove committable trait implementation for v1 Header * add comments for VersionedHeader * remove StructFields enum * add reference tests for v1, v2, and v3 header * move reference_tests to types crate * fix: v2 and v3 reference test * fix comments * add comments for Header::genesis() * add comments for Header::create panic * add comment for global error issue * add comment for global error issue * build genesis header for the current sequencer version * version sub-directories for data dir * move messages.json and messages.bin to data dir * update data readme * cargo update * include sub dirs binaries in gitignore --------- Co-authored-by: Jeb Bearer --- .cargo/config | 2 +- .gitignore | 2 +- Cargo.lock | 286 ++-- Cargo.toml | 10 +- builder/Cargo.toml | 1 + builder/src/bin/permissioned-builder.rs | 39 +- builder/src/bin/permissionless-builder.rs | 10 +- builder/src/lib.rs | 183 +-- builder/src/non_permissioned.rs | 73 +- builder/src/permissioned.rs | 159 ++- contracts/rust/adapter/src/jellyfish.rs | 9 +- contracts/rust/diff-test/src/main.rs | 18 +- contracts/rust/gen-vk-contract/src/main.rs | 5 +- data/README.md | 4 +- data/{ => v1}/chain_config.bin | Bin data/{ => v1}/chain_config.json | 0 data/{ => v1}/fee_info.bin | Bin data/{ => v1}/fee_info.json | 0 data/{ => v1}/header.bin | Bin data/{ => v1}/header.json | 0 data/{ => v1}/l1_block.bin | Bin data/{ => v1}/l1_block.json | 0 data/{ => v1}/ns_table.bin | Bin data/{ => v1}/ns_table.json | 0 data/{ => v1}/payload.bin | Bin data/{ => v1}/payload.json | 0 data/{ => v1}/transaction.bin | Bin data/{ => v1}/transaction.json | 0 data/{ => v1}/tx_index.bin | Bin data/{ => v1}/tx_index.json | 0 data/v2/header.bin | Bin 0 -> 828 bytes data/v2/header.json | 45 + data/v3/header.bin | Bin 0 -> 828 bytes data/v3/header.json | 45 + hotshot-state-prover/src/bin/state-prover.rs | 11 +- hotshot-state-prover/src/circuit.rs | 8 +- hotshot-state-prover/src/mock_ledger.rs | 33 +- hotshot-state-prover/src/service.rs | 39 +- hotshot-state-prover/src/snark.rs | 16 +- sequencer/Cargo.toml | 7 +- sequencer/src/api.rs | 117 +- sequencer/src/api/data_source.rs | 29 +- sequencer/src/api/endpoints.rs | 24 +- sequencer/src/api/fs.rs | 15 +- sequencer/src/api/options.rs | 32 +- sequencer/src/api/sql.rs | 33 +- sequencer/src/api/update.rs | 6 +- sequencer/src/bin/cdn-broker.rs | 14 +- sequencer/src/bin/cdn-marshal.rs | 3 +- sequencer/src/bin/cdn-whitelist.rs | 2 +- sequencer/src/bin/commitment-task.rs | 17 +- 
sequencer/src/bin/count-transactions.rs | 7 +- sequencer/src/bin/deploy.rs | 3 +- sequencer/src/bin/dev-cdn.rs | 12 +- sequencer/src/bin/espresso-bridge.rs | 17 +- sequencer/src/bin/espresso-dev-node.rs | 8 +- sequencer/src/bin/keygen.rs | 12 +- sequencer/src/bin/nasty-client.rs | 92 +- sequencer/src/bin/orchestrator.rs | 14 +- sequencer/src/bin/pub-key.rs | 5 +- sequencer/src/bin/submit-transactions.rs | 15 +- sequencer/src/bin/verify-headers.rs | 27 +- sequencer/src/block.rs | 8 - sequencer/src/block/full_payload.rs | 9 - sequencer/src/block/namespace_payload.rs | 12 - sequencer/src/catchup.rs | 461 +------ sequencer/src/context.rs | 13 +- sequencer/src/genesis.rs | 97 +- sequencer/src/hotshot_commitment.rs | 17 +- sequencer/src/lib.rs | 300 +--- sequencer/src/main.rs | 8 +- sequencer/src/message_compat_tests.rs | 12 +- sequencer/src/network/mod.rs | 2 + sequencer/src/options.rs | 24 +- sequencer/src/persistence.rs | 280 +--- sequencer/src/persistence/fs.rs | 33 +- sequencer/src/persistence/no_storage.rs | 11 +- sequencer/src/persistence/sql.rs | 15 +- sequencer/src/state.rs | 1228 +---------------- sequencer/src/state_signature.rs | 22 +- sequencer/src/state_signature/relay_server.rs | 12 +- types/Cargo.toml | 59 + types/README.md | 120 ++ {sequencer => types}/src/eth_signature_key.rs | 14 +- types/src/lib.rs | 7 + {sequencer => types}/src/reference_tests.rs | 93 +- types/src/v0/error.rs | 31 + types/src/v0/header.rs | 43 + types/src/v0/impls/block/full_payload.rs | 3 + .../v0/impls}/block/full_payload/ns_proof.rs | 18 +- .../v0/impls}/block/full_payload/ns_table.rs | 164 +-- .../block/full_payload/ns_table/test.rs | 21 +- .../v0/impls}/block/full_payload/payload.rs | 61 +- types/src/v0/impls/block/mod.rs | 6 + types/src/v0/impls/block/namespace_payload.rs | 5 + .../v0/impls}/block/namespace_payload/iter.rs | 22 +- .../block/namespace_payload/ns_payload.rs | 28 +- .../namespace_payload/ns_payload/test.rs | 17 +- .../namespace_payload/ns_payload_range.rs | 5 +- .../block/namespace_payload/tx_proof.rs | 44 +- .../impls}/block/namespace_payload/types.rs | 94 +- .../src => types/src/v0/impls}/block/test.rs | 17 +- .../src/v0/impls}/block/uint_bytes.rs | 6 +- .../src/v0/impls}/chain_config.rs | 63 +- types/src/v0/impls/fee_info.rs | 377 +++++ .../src => types/src/v0/impls}/header.rs | 808 ++++++++--- types/src/v0/impls/instance_state.rs | 162 +++ .../l1_client.rs => types/src/v0/impls/l1.rs | 90 +- types/src/v0/impls/mod.rs | 15 + types/src/v0/impls/state.rs | 918 ++++++++++++ .../src => types/src/v0/impls}/transaction.rs | 53 +- types/src/v0/mod.rs | 154 +++ types/src/v0/traits.rs | 610 ++++++++ types/src/v0/utils.rs | 237 ++++ types/src/v0/v0_1/block.rs | 307 +++++ types/src/v0/v0_1/chain_config.rs | 48 + types/src/v0/v0_1/fee_info.rs | 90 ++ types/src/v0/v0_1/header.rs | 109 ++ types/src/v0/v0_1/instance_state.rs | 41 + types/src/v0/v0_1/l1.rs | 44 + types/src/v0/v0_1/mod.rs | 23 + types/src/v0/v0_1/signature.rs | 1 + types/src/v0/v0_1/state.rs | 40 + types/src/v0/v0_1/transaction.rs | 41 + types/src/v0/v0_2/mod.rs | 17 + types/src/v0/v0_3/header.rs | 60 + types/src/v0/v0_3/mod.rs | 21 + utils/src/deployer.rs | 3 +- utils/src/lib.rs | 19 +- utils/src/test_utils.rs | 6 +- 130 files changed, 5510 insertions(+), 3798 deletions(-) rename data/{ => v1}/chain_config.bin (100%) rename data/{ => v1}/chain_config.json (100%) rename data/{ => v1}/fee_info.bin (100%) rename data/{ => v1}/fee_info.json (100%) rename data/{ => v1}/header.bin (100%) rename data/{ => v1}/header.json (100%) rename data/{ 
=> v1}/l1_block.bin (100%) rename data/{ => v1}/l1_block.json (100%) rename data/{ => v1}/ns_table.bin (100%) rename data/{ => v1}/ns_table.json (100%) rename data/{ => v1}/payload.bin (100%) rename data/{ => v1}/payload.json (100%) rename data/{ => v1}/transaction.bin (100%) rename data/{ => v1}/transaction.json (100%) rename data/{ => v1}/tx_index.bin (100%) rename data/{ => v1}/tx_index.json (100%) create mode 100644 data/v2/header.bin create mode 100644 data/v2/header.json create mode 100644 data/v3/header.bin create mode 100644 data/v3/header.json delete mode 100644 sequencer/src/block.rs delete mode 100644 sequencer/src/block/full_payload.rs delete mode 100644 sequencer/src/block/namespace_payload.rs create mode 100644 types/Cargo.toml create mode 100644 types/README.md rename {sequencer => types}/src/eth_signature_key.rs (99%) create mode 100644 types/src/lib.rs rename {sequencer => types}/src/reference_tests.rs (83%) create mode 100644 types/src/v0/error.rs create mode 100644 types/src/v0/header.rs create mode 100644 types/src/v0/impls/block/full_payload.rs rename {sequencer/src => types/src/v0/impls}/block/full_payload/ns_proof.rs (91%) rename {sequencer/src => types/src/v0/impls}/block/full_payload/ns_table.rs (63%) rename {sequencer/src => types/src/v0/impls}/block/full_payload/ns_table/test.rs (97%) rename {sequencer/src => types/src/v0/impls}/block/full_payload/payload.rs (84%) create mode 100644 types/src/v0/impls/block/mod.rs create mode 100644 types/src/v0/impls/block/namespace_payload.rs rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/iter.rs (75%) rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/ns_payload.rs (82%) rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/ns_payload/test.rs (97%) rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/ns_payload_range.rs (78%) rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/tx_proof.rs (86%) rename {sequencer/src => types/src/v0/impls}/block/namespace_payload/types.rs (79%) rename {sequencer/src => types/src/v0/impls}/block/test.rs (97%) rename {sequencer/src => types/src/v0/impls}/block/uint_bytes.rs (99%) rename {sequencer/src => types/src/v0/impls}/chain_config.rs (79%) create mode 100644 types/src/v0/impls/fee_info.rs rename {sequencer/src => types/src/v0/impls}/header.rs (56%) create mode 100644 types/src/v0/impls/instance_state.rs rename sequencer/src/l1_client.rs => types/src/v0/impls/l1.rs (85%) create mode 100644 types/src/v0/impls/mod.rs create mode 100644 types/src/v0/impls/state.rs rename {sequencer/src => types/src/v0/impls}/transaction.rs (59%) create mode 100644 types/src/v0/mod.rs create mode 100644 types/src/v0/traits.rs create mode 100644 types/src/v0/utils.rs create mode 100644 types/src/v0/v0_1/block.rs create mode 100644 types/src/v0/v0_1/chain_config.rs create mode 100644 types/src/v0/v0_1/fee_info.rs create mode 100644 types/src/v0/v0_1/header.rs create mode 100644 types/src/v0/v0_1/instance_state.rs create mode 100644 types/src/v0/v0_1/l1.rs create mode 100644 types/src/v0/v0_1/mod.rs create mode 100644 types/src/v0/v0_1/signature.rs create mode 100644 types/src/v0/v0_1/state.rs create mode 100644 types/src/v0/v0_1/transaction.rs create mode 100644 types/src/v0/v0_2/mod.rs create mode 100644 types/src/v0/v0_3/header.rs create mode 100644 types/src/v0/v0_3/mod.rs diff --git a/.cargo/config b/.cargo/config index c91c3f38b7..656e08b011 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,2 +1,2 @@ [net] 
-git-fetch-with-cli = true +git-fetch-with-cli = true \ No newline at end of file diff --git a/.gitignore b/.gitignore index 4503c6c731..1d100bae8f 100644 --- a/.gitignore +++ b/.gitignore @@ -36,7 +36,7 @@ __pycache__/ *.py[cod] .hypothesis/ wake-coverage.cov -!data/*.bin +!data/**/*.bin # generated by failing serialization tests data/*-actual.json diff --git a/Cargo.lock b/Cargo.lock index c7b36343c2..07b411c6bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -559,7 +559,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", "synstructure 0.13.1", ] @@ -571,7 +571,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -912,7 +912,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -942,7 +942,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -1045,7 +1045,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -1062,7 +1062,7 @@ checksum = "edf3ee19dbc0a46d740f6f0926bde8c50f02bdbc7b536842da28f6ac56513a8b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -1078,7 +1078,7 @@ dependencies = [ "futures-util", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "itoa", "matchit", "memchr", @@ -1328,6 +1328,7 @@ dependencies = [ "committable", "dotenvy", "es-version", + "espresso-types", "ethers", "futures", "hotshot", @@ -1346,7 +1347,7 @@ dependencies = [ "rand 0.8.5", "sequencer", "serde", - "snafu 0.8.3", + "snafu 0.8.4", "surf", "surf-disco", "tagged-base64", @@ -1500,9 +1501,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.106" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "066fce287b1d4eafef758e89e09d724a24808a9196fe9756b8ca90e86d0719a2" +checksum = "eaff6f8ce506b9773fa786672d63fc7a191ffea1be33f72bbd4aeacefca9ffc8" dependencies = [ "jobserver", "libc", @@ -1585,7 +1586,7 @@ dependencies = [ "rcgen 0.13.1", "redis", "rkyv", - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", "sqlx", "thiserror", @@ -1663,9 +1664,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" dependencies = [ "clap_builder", "clap_derive", @@ -1673,9 +1674,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" dependencies = [ "anstream", "anstyle", @@ -1692,7 +1693,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2292,7 +2293,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.69", + "syn 2.0.70", ] [[package]] @@ -2334,15 +2335,15 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", "synstructure 0.13.1", ] [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -2350,27 +2351,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2480,7 +2481,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2501,7 +2502,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2511,7 +2512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" dependencies = [ "derive_builder_core", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2524,7 +2525,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2661,7 +2662,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2838,7 +2839,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -2907,6 +2908,60 @@ name = "espresso-systems-common" version = "0.4.1" source = "git+https://github.com/espressosystems/espresso-systems-common?tag=0.4.1#2e889e878866c2a5cce1daaab947f7c93d5811ae" +[[package]] +name = "espresso-types" +version = "0.1.0" +dependencies = [ + "anyhow", + "ark-serialize", + "async-compatibility-layer", + "async-once-cell", + "async-std", + "async-trait", + "base64-bytes", + "bincode", + "blake3", + "bytesize", + "clap", + "cld", + "committable", + "contract-bindings", + "derivative", + "derive_more", + "es-version", + "ethers", + "fluent-asserter", + "futures", + "hotshot", + "hotshot-orchestrator", + "hotshot-query-service", + "hotshot-testing", + "hotshot-types", + "itertools 0.12.1", + "jf-merkle-tree", + "jf-utils", + "jf-vid", + "num-traits", + "paste", + "pretty_assertions", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_distr", + "sequencer-utils", + "serde", + "serde_json", + "sha2 0.10.8", + "snafu 0.8.4", + "static_assertions", + "tagged-base64", + "thiserror", + "time 0.3.36", + "tracing", + "trait-set", + "url", + "vbs", +] + [[package]] name = "etcetera" version = "0.8.0" @@ -3054,7 +3109,7 @@ dependencies = [ "reqwest 0.11.27", 
"serde", "serde_json", - "syn 2.0.69", + "syn 2.0.70", "toml", "walkdir", ] @@ -3072,7 +3127,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -3098,7 +3153,7 @@ dependencies = [ "serde", "serde_json", "strum", - "syn 2.0.69", + "syn 2.0.70", "tempfile", "thiserror", "tiny-keccak", @@ -3545,7 +3600,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -3565,7 +3620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", ] @@ -4060,7 +4115,7 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "snafu 0.8.3", + "snafu 0.8.4", "surf-disco", "time 0.3.36", "tokio", @@ -4080,7 +4135,7 @@ dependencies = [ "futures", "hotshot-types", "serde", - "snafu 0.8.3", + "snafu 0.8.4", "tagged-base64", "tide-disco", "toml", @@ -4108,7 +4163,7 @@ dependencies = [ "hotshot-types", "serde", "sha2 0.10.8", - "snafu 0.8.3", + "snafu 0.8.4", "surf-disco", "tagged-base64", "tide-disco", @@ -4157,7 +4212,7 @@ dependencies = [ "futures", "hotshot-types", "serde", - "snafu 0.8.3", + "snafu 0.8.4", "tagged-base64", "tide-disco", "toml", @@ -4189,7 +4244,7 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "snafu 0.8.3", + "snafu 0.8.4", "time 0.3.36", "tokio", "tracing", @@ -4205,7 +4260,7 @@ dependencies = [ "derive_builder", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -4276,7 +4331,7 @@ dependencies = [ "refinery", "serde", "serde_json", - "snafu 0.8.3", + "snafu 0.8.4", "spin_sleep", "surf-disco", "tagged-base64", @@ -4355,7 +4410,7 @@ dependencies = [ "reqwest 0.12.5", "sequencer-utils", "serde", - "snafu 0.8.3", + "snafu 0.8.4", "surf-disco", "tagged-base64", "tide-disco", @@ -4407,7 +4462,7 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "snafu 0.8.3", + "snafu 0.8.4", "surf-disco", "tagged-base64", "time 0.3.36", @@ -4450,7 +4505,7 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "snafu 0.8.3", + "snafu 0.8.4", "tagged-base64", "tide-disco", "tokio", @@ -4502,7 +4557,7 @@ dependencies = [ "rand_chacha 0.3.1", "serde", "sha2 0.10.8", - "snafu 0.8.3", + "snafu 0.8.4", "tagged-base64", "time 0.3.36", "tokio", @@ -4625,9 +4680,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes 1.6.0", "futures-channel", @@ -4649,9 +4704,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes 1.6.0", "futures-channel", @@ -4675,7 +4730,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -4689,9 +4744,9 @@ checksum = 
"5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -4704,7 +4759,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.29", + "hyper 0.14.30", "pin-project-lite 0.2.14", "tokio", "tokio-io-timeout", @@ -4718,7 +4773,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes 1.6.0", "http-body-util", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", "native-tls", "tokio", @@ -4737,7 +4792,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.4.0", + "hyper 1.4.1", "pin-project-lite 0.2.14", "socket2 0.5.7", "tokio", @@ -4836,7 +4891,7 @@ dependencies = [ "bytes 1.6.0", "futures", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "log", "rand 0.8.5", "tokio", @@ -5765,7 +5820,7 @@ dependencies = [ "serde", "serde_bytes", "serde_json", - "snafu 0.8.3", + "snafu 0.8.4", "tokio", "tokio-stream", "tracing", @@ -5864,7 +5919,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.10", + "rustls 0.23.11", "socket2 0.5.7", "thiserror", "tokio", @@ -5976,7 +6031,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -6009,7 +6064,7 @@ dependencies = [ "libp2p-identity", "rcgen 0.11.3", "ring 0.17.8", - "rustls 0.23.10", + "rustls 0.23.11", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -6287,7 +6342,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -6730,7 +6785,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -6820,7 +6875,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7040,7 +7095,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7104,7 +7159,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7142,7 +7197,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7362,7 +7417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7461,7 +7516,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7500,7 +7555,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -7593,7 +7648,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.10", + "rustls 0.23.11", "thiserror", "tokio", "tracing", @@ -7609,7 +7664,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash", - "rustls 0.23.10", + "rustls 0.23.11", "slab", "thiserror", "tinyvec", @@ -7873,7 +7928,7 @@ dependencies = [ "quote", "refinery-core", "regex", - "syn 2.0.69", + "syn 
2.0.70", ] [[package]] @@ -7943,7 +7998,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -7985,7 +8040,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-rustls 0.27.2", "hyper-tls", "hyper-util", @@ -8087,7 +8142,7 @@ dependencies = [ "rkyv_derive", "seahash", "tinyvec", - "uuid 1.9.1", + "uuid 1.10.0", ] [[package]] @@ -8296,9 +8351,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" dependencies = [ "log", "once_cell", @@ -8584,6 +8639,7 @@ dependencies = [ "es-version", "escargot", "espresso-macros", + "espresso-types", "ethers", "ethers-contract-derive", "fluent-asserter", @@ -8621,7 +8677,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "snafu 0.8.3", + "snafu 0.8.4", "static_assertions", "strum", "surf-disco", @@ -8703,7 +8759,7 @@ checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -8785,7 +8841,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -9026,11 +9082,11 @@ dependencies = [ [[package]] name = "snafu" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418b8136fec49956eba89be7da2847ec1909df92a9ae4178b5ff0ff092c8d95e" +checksum = "2b835cb902660db3415a672d862905e791e54d306c6e8189168c7f3d9ae1c79d" dependencies = [ - "snafu-derive 0.8.3", + "snafu-derive 0.8.4", ] [[package]] @@ -9047,14 +9103,14 @@ dependencies = [ [[package]] name = "snafu-derive" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a4812a669da00d17d8266a0439eddcacbc88b17f732f927e52eeb9d196f7fb5" +checksum = "38d1e02fca405f6280643174a50c942219f0bbf4dbf7d480f1dd864d6f211ae5" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -9487,7 +9543,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -9649,9 +9705,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.69" +version = "2.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201fcda3845c23e8212cd466bfebf0bd20694490fc0356ae8e428e0824a915a6" +checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16" dependencies = [ "proc-macro2", "quote", @@ -9690,7 +9746,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -9725,7 +9781,7 @@ dependencies = [ "base64 0.22.1", "crc-any", "serde", - "snafu 0.8.3", + "snafu 0.8.4", "tagged-base64-macros", "wasm-bindgen", ] @@ -9786,7 +9842,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -9867,7 +9923,7 @@ dependencies = [ "shellexpand", "signal-hook", "signal-hook-async-std", - "snafu 0.8.3", + "snafu 0.8.4", "strum", "strum_macros", "tagged-base64", @@ -10034,7 +10090,7 @@ checksum = 
"5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -10100,7 +10156,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", "tokio", ] @@ -10153,7 +10209,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit 0.22.15", ] [[package]] @@ -10178,9 +10234,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.14" +version = "0.22.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" dependencies = [ "indexmap 2.2.6", "serde", @@ -10203,7 +10259,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-timeout", "percent-encoding", "pin-project", @@ -10230,7 +10286,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-timeout", "percent-encoding", "pin-project", @@ -10295,7 +10351,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -10410,7 +10466,7 @@ checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -10608,7 +10664,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.10", + "rustls 0.23.11", "rustls-pki-types", "url", "webpki-roots 0.26.3", @@ -10656,9 +10712,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" [[package]] name = "valuable" @@ -10732,9 +10788,9 @@ dependencies = [ [[package]] name = "vergen" -version = "8.3.1" +version = "8.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27d6bdd219887a9eadd19e1c34f32e47fa332301184935c6d9bca26f3cca525" +checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", "cfg-if", @@ -10790,7 +10846,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "log", "mime", "mime_guess", @@ -10847,7 +10903,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", "wasm-bindgen-shared", ] @@ -10881,7 +10937,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11344,7 +11400,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] @@ -11364,7 +11420,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.69", + "syn 2.0.70", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d73803709e..bc28a7ec67 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -15,6 +15,7 @@ members = [ "contracts/rust/gen-vk-contract", "hotshot-state-prover", "sequencer", + "types", "utils", ] @@ -106,7 +107,6 @@ surf-disco = "0.9" tagged-base64 = "0.4" tide-disco = "0.9" thiserror = "1.0.61" -time = "0.3" tracing = "0.1" bytesize = "1.3" itertools = "0.12" @@ -114,6 +114,7 @@ rand_chacha = "0.3" rand_distr = "0.4" reqwest = "0.12" serde = { version = "1.0.195", features = ["derive"] } +serde_json = "^1.0.113" toml = "0.8" url = "2.3" vbs = "0.1" @@ -123,3 +124,10 @@ zeroize = "1.7" committable = "0.2" portpicker = "0.1.1" pretty_assertions = "1.4" +static_assertions = "1.1" +num-traits = "0.2" +derivative = "2.2" +paste = "1.0" +rand = "0.8.5" +time = "0.3" +trait-set = "0.3.0" diff --git a/builder/Cargo.toml b/builder/Cargo.toml index 899870c474..fe4afaebdc 100644 --- a/builder/Cargo.toml +++ b/builder/Cargo.toml @@ -21,6 +21,7 @@ cld = { workspace = true } committable = { workspace = true } dotenvy = { workspace = true } es-version = { workspace = true } +espresso-types = { path = "../types", features = ["testing"] } ethers = { workspace = true } futures = { workspace = true } hotshot = { workspace = true } diff --git a/builder/src/bin/permissioned-builder.rs b/builder/src/bin/permissioned-builder.rs index 53833f537f..a83411ed38 100644 --- a/builder/src/bin/permissioned-builder.rs +++ b/builder/src/bin/permissioned-builder.rs @@ -1,23 +1,24 @@ +use std::{ + collections::HashMap, net::ToSocketAddrs, num::NonZeroUsize, path::PathBuf, time::Duration, +}; + use anyhow::{bail, Context}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use builder::permissioned::init_node; use clap::Parser; -use cld::ClDuration; use es_version::SEQUENCER_VERSION; +use espresso_types::eth_signature_key::EthKeyPair; use ethers::types::Address; -use hotshot_types::data::ViewNumber; -use hotshot_types::light_client::StateSignKey; -use hotshot_types::signature_key::BLSPrivKey; -use hotshot_types::traits::metrics::NoMetrics; -use hotshot_types::traits::node_implementation::ConsensusTime; +use hotshot_types::{ + data::ViewNumber, + light_client::StateSignKey, + signature_key::BLSPrivKey, + traits::{metrics::NoMetrics, node_implementation::ConsensusTime}, +}; use libp2p::Multiaddr; -use sequencer::persistence::no_storage::NoStorage; -use sequencer::{eth_signature_key::EthKeyPair, Genesis}; -use sequencer::{L1Params, NetworkParams}; -use snafu::Snafu; -use std::net::ToSocketAddrs; -use std::num::NonZeroUsize; -use std::{collections::HashMap, path::PathBuf, str::FromStr, time::Duration}; +use sequencer::{ + options::parse_duration, persistence::no_storage::NoStorage, Genesis, L1Params, NetworkParams, +}; use url::Url; #[derive(Parser, Clone, Debug)] @@ -189,18 +190,6 @@ pub struct PermissionedBuilderOptions { pub is_da: bool, } -#[derive(Clone, Debug, Snafu)] -pub struct ParseDurationError { - reason: String, -} - -pub fn parse_duration(s: &str) -> Result<Duration, ParseDurationError> { - ClDuration::from_str(s) - .map(Duration::from) - .map_err(|err| ParseDurationError { - reason: err.to_string(), - }) -} impl PermissionedBuilderOptions { pub fn private_keys(&self) -> anyhow::Result<(BLSPrivKey, StateSignKey)> { if let Some(path) = &self.key_file { diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 35b09a8c41..e07c418a4d 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -1,15 +1,15 @@ +use std::{num::NonZeroUsize, path::PathBuf, str::FromStr, time::Duration}; + use
async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use builder::non_permissioned::{build_instance_state, BuilderConfig}; use clap::Parser; use cld::ClDuration; use es_version::SEQUENCER_VERSION; +use espresso_types::eth_signature_key::EthKeyPair; use hotshot::traits::ValidatedState; -use hotshot_types::data::ViewNumber; -use hotshot_types::traits::node_implementation::ConsensusTime; -use sequencer::{eth_signature_key::EthKeyPair, Genesis, L1Params}; +use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; +use sequencer::{Genesis, L1Params}; use snafu::Snafu; -use std::num::NonZeroUsize; -use std::{path::PathBuf, str::FromStr, time::Duration}; use url::Url; #[derive(Parser, Clone, Debug)] diff --git a/builder/src/lib.rs b/builder/src/lib.rs index 1ba9b77f4a..e3a62a8c2a 100644 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -1,4 +1,23 @@ #![allow(unused_imports)] +use std::{ + alloc::System, + any, + fmt::{Debug, Display}, + marker::PhantomData, + mem, + net::{IpAddr, Ipv4Addr}, + thread::Builder, +}; + +use async_compatibility_layer::art::{async_sleep, async_spawn}; +use async_std::{ + sync::{Arc, RwLock}, + task::{spawn, JoinHandle}, +}; +use espresso_types::{ + v0::traits::{PersistenceOptions, SequencerPersistence, StateCatchup}, + SeqTypes, +}; use ethers::{ core::k256::ecdsa::SigningKey, signers::{coins_bip39::English, MnemonicBuilder, Signer as _, Wallet}, @@ -13,51 +32,41 @@ use hotshot::{ types::{SignatureKey, SystemContextHandle}, HotShotInitializer, Memberships, SystemContext, }; +use hotshot_builder_api::builder::{ + BuildError, Error as BuilderApiError, Options as HotshotBuilderApiOptions, +}; +use hotshot_builder_core::service::{GlobalState, ProxyGlobalState}; use hotshot_orchestrator::{ client::{OrchestratorClient, ValidatorArgs}, config::NetworkConfig, }; +// Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support +use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_types::{ consensus::ConsensusMetricsValue, - event::Event, + event::LeafInfo, light_client::StateKeyPair, signature_key::{BLSPrivKey, BLSPubKey}, - traits::{election::Membership, metrics::Metrics}, + traits::{ + block_contents::{ + vid_commitment, BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES, + }, + election::Membership, + metrics::Metrics, + node_implementation::NodeType, + }, + utils::BuilderCommitment, HotShotConfig, PeerConfig, ValidatorConfig, }; -use std::fmt::Display; -// Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support -use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; - -use async_std::sync::{Arc, RwLock}; -use async_std::task::{spawn, JoinHandle}; - -use async_compatibility_layer::art::{async_sleep, async_spawn}; -use hotshot_builder_api::builder::{ - BuildError, Error as BuilderApiError, Options as HotshotBuilderApiOptions, -}; -use hotshot_builder_core::{ - service::{GlobalState, ProxyGlobalState}, - testing::basic_test::NodeType, -}; use jf_merkle_tree::{namespaced_merkle_tree::NamespacedMerkleTreeScheme, MerkleTreeScheme}; use jf_signature::bls_over_bn254::VerKey; -use sequencer::catchup::mock::MockStateCatchup; -use sequencer::state_signature::StakeTableCommitmentType; use sequencer::{ catchup::StatePeers, context::{Consensus, SequencerContext}, - l1_client::L1Client, network, - persistence::SequencerPersistence, - state::FeeAccount, - state::ValidatedState, - state_signature::{static_stake_table_commitment, 
StateSigner}, - L1Params, NetworkParams, Node, NodeState, PrivKey, PubKey, SeqTypes, + state_signature::{static_stake_table_commitment, StakeTableCommitmentType, StateSigner}, + L1Params, NetworkParams, Node, }; -use std::{alloc::System, any, fmt::Debug, mem}; -use std::{marker::PhantomData, net::IpAddr}; -use std::{net::Ipv4Addr, thread::Builder}; use tide_disco::{app, method::ReadState, App, Url}; use vbs::version::StaticVersionType; @@ -95,9 +104,25 @@ pub fn run_builder_api_service(url: Url, source: ProxyGlobalState) { #[cfg(test)] pub mod testing { - use super::*; - use committable::Committable; use core::num; + use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; + + //use sequencer::persistence::NoStorage; + use async_broadcast::{ + broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, + TryRecvError, + }; + use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + channel::{unbounded, UnboundedReceiver, UnboundedSender}, + }; + use async_lock::RwLock; + use async_trait::async_trait; + use committable::Committable; + use espresso_types::{ + mock::MockStateCatchup, ChainConfig, Event, FeeAccount, L1Client, NodeState, PrivKey, + PubKey, Transaction, ValidatedState, + }; use ethers::{ types::spoof::State, utils::{Anvil, AnvilInstance}, @@ -106,62 +131,47 @@ pub mod testing { future::join_all, stream::{Stream, StreamExt}, }; - use hotshot::traits::{ - implementations::{MasterMap, MemoryNetwork}, - BlockPayload, - }; - use hotshot::types::{EventType::Decide, Message}; - use hotshot_types::{ - light_client::StateKeyPair, + use hotshot::{ traits::{ - block_contents::BlockHeader, metrics::NoMetrics, - signature_key::BuilderSignatureKey as _, + implementations::{MasterMap, MemoryNetwork}, + BlockPayload, }, - ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, - }; - use portpicker::pick_unused_port; - use vbs::version::StaticVersion; - //use sequencer::persistence::NoStorage; - use async_broadcast::{ - broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, - TryRecvError, + types::{EventType::Decide, Message}, }; - use async_compatibility_layer::channel::unbounded; - use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::{UnboundedReceiver, UnboundedSender}, + use hotshot_builder_api::builder::{ + BuildError, Error as BuilderApiError, Options as HotshotBuilderApiOptions, }; - use async_lock::RwLock; use hotshot_builder_core::{ builder_state::{BuildBlockInfo, BuilderState, MessageType, ResponseMessage}, service::GlobalState, }; - use hotshot_types::event::LeafInfo; + use hotshot_events_service::{ + events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, + events_source::{EventConsumer, EventsStreamer}, + }; use hotshot_types::{ data::{fake_commitment, Leaf, ViewNumber}, + event::LeafInfo, + light_client::StateKeyPair, traits::{ - block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, + block_contents::{vid_commitment, BlockHeader, GENESIS_VID_NUM_STORAGE_NODES}, + metrics::NoMetrics, node_implementation::ConsensusTime, + signature_key::BuilderSignatureKey as _, }, + ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; - use sequencer::{ - catchup::StateCatchup, eth_signature_key::EthKeyPair, persistence::PersistenceOptions, - state_signature::StateSignatureMemStorage, ChainConfig, - }; - use sequencer::{Event, Transaction}; - use std::{collections::HashSet, num::NonZeroUsize, time::Duration}; - - use crate::non_permissioned::BuilderConfig; - use 
crate::permissioned::{init_hotshot, BuilderContext}; - use async_trait::async_trait; - use hotshot_builder_api::builder::Options as HotshotBuilderApiOptions; - use hotshot_builder_api::builder::{BuildError, Error as BuilderApiError}; - use hotshot_events_service::{ - events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, - events_source::{EventConsumer, EventsStreamer}, - }; + use portpicker::pick_unused_port; + use sequencer::state_signature::StateSignatureMemStorage; use serde::{Deserialize, Serialize}; use snafu::{guide::feature_flags, *}; + use vbs::version::StaticVersion; + + use super::*; + use crate::{ + non_permissioned::BuilderConfig, + permissioned::{init_hotshot, BuilderContext}, + }; #[derive(Clone)] pub struct HotShotTestConfig { @@ -657,30 +667,27 @@ pub mod testing { mod test { //use self::testing::mock_node_state; - use super::*; //use super::{transaction::ApplicationTransaction, vm::TestVm, *}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - use async_std::stream::IntoStream; use clap::builder; + use es_version::SequencerVersion; + use espresso_types::{Header, NodeState, Payload, ValidatedState}; use ethers::providers::Quorum; use futures::StreamExt; use hotshot::types::EventType::Decide; - use hotshot_builder_api::block_info::AvailableBlockData; use hotshot_builder_core::service::GlobalState; - use hotshot_types::event::LeafInfo; - use hotshot_types::traits::block_contents::{ - vid_commitment, BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES, + use sequencer::{ + empty_builder_commitment, + persistence::{ + no_storage::{self, NoStorage}, + sql, + }, }; - use hotshot_types::utils::BuilderCommitment; - use sequencer::block::Payload; - use sequencer::persistence::no_storage::{self, NoStorage}; - use sequencer::persistence::sql; - use sequencer::{empty_builder_commitment, Header}; use testing::{wait_for_decide_on_handle, HotShotTestConfig}; - use es_version::SequencerVersion; + use super::*; // Test that a non-voting hotshot node can participate in consensus and reach a certain height. // It is enabled by keeping the node(s) in the stake table, but with a stake of 0. @@ -741,18 +748,18 @@ mod test { // the fields which should be monotonic are. for LeafInfo { leaf, .. 
} in leaf_chain.iter().rev() { let header = leaf.block_header().clone(); - if header.height == 0 { + if header.height() == 0 { parent = header; continue; } - assert_eq!(header.height, parent.height + 1); - assert!(header.timestamp >= parent.timestamp); - assert!(header.l1_head >= parent.l1_head); - assert!(header.l1_finalized >= parent.l1_finalized); + assert_eq!(header.height(), parent.height() + 1); + assert!(header.timestamp() >= parent.timestamp()); + assert!(header.l1_head() >= parent.l1_head()); + assert!(header.l1_finalized() >= parent.l1_finalized()); parent = header; } - if parent.height >= success_height { + if parent.height() >= success_height { break; } } diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index 683a695f21..fbcc508ee9 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -1,3 +1,5 @@ +use std::{num::NonZeroUsize, time::Duration}; + use anyhow::Context; use async_broadcast::{ broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, TryRecvError, @@ -7,6 +9,10 @@ use async_compatibility_layer::{ channel::{unbounded, UnboundedReceiver, UnboundedSender}, }; use async_std::sync::{Arc, RwLock}; +use espresso_types::{ + eth_signature_key::EthKeyPair, ChainConfig, L1Client, NodeState, Payload, SeqTypes, + ValidatedState, +}; use ethers::{ core::k256::ecdsa::SigningKey, signers::{coins_bip39::English, MnemonicBuilder, Signer as _, Wallet}, @@ -26,7 +32,10 @@ use hotshot_builder_core::{ ReceivedTransaction, }, }; - +use hotshot_events_service::{ + events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, + events_source::{BuilderEvent, EventConsumer, EventsStreamer}, +}; use hotshot_types::{ data::{fake_commitment, Leaf, ViewNumber}, traits::{ @@ -36,23 +45,14 @@ use hotshot_types::{ }, utils::BuilderCommitment, }; -use sequencer::{ - catchup::StatePeers, eth_signature_key::EthKeyPair, l1_client::L1Client, ChainConfig, L1Params, - NetworkParams, NodeState, Payload, PrivKey, PubKey, SeqTypes, ValidatedState, -}; - -use hotshot_events_service::{ - events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, - events_source::{BuilderEvent, EventConsumer, EventsStreamer}, -}; - -use crate::run_builder_api_service; -use std::{num::NonZeroUsize, time::Duration}; +use sequencer::{catchup::StatePeers, L1Params, NetworkParams}; use surf::http::headers::ACCEPT; use surf_disco::Client; use tide_disco::{app, method::ReadState, App, Url}; use vbs::version::StaticVersionType; +use crate::run_builder_api_service; + #[derive(Clone, Debug)] pub struct BuilderConfig { pub global_state: Arc>>, @@ -222,44 +222,47 @@ impl BuilderConfig { #[cfg(test)] mod test { - use super::*; - use crate::testing::{ - hotshot_builder_url, HotShotTestConfig, NonPermissionedBuilderTestConfig, + use std::time::Duration; + + use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + logging::{setup_backtrace, setup_logging}, }; - use async_compatibility_layer::art::{async_sleep, async_spawn}; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_lock::RwLock; use async_std::task; use es_version::SequencerVersion; + use espresso_types::{FeeAccount, NamespaceId, Transaction}; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::BuildError, }; - use hotshot_builder_core::builder_state::BuilderProgress; - use hotshot_builder_core::service::{ - run_non_permissioned_standalone_builder_service, - 
run_permissioned_standalone_builder_service,
+    use hotshot_builder_core::{
+        builder_state::BuilderProgress,
+        service::{
+            run_non_permissioned_standalone_builder_service,
+            run_permissioned_standalone_builder_service,
+        },
    };
    use hotshot_events_service::{
        events::{Error as EventStreamApiError, Options as EventStreamingApiOptions},
        events_source::{BuilderEvent, EventConsumer, EventsStreamer},
    };
-    use hotshot_types::traits::{
-        block_contents::{BlockPayload, GENESIS_VID_NUM_STORAGE_NODES},
-        node_implementation::NodeType,
-    };
-    use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey};
-    use sequencer::{
-        persistence::{
-            no_storage::{self, NoStorage},
-            PersistenceOptions,
+    use hotshot_types::{
+        signature_key::BLSPubKey,
+        traits::{
+            block_contents::{BlockPayload, GENESIS_VID_NUM_STORAGE_NODES},
+            node_implementation::NodeType,
+            signature_key::SignatureKey,
        },
-        state::FeeAccount,
-        NamespaceId, Payload, Transaction,
    };
-    use std::time::Duration;
+    use sequencer::persistence::no_storage::{self, NoStorage};
    use surf_disco::Client;

+    use super::*;
+    use crate::testing::{
+        hotshot_builder_url, HotShotTestConfig, NonPermissionedBuilderTestConfig,
+    };
+
    /// Test the non-permissioned builder core
    /// It creates a memory hotshot network and launches the hotshot event streaming api
    /// Builder subscribes to this api, and serves the hotshot client requests and the private mempool tx submissions
@@ -416,7 +419,7 @@
        }
    }

-        let txn = Transaction::new(NamespaceId::from(1), vec![1, 2, 3]);
+        let txn = Transaction::new(NamespaceId::from(1_u32), vec![1, 2, 3]);
    match builder_client
        .post::<()>("txn_submit/submit")
        .body_json(&txn)
diff --git a/builder/src/permissioned.rs b/builder/src/permissioned.rs
index bcc233f128..b40c7ea667 100644
--- a/builder/src/permissioned.rs
+++ b/builder/src/permissioned.rs
@@ -1,4 +1,32 @@
+use std::{
+    alloc::System,
+    any,
+    fmt::{Debug, Display},
+    marker::PhantomData,
+    mem,
+    net::{IpAddr, Ipv4Addr},
+    num::NonZeroUsize,
+    thread::Builder,
+    time::Duration,
+};
+
use anyhow::Context;
+use async_broadcast::{
+    broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, TryRecvError,
+};
+use async_compatibility_layer::{
+    art::{async_sleep, async_spawn},
+    channel::{unbounded, UnboundedReceiver, UnboundedSender},
+};
+use async_std::{
+    sync::{Arc, RwLock},
+    task::{spawn, JoinHandle},
+};
+use espresso_types::{
+    eth_signature_key::EthKeyPair,
+    v0::traits::{PersistenceOptions, SequencerPersistence, StateCatchup},
+    L1Client, NodeState, Payload, PubKey, SeqTypes, ValidatedState,
+};
use ethers::{
    core::k256::ecdsa::SigningKey,
    signers::{coins_bip39::English, MnemonicBuilder, Signer as _, Wallet},
@@ -15,41 +43,11 @@ use hotshot::{
        derive_libp2p_peer_id, CdnMetricsValue, CombinedNetworks, KeyPair, Libp2pNetwork,
        PushCdnNetwork, Topic, WrappedSignatureKey,
    },
+    BlockPayload,
    },
    types::{SignatureKey, SystemContextHandle},
    HotShotInitializer, Memberships, SystemContext,
};
-use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider;
-use hotshot_orchestrator::{
-    client::{OrchestratorClient, ValidatorArgs},
-    config::NetworkConfig,
-};
-use hotshot_types::{
-    consensus::ConsensusMetricsValue,
-    event::Event,
-    light_client::StateKeyPair,
-    signature_key::{BLSPrivKey, BLSPubKey},
-    traits::{election::Membership, metrics::Metrics, network::ConnectedNetwork, EncodeBytes},
-    utils::BuilderCommitment,
-    HotShotConfig, PeerConfig, ValidatorConfig,
-};
-use std::fmt::Display;
// Should move
`STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support -use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; - -use async_broadcast::{ - broadcast, Receiver as BroadcastReceiver, RecvError, Sender as BroadcastSender, TryRecvError, -}; -use async_compatibility_layer::{ - art::{async_sleep, async_spawn}, - channel::{unbounded, UnboundedReceiver, UnboundedSender}, -}; -use async_std::sync::RwLock; -use async_std::{ - sync::Arc, - task::{spawn, JoinHandle}, -}; -use hotshot::traits::BlockPayload; use hotshot_builder_api::builder::{ BuildError, Error as BuilderApiError, Options as HotshotBuilderApiOptions, }; @@ -64,46 +62,51 @@ use hotshot_builder_core::{ ReceivedTransaction, }, }; +use hotshot_events_service::{ + events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, + events_source::{BuilderEvent, EventConsumer, EventsStreamer}, +}; +use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider; +use hotshot_orchestrator::{ + client::{OrchestratorClient, ValidatorArgs}, + config::NetworkConfig, +}; +// Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support +use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover; +use hotshot_types::{ + consensus::ConsensusMetricsValue, + data::{fake_commitment, Leaf, ViewNumber}, + event::Event, + light_client::StateKeyPair, + signature_key::{BLSPrivKey, BLSPubKey}, + traits::{ + block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, + election::Membership, + metrics::Metrics, + network::ConnectedNetwork, + node_implementation::{ConsensusTime, NodeType}, + EncodeBytes, + }, + utils::BuilderCommitment, + HotShotConfig, PeerConfig, ValidatorConfig, +}; use jf_merkle_tree::{namespaced_merkle_tree::NamespacedMerkleTreeScheme, MerkleTreeScheme}; use jf_signature::bls_over_bn254::VerKey; use sequencer::{ - catchup::{mock::MockStateCatchup, StatePeers}, + catchup::StatePeers, context::{Consensus, SequencerContext}, - eth_signature_key::EthKeyPair, genesis::L1Finalized, - l1_client::L1Client, network, network::libp2p::split_off_peer_id, - persistence::SequencerPersistence, - state::FeeAccount, - state::ValidatedState, - state_signature::StakeTableCommitmentType, - state_signature::{static_stake_table_commitment, StateSigner}, - ChainConfig, Genesis, L1Params, NetworkParams, Node, NodeState, Payload, PrivKey, PubKey, - SeqTypes, + state_signature::{static_stake_table_commitment, StakeTableCommitmentType, StateSigner}, + Genesis, L1Params, NetworkParams, Node, }; -use std::{alloc::System, any, fmt::Debug, mem}; -use std::{marker::PhantomData, net::IpAddr}; -use std::{net::Ipv4Addr, thread::Builder}; +use surf_disco::Client; use tide_disco::{app, method::ReadState, App, Url}; use vbs::version::StaticVersionType; -use hotshot_types::{ - data::{fake_commitment, Leaf, ViewNumber}, - traits::{ - block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, - node_implementation::{ConsensusTime, NodeType}, - }, -}; - use crate::run_builder_api_service; -use hotshot_events_service::{ - events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, - events_source::{BuilderEvent, EventConsumer, EventsStreamer}, -}; -use std::{num::NonZeroUsize, time::Duration}; -use surf_disco::Client; pub struct BuilderContext< N: ConnectedNetwork, @@ -537,23 +540,26 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp #[cfg(test)] mod test { - use super::*; - use crate::non_permissioned; - use 
crate::testing::{hotshot_builder_url, PermissionedBuilderTestConfig}; - use crate::testing::{HotShotTestConfig, NonPermissionedBuilderTestConfig}; - use async_compatibility_layer::art::{async_sleep, async_spawn}; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use std::time::Duration; + + use async_compatibility_layer::{ + art::{async_sleep, async_spawn}, + logging::{setup_backtrace, setup_logging}, + }; use async_lock::RwLock; use async_std::task; use es_version::SequencerVersion; + use espresso_types::{FeeAccount, NamespaceId, Transaction}; use hotshot_builder_api::{ block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, builder::BuildError, }; - use hotshot_builder_core::builder_state::BuilderProgress; - use hotshot_builder_core::service::{ - run_non_permissioned_standalone_builder_service, - run_permissioned_standalone_builder_service, + use hotshot_builder_core::{ + builder_state::BuilderProgress, + service::{ + run_non_permissioned_standalone_builder_service, + run_permissioned_standalone_builder_service, + }, }; use hotshot_events_service::{ events::{Error as EventStreamApiError, Options as EventStreamingApiOptions}, @@ -567,13 +573,18 @@ mod test { signature_key::SignatureKey, }, }; - use sequencer::{ - persistence::no_storage::{self, NoStorage}, - NamespaceId, Payload, Transaction, - }; - use std::time::Duration; + use sequencer::persistence::no_storage::{self, NoStorage}; use surf_disco::Client; + use super::*; + use crate::{ + non_permissioned, + testing::{ + hotshot_builder_url, HotShotTestConfig, NonPermissionedBuilderTestConfig, + PermissionedBuilderTestConfig, + }, + }; + #[async_std::test] async fn test_permissioned_builder() { setup_logging(); @@ -716,7 +727,7 @@ mod test { } } - let txn = Transaction::new(NamespaceId::from(1), vec![1, 2, 3]); + let txn = Transaction::new(NamespaceId::from(1_u32), vec![1, 2, 3]); match builder_client .post::<()>("txn_submit/submit") .body_json(&txn) diff --git a/contracts/rust/adapter/src/jellyfish.rs b/contracts/rust/adapter/src/jellyfish.rs index 1df5a92ab5..cd8b0cad16 100644 --- a/contracts/rust/adapter/src/jellyfish.rs +++ b/contracts/rust/adapter/src/jellyfish.rs @@ -12,9 +12,12 @@ use ethers::{ types::{Bytes, H256, U256}, }; use jf_pcs::prelude::Commitment; -use jf_plonk::proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}; -use jf_plonk::testing_apis::Challenges; -use jf_plonk::{constants::KECCAK256_STATE_SIZE, transcript::SolidityTranscript}; +use jf_plonk::{ + constants::KECCAK256_STATE_SIZE, + proof_system::structs::{OpenKey, Proof, ProofEvaluations, VerifyingKey}, + testing_apis::Challenges, + transcript::SolidityTranscript, +}; use num_bigint::BigUint; use num_traits::Num; diff --git a/contracts/rust/diff-test/src/main.rs b/contracts/rust/diff-test/src/main.rs index 5c7b58b407..d8537bdc05 100644 --- a/contracts/rust/diff-test/src/main.rs +++ b/contracts/rust/diff-test/src/main.rs @@ -2,12 +2,10 @@ use ark_bn254::{Bn254, Fq, Fr, G1Affine, G2Affine}; use ark_ec::{AffineRepr, CurveGroup}; use ark_ed_on_bn254::{EdwardsConfig as EdOnBn254Config, Fq as FqEd254}; use ark_ff::field_hashers::{DefaultFieldHasher, HashToField}; -use ark_poly::domain::radix2::Radix2EvaluationDomain; -use ark_poly::EvaluationDomain; +use ark_poly::{domain::radix2::Radix2EvaluationDomain, EvaluationDomain}; use ark_std::rand::{rngs::StdRng, Rng, SeedableRng}; use clap::{Parser, ValueEnum}; use diff_test_bn254::ParsedG2Point; - use ethers::{ abi::{AbiDecode, AbiEncode, Address}, 
types::{Bytes, U256},
@@ -18,15 +16,19 @@ use hotshot_state_prover::mock_ledger::{
};
use itertools::multiunzip;
use jf_pcs::prelude::Commitment;
-use jf_plonk::proof_system::structs::{Proof, VerifyingKey};
-use jf_plonk::proof_system::PlonkKzgSnark;
use jf_plonk::{
+    proof_system::{
+        structs::{Proof, VerifyingKey},
+        PlonkKzgSnark,
+    },
    testing_apis::Verifier,
    transcript::{PlonkTranscript, SolidityTranscript},
};
-use jf_signature::bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature};
-use jf_signature::constants::CS_ID_BLS_BN254;
-use jf_signature::schnorr::KeyPair as SchnorrKeyPair;
+use jf_signature::{
+    bls_over_bn254::{hash_to_curve, KeyPair as BLSKeyPair, Signature},
+    constants::CS_ID_BLS_BN254,
+    schnorr::KeyPair as SchnorrKeyPair,
+};
use sha3::Keccak256;

#[derive(Parser)]
diff --git a/contracts/rust/gen-vk-contract/src/main.rs b/contracts/rust/gen-vk-contract/src/main.rs
index d6804eca7c..24530124c4 100644
--- a/contracts/rust/gen-vk-contract/src/main.rs
+++ b/contracts/rust/gen-vk-contract/src/main.rs
@@ -2,10 +2,7 @@
//! LightClient updates by running `cargo run -p gen-vk-contract --release`.
//! Adapted from [CAPE project][https://github.com/EspressoSystems/cape/blob/main/contracts/rust/src/bin/gen-vk-libraries.rs]

-use std::fs::OpenOptions;
-use std::io::Write;
-use std::path::PathBuf;
-use std::process::Command;
+use std::{fs::OpenOptions, io::Write, path::PathBuf, process::Command};

use hotshot_contract_adapter::jellyfish::ParsedVerifyingKey;
use hotshot_stake_table::config::STAKE_TABLE_CAPACITY;
diff --git a/data/README.md b/data/README.md
index 42d9a011dd..d7f0fd598b 100644
--- a/data/README.md
+++ b/data/README.md
@@ -6,11 +6,11 @@ commitments.
The objects in this directory have well-known commitments. They ser
by the Espresso Sequencer, and can be used as test cases for ports of the serialization and commitment algorithms to
other languages.

-The Rust module `sequencer::reference_tests` contains test cases which are designed to fail if the serialization format
+The Rust module `espresso_types::reference_tests` contains test cases which are designed to fail if the serialization format
or commitment scheme for any of these data types changes. If you make a breaking change, you may need to update these
reference objects as well. Running those tests will also print out information about the commitments of these reference
objects, which can be useful for generating test cases for ports.
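As an illustration of how such a port might pin these well-known values, here is a minimal Rust sketch that reads one
of the versioned reference headers and extracts the fields a port-side test would compare against; the
`data/v2/header.json` path and field names follow the files added later in this patch, and a `serde_json` dependency
is assumed:

```rust
use std::{error::Error, fs};

use serde_json::Value;

fn main() -> Result<(), Box<dyn Error>> {
    // Reference objects live under versioned directories (data/v1, data/v2, ...).
    let json = fs::read_to_string("data/v2/header.json")?;
    let header: Value = serde_json::from_str(&json)?;

    // Each serialized header records its format version alongside its fields.
    let version = &header["version"]["Version"];
    let commitment = header["fields"]["payload_commitment"]
        .as_str()
        .expect("reference headers carry a payload commitment");

    // A port would assert that its own commitment computation reproduces this value.
    println!(
        "v{}.{} payload commitment: {}",
        version["major"], version["minor"], commitment
    );
    Ok(())
}
```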
To run them and get the output, use ```bash -cargo test --all-features -p sequencer -- --nocapture --test-threads 1 reference_tests +cargo test --all-features -p espresso-types -- --nocapture --test-threads 1 reference_tests ``` diff --git a/data/chain_config.bin b/data/v1/chain_config.bin similarity index 100% rename from data/chain_config.bin rename to data/v1/chain_config.bin diff --git a/data/chain_config.json b/data/v1/chain_config.json similarity index 100% rename from data/chain_config.json rename to data/v1/chain_config.json diff --git a/data/fee_info.bin b/data/v1/fee_info.bin similarity index 100% rename from data/fee_info.bin rename to data/v1/fee_info.bin diff --git a/data/fee_info.json b/data/v1/fee_info.json similarity index 100% rename from data/fee_info.json rename to data/v1/fee_info.json diff --git a/data/header.bin b/data/v1/header.bin similarity index 100% rename from data/header.bin rename to data/v1/header.bin diff --git a/data/header.json b/data/v1/header.json similarity index 100% rename from data/header.json rename to data/v1/header.json diff --git a/data/l1_block.bin b/data/v1/l1_block.bin similarity index 100% rename from data/l1_block.bin rename to data/v1/l1_block.bin diff --git a/data/l1_block.json b/data/v1/l1_block.json similarity index 100% rename from data/l1_block.json rename to data/v1/l1_block.json diff --git a/data/ns_table.bin b/data/v1/ns_table.bin similarity index 100% rename from data/ns_table.bin rename to data/v1/ns_table.bin diff --git a/data/ns_table.json b/data/v1/ns_table.json similarity index 100% rename from data/ns_table.json rename to data/v1/ns_table.json diff --git a/data/payload.bin b/data/v1/payload.bin similarity index 100% rename from data/payload.bin rename to data/v1/payload.bin diff --git a/data/payload.json b/data/v1/payload.json similarity index 100% rename from data/payload.json rename to data/v1/payload.json diff --git a/data/transaction.bin b/data/v1/transaction.bin similarity index 100% rename from data/transaction.bin rename to data/v1/transaction.bin diff --git a/data/transaction.json b/data/v1/transaction.json similarity index 100% rename from data/transaction.json rename to data/v1/transaction.json diff --git a/data/tx_index.bin b/data/v1/tx_index.bin similarity index 100% rename from data/tx_index.bin rename to data/v1/tx_index.bin diff --git a/data/tx_index.json b/data/v1/tx_index.json similarity index 100% rename from data/tx_index.json rename to data/v1/tx_index.json diff --git a/data/v2/header.bin b/data/v2/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..6faa7f07f82f6bf95d34ad6785f6fa7135579d60 GIT binary patch literal 828 zcmb_aJ4;z(5>O&4Xe>k|h!*a9cSsRm2Ndry=BkAt$nM$Q%0_G?u@S6m_?|7YS4+Dy$z=vA-t*$0n~Tq z)>fjp&|l^FW_I_Aw%B+r_AI=Lq`oo)7jmTHU_x0QP#TQQoDaA4MO3PbI6gcx-W~bQ z!N#Zga%1mQspHSn?cw%;h32GgcW3kXOS|5C@V@8gXiRL0I5Ia;97{@zqtDZHVm3{t zX3wSGycm44{^JpT`)&E<^yk|BrSGr*Hg+DqYZLC-QMz*Qt1C*JtpEGpSJaiC^8? 
zFOAijDykq0Ay}YP%8S3PdRs#>ra)+ct+E`jmqc2LgU`i438py-HiKNn69Wt!*DMeR z0*oUW7gZ&SDb+z($t>XtsAGz_%;yx8_FQ0!0|ALC(cpu0j0&q5reLV%+GLO8g8uToS literal 0 HcmV?d00001 diff --git a/data/v2/header.json b/data/v2/header.json new file mode 100644 index 0000000000..41d8e1e814 --- /dev/null +++ b/data/v2/header.json @@ -0,0 +1,45 @@ +{ + "fields": { + "block_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAQA", + "builder_commitment": "BUILDER_COMMITMENT~jlEvJoHPETCSwXF6UKcD22zOjfoHGuyVFTVkP_BNc-no", + "builder_signature": { + "r": "0xa1c3795850b7b490e616b60fead89753841fbc9fffe1a939d483f1d959ad1c45", + "s": "0x20228f5b63b14792d371dce479978e45020f19602189ef6d325b73029a2848ac", + "v": 27 + }, + "chain_config": { + "chain_config": { + "Left": { + "base_fee": "0", + "chain_id": "35353", + "fee_contract": "0x0000000000000000000000000000000000000000", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "max_block_size": "10240" + } + } + }, + "fee_info": { + "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "amount": "0" + }, + "fee_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAKA", + "height": 42, + "l1_finalized": { + "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + "number": 123, + "timestamp": "0x456" + }, + "l1_head": 124, + "ns_table": { + "bytes": "AwAAAO7/wAAcBgAAobC5EkAOAABksAWiXBQAAA==" + }, + "payload_commitment": "HASH~u-mEo1mwByROUhnvO7pBFitcD0UEvruK-b8WONkKoCLQ", + "timestamp": 789 + }, + "version": { + "Version": { + "major": 0, + "minor": 2 + } + } +} \ No newline at end of file diff --git a/data/v3/header.bin b/data/v3/header.bin new file mode 100644 index 0000000000000000000000000000000000000000..6813851426a2190776b61060f37e50af2e0d7bf8 GIT binary patch literal 828 zcmb_aJ!_Of5Z*Hv0}&KM5{S9t5D7sG_j`9pk^At4J92N-GZukB*nM}mvXL}F(+E~V zQV8~b6t>sufggWBZ6$vo0ZDAN(f2Ovo!UgjDdw4(efF7IW)Ui&6R2k+eSfm=5yG3= z5J2-s>CsuQiVGJ94E%@o?>DAm4KhGXeCDE~&N{0Gf^vR`a|xc$H&IOzp3 z#%Ud7;F*`kYE2bYkcAK|P%7omziqf%Lo%j7Xo0P=9I%%}T8V?t#Xt$BISDp{T*VUu z3>?=i5C;N`BN-P%C5kE4L0HKw;R>i@inz?@At>#+z!V1p5>ukV2k96URxwP$P|dZ; IX5$0D14s^zTmS$7 literal 0 HcmV?d00001 diff --git a/data/v3/header.json b/data/v3/header.json new file mode 100644 index 0000000000..5e5b6cddcb --- /dev/null +++ b/data/v3/header.json @@ -0,0 +1,45 @@ +{ + "fields": { + "block_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAQA", + "builder_commitment": "BUILDER_COMMITMENT~jlEvJoHPETCSwXF6UKcD22zOjfoHGuyVFTVkP_BNc-no", + "builder_signature": { + "r": "0xa1c3795850b7b490e616b60fead89753841fbc9fffe1a939d483f1d959ad1c45", + "s": "0x20228f5b63b14792d371dce479978e45020f19602189ef6d325b73029a2848ac", + "v": 27 + }, + "chain_config": { + "chain_config": { + "Left": { + "base_fee": "0", + "chain_id": "35353", + "fee_contract": "0x0000000000000000000000000000000000000000", + "fee_recipient": "0x0000000000000000000000000000000000000000", + "max_block_size": "10240" + } + } + }, + "fee_info": { + "account": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", + "amount": "0" + }, + "fee_merkle_tree_root": "MERKLE_COMM~AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAKA", + "height": 42, + "l1_finalized": { + "hash": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + "number": 123, + "timestamp": "0x456" + }, + "l1_head": 124, + "ns_table": { + "bytes": "AwAAAO7/wAAcBgAAobC5EkAOAABksAWiXBQAAA==" + }, + "payload_commitment": 
"HASH~u-mEo1mwByROUhnvO7pBFitcD0UEvruK-b8WONkKoCLQ", + "timestamp": 789 + }, + "version": { + "Version": { + "major": 0, + "minor": 3 + } + } +} \ No newline at end of file diff --git a/hotshot-state-prover/src/bin/state-prover.rs b/hotshot-state-prover/src/bin/state-prover.rs index f1f25a027d..78d5329265 100644 --- a/hotshot-state-prover/src/bin/state-prover.rs +++ b/hotshot-state-prover/src/bin/state-prover.rs @@ -1,14 +1,17 @@ +use std::{str::FromStr as _, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use cld::ClDuration; use es_version::SEQUENCER_VERSION; -use ethers::providers::{Http, Middleware, Provider}; -use ethers::signers::{coins_bip39::English, MnemonicBuilder, Signer}; -use ethers::types::Address; +use ethers::{ + providers::{Http, Middleware, Provider}, + signers::{coins_bip39::English, MnemonicBuilder, Signer}, + types::Address, +}; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover::service::{run_prover_once, run_prover_service, StateProverConfig}; use snafu::Snafu; -use std::{str::FromStr as _, time::Duration}; use url::Url; #[derive(Parser)] diff --git a/hotshot-state-prover/src/circuit.rs b/hotshot-state-prover/src/circuit.rs index e1e2603b9c..f46aa5e75b 100644 --- a/hotshot-state-prover/src/circuit.rs +++ b/hotshot-state-prover/src/circuit.rs @@ -372,21 +372,21 @@ where #[cfg(test)] mod tests { - use super::{build, GenericLightClientState}; - use crate::test_utils::{key_pairs_for_testing, stake_table_for_testing}; use ark_ed_on_bn254::EdwardsConfig as Config; use ethers::types::U256; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; use jf_crhf::CRHF; + use jf_relation::Circuit; use jf_rescue::crhf::VariableLengthRescueCRHF; use jf_signature::{ schnorr::{SchnorrSignatureScheme, Signature}, SignatureScheme, }; - - use jf_relation::Circuit; use jf_utils::test_rng; + use super::{build, GenericLightClientState}; + use crate::test_utils::{key_pairs_for_testing, stake_table_for_testing}; + type F = ark_ed_on_bn254::Fq; const ST_CAPACITY: usize = 20; diff --git a/hotshot-state-prover/src/mock_ledger.rs b/hotshot-state-prover/src/mock_ledger.rs index 0b7b061f76..5fe608b091 100644 --- a/hotshot-state-prover/src/mock_ledger.rs +++ b/hotshot-state-prover/src/mock_ledger.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::Result; use ark_bn254::Bn254; use ark_ed_on_bn254::EdwardsConfig; @@ -6,37 +8,38 @@ use ark_std::{ rand::{rngs::StdRng, CryptoRng, Rng, RngCore}, UniformRand, }; - -use ethers::{abi, utils}; use ethers::{ + abi, abi::Token, types::{H256, U256}, + utils, }; -use hotshot_contract_adapter::jellyfish::{field_to_u256, open_key, u256_to_field}; -use hotshot_contract_adapter::light_client::ParsedLightClientState; -use hotshot_stake_table::vec_based::StakeTable; - -use crate::{ - generate_state_update_proof, preprocess, service::one_honest_threshold, Proof, VerifyingKey, +use hotshot_contract_adapter::{ + jellyfish::{field_to_u256, open_key, u256_to_field}, + light_client::ParsedLightClientState, }; -use hotshot_types::traits::stake_table::StakeTableScheme; +use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::{ light_client::{GenericLightClientState, GenericPublicInput, LightClientState}, - traits::stake_table::SnapshotVersion, + traits::stake_table::{SnapshotVersion, StakeTableScheme}, }; use itertools::izip; use jf_pcs::prelude::UnivariateUniversalParams; -use jf_plonk::proof_system::{PlonkKzgSnark, UniversalSNARK}; -use 
jf_plonk::transcript::SolidityTranscript; +use jf_plonk::{ + proof_system::{PlonkKzgSnark, UniversalSNARK}, + transcript::SolidityTranscript, +}; use jf_relation::{Arithmetization, Circuit, PlonkCircuit}; -use jf_signature::schnorr::Signature; use jf_signature::{ bls_over_bn254::{BLSOverBN254CurveSignatureScheme, VerKey as BLSVerKey}, - schnorr::SchnorrSignatureScheme, + schnorr::{SchnorrSignatureScheme, Signature}, SignatureScheme, }; use jf_utils::test_rng; -use std::collections::HashMap; + +use crate::{ + generate_state_update_proof, preprocess, service::one_honest_threshold, Proof, VerifyingKey, +}; type F = ark_ed_on_bn254::Fq; type SchnorrVerKey = jf_signature::schnorr::VerKey; diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index df5263767b..41342255a5 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -1,6 +1,10 @@ //! A light client prover service -use crate::snark::{generate_state_update_proof, Proof, ProvingKey}; +use std::{ + iter, + time::{Duration, Instant}, +}; + use anyhow::{anyhow, Context, Result}; use async_std::{ io, @@ -12,41 +16,41 @@ use displaydoc::Display; use ethers::{ core::k256::ecdsa::SigningKey, middleware::SignerMiddleware, - providers::Http, - providers::{Middleware, Provider, ProviderError}, + providers::{Http, Middleware, Provider, ProviderError}, signers::{LocalWallet, Signer, Wallet}, types::{Address, U256}, }; use futures::FutureExt; -use hotshot_contract_adapter::jellyfish::{u256_to_field, ParsedPlonkProof}; -use hotshot_contract_adapter::light_client::ParsedLightClientState; -use hotshot_stake_table::vec_based::config::FieldType; -use hotshot_stake_table::vec_based::StakeTable; -use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme as _}; +use hotshot_contract_adapter::{ + jellyfish::{u256_to_field, ParsedPlonkProof}, + light_client::ParsedLightClientState, +}; +use hotshot_stake_table::vec_based::{config::FieldType, StakeTable}; use hotshot_types::{ light_client::{ CircuitField, GenericPublicInput, LightClientState, PublicInput, StateSignaturesBundle, StateVerKey, }, - traits::signature_key::StakeTableEntryType, + signature_key::BLSPubKey, + traits::{ + signature_key::StakeTableEntryType, + stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme as _}, + }, + PeerConfig, }; -use hotshot_types::{signature_key::BLSPubKey, PeerConfig}; - use jf_pcs::prelude::UnivariateUniversalParams; use jf_plonk::errors::PlonkError; use jf_relation::Circuit as _; use jf_signature::constants::CS_ID_SCHNORR; use serde::Deserialize; -use std::{ - iter, - time::{Duration, Instant}, -}; use surf_disco::Client; use tide_disco::{error::ServerError, Api}; use time::ext::InstantExt; use url::Url; use vbs::version::StaticVersionType; +use crate::snark::{generate_state_update_proof, Proof, ProvingKey}; + type F = ark_ed_on_bn254::Fq; /// A wallet with local signer and connected to network via http @@ -499,8 +503,6 @@ impl std::error::Error for ProverError {} #[cfg(test)] mod test { - use super::*; - use crate::mock_ledger::{MockLedger, MockSystemParam}; use anyhow::Result; use ark_ed_on_bn254::EdwardsConfig; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; @@ -514,6 +516,9 @@ mod test { use jf_utils::test_rng; use sequencer_utils::deployer; + use super::*; + use crate::mock_ledger::{MockLedger, MockSystemParam}; + const STAKE_TABLE_CAPACITY_FOR_TEST: usize = 10; const BLOCKS_PER_EPOCH: u32 = 10; diff --git 
a/hotshot-state-prover/src/snark.rs b/hotshot-state-prover/src/snark.rs index 1e9f5c1f14..ae85424396 100644 --- a/hotshot-state-prover/src/snark.rs +++ b/hotshot-state-prover/src/snark.rs @@ -5,6 +5,8 @@ use ark_std::{ rand::{CryptoRng, RngCore}, }; use ethers::types::U256; +/// BLS verification key, base field and Schnorr verification key +pub use hotshot_stake_table::vec_based::config::QCVerKey; use hotshot_types::light_client::{CircuitField, LightClientState, PublicInput, StateVerKey}; use jf_plonk::{ errors::PlonkError, @@ -12,9 +14,6 @@ use jf_plonk::{ transcript::SolidityTranscript, }; use jf_signature::schnorr::Signature; - -/// BLS verification key, base field and Schnorr verification key -pub use hotshot_stake_table::vec_based::config::QCVerKey; /// Proving key pub type ProvingKey = jf_plonk::proof_system::structs::ProvingKey; /// Verifying key @@ -96,11 +95,6 @@ where #[cfg(test)] mod tests { - use super::{generate_state_update_proof, preprocess, CircuitField, UniversalSrs}; - use crate::{ - circuit::build_for_preprocessing, - test_utils::{key_pairs_for_testing, stake_table_for_testing}, - }; use ark_bn254::Bn254; use ark_ec::pairing::Pairing; use ark_ed_on_bn254::EdwardsConfig as Config; @@ -126,6 +120,12 @@ mod tests { }; use jf_utils::test_rng; + use super::{generate_state_update_proof, preprocess, CircuitField, UniversalSrs}; + use crate::{ + circuit::build_for_preprocessing, + test_utils::{key_pairs_for_testing, stake_table_for_testing}, + }; + const ST_CAPACITY: usize = 20; // FIXME(Chengyu): see diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 4d063acefc..42eec1ab29 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -59,10 +59,10 @@ derivative = "2.2" derive_more = { workspace = true } dotenvy = { workspace = true } es-version = { workspace = true } +espresso-types = { path = "../types", features = ["testing"] } ethers = { workspace = true } ethers-contract-derive = "2.0.10" futures = { workspace = true } -paste = "1.0" hotshot = { workspace = true } hotshot-contract-adapter = { workspace = true } @@ -89,13 +89,14 @@ jf-vid = { workspace = true } libp2p = { workspace = true } num-traits = "0.2.18" num_enum = "0.7" +paste = { workspace = true } portpicker = { workspace = true } -rand = "0.8.5" +rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } sequencer-utils = { path = "../utils" } serde = { workspace = true } -serde_json = "^1.0.113" +serde_json = { workspace = true } sha2 = "0.10" # TODO temporary, used only for VID, should be set in hotshot snafu = { workspace = true } static_assertions = "1" diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 5cf77ddabc..9f1beb57d7 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1,11 +1,5 @@ -use self::data_source::{HotShotConfigDataSource, PublicHotShotConfig, StateSignatureDataSource}; -use crate::{ - network, - persistence::{ChainConfigPersistence, SequencerPersistence}, - state::{BlockMerkleTree, FeeAccountProof}, - state_signature::StateSigner, - ChainConfig, NamespaceId, Node, NodeState, PubKey, SeqTypes, SequencerContext, Transaction, -}; +use std::pin::Pin; + use anyhow::{bail, Context}; use async_once_cell::Lazy; use async_std::sync::{Arc, RwLock}; @@ -13,7 +7,11 @@ use async_trait::async_trait; use committable::Commitment; use data_source::{CatchupDataSource, SubmitDataSource}; use derivative::Derivative; -use ethers::prelude::{Address, U256}; +use espresso_types::{ + v0::traits::SequencerPersistence, AccountQueryData, 
BlockMerkleTree, ChainConfig, + FeeAccountProof, NodeState, PubKey, Transaction, +}; +use ethers::prelude::Address; use futures::{ future::{BoxFuture, Future, FutureExt}, stream::{BoxStream, Stream}, @@ -27,10 +25,14 @@ use hotshot_types::{ HotShotConfig, }; use jf_merkle_tree::MerkleTreeScheme; -use serde::{Deserialize, Serialize}; -use std::pin::Pin; use vbs::version::StaticVersionType; +use self::data_source::{HotShotConfigDataSource, PublicHotShotConfig, StateSignatureDataSource}; +use crate::{ + network, persistence::ChainConfigPersistence, state_signature::StateSigner, Node, SeqTypes, + SequencerContext, +}; + pub mod data_source; pub mod endpoints; pub mod fs; @@ -40,18 +42,6 @@ mod update; pub use options::Options; -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct AccountQueryData { - pub balance: U256, - pub proof: FeeAccountProof, -} - -impl From<(FeeAccountProof, U256)> for AccountQueryData { - fn from((proof, balance): (FeeAccountProof, U256)) -> Self { - Self { balance, proof } - } -} - pub type BlocksFrontier = ::MembershipProof; type BoxLazy = Pin>>>; @@ -361,25 +351,23 @@ impl, Ver: StaticVersionType + 'static, P: Sequencer #[cfg(any(test, feature = "testing"))] pub mod test_helpers { - use super::*; - use crate::{ - catchup::{mock::MockStateCatchup, StateCatchup}, - genesis::Upgrade, - persistence::{no_storage, PersistenceOptions}, - state::{BlockMerkleTree, ValidatedState}, - testing::{run_test_builder, wait_for_decide_on_handle, TestConfig, TestConfigBuilder}, - }; + use std::{collections::BTreeMap, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; use committable::Committable; use es_version::{SequencerVersion, SEQUENCER_VERSION}; + use espresso_types::{ + mock::MockStateCatchup, + v0::traits::{PersistenceOptions, StateCatchup}, + NamespaceId, Upgrade, ValidatedState, + }; use ethers::{prelude::Address, utils::Anvil}; use futures::{ future::{join_all, FutureExt}, stream::StreamExt, }; use hotshot::types::{Event, EventType}; - use hotshot_contract_adapter::light_client::ParsedLightClientState; use hotshot_types::{ event::LeafInfo, @@ -388,11 +376,16 @@ pub mod test_helpers { use itertools::izip; use jf_merkle_tree::{MerkleCommitment, MerkleTreeScheme}; use portpicker::pick_unused_port; - use std::{collections::BTreeMap, time::Duration}; use surf_disco::Client; use tide_disco::error::ServerError; use vbs::version::Version; + use super::*; + use crate::{ + persistence::no_storage, + testing::{run_test_builder, wait_for_decide_on_handle, TestConfig, TestConfigBuilder}, + }; + pub const STAKE_TABLE_CAPACITY_FOR_TEST: u64 = 10; pub struct TestNetwork { @@ -673,7 +666,7 @@ pub mod test_helpers { setup_logging(); setup_backtrace(); - let txn = Transaction::new(NamespaceId::from(1), vec![1, 2, 3, 4]); + let txn = Transaction::new(NamespaceId::from(1_u32), vec![1, 2, 3, 4]); let port = pick_unused_port().expect("No ports free"); @@ -780,7 +773,7 @@ pub mod test_helpers { { if leaf_chain .iter() - .any(|LeafInfo { leaf, .. }| leaf.block_header().height > 2) + .any(|LeafInfo { leaf, .. 
}| leaf.block_header().height() > 2) { break; } @@ -844,18 +837,12 @@ pub mod test_helpers { #[cfg(test)] #[espresso_macros::generic_tests] mod api_tests { - use self::options::HotshotEvents; - - use super::*; - use crate::{ - testing::{wait_for_decide_on_handle, TestConfigBuilder}, - Header, NamespaceId, - }; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use committable::Committable; use data_source::testing::TestableSequencerDataSource; use endpoints::NamespaceProofQueryData; use es_version::SequencerVersion; + use espresso_types::{Header, NamespaceId}; use ethers::utils::Anvil; use futures::stream::StreamExt; use hotshot_query_service::availability::{LeafQueryData, VidCommonQueryData}; @@ -867,6 +854,10 @@ mod api_tests { }; use tide_disco::error::ServerError; + use self::options::HotshotEvents; + use super::*; + use crate::testing::{wait_for_decide_on_handle, TestConfigBuilder}; + #[async_std::test] pub(crate) async fn submit_test_with_query_module() { let storage = D::create_storage().await; @@ -891,7 +882,7 @@ mod api_tests { setup_backtrace(); // Arbitrary transaction, arbitrary namespace ID - let ns_id = NamespaceId::from(42); + let ns_id = NamespaceId::from(42_u32); let txn = Transaction::new(ns_id, vec![1, 2, 3, 4]); // Start query service. @@ -959,14 +950,14 @@ mod api_tests { ns_proof .verify( - &header.ns_table, - &header.payload_commitment, + header.ns_table(), + &header.payload_commitment(), vid_common.common(), ) .unwrap(); } else { // Namespace proof should be present if ns_id exists in ns_table - assert!(header.ns_table.find_ns_id(&ns_id).is_none()); + assert!(header.ns_table().find_ns_id(&ns_id).is_none()); assert!(ns_query_res.transactions.is_empty()); } @@ -1051,25 +1042,20 @@ mod api_tests { #[cfg(test)] mod test { - use self::{ - data_source::testing::TestableSequencerDataSource, sql::DataSource as SqlDataSource, - }; - use super::*; - use crate::{ - catchup::{mock::MockStateCatchup, StatePeers}, - genesis::{Upgrade, UpgradeType}, - persistence::no_storage, - state::{FeeAccount, FeeAmount, ValidatedState}, - testing::{TestConfig, TestConfigBuilder}, - Header, - }; + use std::time::Duration; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; use committable::{Commitment, Committable}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; + use espresso_types::{ + mock::MockStateCatchup, FeeAccount, FeeAmount, Header, Upgrade, UpgradeType, ValidatedState, + }; use ethers::utils::Anvil; - use futures::future::{self, join_all}; - use futures::stream::{StreamExt, TryStreamExt}; + use futures::{ + future::{self, join_all}, + stream::{StreamExt, TryStreamExt}, + }; use hotshot::types::EventType; use hotshot_query_service::{ availability::{BlockQueryData, LeafQueryData}, @@ -1081,7 +1067,6 @@ mod test { }; use jf_merkle_tree::prelude::{MerkleProof, Sha3Node}; use portpicker::pick_unused_port; - use std::time::Duration; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, submit_test_helper, @@ -1090,6 +1075,16 @@ mod test { use tide_disco::{app::AppHealth, error::ServerError, healthcheck::HealthStatus}; use vbs::version::Version; + use self::{ + data_source::testing::TestableSequencerDataSource, sql::DataSource as SqlDataSource, + }; + use super::*; + use crate::{ + catchup::StatePeers, + persistence::no_storage, + testing::{TestConfig, TestConfigBuilder}, + }; + #[async_std::test] async fn test_healthcheck() { setup_logging(); diff 
--git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index 4bc71a2cc3..8b374b7521 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -1,17 +1,12 @@ use std::{num::NonZeroUsize, time::Duration}; -use super::{ - fs, - options::{Options, Query}, - sql, AccountQueryData, BlocksFrontier, -}; -use crate::{ - persistence::{self, SequencerPersistence}, - ChainConfig, PubKey, SeqTypes, Transaction, -}; use anyhow::bail; use async_trait::async_trait; use committable::Commitment; +use espresso_types::{ + v0::traits::{PersistenceOptions, SequencerPersistence}, + ChainConfig, PubKey, Transaction, +}; use ethers::prelude::Address; use futures::future::Future; use hotshot_query_service::{ @@ -25,13 +20,22 @@ use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, traits::network::ConnectedNetwork, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; - use serde::Serialize; use tide_disco::Url; use vbs::version::StaticVersionType; use vec1::Vec1; -pub trait DataSourceOptions: persistence::PersistenceOptions { +use super::{ + fs, + options::{Options, Query}, + sql, AccountQueryData, BlocksFrontier, +}; +use crate::{ + persistence::{self}, + SeqTypes, +}; + +pub trait DataSourceOptions: PersistenceOptions { type DataSource: SequencerDataSource; fn enable_query_module(&self, opt: Options, query: Query) -> Options; @@ -297,8 +301,7 @@ impl From> for PublicHotShotConfig { #[cfg(test)] pub(crate) mod testing { - use super::super::Options; - use super::*; + use super::{super::Options, *}; #[async_trait] pub(crate) trait TestableSequencerDataSource: SequencerDataSource { diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index 5c28f56f12..1f8161c79a 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -1,24 +1,14 @@ //! Sequencer-specific API endpoint handlers. 
-use serde::de::Error as _; use std::{ collections::{BTreeSet, HashMap}, env, }; -use super::{ - data_source::{ - CatchupDataSource, HotShotConfigDataSource, SequencerDataSource, StateSignatureDataSource, - SubmitDataSource, - }, - StorageState, -}; -use crate::{ - block::NsProof, persistence::SequencerPersistence, NamespaceId, PubKey, SeqTypes, Transaction, -}; use anyhow::Result; use async_std::sync::{Arc, RwLock}; use committable::Committable; +use espresso_types::{NamespaceId, NsProof, PubKey, Transaction}; use futures::{try_join, FutureExt}; use hotshot_query_service::{ availability::{self, AvailabilityDataSource, CustomSnafu, FetchBlockSnafu}, @@ -33,16 +23,24 @@ use hotshot_types::{ data::ViewNumber, traits::{network::ConnectedNetwork, node_implementation::ConsensusTime}, }; -use serde::{Deserialize, Serialize}; +use serde::{de::Error as _, Deserialize, Serialize}; use snafu::OptionExt; use tagged_base64::TaggedBase64; use tide_disco::{ method::{ReadState, WriteState}, Api, Error as _, StatusCode, }; - use vbs::version::StaticVersionType; +use super::{ + data_source::{ + CatchupDataSource, HotShotConfigDataSource, SequencerDataSource, StateSignatureDataSource, + SubmitDataSource, + }, + StorageState, +}; +use crate::{SeqTypes, SequencerPersistence}; + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct NamespaceProofQueryData { pub proof: Option, diff --git a/sequencer/src/api/fs.rs b/sequencer/src/api/fs.rs index ce184ba650..542eeb7fe6 100644 --- a/sequencer/src/api/fs.rs +++ b/sequencer/src/api/fs.rs @@ -1,8 +1,10 @@ -use super::data_source::{CatchupDataSource, Provider, SequencerDataSource}; -use crate::{persistence::fs::Options, SeqTypes}; +use std::path::Path; + use async_trait::async_trait; use hotshot_query_service::data_source::FileSystemDataSource; -use std::path::Path; + +use super::data_source::{CatchupDataSource, Provider, SequencerDataSource}; +use crate::{persistence::fs::Options, SeqTypes}; pub type DataSource = FileSystemDataSource; @@ -28,9 +30,10 @@ impl CatchupDataSource for DataSource {} #[cfg(test)] mod impl_testable_data_source { + use tempfile::TempDir; + use super::*; use crate::api::{self, data_source::testing::TestableSequencerDataSource}; - use tempfile::TempDir; #[async_trait] impl TestableSequencerDataSource for DataSource { @@ -52,9 +55,7 @@ mod impl_testable_data_source { #[cfg(test)] mod generic_tests { - use super::super::api_tests; - use super::DataSource; - + use super::{super::api_tests, DataSource}; // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index f74547555b..68039cd8e6 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -1,27 +1,14 @@ //! Sequencer-specific API options and initialization. 
-use super::{ - data_source::{ - provider, CatchupDataSource, HotShotConfigDataSource, SequencerDataSource, - StateSignatureDataSource, SubmitDataSource, - }, - endpoints, fs, sql, - update::update_loop, - ApiState, StorageState, -}; -use crate::{ - context::{SequencerContext, TaskList}, - persistence::{self, SequencerPersistence}, - state::{update_state_storage_loop, BlockMerkleTree, FeeMerkleTree}, - PubKey, -}; use anyhow::bail; use async_std::sync::{Arc, RwLock}; use clap::Parser; +use espresso_types::{v0::traits::SequencerPersistence, BlockMerkleTree, FeeMerkleTree, PubKey}; use futures::{ channel::oneshot, future::{BoxFuture, Future, FutureExt}, }; +use hotshot_events_service::events::Error as EventStreamingError; use hotshot_query_service::{ data_source::{ExtensibleDataSource, MetricsDataSource}, status::{self, UpdateStatusData}, @@ -38,7 +25,20 @@ use tide_disco::{ }; use vbs::version::StaticVersionType; -use hotshot_events_service::events::Error as EventStreamingError; +use super::{ + data_source::{ + provider, CatchupDataSource, HotShotConfigDataSource, SequencerDataSource, + StateSignatureDataSource, SubmitDataSource, + }, + endpoints, fs, sql, + update::update_loop, + ApiState, StorageState, +}; +use crate::{ + context::{SequencerContext, TaskList}, + persistence, + state::update_state_storage_loop, +}; #[derive(Clone, Debug)] pub struct Options { diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 6b9afac5f7..7aee88ba5e 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -1,18 +1,7 @@ -use super::{ - data_source::{CatchupDataSource, Provider, SequencerDataSource}, - AccountQueryData, BlocksFrontier, -}; -use crate::{ - persistence::{ - sql::{sql_param, transaction, Options}, - ChainConfigPersistence, - }, - state::{BlockMerkleTree, FeeAccountProof, FeeMerkleTree}, - ChainConfig, SeqTypes, -}; use anyhow::{bail, Context}; use async_trait::async_trait; use committable::Commitment; +use espresso_types::{BlockMerkleTree, ChainConfig, FeeAccountProof, FeeMerkleTree}; use ethers::prelude::Address; use futures::FutureExt; use hotshot_query_service::{ @@ -26,6 +15,18 @@ use hotshot_query_service::{ use hotshot_types::data::ViewNumber; use jf_merkle_tree::{prelude::MerkleNode, MerkleTreeScheme}; +use super::{ + data_source::{CatchupDataSource, Provider, SequencerDataSource}, + AccountQueryData, BlocksFrontier, +}; +use crate::{ + persistence::{ + sql::{sql_param, transaction, Options}, + ChainConfigPersistence, + }, + SeqTypes, +}; + pub type DataSource = SqlDataSource; #[async_trait] @@ -183,11 +184,11 @@ impl ChainConfigPersistence for DataSource { #[cfg(test)] mod impl_testable_data_source { + use hotshot_query_service::data_source::storage::sql::testing::TmpDb; + use super::*; use crate::api::{self, data_source::testing::TestableSequencerDataSource}; - use hotshot_query_service::data_source::storage::sql::testing::TmpDb; - fn tmp_options(db: &TmpDb) -> Options { Options { port: Some(db.port()), @@ -218,9 +219,7 @@ mod impl_testable_data_source { #[cfg(test)] mod generic_tests { - use super::super::api_tests; - use super::DataSource; - + use super::{super::api_tests, DataSource}; // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; diff --git a/sequencer/src/api/update.rs b/sequencer/src/api/update.rs index a1e3936ca3..4efb4eba90 100644 --- a/sequencer/src/api/update.rs +++ b/sequencer/src/api/update.rs @@ -1,14 +1,16 @@ //! Update loop for query API state. 
-use super::{data_source::SequencerDataSource, StorageState}; -use crate::{persistence::SequencerPersistence, PubKey, SeqTypes}; use async_std::sync::{Arc, RwLock}; +use espresso_types::{v0::traits::SequencerPersistence, PubKey}; use futures::stream::{Stream, StreamExt}; use hotshot::types::Event; use hotshot_query_service::data_source::{UpdateDataSource, VersionedDataSource}; use hotshot_types::traits::network::ConnectedNetwork; use vbs::version::StaticVersionType; +use super::{data_source::SequencerDataSource, StorageState}; +use crate::SeqTypes; + pub(super) async fn update_loop( state: Arc>>, mut events: impl Stream> + Unpin, diff --git a/sequencer/src/bin/cdn-broker.rs b/sequencer/src/bin/cdn-broker.rs index 0e6266261d..a2a3500d6c 100644 --- a/sequencer/src/bin/cdn-broker.rs +++ b/sequencer/src/bin/cdn-broker.rs @@ -1,14 +1,14 @@ //! The following is the main `Broker` binary, which just instantiates and runs //! a `Broker` object. use anyhow::{Context, Result}; -use cdn_broker::reexports::crypto::signature::KeyPair; -use cdn_broker::{Broker, Config}; +use cdn_broker::{reexports::crypto::signature::KeyPair, Broker, Config}; use clap::Parser; -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; -use sequencer::network::cdn::{ProductionDef, WrappedSignatureKey}; -use sequencer::options::parse_size; -use sequencer::SeqTypes; +use espresso_types::SeqTypes; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; +use sequencer::{ + network::cdn::{ProductionDef, WrappedSignatureKey}, + options::parse_size, +}; use sha2::Digest; use tracing_subscriber::EnvFilter; diff --git a/sequencer/src/bin/cdn-marshal.rs b/sequencer/src/bin/cdn-marshal.rs index 7b15f23789..445c9b6654 100644 --- a/sequencer/src/bin/cdn-marshal.rs +++ b/sequencer/src/bin/cdn-marshal.rs @@ -4,7 +4,8 @@ use anyhow::{Context, Result}; use cdn_marshal::{Config, Marshal}; use clap::Parser; -use sequencer::{network::cdn::ProductionDef, options::parse_size, SeqTypes}; +use espresso_types::SeqTypes; +use sequencer::{network::cdn::ProductionDef, options::parse_size}; use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] diff --git a/sequencer/src/bin/cdn-whitelist.rs b/sequencer/src/bin/cdn-whitelist.rs index 4281a0ee6b..44fd97ff3c 100644 --- a/sequencer/src/bin/cdn-whitelist.rs +++ b/sequencer/src/bin/cdn-whitelist.rs @@ -7,12 +7,12 @@ use std::{str::FromStr, sync::Arc}; use anyhow::{Context, Result}; use cdn_broker::reexports::discovery::{DiscoveryClient, Embedded, Redis}; use clap::Parser; +use espresso_types::SeqTypes; use hotshot_orchestrator::{ client::{OrchestratorClient, ValidatorArgs}, config::NetworkConfig, }; use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; -use sequencer::SeqTypes; use surf_disco::Url; #[derive(Parser, Debug)] diff --git a/sequencer/src/bin/commitment-task.rs b/sequencer/src/bin/commitment-task.rs index 78f0a14506..1e29526c04 100644 --- a/sequencer/src/bin/commitment-task.rs +++ b/sequencer/src/bin/commitment-task.rs @@ -1,15 +1,16 @@ +use std::{io, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::spawn; use clap::Parser; use es_version::SEQUENCER_VERSION; use ethers::prelude::*; use futures::FutureExt; -use sequencer::hotshot_commitment::{run_hotshot_commitment_task, CommitmentTaskOptions}; -use sequencer::options::parse_duration; -use std::io; -use std::time::Duration; -use 
tide_disco::error::ServerError; -use tide_disco::Api; +use sequencer::{ + hotshot_commitment::{run_hotshot_commitment_task, CommitmentTaskOptions}, + options::parse_duration, +}; +use tide_disco::{error::ServerError, Api}; use url::Url; use vbs::version::StaticVersionType; @@ -114,9 +115,7 @@ mod test { use portpicker::pick_unused_port; use surf_disco::Client; - use super::start_http_server; - use super::Address; - use super::ServerError; + use super::{start_http_server, Address, ServerError}; #[async_std::test] async fn test_get_hotshot_contract() { diff --git a/sequencer/src/bin/count-transactions.rs b/sequencer/src/bin/count-transactions.rs index 2a33452b23..6d028bf374 100644 --- a/sequencer/src/bin/count-transactions.rs +++ b/sequencer/src/bin/count-transactions.rs @@ -1,16 +1,15 @@ //! Utility program to count transactions sequenced by HotShot. +use std::{cmp::max, collections::HashSet, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; use clap::Parser; use committable::Committable; use es_version::SequencerVersion; +use espresso_types::SeqTypes; use futures::future::join_all; use hotshot_query_service::availability::BlockQueryData; -use sequencer::SeqTypes; -use std::cmp::max; -use std::collections::HashSet; -use std::time::Duration; use surf_disco::Url; /// Utility program to count transactions sequenced by HotShot. diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 7a508c1322..4f64ada923 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -1,10 +1,11 @@ +use std::{fs::File, io::stdout, path::PathBuf}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use futures::FutureExt; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover::service::light_client_genesis; use sequencer_utils::deployer::{deploy, ContractGroup, Contracts, DeployedContracts}; -use std::{fs::File, io::stdout, path::PathBuf}; use url::Url; /// Deploy contracts needed to run the sequencer. 
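Every binary diff in this stretch is the same mechanical migration: `SeqTypes`, `PubKey`, and the other shared types now live in the new `espresso-types` crate, so each `use sequencer::...` is repointed to `use espresso_types::...`. The series does the cut-over in one shot; a gentler staging, hypothetical and not what this patch does, would keep the old paths alive with re-exports while downstream code migrates:

```rust
// Hypothetical shim in sequencer/src/lib.rs, NOT part of this patch: keep the
// old `sequencer::SeqTypes` path compiling by re-exporting from the new crate.
pub use espresso_types::{Header, PubKey, SeqTypes};
```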
diff --git a/sequencer/src/bin/dev-cdn.rs b/sequencer/src/bin/dev-cdn.rs index 064142d713..b828a9a9ba 100644 --- a/sequencer/src/bin/dev-cdn.rs +++ b/sequencer/src/bin/dev-cdn.rs @@ -5,18 +5,14 @@ use std::path::Path; use anyhow::Result; use async_compatibility_layer::art::async_spawn; -use cdn_broker::reexports::crypto::signature::KeyPair; -use cdn_broker::{Broker, Config as BrokerConfig}; +use cdn_broker::{reexports::crypto::signature::KeyPair, Broker, Config as BrokerConfig}; use cdn_marshal::{Config as MarshalConfig, Marshal}; use clap::Parser; - -use hotshot_types::traits::node_implementation::NodeType; -use hotshot_types::traits::signature_key::SignatureKey; +use espresso_types::SeqTypes; +use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use portpicker::pick_unused_port; -use rand::rngs::StdRng; -use rand::{RngCore, SeedableRng}; +use rand::{rngs::StdRng, RngCore, SeedableRng}; use sequencer::network::cdn::{TestingDef, WrappedSignatureKey}; -use sequencer::SeqTypes; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 63045b7b45..22546214b0 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -1,9 +1,12 @@ +use std::time::Duration; + use anyhow::{bail, ensure, Context}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{sync::Arc, task::sleep}; use clap::Parser; use contract_bindings::fee_contract::FeeContract; use es_version::SequencerVersion; +use espresso_types::{eth_signature_key::EthKeyPair, FeeAccount, FeeAmount, FeeMerkleTree, Header}; use ethers::{ middleware::{Middleware, SignerMiddleware}, providers::Provider, @@ -14,12 +17,6 @@ use jf_merkle_tree::{ prelude::{MerkleProof, Sha3Node}, MerkleTreeScheme, }; -use sequencer::{ - eth_signature_key::EthKeyPair, - state::{FeeAccount, FeeAmount, FeeMerkleTree}, - Header, -}; -use std::time::Duration; use surf_disco::{error::ClientError, Url}; type EspressoClient = surf_disco::Client; @@ -204,15 +201,15 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { continue; } }; - let Some(l1_finalized) = header.l1_finalized else { + let Some(l1_finalized) = header.l1_finalized() else { continue; }; if l1_finalized.number >= l1_block { - tracing::info!(block = header.height, "deposit finalized on Espresso"); - break header.height; + tracing::info!(block = header.height(), "deposit finalized on Espresso"); + break header.height(); } else { tracing::debug!( - block = header.height, + block = header.height(), l1_block, ?l1_finalized, "waiting for deposit on Espresso" diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 06e8652d0c..f2e5cb3619 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -206,6 +206,7 @@ mod tests { use contract_bindings::light_client::LightClient; use es_version::SequencerVersion; use escargot::CargoBuild; + use espresso_types::{BlockMerkleTree, Header, SeqTypes, Transaction}; use ethers::types::{Address, U256}; use futures::TryStreamExt; use hotshot_query_service::{ @@ -214,10 +215,7 @@ mod tests { }; use jf_merkle_tree::MerkleTreeScheme; use portpicker::pick_unused_port; - use sequencer::{ - api::endpoints::NamespaceProofQueryData, state::BlockMerkleTree, Header, SeqTypes, - Transaction, - }; + use sequencer::api::endpoints::NamespaceProofQueryData; use sequencer_utils::{init_signer, 
AnvilOptions}; use surf_disco::Client; use tide_disco::error::ServerError; @@ -303,7 +301,7 @@ mod tests { assert!(!builder_address.is_empty()); - let tx = Transaction::new(100.into(), vec![1, 2, 3]); + let tx = Transaction::new(100_u32.into(), vec![1, 2, 3]); let hash: Commitment = api_client .post("submit/submit") diff --git a/sequencer/src/bin/keygen.rs b/sequencer/src/bin/keygen.rs index 2ef1892a16..0274db37e0 100644 --- a/sequencer/src/bin/keygen.rs +++ b/sequencer/src/bin/keygen.rs @@ -1,19 +1,19 @@ //! Utility program to generate keypairs -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use std::{ + fs::{self, File}, + io::Write, + path::PathBuf, +}; use anyhow::anyhow; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::{Parser, ValueEnum}; use derive_more::Display; use ethers::utils::hex; use hotshot::types::SignatureKey; use hotshot_types::{light_client::StateKeyPair, signature_key::BLSPubKey}; use rand::{RngCore, SeedableRng}; -use std::{ - fs::{self, File}, - io::Write, - path::PathBuf, -}; use tracing::info_span; #[derive(Clone, Copy, Debug, Display, Default, ValueEnum)] diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index f752ba18db..489ffb23b4 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -12,6 +12,15 @@ //! provides a healthcheck endpoint as well as a prometheus endpoint which provides metrics like the //! count of various types of actions performed and the number of open streams. +use std::{ + borrow::Cow, + collections::{BTreeMap, HashMap}, + fmt::Debug, + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; + use anyhow::{bail, ensure, Context}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{ @@ -22,6 +31,7 @@ use clap::Parser; use committable::Committable; use derivative::Derivative; use es_version::{SequencerVersion, SEQUENCER_VERSION}; +use espresso_types::{BlockMerkleTree, FeeMerkleTree, Header, SeqTypes}; use futures::{ future::{FutureExt, TryFuture, TryFutureExt}, stream::{Peekable, StreamExt}, @@ -36,21 +46,8 @@ use jf_merkle_tree::{ ForgetableMerkleTreeScheme, MerkleCommitment, MerkleTreeScheme, UniversalMerkleTreeScheme, }; use rand::{seq::SliceRandom, RngCore}; -use sequencer::{ - api::endpoints::NamespaceProofQueryData, - options::parse_duration, - state::{BlockMerkleTree, FeeMerkleTree}, - Header, SeqTypes, -}; +use sequencer::{api::endpoints::NamespaceProofQueryData, options::parse_duration}; use serde::de::DeserializeOwned; -use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap}, - fmt::Debug, - pin::Pin, - sync::Arc, - time::{Duration, Instant}, -}; use strum::{EnumDiscriminants, VariantArray}; use surf_disco::{error::ClientError, socket, Error, StatusCode, Url}; use tide_disco::{error::ServerError, App}; @@ -382,7 +379,7 @@ impl Queryable for Header { } fn payload_hash(&self) -> String { - self.payload_commitment.to_string() + self.payload_commitment().to_string() } } @@ -791,42 +788,43 @@ impl ResourceManager
<Header> {
         // Sanity check the window: prev and next should be correct bookends.
         if let Some(prev) = &window.prev {
             ensure!(
-                prev.timestamp < start,
-                format!("prev header {} is later than {start}", prev.height)
+                prev.timestamp() < start,
+                format!("prev header {} is later than {start}", prev.height())
             );
         }
         if let Some(next) = &window.next {
             ensure!(
-                next.timestamp >= end,
-                format!("next header {} is earlier than {end}", next.height)
+                next.timestamp() >= end,
+                format!("next header {} is earlier than {end}", next.height())
             );
         }
 
         // Each header in the window proper should have an appropriate timestamp.
         let mut prev = window.prev;
         for header in window.window {
             ensure!(
-                header.timestamp >= start && header.timestamp < end,
+                header.timestamp() >= start && header.timestamp() < end,
                 format!(
                     "header {} with timestamp {} is not in window [{start}, {end})",
-                    header.height, header.timestamp
+                    header.height(),
+                    header.timestamp()
                 )
             );
 
             if let Some(prev) = prev {
                 ensure!(
-                    prev.height + 1 == header.height,
+                    prev.height() + 1 == header.height(),
                     format!(
                         "headers in window from {start} to {end} are not consecutive (prev = {}, curr = {})",
-                        prev.height,
-                        header.height,
+                        prev.height(),
+                        header.height(),
                     ),
                 );
                 ensure!(
-                    prev.timestamp <= header.timestamp,
+                    prev.timestamp() <= header.timestamp(),
                     format!(
                         "headers in window from {start} to {end} have decreasing timestamps (prev = {}, curr = {})",
-                        prev.timestamp,
-                        header.timestamp,
+                        prev.timestamp(),
+                        header.timestamp(),
                     ),
                 );
             }
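The hunk above only turns field reads into accessor calls, but the logic it touches is a dense invariant check: a window query over [start, end) must return exactly the headers whose timestamps land in that half-open range, bookended by a `prev` strictly before the window and a `next` at or past its end, with consecutive heights and non-decreasing timestamps inside it. A self-contained toy version of the predicate, with headers reduced to (height, timestamp) pairs:

```rust
// Toy model of the window invariant checked above; the real code works on
// full headers, this uses (height, timestamp) pairs.
fn window_ok(
    prev: Option<(u64, u64)>,
    window: &[(u64, u64)],
    next: Option<(u64, u64)>,
    start: u64,
    end: u64,
) -> bool {
    // `prev` must be strictly before the window, `next` at or after its end.
    if prev.is_some_and(|(_, ts)| ts >= start) {
        return false;
    }
    if next.is_some_and(|(_, ts)| ts < end) {
        return false;
    }
    // Heights are consecutive, timestamps non-decreasing, and every
    // timestamp falls in the half-open range [start, end).
    window.windows(2).all(|w| w[0].0 + 1 == w[1].0 && w[0].1 <= w[1].1)
        && window.iter().all(|&(_, ts)| (start..end).contains(&ts))
}

fn main() {
    // Window [10, 20): timestamps 10, 12, 19 belong; bookends 5 and 20 do not.
    assert!(window_ok(Some((1, 5)), &[(2, 10), (3, 12), (4, 19)], Some((5, 20)), 10, 20));
    // A timestamp of 20 is outside the half-open window.
    assert!(!window_ok(Some((1, 5)), &[(2, 10), (3, 20)], None, 10, 20));
}
```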
@@ -877,11 +875,15 @@ impl ResourceManager<Header> {
         // Check that the proof proves inclusion of `index_header` at position `index` relative to
         // `block_header`.
-        BlockMerkleTree::verify(block_header.block_merkle_tree_root.digest(), index, &proof)
-            .context("malformed merkle proof")?
-            .or_else(|_| bail!("invalid merkle proof"))?;
+        BlockMerkleTree::verify(
+            block_header.block_merkle_tree_root().digest(),
+            index,
+            &proof,
+        )
+        .context("malformed merkle proof")?
+        .or_else(|_| bail!("invalid merkle proof"))?;
         ensure!(
-            proof.elem() == Some(&index_header.commit()),
+            proof.elem() == Some(index_header.commit().as_ref()),
             "merkle proof is for wrong element: {:?} != {:?}",
             proof.elem(),
             index_header.commit()
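One detail worth noting in the hunk above: `BlockMerkleTree::verify` evidently returns a nested result, an outer `Err` for a structurally malformed proof and an inner `Err` for a well-formed proof that fails verification, which is why the call needs both `.context(..)?` and `.or_else(|_| bail!(..))?`. A toy model of that error-handling shape (the nested-`Result` signature is inferred from this call site, not taken from the jf_merkle_tree API docs):

```rust
use anyhow::{bail, Context};

// Stand-in with the inferred shape: Err(_) = malformed input,
// Ok(Err(())) = proof fails to verify, Ok(Ok(())) = proof verifies.
fn verify(proof: &[u8]) -> Result<Result<(), ()>, std::io::Error> {
    if proof.is_empty() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            "empty proof",
        ));
    }
    Ok(if proof[0] == 1 { Ok(()) } else { Err(()) })
}

fn check(proof: &[u8]) -> anyhow::Result<()> {
    verify(proof)
        .context("malformed merkle proof")? // outer error: malformed
        .or_else(|_| bail!("invalid merkle proof"))?; // inner error: invalid
    Ok(())
}

fn main() {
    assert!(check(&[]).is_err()); // malformed: outer error
    assert!(check(&[0]).is_err()); // well-formed but invalid: inner error
    assert!(check(&[1]).is_ok()); // valid
}
```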
@@ -895,12 +897,12 @@ impl ResourceManager<Header> {
                     "get block proof by state commitment",
                     block,
                     index,
-                    commitment = %block_header.block_merkle_tree_root,
+                    commitment = %block_header.block_merkle_tree_root(),
                 ),
                 || async {
                     self.get::<<BlockMerkleTree as MerkleTreeScheme>::MembershipProof>(format!(
                         "block-state/commit/{}/{index}",
-                        block_header.block_merkle_tree_root,
+                        block_header.block_merkle_tree_root(),
                     ))
                     .await
                 },
@@ -927,7 +929,7 @@ impl ResourceManager<Header> {
             .await
         })
         .await?;
-        let builder_address = builder_header.fee_info.account();
+        let builder_address = builder_header.fee_info().account();
 
         // Get the header of the state snapshot we're going to query so we can later verify our
         // results.
@@ -954,7 +956,7 @@ impl ResourceManager<Header> {
         // Check that the proof is valid relative to `builder_header`.
         if proof.elem().is_some() {
             FeeMerkleTree::verify(
-                block_header.fee_merkle_tree_root.digest(),
+                block_header.fee_merkle_tree_root().digest(),
                 builder_address,
                 &proof,
             )
@@ -962,7 +964,7 @@ impl ResourceManager<Header> {
             .or_else(|_| bail!("invalid membership proof"))?;
         } else {
             ensure!(
-                FeeMerkleTree::from_commitment(block_header.fee_merkle_tree_root)
+                FeeMerkleTree::from_commitment(block_header.fee_merkle_tree_root())
                     .non_membership_verify(builder_address, &proof)
                     .context("malformed non-membership proof")?,
                 "invalid non-membership proof"
             );
         }
@@ -977,12 +979,12 @@ impl ResourceManager<Header>
{ "get account proof by state commitment", block, %builder_address, - commitment = %block_header.fee_merkle_tree_root, + commitment = %block_header.fee_merkle_tree_root(), ), || async { self.get::<::MembershipProof>(format!( "fee-state/commit/{}/{builder_address}", - block_header.fee_merkle_tree_root, + block_header.fee_merkle_tree_root(), )) .await }, @@ -1011,13 +1013,17 @@ impl ResourceManager> { self.get(format!("availability/header/{block}")).await }) .await?; - let num_namespaces = header.ns_table.iter().count(); + let num_namespaces = header.ns_table().iter().count(); if num_namespaces == 0 { tracing::info!("not fetching namespace because block {block} is empty"); return Ok(()); } - let ns_index = header.ns_table.iter().nth(index % num_namespaces).unwrap(); - let ns = header.ns_table.read_ns_id(&ns_index).unwrap(); + let ns_index = header + .ns_table() + .iter() + .nth(index % num_namespaces) + .unwrap(); + let ns = header.ns_table().read_ns_id(&ns_index).unwrap(); let ns_proof: NamespaceProofQueryData = self .retry(info_span!("fetch namespace", %ns), || async { @@ -1041,8 +1047,8 @@ impl ResourceManager> { .proof .unwrap() .verify( - &header.ns_table, - &header.payload_commitment, + header.ns_table(), + &header.payload_commitment(), vid_common.common() ) .is_some(), diff --git a/sequencer/src/bin/orchestrator.rs b/sequencer/src/bin/orchestrator.rs index 143645f65e..935c924373 100644 --- a/sequencer/src/bin/orchestrator.rs +++ b/sequencer/src/bin/orchestrator.rs @@ -1,16 +1,16 @@ +use std::{num::NonZeroUsize, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use clap::Parser; use derive_more::From; +use espresso_types::PubKey; use ethers::utils::hex::{self, FromHexError}; -use hotshot_orchestrator::config::Libp2pConfig; -use hotshot_orchestrator::{config::NetworkConfig, run_orchestrator}; -use sequencer::{ - options::{parse_duration, Ratio}, - PubKey, +use hotshot_orchestrator::{ + config::{Libp2pConfig, NetworkConfig}, + run_orchestrator, }; +use sequencer::options::{parse_duration, Ratio}; use snafu::Snafu; -use std::num::NonZeroUsize; -use std::time::Duration; use url::Url; use vec1::Vec1; diff --git a/sequencer/src/bin/pub-key.rs b/sequencer/src/bin/pub-key.rs index 16ab6584e5..a0e686b497 100644 --- a/sequencer/src/bin/pub-key.rs +++ b/sequencer/src/bin/pub-key.rs @@ -1,12 +1,13 @@ +use std::str::FromStr; + use anyhow::bail; use clap::Parser; +use espresso_types::{PrivKey, PubKey}; use hotshot::{traits::implementations::derive_libp2p_peer_id, types::BLSPubKey}; use hotshot_types::{ light_client::{StateKeyPair, StateSignKey}, traits::signature_key::SignatureKey, }; -use sequencer::{PrivKey, PubKey}; -use std::str::FromStr; #[derive(Clone, Debug)] enum PrivateKey { diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index ba2941d048..36e5c0a99e 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -1,10 +1,16 @@ //! Utility program to submit random transactions to an Espresso Sequencer. 
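Back in the nasty-client changes above, the namespace check is where the accessor migration meets real verification logic: the fetched proof is checked against the header's namespace table, its payload commitment, and the VID common data. A condensed sketch of that step, mirroring the call from the diff (the field and method names come from the patch itself; the exact signatures are approximations):

```rust
use anyhow::{ensure, Context};
use espresso_types::{Header, SeqTypes};
use hotshot_query_service::availability::VidCommonQueryData;
use sequencer::api::endpoints::NamespaceProofQueryData;

// Verify a fetched namespace proof the way the nasty client does above;
// errors both when the proof is missing and when it fails verification.
fn verify_ns_proof(
    header: &Header,
    ns_proof: NamespaceProofQueryData,
    vid_common: &VidCommonQueryData<SeqTypes>,
) -> anyhow::Result<()> {
    let proof = ns_proof.proof.context("proof missing")?;
    // `verify` yields Some(..) on success and None on failure.
    ensure!(
        proof
            .verify(
                header.ns_table(),
                &header.payload_commitment(),
                vid_common.common(),
            )
            .is_some(),
        "invalid namespace proof"
    );
    Ok(())
}
```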
+use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::{sleep, spawn}; use clap::Parser; use committable::{Commitment, Committable}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; +use espresso_types::{SeqTypes, Transaction}; use futures::{ channel::mpsc::{self, Sender}, sink::SinkExt, @@ -14,14 +20,7 @@ use hotshot_query_service::{availability::BlockQueryData, types::HeightIndexed, use rand::{Rng, RngCore, SeedableRng}; use rand_chacha::ChaChaRng; use rand_distr::Distribution; -use sequencer::{ - options::{parse_duration, parse_size}, - SeqTypes, Transaction, -}; -use std::{ - collections::HashMap, - time::{Duration, Instant}, -}; +use sequencer::options::{parse_duration, parse_size}; use surf_disco::{Client, Url}; use tide_disco::{error::ServerError, App}; use vbs::version::StaticVersionType; diff --git a/sequencer/src/bin/verify-headers.rs b/sequencer/src/bin/verify-headers.rs index 4385ed9a58..d0a8a7aa29 100644 --- a/sequencer/src/bin/verify-headers.rs +++ b/sequencer/src/bin/verify-headers.rs @@ -1,15 +1,14 @@ //! Utility program to verify properties of headers sequenced by HotShot. +use std::{cmp::max, process::exit, time::Duration}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::{sync::Arc, task::sleep}; use clap::Parser; +use espresso_types::{Header, L1BlockInfo}; use ethers::prelude::*; use futures::future::join_all; use itertools::Itertools; -use sequencer::{Header, L1BlockInfo}; -use std::cmp::max; -use std::process::exit; -use std::time::Duration; use surf_disco::Url; use vbs::version::StaticVersionType; @@ -76,31 +75,31 @@ async fn verify_header( let mut ok = true; - if !opt.no_timestamps && header.timestamp < parent.timestamp { + if !opt.no_timestamps && header.timestamp() < parent.timestamp() { tracing::error!( "header {height} has decreasing timestamp: {} -> {}", - parent.timestamp, - header.timestamp + parent.timestamp(), + header.timestamp() ); ok = false; } - if !opt.no_l1_heads && header.l1_head < parent.l1_head { + if !opt.no_l1_heads && header.l1_head() < parent.l1_head() { tracing::error!( "header {height} has decreasing L1 head: {} -> {}", - parent.l1_head, - header.l1_head + parent.l1_head(), + header.l1_head() ); ok = false; } - if !opt.no_l1_finalized && header.l1_finalized < parent.l1_finalized { + if !opt.no_l1_finalized && header.l1_finalized() < parent.l1_finalized() { tracing::error!( "header {height} has decreasing L1 finalized: {:?} -> {:?}", - parent.l1_finalized, - header.l1_finalized + parent.l1_finalized(), + header.l1_finalized() ); ok = false; - if let (Some(l1), Some(l1_finalized)) = (l1, &header.l1_finalized) { + if let (Some(l1), Some(l1_finalized)) = (l1, &header.l1_finalized()) { let l1_block = get_l1_block(l1, l1_finalized.number).await; if *l1_finalized != l1_block { tracing::error!( diff --git a/sequencer/src/block.rs b/sequencer/src/block.rs deleted file mode 100644 index 033c7253fb..0000000000 --- a/sequencer/src/block.rs +++ /dev/null @@ -1,8 +0,0 @@ -mod full_payload; -mod namespace_payload; -mod uint_bytes; - -pub use full_payload::{NsProof, NsTable, NsTableValidationError, Payload, PayloadByteLen}; - -#[cfg(test)] -mod test; diff --git a/sequencer/src/block/full_payload.rs b/sequencer/src/block/full_payload.rs deleted file mode 100644 index 61247ec87e..0000000000 --- a/sequencer/src/block/full_payload.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod ns_proof; -mod ns_table; -mod payload; 
- -pub use ns_proof::NsProof; -pub use ns_table::{NsIndex, NsTable, NsTableValidationError}; -pub use payload::{Payload, PayloadByteLen}; - -pub(in crate::block) use ns_table::NsIter; diff --git a/sequencer/src/block/namespace_payload.rs b/sequencer/src/block/namespace_payload.rs deleted file mode 100644 index ecd894f86e..0000000000 --- a/sequencer/src/block/namespace_payload.rs +++ /dev/null @@ -1,12 +0,0 @@ -mod iter; -mod ns_payload; -mod ns_payload_range; -mod tx_proof; -mod types; - -pub use iter::{Index, Iter}; -pub use tx_proof::TxProof; - -pub(in crate::block) use ns_payload::{NsPayload, NsPayloadOwned}; -pub(in crate::block) use ns_payload_range::NsPayloadRange; -pub(in crate::block) use types::NsPayloadBuilder; diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index beb9aff19a..fbc2b2c7a3 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -1,113 +1,22 @@ -use crate::{ - api::{data_source::CatchupDataSource, AccountQueryData, BlocksFrontier}, - options::{parse_duration, Ratio}, - persistence::PersistenceOptions, - state::{BlockMerkleTree, FeeAccount, FeeMerkleCommitment}, - ChainConfig, -}; +use std::sync::Arc; + use anyhow::{bail, Context}; -use async_std::{sync::RwLock, task::sleep}; +use async_std::sync::RwLock; use async_trait::async_trait; -use clap::Parser; use committable::Commitment; -use futures::future::{BoxFuture, FutureExt, TryFutureExt}; +use espresso_types::{ + v0::traits::{PersistenceOptions, StateCatchup}, + AccountQueryData, BackoffParams, BlockMerkleTree, ChainConfig, FeeAccount, FeeMerkleCommitment, +}; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime as _}; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; -use rand::Rng; use serde::de::DeserializeOwned; -use std::{cmp::min, fmt::Debug, sync::Arc, time::Duration}; use surf_disco::Request; use tide_disco::error::ServerError; use url::Url; use vbs::version::StaticVersionType; -#[derive(Clone, Copy, Debug, Parser, PartialEq, Eq, PartialOrd, Ord)] -pub struct BackoffParams { - /// Exponential backoff exponent. - #[clap( - long = "catchup-backoff-factor", - env = "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_FACTOR", - default_value = "4" - )] - factor: u32, - - /// Exponential backoff base delay. - #[clap( - long = "catchup-base-retry-delay", - env = "ESPRESSO_SEQUENCER_CATCHUP_BASE_RETRY_DELAY", - default_value = "20ms", - value_parser = parse_duration - )] - base: Duration, - - /// Exponential max delay. - #[clap( - long = "catchup-max-retry-delay", - env = "ESPRESSO_SEQUENCER_CATCHUP_MAX_RETRY_DELAY", - default_value = "5s", - value_parser = parse_duration - )] - max: Duration, - - /// Exponential backoff jitter as a ratio of the backoff delay, numerator:denominator. 
- #[clap( - long = "catchup-backoff-jitter", - env = "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_JITTER", - default_value = "1:10" - )] - jitter: Ratio, -} - -impl Default for BackoffParams { - fn default() -> Self { - Self::parse_from(std::iter::empty::()) - } -} - -impl BackoffParams { - async fn retry( - &self, - mut state: S, - f: impl for<'a> Fn(&'a mut S) -> BoxFuture<'a, anyhow::Result>, - ) -> T { - let mut delay = self.base; - loop { - match f(&mut state).await { - Ok(res) => break res, - Err(err) => { - tracing::warn!( - "Retryable operation failed, will retry after {delay:?}: {err:#}" - ); - sleep(delay).await; - delay = self.backoff(delay); - } - } - } - } - - #[must_use] - fn backoff(&self, delay: Duration) -> Duration { - if delay >= self.max { - return self.max; - } - - let mut rng = rand::thread_rng(); - - // Increase the backoff by the backoff factor. - let ms = (delay * self.factor).as_millis() as u64; - - // Sample a random jitter factor in the range [0, self.jitter]. - let jitter_num = rng.gen_range(0..self.jitter.numerator); - let jitter_den = self.jitter.denominator; - - // Increase the delay by the jitter factor. - let jitter = ms * jitter_num / jitter_den; - let delay = Duration::from_millis(ms + jitter); - - // Bound the delay by the maximum. - min(delay, self.max) - } -} +use crate::api::{data_source::CatchupDataSource, BlocksFrontier}; // This newtype is probably not worth having. It's only used to be able to log // URLs before doing requests. @@ -130,85 +39,6 @@ impl Client { } } -#[async_trait] -pub trait StateCatchup: Send + Sync + std::fmt::Debug { - /// Try to fetch the given account state, failing without retrying if unable. - async fn try_fetch_account( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - account: FeeAccount, - ) -> anyhow::Result; - - /// Fetch the given list of accounts, retrying on transient errors. - async fn fetch_accounts( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - accounts: Vec, - ) -> anyhow::Result> { - let mut ret = vec![]; - for account in accounts { - let account = self - .backoff() - .retry(self, |provider| { - provider - .try_fetch_account(height, view, fee_merkle_tree_root, account) - .map_err(|err| err.context("fetching account {account}")) - .boxed() - }) - .await; - ret.push(account); - } - Ok(ret) - } - - /// Try to fetch and remember the blocks frontier, failing without retrying if unable. - async fn try_remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()>; - - /// Fetch and remember the blocks frontier, retrying on transient errors. - async fn remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - self.backoff() - .retry(mt, |mt| { - self.try_remember_blocks_merkle_tree(height, view, mt) - .map_err(|err| err.context("fetching frontier")) - .boxed() - }) - .await; - Ok(()) - } - - async fn try_fetch_chain_config( - &self, - commitment: Commitment, - ) -> anyhow::Result; - - async fn fetch_chain_config(&self, commitment: Commitment) -> ChainConfig { - self.backoff() - .retry(self, |provider| { - provider - .try_fetch_chain_config(commitment) - .map_err(|err| err.context("fetching chain config")) - .boxed() - }) - .await - } - - fn backoff(&self) -> &BackoffParams; -} - /// A catchup implementation that falls back to a remote provider, but prefers a local provider when /// supported. 
pub(crate) async fn local_and_remote( @@ -352,7 +182,7 @@ impl SqlStateCatchup { #[async_trait] impl StateCatchup for SqlStateCatchup where - T: CatchupDataSource + Debug + Send + Sync, + T: CatchupDataSource + std::fmt::Debug + Send + Sync, { #[tracing::instrument(skip(self))] async fn try_fetch_account( @@ -404,276 +234,3 @@ where &self.backoff } } - -#[async_trait] -impl StateCatchup for Box { - async fn try_fetch_account( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - account: FeeAccount, - ) -> anyhow::Result { - (**self) - .try_fetch_account(height, view, fee_merkle_tree_root, account) - .await - } - - async fn fetch_accounts( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - accounts: Vec, - ) -> anyhow::Result> { - (**self) - .fetch_accounts(height, view, fee_merkle_tree_root, accounts) - .await - } - - async fn try_remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - (**self) - .try_remember_blocks_merkle_tree(height, view, mt) - .await - } - - async fn remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - (**self).remember_blocks_merkle_tree(height, view, mt).await - } - - async fn try_fetch_chain_config( - &self, - commitment: Commitment, - ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await - } - - async fn fetch_chain_config(&self, commitment: Commitment) -> ChainConfig { - (**self).fetch_chain_config(commitment).await - } - - fn backoff(&self) -> &BackoffParams { - (**self).backoff() - } -} - -#[async_trait] -impl StateCatchup for Arc { - async fn try_fetch_account( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - account: FeeAccount, - ) -> anyhow::Result { - (**self) - .try_fetch_account(height, view, fee_merkle_tree_root, account) - .await - } - - async fn fetch_accounts( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - accounts: Vec, - ) -> anyhow::Result> { - (**self) - .fetch_accounts(height, view, fee_merkle_tree_root, accounts) - .await - } - - async fn try_remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - (**self) - .try_remember_blocks_merkle_tree(height, view, mt) - .await - } - - async fn remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - (**self).remember_blocks_merkle_tree(height, view, mt).await - } - - async fn try_fetch_chain_config( - &self, - commitment: Commitment, - ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await - } - - async fn fetch_chain_config(&self, commitment: Commitment) -> ChainConfig { - (**self).fetch_chain_config(commitment).await - } - - fn backoff(&self) -> &BackoffParams { - (**self).backoff() - } -} - -/// Catchup from multiple providers tries each provider in a round robin fashion until it succeeds. 
-#[async_trait] -impl StateCatchup for Vec { - #[tracing::instrument(skip(self))] - async fn try_fetch_account( - &self, - height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - account: FeeAccount, - ) -> anyhow::Result { - for provider in self { - match provider - .try_fetch_account(height, view, fee_merkle_tree_root, account) - .await - { - Ok(account) => return Ok(account), - Err(err) => { - tracing::warn!(%account, ?provider, "failed to fetch account: {err:#}"); - } - } - } - - bail!("could not fetch account from any provider"); - } - - #[tracing::instrument(skip(self, mt))] - async fn try_remember_blocks_merkle_tree( - &self, - height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - for provider in self { - match provider - .try_remember_blocks_merkle_tree(height, view, mt) - .await - { - Ok(()) => return Ok(()), - Err(err) => { - tracing::warn!(?provider, "failed to fetch frontier: {err:#}"); - } - } - } - - bail!("could not fetch account from any provider"); - } - - async fn try_fetch_chain_config( - &self, - commitment: Commitment, - ) -> anyhow::Result { - for provider in self { - match provider.try_fetch_chain_config(commitment).await { - Ok(cf) => return Ok(cf), - Err(err) => { - tracing::warn!(?provider, "failed to fetch chain config: {err:#}"); - } - } - } - - bail!("could not fetch chain config from any provider"); - } - - fn backoff(&self) -> &BackoffParams { - // Use whichever provider's backoff is most conservative. - self.iter() - .map(|p| p.backoff()) - .max() - .expect("provider list not empty") - } -} - -#[cfg(any(test, feature = "testing"))] -pub mod mock { - use super::*; - use crate::state::{FeeAccountProof, ValidatedState}; - use jf_merkle_tree::MerkleTreeScheme; - use std::collections::HashMap; - - #[derive(Debug, Clone, Default)] - pub struct MockStateCatchup { - backoff: BackoffParams, - state: HashMap>, - } - - impl FromIterator<(ViewNumber, Arc)> for MockStateCatchup { - fn from_iter)>>(iter: I) -> Self { - Self { - backoff: Default::default(), - state: iter.into_iter().collect(), - } - } - } - - #[async_trait] - impl StateCatchup for MockStateCatchup { - async fn try_fetch_account( - &self, - _height: u64, - view: ViewNumber, - fee_merkle_tree_root: FeeMerkleCommitment, - account: FeeAccount, - ) -> anyhow::Result { - let src = &self.state[&view].fee_merkle_tree; - assert_eq!(src.commitment(), fee_merkle_tree_root); - - tracing::info!("catchup: fetching account {account:?} for view {view:?}"); - Ok(FeeAccountProof::prove(src, account.into()) - .unwrap_or_else(|| panic!("Account {account:?} not in memory")) - .into()) - } - - async fn try_remember_blocks_merkle_tree( - &self, - _height: u64, - view: ViewNumber, - mt: &mut BlockMerkleTree, - ) -> anyhow::Result<()> { - tracing::info!("catchup: fetching frontier for view {view:?}"); - let src = &self.state[&view].block_merkle_tree; - - assert_eq!(src.commitment(), mt.commitment()); - assert!( - src.num_leaves() > 0, - "catchup should not be triggered when blocks tree is empty" - ); - - let index = src.num_leaves() - 1; - let (elem, proof) = src.lookup(index).expect_ok().unwrap(); - mt.remember(index, elem, proof.clone()) - .expect("Proof verifies"); - - Ok(()) - } - - async fn try_fetch_chain_config( - &self, - _commitment: Commitment, - ) -> anyhow::Result { - Ok(ChainConfig::default()) - } - - fn backoff(&self) -> &BackoffParams { - &self.backoff - } - } -} diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index e399d821cc..1340f9abb2 
100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -1,9 +1,14 @@ +use std::fmt::Display; + use anyhow::Context; use async_std::{ sync::{Arc, RwLock}, task::{spawn, JoinHandle}, }; use derivative::Derivative; +use espresso_types::{ + v0::traits::SequencerPersistence, NodeState, PubKey, Transaction, ValidatedState, +}; use futures::{ future::{join_all, Future}, stream::{Stream, StreamExt}, @@ -13,6 +18,7 @@ use hotshot::{ types::{Event, SystemContextHandle}, Memberships, SystemContext, }; +use hotshot_events_service::events_source::{EventConsumer, EventsStreamer}; use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider; use hotshot_orchestrator::client::OrchestratorClient; use hotshot_query_service::Leaf; @@ -22,15 +28,10 @@ use hotshot_types::{ traits::{election::Membership, metrics::Metrics, network::ConnectedNetwork}, HotShotConfig, }; -use std::fmt::Display; use url::Url; use vbs::version::StaticVersionType; -use crate::{ - persistence::SequencerPersistence, state_signature::StateSigner, static_stake_table_commitment, - Node, NodeState, PubKey, SeqTypes, Transaction, ValidatedState, -}; -use hotshot_events_service::events_source::{EventConsumer, EventsStreamer}; +use crate::{state_signature::StateSigner, static_stake_table_commitment, Node, SeqTypes}; /// The consensus handle pub type Consensus = SystemContextHandle>; diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 9c013090d3..03c278b163 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -1,17 +1,11 @@ -use crate::{ - l1_client::L1BlockInfo, - state::{FeeAccount, FeeAmount}, - ChainConfig, -}; -use anyhow::Context; -use derive_more::{Display, From, Into}; -use sequencer_utils::{impl_serde_from_string_or_integer, ser::FromStringOrInteger}; -use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, path::Path, }; -use time::{format_description::well_known::Rfc3339 as TimestampFormat, OffsetDateTime}; + +use anyhow::Context; +use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade}; +use serde::{Deserialize, Serialize}; use vbs::version::Version; /// Initial configuration of an Espresso stake table. @@ -39,60 +33,6 @@ pub enum L1Finalized { Number { number: u64 }, } -#[derive(Hash, Copy, Clone, Debug, Display, PartialEq, Eq, From, Into)] -#[display(fmt = "{}", "_0.format(&TimestampFormat).unwrap()")] -pub struct Timestamp(OffsetDateTime); - -impl_serde_from_string_or_integer!(Timestamp); - -impl Default for Timestamp { - fn default() -> Self { - Self::from_integer(0).unwrap() - } -} - -impl Timestamp { - pub fn unix_timestamp(&self) -> u64 { - self.0.unix_timestamp() as u64 - } -} - -impl FromStringOrInteger for Timestamp { - type Binary = u64; - type Integer = u64; - - fn from_binary(b: Self::Binary) -> anyhow::Result { - Self::from_integer(b) - } - - fn from_integer(i: Self::Integer) -> anyhow::Result { - let unix = i.try_into().context("timestamp out of range")?; - Ok(Self( - OffsetDateTime::from_unix_timestamp(unix).context("invalid timestamp")?, - )) - } - - fn from_string(s: String) -> anyhow::Result { - Ok(Self( - OffsetDateTime::parse(&s, &TimestampFormat).context("invalid timestamp")?, - )) - } - - fn to_binary(&self) -> anyhow::Result { - Ok(self.unix_timestamp()) - } - - fn to_string(&self) -> anyhow::Result { - Ok(format!("{self}")) - } -} - -/// Information about the genesis state which feeds into the genesis block header. 
-#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] -pub struct GenesisHeader { - pub timestamp: Timestamp, -} - /// Genesis of an Espresso chain. #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Genesis { @@ -107,31 +47,16 @@ pub struct Genesis { pub upgrades: BTreeMap, } -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(untagged)] -#[serde(rename_all = "snake_case")] -pub enum UpgradeType { - // Note: Wrapping this in a tuple variant causes deserialization to fail because - // the 'chain_config' name is also provided in the TOML input. - ChainConfig { chain_config: ChainConfig }, -} +mod upgrade_serialization { -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Upgrade { - pub view: u64, - pub propose_window: u64, - #[serde(flatten)] - pub upgrade_type: UpgradeType, -} + use std::{collections::BTreeMap, fmt}; -mod upgrade_serialization { - use crate::genesis::{Upgrade, UpgradeType}; - use serde::ser::SerializeSeq; + use espresso_types::{Upgrade, UpgradeType}; use serde::{ de::{SeqAccess, Visitor}, + ser::SerializeSeq, Deserialize, Deserializer, Serializer, }; - use std::{collections::BTreeMap, fmt}; use vbs::version::Version; pub fn serialize(map: &BTreeMap, serializer: S) -> Result @@ -223,11 +148,13 @@ impl Genesis { #[cfg(test)] mod test { - use super::*; - + use espresso_types::{L1BlockInfo, Timestamp}; use ethers::prelude::{Address, H160, H256}; + use sequencer_utils::ser::FromStringOrInteger; use toml::toml; + use super::*; + #[test] fn test_genesis_from_toml_with_optional_fields() { let toml = toml! { diff --git a/sequencer/src/hotshot_commitment.rs b/sequencer/src/hotshot_commitment.rs index cf4759d252..46efa54ac9 100644 --- a/sequencer/src/hotshot_commitment.rs +++ b/sequencer/src/hotshot_commitment.rs @@ -1,7 +1,10 @@ +use std::{error::Error, time::Duration}; + use anyhow::anyhow; use async_std::{sync::Arc, task::sleep}; use async_trait::async_trait; use contract_bindings::hot_shot::{HotShot, HotShotErrors, Qc}; +use espresso_types::Header; use ethers::prelude::*; use futures::{ future, @@ -12,12 +15,10 @@ use rand::SeedableRng; use rand_chacha::ChaChaRng; use rand_distr::Distribution; use sequencer_utils::{commitment_to_u256, contract_send, init_signer, Signer}; -use std::error::Error; -use std::time::Duration; use surf_disco::Url; use vbs::version::StaticVersionType; -use crate::{Header, SeqTypes}; +use crate::SeqTypes; const RETRY_DELAY: Duration = Duration::from_secs(1); @@ -274,19 +275,19 @@ fn build_sequence_batches_txn( #[cfg(test)] mod test { - use super::*; - use crate::{l1_client::L1Client, Leaf, NodeState, ValidatedState}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::spawn; use committable::Committable; use contract_bindings::hot_shot::{NewBlocksCall, NewBlocksFilter}; + use espresso_types::{L1Client, Leaf, NodeState, ValidatedState}; use ethers::{abi::AbiDecode, providers::Middleware}; use futures::FutureExt; use hotshot_types::simple_certificate::QuorumCertificate; - use sequencer_utils::test_utils::TestL1System; - use sequencer_utils::AnvilOptions; + use sequencer_utils::{test_utils::TestL1System, AnvilOptions}; use surf_disco::{Error, StatusCode}; + use super::*; + const TEST_MNEMONIC: &str = "test test test test test test test test test test test junk"; #[derive(Clone, Debug, Default)] @@ -330,7 +331,7 @@ mod test { async fn mock_leaf(height: u64, node_state: &NodeState) -> LeafQueryData { let mut leaf = Leaf::genesis(&ValidatedState::default(), node_state).await; 
let mut qc = QuorumCertificate::genesis(&ValidatedState::default(), node_state).await; - leaf.block_header_mut().height = height; + *leaf.block_header_mut().height_mut() = height; qc.data.leaf_commit = leaf.commit(); LeafQueryData::new(leaf, qc).unwrap() } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index b617fbaefc..eb0cfac639 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -1,52 +1,45 @@ pub mod api; -pub mod block; pub mod catchup; -mod chain_config; pub mod context; -pub mod eth_signature_key; pub mod genesis; -mod header; + pub mod hotshot_commitment; pub mod options; pub mod state_signature; mod message_compat_tests; -mod reference_tests; -use crate::catchup::BackoffParams; use anyhow::Context; use async_std::sync::RwLock; -use async_trait::async_trait; -use catchup::{StateCatchup, StatePeers}; +use catchup::StatePeers; use context::SequencerContext; +use espresso_types::{BackoffParams, L1Client, NodeState, PubKey, SeqTypes, ValidatedState}; use ethers::types::U256; #[cfg(feature = "libp2p")] use futures::FutureExt; -use genesis::{GenesisHeader, L1Finalized, Upgrade}; - -// Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support - +use genesis::L1Finalized; use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider; -use l1_client::L1Client; - +// Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support use libp2p::Multiaddr; use network::libp2p::split_off_peer_id; -use state::FeeAccount; use state_signature::static_stake_table_commitment; use url::Url; -pub mod l1_client; pub mod persistence; pub mod state; -pub mod transaction; + +#[cfg(feature = "libp2p")] +use std::time::Duration; +use std::{collections::BTreeMap, fmt::Debug, marker::PhantomData, net::SocketAddr, sync::Arc}; use derivative::Derivative; +use espresso_types::v0::traits::{PersistenceOptions, SequencerPersistence}; +pub use genesis::Genesis; +#[cfg(feature = "libp2p")] +use hotshot::traits::implementations::{CombinedNetworks, Libp2pNetwork}; use hotshot::{ - traits::{ - election::static_committee::GeneralStaticCommittee, - implementations::{ - derive_libp2p_peer_id, CdnMetricsValue, KeyPair, MemoryNetwork, PushCdnNetwork, Topic, - WrappedSignatureKey, - }, + traits::implementations::{ + derive_libp2p_peer_id, CdnMetricsValue, KeyPair, MemoryNetwork, PushCdnNetwork, Topic, + WrappedSignatureKey, }, types::SignatureKey, }; @@ -55,44 +48,21 @@ use hotshot_orchestrator::{ config::NetworkConfig, }; use hotshot_types::{ - consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, VidDisperseShare, ViewNumber}, - event::HotShotAction, + data::ViewNumber, light_client::{StateKeyPair, StateSignKey}, - message::Proposal, signature_key::{BLSPrivKey, BLSPubKey}, - simple_certificate::QuorumCertificate, traits::{ metrics::Metrics, network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType}, signature_key::{BuilderSignatureKey, StakeTableEntryType}, - states::InstanceState, - storage::Storage, }, - utils::{BuilderCommitment, View}, + utils::BuilderCommitment, ValidatorConfig, }; -use persistence::{PersistenceOptions, SequencerPersistence}; -use serde::{Deserialize, Serialize}; -use snafu::Snafu; -use std::{collections::BTreeMap, fmt::Debug, marker::PhantomData, net::SocketAddr, sync::Arc}; -use vbs::version::{StaticVersion, StaticVersionType, Version}; - -#[cfg(feature = "libp2p")] -use std::time::Duration; - -#[cfg(feature = "libp2p")] -use 
hotshot::traits::implementations::{CombinedNetworks, Libp2pNetwork}; - -pub use block::Payload; -pub use chain_config::ChainConfig; -pub use genesis::Genesis; -pub use header::Header; -pub use l1_client::L1BlockInfo; pub use options::Options; -pub use state::ValidatedState; -pub use transaction::{NamespaceId, Transaction}; +use serde::{Deserialize, Serialize}; +use vbs::version::StaticVersionType; pub mod network; /// The Sequencer node is generic over the hotshot CommChannel. @@ -115,17 +85,6 @@ impl, P: SequencerPersistence> Clone for Node } } -#[derive( - Clone, Copy, Debug, Default, Hash, Eq, PartialEq, PartialOrd, Ord, Deserialize, Serialize, -)] -pub struct SeqTypes; - -pub type Leaf = hotshot_types::data::Leaf; -pub type Event = hotshot::types::Event; - -pub type PubKey = BLSPubKey; -pub type PrivKey = ::PrivateKey; - impl, P: SequencerPersistence> NodeImplementation for Node { @@ -134,173 +93,6 @@ impl, P: SequencerPersistence> NodeImplementation Storage for Arc> { - async fn append_vid( - &self, - proposal: &Proposal>, - ) -> anyhow::Result<()> { - self.write().await.append_vid(proposal).await - } - - async fn append_da( - &self, - proposal: &Proposal>, - ) -> anyhow::Result<()> { - self.write().await.append_da(proposal).await - } - async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()> { - self.write().await.record_action(view, action).await - } - async fn update_high_qc(&self, _high_qc: QuorumCertificate) -> anyhow::Result<()> { - Ok(()) - } - - async fn update_undecided_state( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()> { - self.write() - .await - .update_undecided_state(leaves, state) - .await - } - - async fn append_proposal( - &self, - proposal: &Proposal>, - ) -> anyhow::Result<()> { - self.write().await.append_quorum_proposal(proposal).await - } -} - -#[derive(Debug, Clone)] -pub struct NodeState { - pub node_id: u64, - pub chain_config: ChainConfig, - pub l1_client: L1Client, - pub peers: Arc, - pub genesis_header: GenesisHeader, - pub genesis_state: ValidatedState, - pub l1_genesis: Option, - pub upgrades: BTreeMap, - pub current_version: Version, -} - -impl NodeState { - pub fn new( - node_id: u64, - chain_config: ChainConfig, - l1_client: L1Client, - catchup: impl StateCatchup + 'static, - ) -> Self { - Self { - node_id, - chain_config, - l1_client, - peers: Arc::new(catchup), - genesis_header: Default::default(), - genesis_state: ValidatedState { - chain_config: chain_config.into(), - ..Default::default() - }, - l1_genesis: None, - upgrades: Default::default(), - current_version: ::Base::version(), - } - } - - #[cfg(any(test, feature = "testing"))] - pub fn mock() -> Self { - Self::new( - 0, - ChainConfig::default(), - L1Client::new("http://localhost:3331".parse().unwrap(), 10000), - catchup::mock::MockStateCatchup::default(), - ) - } - - pub fn with_l1(mut self, l1_client: L1Client) -> Self { - self.l1_client = l1_client; - self - } - - pub fn with_genesis(mut self, state: ValidatedState) -> Self { - self.genesis_state = state; - self - } - - pub fn with_chain_config(mut self, cfg: ChainConfig) -> Self { - self.chain_config = cfg; - self - } - - pub fn with_upgrades(mut self, upgrades: BTreeMap) -> Self { - self.upgrades = upgrades; - self - } -} - -// This allows us to turn on `Default` on InstanceState trait -// which is used in `HotShot` by `TestBuilderImplementation`. 
-#[cfg(any(test, feature = "testing"))] -impl Default for NodeState { - fn default() -> Self { - Self::new( - 1u64, - ChainConfig::default(), - L1Client::new("http://localhost:3331".parse().unwrap(), 10000), - catchup::mock::MockStateCatchup::default(), - ) - } -} - -impl InstanceState for NodeState {} - -impl NodeType for SeqTypes { - type Time = ViewNumber; - type BlockHeader = Header; - type BlockPayload = Payload; - type SignatureKey = PubKey; - type Transaction = Transaction; - type InstanceState = NodeState; - type ValidatedState = ValidatedState; - type Membership = GeneralStaticCommittee; - type BuilderSignatureKey = FeeAccount; - type Base = StaticVersion<0, 1>; - type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, - ]; -} - -#[derive(Clone, Debug, Snafu, Deserialize, Serialize)] -pub enum Error { - // TODO: Can we nest these errors in a `ValidationError` to group them? - - // Parent state commitment of block doesn't match current state commitment - IncorrectParent, - - // New view number isn't strictly after current view - IncorrectView, - - // Genesis block either has zero or more than one transaction - GenesisWrongSize, - - // Genesis transaction not present in genesis block - MissingGenesis, - - // Genesis transaction in non-genesis block - UnexpectedGenesis, - - // Merkle tree error - MerkleTreeError { error: String }, - - BlockBuilding, -} - #[derive(Clone, Debug)] pub struct NetworkParams { /// The address where a CDN marshal is located @@ -560,24 +352,27 @@ pub fn empty_builder_commitment() -> BuilderCommitment { #[cfg(any(test, feature = "testing"))] pub mod testing { - use super::*; - use crate::{ - catchup::mock::MockStateCatchup, - eth_signature_key::EthKeyPair, - persistence::no_storage::{self, NoStorage}, - }; + use std::{collections::HashMap, time::Duration}; + use api::test_helpers::TestNetworkUpgrades; use committable::Committable; + use espresso_types::{ + eth_signature_key::EthKeyPair, + mock::MockStateCatchup, + v0::traits::{PersistenceOptions, StateCatchup}, + ChainConfig, Event, FeeAccount, PubKey, SeqTypes, Transaction, Upgrade, + }; use futures::{ future::join_all, stream::{Stream, StreamExt}, }; - use genesis::Upgrade; - use hotshot::traits::{ - implementations::{MasterMap, MemoryNetwork}, - BlockPayload, + use hotshot::{ + traits::{ + implementations::{MasterMap, MemoryNetwork}, + BlockPayload, + }, + types::EventType::Decide, }; - use hotshot::types::EventType::Decide; use hotshot_stake_table::vec_based::StakeTable; use hotshot_testing::block_builder::{ BuilderTask, SimpleBuilderImplementation, TestBuilderImplementation, @@ -589,10 +384,11 @@ pub mod testing { ExecutionType, HotShotConfig, PeerConfig, }; use portpicker::pick_unused_port; - use std::collections::HashMap; - use std::time::Duration; use vbs::version::Version; + use super::*; + use crate::persistence::no_storage::{self, NoStorage}; + const STAKE_TABLE_CAPACITY_FOR_TEST: u64 = 10; pub async fn run_test_builder( @@ -929,12 +725,9 @@ pub mod testing { #[cfg(test)] mod test { - use self::testing::run_test_builder; - - use super::*; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - use es_version::SequencerVersion; + use espresso_types::{Header, NamespaceId, Payload, Transaction}; use futures::StreamExt; use hotshot::types::EventType::Decide; use hotshot_types::{ @@ -946,6 +739,9 @@ mod test { use sequencer_utils::AnvilOptions; use 
testing::{wait_for_decide_on_handle, TestConfigBuilder}; + use self::testing::run_test_builder; + use super::*; + #[async_std::test] async fn test_skeleton_instantiation() { setup_logging(); @@ -977,7 +773,7 @@ mod test { } // Submit target transaction to handle - let txn = Transaction::new(NamespaceId::from(1), vec![1, 2, 3]); + let txn = Transaction::new(NamespaceId::from(1_u32), vec![1, 2, 3]); handles[0] .submit_transaction(txn.clone()) .await @@ -1050,18 +846,18 @@ mod test { // the fields which should be monotonic are. for LeafInfo { leaf, .. } in leaf_chain.iter().rev() { let header = leaf.block_header().clone(); - if header.height == 0 { + if header.height() == 0 { parent = header; continue; } - assert_eq!(header.height, parent.height + 1); - assert!(header.timestamp >= parent.timestamp); - assert!(header.l1_head >= parent.l1_head); - assert!(header.l1_finalized >= parent.l1_finalized); + assert_eq!(header.height(), parent.height() + 1); + assert!(header.timestamp() >= parent.timestamp()); + assert!(header.l1_head() >= parent.l1_head()); + assert!(header.l1_finalized() >= parent.l1_finalized()); parent = header; } - if parent.height >= success_height { + if parent.height() >= success_height { break; } } diff --git a/sequencer/src/main.rs b/sequencer/src/main.rs index 7eb2b8d05b..a5a878176f 100644 --- a/sequencer/src/main.rs +++ b/sequencer/src/main.rs @@ -163,21 +163,23 @@ where #[cfg(test)] mod test { - use super::*; + use std::time::Duration; + use async_std::task::spawn; use es_version::SequencerVersion; + use espresso_types::PubKey; use hotshot_types::{light_client::StateKeyPair, traits::signature_key::SignatureKey}; use portpicker::pick_unused_port; use sequencer::{ api::options::{Http, Status}, genesis::StakeTableConfig, persistence::fs, - PubKey, }; - use std::time::Duration; use surf_disco::{error::ClientError, Client, Url}; use tempfile::TempDir; + use super::*; + #[async_std::test] async fn test_startup_before_orchestrator() { setup_logging(); diff --git a/sequencer/src/message_compat_tests.rs b/sequencer/src/message_compat_tests.rs index f794fdd601..a5ddc469ca 100644 --- a/sequencer/src/message_compat_tests.rs +++ b/sequencer/src/message_compat_tests.rs @@ -14,9 +14,11 @@ //! If this test is failing and you did not intend to change the consensus API, figure out what //! code changed caused the serialization change and revert it. 
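The module doc above states the contract; mechanically, the test (in the hunks that follow) builds one instance of every consensus message type and runs it through the version-tagged `vbs` serializer so that any wire-format drift shows up against a reference file. A minimal sketch of the version-tagged round trip at the center of that check (the message type here is a stand-in; `vbs`'s `BinarySerializer` interface is used as in the diff):

```rust
use serde::{Deserialize, Serialize};
use vbs::{version::StaticVersion, BinarySerializer, Serializer};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Message {
    view: u64,
}

// Stand-in for es_version::SequencerVersion.
type Ver = StaticVersion<0, 1>;

fn main() -> anyhow::Result<()> {
    let msg = Message { view: 7 };
    // vbs tags the payload with the protocol version, so readers can reject
    // bytes produced under an incompatible version.
    let bytes = Serializer::<Ver>::serialize(&msg)?;
    let round_tripped: Message = Serializer::<Ver>::deserialize(&bytes)?;
    assert_eq!(round_tripped, msg);
    Ok(())
}
```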
-use crate::{Leaf, NodeState, Payload, PubKey, SeqTypes, Transaction, ValidatedState}; +use std::path::Path; + use committable::Committable; use es_version::SequencerVersion; +use espresso_types::{Leaf, NodeState, PubKey, ValidatedState}; use hotshot::traits::election::static_committee::GeneralStaticCommittee; use hotshot_types::{ data::{ @@ -44,13 +46,15 @@ use hotshot_types::{ use jf_vid::VidScheme; use pretty_assertions::assert_eq; use serde_json::Value; -use std::path::Path; use vbs::{version::Version, BinarySerializer}; type Serializer = vbs::Serializer; #[async_std::test] +#[cfg(feature = "testing")] async fn test_message_compat() { + use espresso_types::{Payload, SeqTypes, Transaction}; + let (sender, priv_key) = PubKey::generated_from_seed_indexed(Default::default(), 0); let signature = PubKey::sign(&priv_key, &[]).unwrap(); let membership = GeneralStaticCommittee::new(&[], vec![sender.stake_table_entry(1)], vec![], 0); @@ -64,7 +68,7 @@ async fn test_message_compat() { }; let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; let block_header = leaf.block_header().clone(); - let transaction = Transaction::new(1.into(), vec![1, 2, 3]); + let transaction = Transaction::new(1_u32.into(), vec![1, 2, 3]); let (payload, metadata) = Payload::from_transactions( [transaction.clone()], &ValidatedState::default(), @@ -88,7 +92,7 @@ async fn test_message_compat() { view: ViewNumber::genesis(), }; let da_data = DaData { - payload_commit: block_header.payload_commitment, + payload_commit: block_header.payload_commitment(), }; let consensus_messages = [ diff --git a/sequencer/src/network/mod.rs b/sequencer/src/network/mod.rs index 5ce9e2dfbb..57d666ac42 100644 --- a/sequencer/src/network/mod.rs +++ b/sequencer/src/network/mod.rs @@ -1,3 +1,5 @@ +use espresso_types::PubKey; + use super::*; pub mod cdn; diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 68a8148112..1eb88bf7bd 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -1,15 +1,4 @@ -use crate::{api, catchup::BackoffParams, persistence}; -use anyhow::{bail, Context}; -use bytesize::ByteSize; -use clap::{error::ErrorKind, Args, FromArgMatches, Parser}; -use cld::ClDuration; use core::fmt::Display; -use derivative::Derivative; -use derive_more::From; -use hotshot_types::light_client::StateSignKey; -use hotshot_types::signature_key::BLSPrivKey; -use libp2p::Multiaddr; -use snafu::Snafu; use std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -20,8 +9,21 @@ use std::{ str::FromStr, time::Duration, }; + +use anyhow::{bail, Context}; +use bytesize::ByteSize; +use clap::{error::ErrorKind, Args, FromArgMatches, Parser}; +use cld::ClDuration; +use derivative::Derivative; +use derive_more::From; +use espresso_types::BackoffParams; +use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey}; +use libp2p::Multiaddr; +use snafu::Snafu; use url::Url; +use crate::{api, persistence}; + // This options struct is a bit unconventional. The sequencer has multiple optional modules which // can be added, in any combination, to the service. These include, for example, the API server. // Each of these modules has its own options, which are all required if the module is added but can diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 3c5470cbef..0ba888e913 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -8,260 +8,14 @@ //! an extension that node operators can opt into. This module defines the minimum level of //! 
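A pattern running through these tests: header fields are no longer public, so reads go through accessors (`header.payload_commitment()`, `header.height()`) and writes, as in `mock_leaf` and the persistence tests, through paired `_mut` accessors (`*leaf.block_header_mut().height_mut() = h`). A minimal sketch of that pairing (simplified; the real `Header` hides its fields so the representation can evolve behind the accessors):

```rust
// Simplified sketch of the getter/mutator pairing the series adopts; the
// real Header keeps its fields private for the same reason.
pub struct Header {
    height: u64,
}

impl Header {
    pub fn height(&self) -> u64 {
        self.height
    }

    pub fn height_mut(&mut self) -> &mut u64 {
        &mut self.height
    }
}

fn main() {
    let mut header = Header { height: 0 };
    *header.height_mut() += 1;
    assert_eq!(header.height(), 1);
}
```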
persistence which is _required_ to run a node. -use crate::{ - catchup::BackoffParams, ChainConfig, Leaf, NodeState, PubKey, SeqTypes, StateCatchup, - ValidatedState, ViewNumber, -}; -use anyhow::{bail, ensure, Context}; -use async_std::sync::Arc; use async_trait::async_trait; -use committable::{Commitment, Committable}; -use hotshot::{ - traits::ValidatedState as _, - types::{Event, EventType}, - HotShotInitializer, -}; -use hotshot_types::{ - consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, VidDisperseShare}, - event::{HotShotAction, LeafInfo}, - message::Proposal, - simple_certificate::QuorumCertificate, - traits::node_implementation::ConsensusTime, - utils::View, -}; -use std::{cmp::max, collections::BTreeMap}; +use committable::Commitment; +use espresso_types::ChainConfig; pub mod fs; pub mod no_storage; pub mod sql; -pub type NetworkConfig = hotshot_orchestrator::config::NetworkConfig; - -#[async_trait] -pub trait PersistenceOptions: Clone + Send + Sync + 'static { - type Persistence: SequencerPersistence; - - async fn create(self) -> anyhow::Result; - async fn reset(self) -> anyhow::Result<()>; - - async fn create_catchup_provider( - self, - backoff: BackoffParams, - ) -> anyhow::Result> { - self.create().await?.into_catchup_provider(backoff) - } -} - -#[async_trait] -pub trait SequencerPersistence: Sized + Send + Sync + 'static { - /// Use this storage as a state catchup backend, if supported. - fn into_catchup_provider( - self, - _backoff: BackoffParams, - ) -> anyhow::Result> { - bail!("state catchup is not implemented for this persistence type"); - } - - /// Load the orchestrator config from storage. - /// - /// Returns `None` if no config exists (we are joining a network for the first time). Fails with - /// `Err` if it could not be determined whether a config exists or not. - async fn load_config(&self) -> anyhow::Result>; - - /// Save the orchestrator config to storage. - async fn save_config(&mut self, cfg: &NetworkConfig) -> anyhow::Result<()>; - - async fn collect_garbage(&mut self, view: ViewNumber) -> anyhow::Result<()>; - - /// Saves the latest decided leaf. - /// - /// If the height of the new leaf is not greater than the height of the previous decided leaf, - /// storage is not updated. - async fn save_anchor_leaf( - &mut self, - leaf: &Leaf, - qc: &QuorumCertificate, - ) -> anyhow::Result<()>; - - /// Load the highest view saved with [`save_voted_view`](Self::save_voted_view). - async fn load_latest_acted_view(&self) -> anyhow::Result>; - - /// Load the latest leaf saved with [`save_anchor_leaf`](Self::save_anchor_leaf). - async fn load_anchor_leaf(&self) - -> anyhow::Result)>>; - - /// Load undecided state saved by consensus before we shut down. - async fn load_undecided_state( - &self, - ) -> anyhow::Result, BTreeMap>)>>; - - /// Load the proposals saved by consensus - async fn load_quorum_proposals( - &self, - ) -> anyhow::Result>>>>; - - async fn load_vid_share( - &self, - view: ViewNumber, - ) -> anyhow::Result>>>; - async fn load_da_proposal( - &self, - view: ViewNumber, - ) -> anyhow::Result>>>; - - /// Load the latest known consensus state. - /// - /// Returns an initializer to resume HotShot from the latest saved state (or start from genesis, - /// if there is no saved state). - async fn load_consensus_state( - &self, - state: NodeState, - ) -> anyhow::Result> { - let genesis_validated_state = ValidatedState::genesis(&state).0; - let highest_voted_view = match self - .load_latest_acted_view() - .await - .context("loading last voted view")? 
- { - Some(view) => { - tracing::info!(?view, "starting from saved view"); - view - } - None => { - tracing::info!("no saved view, starting from genesis"); - ViewNumber::genesis() - } - }; - let (leaf, high_qc) = match self - .load_anchor_leaf() - .await - .context("loading anchor leaf")? - { - Some((leaf, high_qc)) => { - tracing::info!(?leaf, ?high_qc, "starting from saved leaf"); - ensure!( - leaf.view_number() == high_qc.view_number, - format!( - "loaded anchor leaf from view {:?}, but high QC is from view {:?}", - leaf.view_number(), - high_qc.view_number - ) - ); - (leaf, high_qc) - } - None => { - tracing::info!("no saved leaf, starting from genesis leaf"); - ( - Leaf::genesis(&genesis_validated_state, &state).await, - QuorumCertificate::genesis(&genesis_validated_state, &state).await, - ) - } - }; - let validated_state = if leaf.block_header().height == 0 { - // If we are starting from genesis, we can provide the full state. - Some(Arc::new(genesis_validated_state)) - } else { - // Otherwise, we will have to construct a sparse state and fetch missing data during - // catchup. - None - }; - - // If we are not starting from genesis, we start from the view following the maximum view - // between `highest_voted_view` and `leaf.view_number`. This prevents double votes from - // starting in a view in which we had already voted before the restart, and prevents - // unnecessary catchup from starting in a view earlier than the anchor leaf. - let mut view = max(highest_voted_view, leaf.view_number()); - if view != ViewNumber::genesis() { - view += 1; - } - - let (undecided_leaves, undecided_state) = self - .load_undecided_state() - .await - .context("loading undecided state")? - .unwrap_or_default(); - - let saved_proposals = self - .load_quorum_proposals() - .await - .context("loading saved proposals") - .unwrap_or_default() - .unwrap_or_default(); - - tracing::info!( - ?leaf, - ?view, - ?high_qc, - ?validated_state, - ?undecided_leaves, - ?undecided_state, - ?saved_proposals, - "loaded consensus state" - ); - Ok(HotShotInitializer::from_reload( - leaf, - state, - validated_state, - view, - saved_proposals, - high_qc, - undecided_leaves.into_values().collect(), - undecided_state, - )) - } - - /// Update storage based on an event from consensus. - async fn handle_event(&mut self, event: &Event) { - if let EventType::Decide { leaf_chain, qc, .. } = &event.event { - if let Some(LeafInfo { leaf, .. }) = leaf_chain.first() { - if qc.view_number != leaf.view_number() { - tracing::error!( - leaf_view = ?leaf.view_number(), - qc_view = ?qc.view_number, - "latest leaf and QC are from different views!", - ); - return; - } - if let Err(err) = self.save_anchor_leaf(leaf, qc).await { - tracing::error!( - ?leaf, - hash = %leaf.commit(), - "Failed to save anchor leaf. When restarting make sure anchor leaf is at least as recent as this leaf. {err:#}", - ); - } - - if let Err(err) = self.collect_garbage(leaf.view_number()).await { - tracing::error!("Failed to garbage collect. 
{err:#}",); - } - } - } - } - - async fn append_vid( - &mut self, - proposal: &Proposal>, - ) -> anyhow::Result<()>; - async fn append_da( - &mut self, - proposal: &Proposal>, - ) -> anyhow::Result<()>; - async fn record_action( - &mut self, - view: ViewNumber, - action: HotShotAction, - ) -> anyhow::Result<()>; - async fn update_undecided_state( - &mut self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()>; - async fn append_quorum_proposal( - &mut self, - proposal: &Proposal>, - ) -> anyhow::Result<()>; -} - #[async_trait] pub trait ChainConfigPersistence: Sized + Send + Sync + 'static { async fn insert_chain_config(&mut self, chain_config: ChainConfig) -> anyhow::Result<()>; @@ -271,11 +25,13 @@ pub trait ChainConfigPersistence: Sized + Send + Sync + 'static { ) -> anyhow::Result; } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] mod testing { - use super::*; + use espresso_types::v0::traits::SequencerPersistence; + use super::*; + #[allow(dead_code)] #[async_trait] pub trait TestablePersistence: SequencerPersistence { type Storage; @@ -289,17 +45,25 @@ mod testing { #[espresso_macros::generic_tests] mod persistence_tests { - use super::*; - use crate::NodeState; - use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use std::collections::BTreeMap; - use hotshot::types::BLSPubKey; - use hotshot::types::SignatureKey; - use hotshot_types::traits::EncodeBytes; - use hotshot_types::{event::HotShotAction, vid::vid_scheme}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use committable::Committable; + use espresso_types::{Leaf, NodeState, PubKey, SeqTypes, ValidatedState}; + use hotshot::types::{BLSPubKey, SignatureKey}; + use hotshot_types::{ + data::{DaProposal, QuorumProposal, VidDisperseShare, ViewNumber}, + event::HotShotAction, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{node_implementation::ConsensusTime, EncodeBytes}, + vid::vid_scheme, + }; use jf_vid::VidScheme; use testing::TestablePersistence; + use super::*; + #[async_std::test] pub async fn test_anchor_leaf() { setup_logging(); @@ -322,7 +86,7 @@ mod persistence_tests { // Store a newer leaf, make sure storage gets updated. let mut leaf2 = leaf1.clone(); - leaf2.block_header_mut().height += 1; + *leaf2.block_header_mut().height_mut() += 1; let mut qc2 = qc1.clone(); qc2.data.leaf_commit = leaf2.commit(); qc2.vote_commitment = qc2.data.commit(); diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 2311cd0833..16084f56be 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -1,9 +1,17 @@ -use super::{NetworkConfig, PersistenceOptions, SequencerPersistence}; -use crate::{Leaf, SeqTypes, ViewNumber}; +use std::{ + collections::BTreeMap, + fs::{self, File, OpenOptions}, + io::{Read, Seek, SeekFrom, Write}, + path::{Path, PathBuf}, +}; + use anyhow::{anyhow, Context}; use async_trait::async_trait; use clap::Parser; - +use espresso_types::{ + v0::traits::{PersistenceOptions, SequencerPersistence}, + Leaf, NetworkConfig, SeqTypes, +}; use hotshot_types::{ consensus::CommitmentMap, data::{DaProposal, QuorumProposal, VidDisperseShare}, @@ -14,12 +22,8 @@ use hotshot_types::{ utils::View, vote::HasViewNumber, }; -use std::{ - collections::BTreeMap, - fs::{self, File, OpenOptions}, - io::{Read, Seek, SeekFrom, Write}, - path::{Path, PathBuf}, -}; + +use crate::ViewNumber; /// Options for file system backed persistence. 
#[derive(Parser, Clone, Debug)] @@ -556,10 +560,10 @@ fn migrate_network_config( #[cfg(test)] mod testing { - use super::super::testing::TestablePersistence; - use super::*; use tempfile::TempDir; + use super::{super::testing::TestablePersistence, *}; + #[async_trait] impl TestablePersistence for Persistence { type Storage = TempDir; @@ -576,9 +580,7 @@ mod testing { #[cfg(test)] mod generic_tests { - use super::super::persistence_tests; - use super::Persistence; - + use super::{super::persistence_tests, Persistence}; // For some reason this is the only way to import the macro defined in another module of this // crate. use crate::*; @@ -588,9 +590,10 @@ mod generic_tests { #[cfg(test)] mod test { - use super::*; use serde_json::json; + use super::*; + #[test] fn test_config_migrations_add_builder_urls() { let before = json!({ diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 9a39296ebd..6576930e15 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -1,9 +1,13 @@ //! Mock implementation of persistence, for testing. #![cfg(any(test, feature = "testing"))] -use super::{NetworkConfig, PersistenceOptions, SequencerPersistence}; -use crate::{Leaf, SeqTypes, ViewNumber}; +use std::collections::BTreeMap; + use async_trait::async_trait; +use espresso_types::{ + v0::traits::{PersistenceOptions, SequencerPersistence}, + Leaf, NetworkConfig, +}; use hotshot_types::{ consensus::CommitmentMap, data::{DaProposal, QuorumProposal, VidDisperseShare}, @@ -12,7 +16,8 @@ use hotshot_types::{ simple_certificate::QuorumCertificate, utils::View, }; -use std::collections::BTreeMap; + +use crate::{SeqTypes, ViewNumber}; #[derive(Clone, Copy, Debug)] pub struct Options; diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 7446bfd6fe..4802abb89d 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1,9 +1,5 @@ -use super::{NetworkConfig, PersistenceOptions, SequencerPersistence}; -use crate::{ - catchup::{BackoffParams, SqlStateCatchup, StateCatchup}, - options::parse_duration, - Leaf, SeqTypes, ViewNumber, -}; +use std::{collections::BTreeMap, time::Duration}; + use anyhow::Context; use async_std::{ stream::StreamExt, @@ -12,6 +8,10 @@ use async_std::{ use async_trait::async_trait; use clap::Parser; use derivative::Derivative; +use espresso_types::{ + v0::traits::{PersistenceOptions, SequencerPersistence, StateCatchup}, + BackoffParams, Leaf, NetworkConfig, +}; use futures::future::{BoxFuture, FutureExt}; use hotshot_query_service::data_source::{ storage::{ @@ -30,7 +30,8 @@ use hotshot_types::{ utils::View, vote::HasViewNumber, }; -use std::{collections::BTreeMap, time::Duration}; + +use crate::{catchup::SqlStateCatchup, options::parse_duration, SeqTypes, ViewNumber}; /// Options for Postgres-backed persistence. 
#[derive(Parser, Clone, Derivative, Default)] diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index 257e408080..a244fa5eda 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -1,413 +1,26 @@ -use crate::{ - api::data_source::CatchupDataSource, - block::{NsTableValidationError, PayloadByteLen}, - catchup::SqlStateCatchup, - chain_config::BlockSize, - chain_config::ResolvableChainConfig, - eth_signature_key::EthKeyPair, - genesis::UpgradeType, - persistence::ChainConfigPersistence, - ChainConfig, Header, Leaf, NodeState, SeqTypes, -}; -use anyhow::{bail, ensure, Context}; -use ark_serialize::{ - CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, -}; -use async_std::stream::StreamExt; -use async_std::sync::RwLock; -use committable::{Commitment, Committable, RawCommitmentBuilder}; -use contract_bindings::fee_contract::DepositFilter; use core::fmt::Debug; -use derive_more::{Add, Display, From, Into, Mul, Sub}; -use ethers::{ - abi::Address, - types::U256, - utils::{parse_units, ParseUnits}, +use std::{sync::Arc, time::Duration}; + +use anyhow::{bail, ensure, Context}; +use async_std::{stream::StreamExt, sync::RwLock}; +use espresso_types::{ + BlockMerkleTree, ChainConfig, Delta, FeeAccount, FeeMerkleTree, ValidatedState, }; use futures::future::Future; use hotshot::traits::ValidatedState as HotShotState; use hotshot_query_service::{ availability::{AvailabilityDataSource, LeafQueryData}, data_source::VersionedDataSource, - explorer::MonetaryValue, - merklized_state::{MerklizedState, MerklizedStateHeightPersistence, UpdateStateData}, + merklized_state::{MerklizedStateHeightPersistence, UpdateStateData}, types::HeightIndexed, }; -use hotshot_types::{ - data::{BlockError, ViewNumber}, - traits::{ - block_contents::{BlockHeader, BuilderFee}, - node_implementation::ConsensusTime, - signature_key::BuilderSignatureKey, - states::StateDelta, - }, - vid::{VidCommon, VidSchemeType}, -}; -use itertools::Itertools; -use jf_merkle_tree::{ - prelude::{LightWeightSHA3MerkleTree, MerkleProof, Sha3Digest, Sha3Node}, - universal_merkle_tree::UniversalMerkleTree, - AppendableMerkleTreeScheme, ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, - LookupResult, MerkleCommitment, MerkleTreeError, MerkleTreeScheme, - PersistentUniversalMerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme, -}; -use jf_vid::VidScheme; -use num_traits::CheckedSub; -use sequencer_utils::{ - impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, -}; -use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use std::time::Duration; -use std::{collections::HashSet, ops::Add, str::FromStr}; -use thiserror::Error; +use jf_merkle_tree::{LookupResult, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme}; use vbs::version::Version; -const BLOCK_MERKLE_TREE_HEIGHT: usize = 32; -const FEE_MERKLE_TREE_HEIGHT: usize = 20; - -/// This enum is not used in code but functions as an index of -/// possible validation errors. 
-#[allow(dead_code)]
-enum StateValidationError {
-    ProposalValidation(ProposalValidationError),
-    BuilderValidation(BuilderValidationError),
-    Fee(FeeError),
-}
-
-#[derive(Hash, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
-pub struct ValidatedState {
-    /// Frontier of Block Merkle Tree
-    pub block_merkle_tree: BlockMerkleTree,
-    /// Fee Merkle Tree
-    pub fee_merkle_tree: FeeMerkleTree,
-    pub chain_config: ResolvableChainConfig,
-}
-
-#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
-pub struct Delta {
-    pub fees_delta: HashSet<FeeAccount>,
-}
-
-impl StateDelta for Delta {}
-
-impl Default for ValidatedState {
-    fn default() -> Self {
-        let block_merkle_tree = BlockMerkleTree::from_elems(
-            Some(BLOCK_MERKLE_TREE_HEIGHT),
-            Vec::<Commitment<Header>>::new(),
-        )
-        .unwrap();
-
-        // Words of wisdom from @mrain: "capacity = arity^height"
-        // "For index space 2^160, arity 256 (2^8),
-        // you should set the height as 160/8=20"
-        let fee_merkle_tree = FeeMerkleTree::from_kv_set(
-            FEE_MERKLE_TREE_HEIGHT,
-            Vec::<(FeeAccount, FeeAmount)>::new(),
-        )
-        .unwrap();
-
-        let chain_config = ResolvableChainConfig::from(ChainConfig::default());
-
-        Self {
-            block_merkle_tree,
-            fee_merkle_tree,
-            chain_config,
-        }
-    }
-}
-
-impl ValidatedState {
-    /// Prefund an account with a given amount. Only for demo purposes.
-    pub fn prefund_account(&mut self, account: FeeAccount, amount: FeeAmount) {
-        self.fee_merkle_tree.update(account, amount).unwrap();
-    }
-
-    pub fn balance(&mut self, account: FeeAccount) -> Option<FeeAmount> {
-        match self.fee_merkle_tree.lookup(account) {
-            LookupResult::Ok(balance, _) => Some(*balance),
-            LookupResult::NotFound(_) => Some(0.into()),
-            LookupResult::NotInMemory => None,
-        }
-    }
-
-    /// Find accounts that are not in memory.
-    ///
-    /// As an optimization we could try to apply updates and return the
-    /// forgotten accounts to be fetched from peers and update them later.
-    pub fn forgotten_accounts(
-        &self,
-        accounts: impl IntoIterator<Item = FeeAccount>,
-    ) -> Vec<FeeAccount> {
-        accounts
-            .into_iter()
-            .unique()
-            .filter(|account| {
-                self.fee_merkle_tree
-                    .lookup(*account)
-                    .expect_not_in_memory()
-                    .is_ok()
-            })
-            .collect()
-    }
-
-    /// Check if the merkle tree is available
-    pub fn need_to_fetch_blocks_mt_frontier(&self) -> bool {
-        let num_leaves = self.block_merkle_tree.num_leaves();
-        if num_leaves == 0 {
-            false
-        } else {
-            self.block_merkle_tree
-                .lookup(num_leaves - 1)
-                .expect_ok()
-                .is_err()
-        }
-    }
-
-    /// Insert a fee deposit receipt
-    pub fn insert_fee_deposit(
-        &mut self,
-        fee_info: FeeInfo,
-    ) -> anyhow::Result<LookupResult<FeeAmount, (), ()>> {
-        Ok(self
-            .fee_merkle_tree
-            .update_with(fee_info.account, |balance| {
-                Some(balance.cloned().unwrap_or_default().add(fee_info.amount))
-            })?)
-    }
-
-    /// Charge a fee to an account, transferring the funds to the fee recipient account.
-    pub fn charge_fee(&mut self, fee_info: FeeInfo, recipient: FeeAccount) -> Result<(), FeeError> {
-        let fee_state = self.fee_merkle_tree.clone();
-
-        // Deduct the fee from the paying account.
-        let FeeInfo { account, amount } = fee_info;
-        let mut err = None;
-        let fee_state = fee_state.persistent_update_with(account, |balance| {
-            let balance = balance.copied();
-            let Some(updated) = balance.unwrap_or_default().checked_sub(&amount) else {
-                // Return an error without updating the account.
- err = Some(FeeError::InsufficientFunds { balance, amount }); - return balance; - }; - if updated == FeeAmount::default() { - // Delete the account from the tree if its balance ended up at 0; this saves some - // space since the account is no longer carrying any information. - None - } else { - // Otherwise store the updated balance. - Some(updated) - } - })?; - - // Fail if there was an error during `persistent_update_with` (e.g. insufficient balance). - if let Some(err) = err { - return Err(err); - } - - // If we successfully deducted the fee from the source account, increment the balance of the - // recipient account. - let fee_state = fee_state.persistent_update_with(recipient, |balance| { - Some(balance.copied().unwrap_or_default() + amount) - })?; - - // If the whole update was successful, update the original state. - self.fee_merkle_tree = fee_state; - Ok(()) - } -} - -#[cfg(any(test, feature = "testing"))] -impl ValidatedState { - pub fn forget(&self) -> Self { - Self { - fee_merkle_tree: FeeMerkleTree::from_commitment(self.fee_merkle_tree.commitment()), - block_merkle_tree: BlockMerkleTree::from_commitment( - self.block_merkle_tree.commitment(), - ), - chain_config: ResolvableChainConfig::from(self.chain_config.commit()), - } - } -} - -/// Possible proposal validation failures -#[derive(Error, Debug, Eq, PartialEq)] -pub enum ProposalValidationError { - #[error("Invalid ChainConfig: expected={expected}, proposal={proposal}")] - InvalidChainConfig { expected: String, proposal: String }, - - #[error( - "Invalid Payload Size: (max_block_size={max_block_size}, proposed_block_size={block_size})" - )] - MaxBlockSizeExceeded { - max_block_size: BlockSize, - block_size: BlockSize, - }, - #[error("Insufficient Fee: block_size={max_block_size}, base_fee={base_fee}, proposed_fee={proposed_fee}")] - InsufficientFee { - max_block_size: BlockSize, - base_fee: FeeAmount, - proposed_fee: FeeAmount, - }, - #[error("Invalid Height: parent_height={parent_height}, proposal_height={proposal_height}")] - InvalidHeight { - parent_height: u64, - proposal_height: u64, - }, - #[error("Invalid Block Root Error: expected={expected_root}, proposal={proposal_root}")] - InvalidBlockRoot { - expected_root: BlockMerkleCommitment, - proposal_root: BlockMerkleCommitment, - }, - #[error("Invalid Fee Root Error: expected={expected_root}, proposal={proposal_root}")] - InvalidFeeRoot { - expected_root: FeeMerkleCommitment, - proposal_root: FeeMerkleCommitment, - }, - #[error("Invalid namespace table: {err}")] - InvalidNsTable { err: NsTableValidationError }, -} - -impl From for ProposalValidationError { - fn from(err: NsTableValidationError) -> Self { - Self::InvalidNsTable { err } - } -} - -pub fn validate_proposal( - state: &ValidatedState, - expected_chain_config: ChainConfig, - parent_leaf: &Leaf, - proposal: &Header, - vid_common: &VidCommon, -) -> Result<(), ProposalValidationError> { - let parent_header = parent_leaf.block_header(); - - // validate `ChainConfig` - if proposal.chain_config.commit() != expected_chain_config.commit() { - return Err(ProposalValidationError::InvalidChainConfig { - expected: format!("{:?}", expected_chain_config), - proposal: format!("{:?}", proposal.chain_config), - }); - } - - // validate block size and fee - let block_size = VidSchemeType::get_payload_byte_len(vid_common) as u64; - if block_size > *expected_chain_config.max_block_size { - return Err(ProposalValidationError::MaxBlockSizeExceeded { - max_block_size: expected_chain_config.max_block_size, - block_size: 
block_size.into(), - }); - } - - if proposal.fee_info.amount() < expected_chain_config.base_fee * block_size { - return Err(ProposalValidationError::InsufficientFee { - max_block_size: expected_chain_config.max_block_size, - base_fee: expected_chain_config.base_fee, - proposed_fee: proposal.fee_info.amount(), - }); - } - - // validate height - if proposal.height != parent_header.height + 1 { - return Err(ProposalValidationError::InvalidHeight { - parent_height: parent_header.height, - proposal_height: proposal.height, - }); - } - - let ValidatedState { - block_merkle_tree, - fee_merkle_tree, - .. - } = state; - - let block_merkle_tree_root = block_merkle_tree.commitment(); - if proposal.block_merkle_tree_root != block_merkle_tree_root { - return Err(ProposalValidationError::InvalidBlockRoot { - expected_root: block_merkle_tree_root, - proposal_root: proposal.block_merkle_tree_root, - }); - } - - let fee_merkle_tree_root = fee_merkle_tree.commitment(); - if proposal.fee_merkle_tree_root != fee_merkle_tree_root { - return Err(ProposalValidationError::InvalidFeeRoot { - expected_root: fee_merkle_tree_root, - proposal_root: proposal.fee_merkle_tree_root, - }); - } - - proposal - .ns_table - .validate(&PayloadByteLen::from_vid_common(vid_common))?; - - Ok(()) -} - -/// Possible charge fee failures -#[derive(Error, Debug, Eq, PartialEq)] -pub enum FeeError { - #[error("Insuficcient Funds: have {balance:?}, required {amount:?}")] - InsufficientFunds { - balance: Option, - amount: FeeAmount, - }, - #[error("Merkle Tree Error: {0}")] - MerkleTreeError(MerkleTreeError), -} - -impl From for FeeError { - fn from(item: MerkleTreeError) -> Self { - Self::MerkleTreeError(item) - } -} - -fn charge_fee( - state: &mut ValidatedState, - delta: &mut Delta, - fee_info: FeeInfo, - recipient: FeeAccount, -) -> Result<(), FeeError> { - state.charge_fee(fee_info, recipient)?; - delta.fees_delta.extend([fee_info.account, recipient]); - Ok(()) -} - -/// Possible builder validation failures -#[derive(Error, Debug, Eq, PartialEq)] -pub enum BuilderValidationError { - #[error("Builder signature not found")] - SignatureNotFound, - #[error("Fee amount out of range: {0}")] - FeeAmountOutOfRange(FeeAmount), - #[error("Invalid Builder Signature")] - InvalidBuilderSignature, -} - -/// Validate builder account by verifying signature -fn validate_builder_fee(proposed_header: &Header) -> Result<(), BuilderValidationError> { - // Beware of Malice! - let signature = proposed_header - .builder_signature - .ok_or(BuilderValidationError::SignatureNotFound)?; - let fee_amount = proposed_header.fee_info.amount().as_u64().ok_or( - BuilderValidationError::FeeAmountOutOfRange(proposed_header.fee_info.amount()), - )?; - - // verify signature - if !proposed_header.fee_info.account.validate_fee_signature( - &signature, - fee_amount, - proposed_header.metadata(), - &proposed_header.payload_commitment(), - ) { - return Err(BuilderValidationError::InvalidBuilderSignature); - } - - Ok(()) -} +use crate::{ + api::data_source::CatchupDataSource, catchup::SqlStateCatchup, + persistence::ChainConfigPersistence, NodeState, SeqTypes, +}; async fn compute_state_update( state: &ValidatedState, @@ -423,16 +36,16 @@ async fn compute_state_update( // Check internal consistency. let parent_header = parent_leaf.block_header(); ensure!( - state.block_merkle_tree.commitment() == parent_header.block_merkle_tree_root, + state.block_merkle_tree.commitment() == parent_header.block_merkle_tree_root(), "internal error! 
in-memory block tree {:?} does not match parent header {:?}", state.block_merkle_tree.commitment(), - parent_header.block_merkle_tree_root + parent_header.block_merkle_tree_root() ); ensure!( - state.fee_merkle_tree.commitment() == parent_header.fee_merkle_tree_root, + state.fee_merkle_tree.commitment() == parent_header.fee_merkle_tree_root(), "internal error! in-memory fee tree {:?} does not match parent header {:?}", state.fee_merkle_tree.commitment(), - parent_header.fee_merkle_tree_root + parent_header.fee_merkle_tree_root() ); state @@ -680,804 +293,23 @@ impl SequencerStateDataSource for T where { } -impl ValidatedState { - pub(crate) async fn apply_header( - &self, - instance: &NodeState, - parent_leaf: &Leaf, - proposed_header: &Header, - version: Version, - ) -> anyhow::Result<(Self, Delta)> { - // Clone state to avoid mutation. Consumer can take update - // through returned value. - - let mut validated_state = self.clone(); - validated_state.apply_upgrade(instance, version); - - let chain_config = validated_state - .get_chain_config(instance, &proposed_header.chain_config) - .await?; - - if Some(chain_config) != validated_state.chain_config.resolve() { - validated_state.chain_config = chain_config.into(); - } - - let l1_deposits = get_l1_deposits( - instance, - proposed_header, - parent_leaf, - chain_config.fee_contract, - ) - .await; - - // Find missing fee state entries. We will need to use the builder account which is paying a - // fee and the recipient account which is receiving it, plus any counts receiving deposits - // in this block. - let missing_accounts = self.forgotten_accounts( - [proposed_header.fee_info.account, chain_config.fee_recipient] - .into_iter() - .chain(l1_deposits.iter().map(|fee_info| fee_info.account)), - ); - - let parent_height = parent_leaf.height(); - let parent_view = parent_leaf.view_number(); - - // Ensure merkle tree has frontier - if self.need_to_fetch_blocks_mt_frontier() { - tracing::info!( - parent_height, - ?parent_view, - "fetching block frontier from peers" - ); - instance - .peers - .as_ref() - .remember_blocks_merkle_tree( - parent_height, - parent_view, - &mut validated_state.block_merkle_tree, - ) - .await?; - } - - // Fetch missing fee state entries - if !missing_accounts.is_empty() { - tracing::info!( - parent_height, - ?parent_view, - ?missing_accounts, - "fetching missing accounts from peers" - ); - - let missing_account_proofs = instance - .peers - .as_ref() - .fetch_accounts( - parent_height, - parent_view, - validated_state.fee_merkle_tree.commitment(), - missing_accounts, - ) - .await?; - - // Remember the fee state entries - for account in missing_account_proofs.iter() { - account - .proof - .remember(&mut validated_state.fee_merkle_tree) - .expect("proof previously verified"); - } - } - - let mut delta = Delta::default(); - - let mut validated_state = - apply_proposal(&validated_state, &mut delta, parent_leaf, l1_deposits); - - charge_fee( - &mut validated_state, - &mut delta, - proposed_header.fee_info, - chain_config.fee_recipient, - )?; - - Ok((validated_state, delta)) - } - - /// Updates the `ValidatedState` if a protocol upgrade has occurred. 
- pub(crate) fn apply_upgrade(&mut self, instance: &NodeState, version: Version) { - // Check for protocol upgrade based on sequencer version - if version <= instance.current_version { - return; - } - - let Some(upgrade) = instance.upgrades.get(&version) else { - return; - }; - - match upgrade.upgrade_type { - UpgradeType::ChainConfig { chain_config } => { - self.chain_config = chain_config.into(); - } - } - } - - /// Retrieves the `ChainConfig`. - /// - /// Returns the `NodeState` `ChainConfig` if the `ValidatedState` `ChainConfig` commitment matches the `NodeState` `ChainConfig`` commitment. - /// If the commitments do not match, it returns the `ChainConfig` available in either `ValidatedState` or proposed header. - /// If neither has the `ChainConfig`, it fetches the config from the peers. - /// - /// Returns an error if it fails to fetch the `ChainConfig` from the peers. - pub(crate) async fn get_chain_config( - &self, - instance: &NodeState, - header_cf: &ResolvableChainConfig, - ) -> anyhow::Result { - let state_cf = self.chain_config; - - if state_cf.commit() == instance.chain_config.commit() { - return Ok(instance.chain_config); - } - - let cf = match (state_cf.resolve(), header_cf.resolve()) { - (Some(cf), _) => cf, - (_, Some(cf)) if cf.commit() == state_cf.commit() => cf, - (_, Some(_)) | (None, None) => { - instance - .peers - .as_ref() - .fetch_chain_config(state_cf.commit()) - .await - } - }; - - Ok(cf) - } -} - -pub async fn get_l1_deposits( - instance: &NodeState, - header: &Header, - parent_leaf: &Leaf, - fee_contract_address: Option
, -) -> Vec { - if let (Some(addr), Some(block_info)) = (fee_contract_address, header.l1_finalized) { - instance - .l1_client - .get_finalized_deposits( - addr, - parent_leaf - .block_header() - .l1_finalized - .map(|block_info| block_info.number), - block_info.number, - ) - .await - } else { - vec![] - } -} - -#[must_use] -fn apply_proposal( - validated_state: &ValidatedState, - delta: &mut Delta, - parent_leaf: &Leaf, - l1_deposits: Vec, -) -> ValidatedState { - let mut validated_state = validated_state.clone(); - // pushing a block into merkle tree shouldn't fail - validated_state - .block_merkle_tree - .push(parent_leaf.block_header().commit()) - .unwrap(); - - for FeeInfo { account, amount } in l1_deposits.iter() { - validated_state - .fee_merkle_tree - .update_with(account, |balance| { - Some(balance.cloned().unwrap_or_default().add(*amount)) - }) - .expect("update_with succeeds"); - delta.fees_delta.insert(*account); - } - - validated_state -} - -impl HotShotState for ValidatedState { - type Error = BlockError; - type Instance = NodeState; - - type Time = ViewNumber; - - type Delta = Delta; - fn on_commit(&self) {} - /// Validate parent against known values (from state) and validate - /// proposal descends from parent. Returns updated `ValidatedState`. - #[tracing::instrument( - skip_all, - fields( - node_id = instance.node_id, - view = ?parent_leaf.view_number(), - height = parent_leaf.height(), - ), - )] - async fn validate_and_apply_header( - &self, - instance: &Self::Instance, - parent_leaf: &Leaf, - proposed_header: &Header, - vid_common: VidCommon, - version: Version, - ) -> Result<(Self, Self::Delta), Self::Error> { - //validate builder fee - if let Err(err) = validate_builder_fee(proposed_header) { - tracing::error!("invalid builder fee: {err:#}"); - return Err(BlockError::InvalidBlockHeader); - } - - // Unwrapping here is okay as we retry in a loop - //so we should either get a validated state or until hotshot cancels the task - let (validated_state, delta) = self - .apply_header(instance, parent_leaf, proposed_header, version) - .await - .unwrap(); - - let chain_config = validated_state - .chain_config - .resolve() - .expect("Chain Config not found in validated state"); - - // validate the proposal - if let Err(err) = validate_proposal( - &validated_state, - chain_config, - parent_leaf, - proposed_header, - &vid_common, - ) { - tracing::error!("invalid proposal: {err:#}"); - return Err(BlockError::InvalidBlockHeader); - } - - // log successful progress about once in 10 - 20 seconds, - // TODO: we may want to make this configurable - if parent_leaf.view_number().u64() % 10 == 0 { - tracing::info!("validated and applied new header"); - } - Ok((validated_state, delta)) - } - /// Construct the state with the given block header. - /// - /// This can also be used to rebuild the state for catchup. - fn from_header(block_header: &Header) -> Self { - let fee_merkle_tree = if block_header.fee_merkle_tree_root.size() == 0 { - // If the commitment tells us that the tree is supposed to be empty, it is convenient to - // just create an empty tree, rather than a commitment-only tree. - FeeMerkleTree::new(FEE_MERKLE_TREE_HEIGHT) - } else { - FeeMerkleTree::from_commitment(block_header.fee_merkle_tree_root) - }; - let block_merkle_tree = if block_header.block_merkle_tree_root.size() == 0 { - // If the commitment tells us that the tree is supposed to be empty, it is convenient to - // just create an empty tree, rather than a commitment-only tree. 
- BlockMerkleTree::new(BLOCK_MERKLE_TREE_HEIGHT) - } else { - BlockMerkleTree::from_commitment(block_header.block_merkle_tree_root) - }; - Self { - fee_merkle_tree, - block_merkle_tree, - chain_config: block_header.chain_config, - } - } - /// Construct a genesis validated state. - #[must_use] - fn genesis(instance: &Self::Instance) -> (Self, Self::Delta) { - (instance.genesis_state.clone(), Delta::default()) - } -} - -// Required for TestableState -#[cfg(any(test, feature = "testing"))] -impl std::fmt::Display for ValidatedState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{self:#?}") - } -} - -#[cfg(any(test, feature = "testing"))] -impl hotshot_types::traits::states::TestableState for ValidatedState { - fn create_random_transaction( - _state: Option<&Self>, - rng: &mut dyn rand::RngCore, - _padding: u64, - ) -> crate::Transaction { - crate::Transaction::random(rng) - } -} - -pub type BlockMerkleTree = LightWeightSHA3MerkleTree>; -pub type BlockMerkleCommitment = ::Commitment; - -impl MerklizedState for BlockMerkleTree { - type Key = Self::Index; - type Entry = Commitment
; - type T = Sha3Node; - type Commit = Self::Commitment; - type Digest = Sha3Digest; - - fn state_type() -> &'static str { - "block_merkle_tree" - } - - fn header_state_commitment_field() -> &'static str { - "block_merkle_tree_root" - } - - fn tree_height() -> usize { - BLOCK_MERKLE_TREE_HEIGHT - } - - fn insert_path( - &mut self, - key: Self::Key, - proof: &MerkleProof, - ) -> anyhow::Result<()> { - let Some(elem) = proof.elem() else { - bail!("BlockMerkleTree does not support non-membership proofs"); - }; - self.remember(key, elem, proof)?; - Ok(()) - } -} - -#[derive( - Hash, - Copy, - Clone, - Debug, - Deserialize, - Serialize, - PartialEq, - Eq, - CanonicalSerialize, - CanonicalDeserialize, -)] -/// `FeeInfo` holds data related to builder fees. -pub struct FeeInfo { - account: FeeAccount, - amount: FeeAmount, -} -impl FeeInfo { - pub fn new(account: impl Into, amount: impl Into) -> Self { - Self { - account: account.into(), - amount: amount.into(), - } - } - /// The minimum fee paid by the given builder account for a proposed block. - // TODO this function should take the block size as an input, we need to get this information - // from HotShot. - pub fn base_fee(account: FeeAccount) -> Self { - Self { - account, - amount: FeeAmount::default(), - } - } - - pub fn genesis() -> Self { - Self { - account: Default::default(), - amount: Default::default(), - } - } - - pub fn account(&self) -> FeeAccount { - self.account - } - - pub fn amount(&self) -> FeeAmount { - self.amount - } -} - -impl From> for FeeInfo { - fn from(fee: BuilderFee) -> Self { - Self { - amount: fee.fee_amount.into(), - account: fee.fee_account, - } - } -} - -impl From for FeeInfo { - fn from(item: DepositFilter) -> Self { - Self { - amount: item.amount.into(), - account: item.user.into(), - } - } -} - -impl Committable for FeeInfo { - fn commit(&self) -> Commitment { - RawCommitmentBuilder::new(&Self::tag()) - .fixed_size_field("account", &self.account.to_fixed_bytes()) - .fixed_size_field("amount", &self.amount.to_fixed_bytes()) - .finalize() - } - fn tag() -> String { - "FEE_INFO".into() - } -} - -// New Type for `U256` in order to implement `CanonicalSerialize` and -// `CanonicalDeserialize` -#[derive( - Default, - Hash, - Copy, - Clone, - Debug, - Display, - PartialEq, - Eq, - PartialOrd, - Ord, - Add, - Sub, - Mul, - From, - Into, -)] -#[display(fmt = "{_0}")] -pub struct FeeAmount(U256); - -impl_serde_from_string_or_integer!(FeeAmount); -impl_to_fixed_bytes!(FeeAmount, U256); - -impl From for FeeAmount { - fn from(amt: u64) -> Self { - Self(amt.into()) - } -} - -impl From for MonetaryValue { - fn from(value: FeeAmount) -> Self { - MonetaryValue::eth(value.0.as_u128() as i128) - } -} - -impl CheckedSub for FeeAmount { - fn checked_sub(&self, v: &Self) -> Option { - self.0.checked_sub(v.0).map(FeeAmount) - } -} - -impl FromStr for FeeAmount { - type Err = ::Err; - - fn from_str(s: &str) -> Result { - Ok(Self(s.parse()?)) - } -} - -impl FromStringOrInteger for FeeAmount { - type Binary = U256; - type Integer = u64; - - fn from_binary(b: Self::Binary) -> anyhow::Result { - Ok(Self(b)) - } - - fn from_integer(i: Self::Integer) -> anyhow::Result { - Ok(i.into()) - } - - fn from_string(s: String) -> anyhow::Result { - // For backwards compatibility, we have an ad hoc parser for WEI amounts represented as hex - // strings. - if let Some(s) = s.strip_prefix("0x") { - return Ok(Self(s.parse()?)); - } - - // Strip an optional non-numeric suffix, which will be interpreted as a unit. 
-        let (base, unit) = s
-            .split_once(char::is_whitespace)
-            .unwrap_or((s.as_str(), "wei"));
-        match parse_units(base, unit)? {
-            ParseUnits::U256(n) => Ok(Self(n)),
-            ParseUnits::I256(_) => bail!("amount cannot be negative"),
-        }
-    }
-
-    fn to_binary(&self) -> anyhow::Result<Self::Binary> {
-        Ok(self.0)
-    }
-
-    fn to_string(&self) -> anyhow::Result<String> {
-        Ok(format!("{self}"))
-    }
-}
-
-impl FeeAmount {
-    pub fn as_u64(&self) -> Option<u64> {
-        if self.0 <= u64::MAX.into() {
-            Some(self.0.as_u64())
-        } else {
-            None
-        }
-    }
-}
-
-// New Type for `Address` in order to implement `CanonicalSerialize` and
-// `CanonicalDeserialize`
-#[derive(
-    Default,
-    Hash,
-    Copy,
-    Clone,
-    Debug,
-    Display,
-    Deserialize,
-    Serialize,
-    PartialEq,
-    Eq,
-    PartialOrd,
-    Ord,
-    From,
-    Into,
-)]
-#[display(fmt = "{_0:x}")]
-pub struct FeeAccount(Address);
-impl FeeAccount {
-    /// Return inner `Address`
-    pub fn address(&self) -> Address {
-        self.0
-    }
-    /// Return byte slice representation of inner `Address` type
-    pub fn as_bytes(&self) -> &[u8] {
-        self.0.as_bytes()
-    }
-    /// Return array containing underlying bytes of inner `Address` type
-    pub fn to_fixed_bytes(self) -> [u8; 20] {
-        self.0.to_fixed_bytes()
-    }
-    pub fn test_key_pair() -> EthKeyPair {
-        EthKeyPair::from_mnemonic(
-            "test test test test test test test test test test test junk",
-            0u32,
-        )
-        .unwrap()
-    }
-}
-
-impl FromStr for FeeAccount {
-    type Err = anyhow::Error;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(Self(s.parse()?))
-    }
-}
-
-impl Valid for FeeAmount {
-    fn check(&self) -> Result<(), SerializationError> {
-        Ok(())
-    }
-}
-
-impl Valid for FeeAccount {
-    fn check(&self) -> Result<(), SerializationError> {
-        Ok(())
-    }
-}
-
-impl CanonicalSerialize for FeeAmount {
-    fn serialize_with_mode<W: std::io::Write>(
-        &self,
-        mut writer: W,
-        _compress: Compress,
-    ) -> Result<(), SerializationError> {
-        Ok(writer.write_all(&self.to_fixed_bytes())?)
-    }
-
-    fn serialized_size(&self, _compress: Compress) -> usize {
-        core::mem::size_of::<U256>()
-    }
-}
-impl CanonicalDeserialize for FeeAmount {
-    fn deserialize_with_mode<R: Read>(
-        mut reader: R,
-        _compress: Compress,
-        _validate: Validate,
-    ) -> Result<Self, SerializationError> {
-        let mut bytes = [0u8; core::mem::size_of::<U256>()];
-        reader.read_exact(&mut bytes)?;
-        let value = U256::from_little_endian(&bytes);
-        Ok(Self(value))
-    }
-}
-impl CanonicalSerialize for FeeAccount {
-    fn serialize_with_mode<W: std::io::Write>(
-        &self,
-        mut writer: W,
-        _compress: Compress,
-    ) -> Result<(), SerializationError> {
-        Ok(writer.write_all(&self.0.to_fixed_bytes())?)
-    }
-
-    fn serialized_size(&self, _compress: Compress) -> usize {
-        core::mem::size_of::<Address>()
-    }
-}
-impl CanonicalDeserialize for FeeAccount {
-    fn deserialize_with_mode<R: Read>(
-        mut reader: R,
-        _compress: Compress,
-        _validate: Validate,
-    ) -> Result<Self, SerializationError> {
-        let mut bytes = [0u8; core::mem::size_of::<Address>()];
-        reader.read_exact(&mut bytes)?;
-        let value = Address::from_slice(&bytes);
-        Ok(Self(value))
-    }
-}
-
-impl ToTraversalPath<256> for FeeAccount {
-    fn to_traversal_path(&self, height: usize) -> Vec<usize> {
-        self.0
-            .to_fixed_bytes()
-            .into_iter()
-            .take(height)
-            .map(|i| i as usize)
-            .collect()
-    }
-}
-
-pub type FeeMerkleTree = UniversalMerkleTree<FeeAmount, Sha3Digest, FeeAccount, 256, Sha3Node>;
-pub type FeeMerkleCommitment = <FeeMerkleTree as MerkleTreeScheme>::Commitment;
-
-impl MerklizedState<SeqTypes, 256> for FeeMerkleTree {
-    type Key = Self::Index;
-    type Entry = Self::Element;
-    type T = Sha3Node;
-    type Commit = Self::Commitment;
-    type Digest = Sha3Digest;
-
-    fn state_type() -> &'static str {
-        "fee_merkle_tree"
-    }
-
-    fn header_state_commitment_field() -> &'static str {
-        "fee_merkle_tree_root"
-    }
-
-    fn tree_height() -> usize {
-        FEE_MERKLE_TREE_HEIGHT
-    }
-
-    fn insert_path(
-        &mut self,
-        key: Self::Key,
-        proof: &MerkleProof<Self::Entry, Self::Key, Self::T, 256>,
-    ) -> anyhow::Result<()> {
-        match proof.elem() {
-            Some(elem) => self.remember(key, elem, proof)?,
-            None => self.non_membership_remember(key, proof)?,
-        }
-        Ok(())
-    }
-}
-
-/// A proof of the balance of an account in the fee ledger.
-///
-/// If the account of interest does not exist in the fee state, this is a Merkle non-membership
-/// proof, and the balance is implicitly zero. Otherwise, this is a normal Merkle membership proof.
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct FeeAccountProof {
-    account: Address,
-    proof: FeeMerkleProof,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-enum FeeMerkleProof {
-    Presence(<FeeMerkleTree as MerkleTreeScheme>::MembershipProof),
-    Absence(<FeeMerkleTree as UniversalMerkleTreeScheme>::NonMembershipProof),
-}
-
-impl FeeAccountProof {
-    pub(crate) fn presence(
-        pos: FeeAccount,
-        proof: <FeeMerkleTree as MerkleTreeScheme>::MembershipProof,
-    ) -> Self {
-        Self {
-            account: pos.into(),
-            proof: FeeMerkleProof::Presence(proof),
-        }
-    }
-
-    pub(crate) fn absence(
-        pos: FeeAccount,
-        proof: <FeeMerkleTree as UniversalMerkleTreeScheme>::NonMembershipProof,
-    ) -> Self {
-        Self {
-            account: pos.into(),
-            proof: FeeMerkleProof::Absence(proof),
-        }
-    }
-
-    pub fn prove(tree: &FeeMerkleTree, account: Address) -> Option<(Self, U256)> {
-        match tree.universal_lookup(FeeAccount(account)) {
-            LookupResult::Ok(balance, proof) => Some((
-                Self {
-                    account,
-                    proof: FeeMerkleProof::Presence(proof),
-                },
-                balance.0,
-            )),
-            LookupResult::NotFound(proof) => Some((
-                Self {
-                    account,
-                    proof: FeeMerkleProof::Absence(proof),
-                },
-                0.into(),
-            )),
-            LookupResult::NotInMemory => None,
-        }
-    }
-
-    pub fn verify(&self, comm: &FeeMerkleCommitment) -> anyhow::Result<U256> {
-        match &self.proof {
-            FeeMerkleProof::Presence(proof) => {
-                ensure!(
-                    FeeMerkleTree::verify(comm.digest(), FeeAccount(self.account), proof)?.is_ok(),
-                    "invalid proof"
-                );
-                Ok(proof
-                    .elem()
-                    .context("presence proof is missing account balance")?
- .0) - } - FeeMerkleProof::Absence(proof) => { - let tree = FeeMerkleTree::from_commitment(comm); - ensure!( - tree.non_membership_verify(FeeAccount(self.account), proof)?, - "invalid proof" - ); - Ok(0.into()) - } - } - } - - pub fn remember(&self, tree: &mut FeeMerkleTree) -> anyhow::Result<()> { - match &self.proof { - FeeMerkleProof::Presence(proof) => { - tree.remember( - FeeAccount(self.account), - proof - .elem() - .context("presence proof is missing account balance")?, - proof, - )?; - Ok(()) - } - FeeMerkleProof::Absence(proof) => { - tree.non_membership_remember(FeeAccount(self.account), proof)?; - Ok(()) - } - } - } -} - #[cfg(test)] mod test { - use super::*; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; - use hotshot_types::vid::vid_scheme; + use espresso_types::{ + validate_proposal, BlockSize, FeeAccount, FeeAccountProof, FeeAmount, FeeError, FeeInfo, + FeeMerkleProof, Leaf, ProposalValidationError, + }; + use ethers::{abi::Address, types::U256}; + use hotshot_types::{ + traits::signature_key::BuilderSignatureKey, + vid::{vid_scheme, VidSchemeType}, + }; + use jf_merkle_tree::{ForgetableMerkleTreeScheme, MerkleTreeError}; use jf_vid::VidScheme; + use sequencer_utils::ser::FromStringOrInteger; + + use super::*; #[test] fn test_fee_proofs() { @@ -1580,7 +412,7 @@ mod test { ProposalValidationError::InsufficientFee { max_block_size: instance.chain_config.max_block_size, base_fee: instance.chain_config.base_fee, - proposed_fee: header.fee_info.amount() + proposed_fee: header.fee_info().amount() }, err ); diff --git a/sequencer/src/state_signature.rs b/sequencer/src/state_signature.rs index d3f2964d3f..8488bdddc8 100644 --- a/sequencer/src/state_signature.rs +++ b/sequencer/src/state_signature.rs @@ -1,17 +1,19 @@ //! Utilities for generating and storing the most recent light client state signatures. 
-use crate::{Leaf, SeqTypes, StateKeyPair}; +use std::collections::{HashMap, VecDeque}; + use ark_ff::PrimeField; use ark_serialize::CanonicalSerialize; use async_std::sync::RwLock; +use espresso_types::Leaf; use hotshot::types::{Event, EventType}; use hotshot_stake_table::vec_based::StakeTable; -use hotshot_types::light_client::{ - CircuitField, LightClientState, StateSignatureRequestBody, StateVerKey, -}; use hotshot_types::{ event::LeafInfo, - light_client::{StateSignature, StateSignatureScheme}, + light_client::{ + CircuitField, LightClientState, StateSignature, StateSignatureRequestBody, + StateSignatureScheme, StateVerKey, + }, signature_key::BLSPubKey, traits::{ node_implementation::ConsensusTime, @@ -21,14 +23,14 @@ use hotshot_types::{ PeerConfig, }; use jf_crhf::CRHF; -use jf_rescue::crhf::VariableLengthRescueCRHF; -use jf_rescue::RescueError; +use jf_rescue::{crhf::VariableLengthRescueCRHF, RescueError}; use jf_signature::SignatureScheme; -use std::collections::{HashMap, VecDeque}; use surf_disco::{Client, Url}; use tide_disco::error::ServerError; use vbs::version::StaticVersionType; +use crate::{SeqTypes, StateKeyPair}; + /// A relay server that's collecting and serving the light client state signatures pub mod relay_server; @@ -151,12 +153,12 @@ fn form_light_client_state( let header = leaf.block_header(); let mut block_comm_root_bytes = vec![]; header - .block_merkle_tree_root + .block_merkle_tree_root() .serialize_compressed(&mut block_comm_root_bytes)?; let mut fee_ledger_comm_bytes = vec![]; header - .fee_merkle_tree_root + .fee_merkle_tree_root() .serialize_compressed(&mut fee_ledger_comm_bytes)?; Ok(LightClientState { view_number: leaf.view_number().u64() as usize, diff --git a/sequencer/src/state_signature/relay_server.rs b/sequencer/src/state_signature/relay_server.rs index 9c78c7c568..082482cc9f 100644 --- a/sequencer/src/state_signature/relay_server.rs +++ b/sequencer/src/state_signature/relay_server.rs @@ -1,4 +1,8 @@ -use super::{LightClientState, StateSignatureRequestBody}; +use std::{ + collections::{BTreeSet, HashMap}, + path::PathBuf, +}; + use async_compatibility_layer::channel::OneShotReceiver; use async_std::sync::RwLock; use clap::Args; @@ -9,10 +13,6 @@ use hotshot_types::light_client::{ StateSignature, StateSignatureScheme, StateSignaturesBundle, StateVerKey, }; use jf_signature::SignatureScheme; -use std::{ - collections::{BTreeSet, HashMap}, - path::PathBuf, -}; use tide_disco::{ api::ApiError, error::ServerError, @@ -22,6 +22,8 @@ use tide_disco::{ use url::Url; use vbs::version::StaticVersionType; +use super::{LightClientState, StateSignatureRequestBody}; + /// State that checks the light client state update and the signature collection #[derive(Default)] struct StateRelayServerState { diff --git a/types/Cargo.toml b/types/Cargo.toml new file mode 100644 index 0000000000..c1a9acb53a --- /dev/null +++ b/types/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "espresso-types" +version = "0.1.0" +authors = ["Espresso Systems "] +edition = "2021" + +[features] +testing = ["hotshot-testing"] + +[dependencies] + +anyhow = { workspace = true } +ark-serialize = { workspace = true } +async-compatibility-layer = { workspace = true } +async-once-cell = { workspace = true } +async-std = { workspace = true } +async-trait = { workspace = true } +base64-bytes = { workspace = true } +bincode = { workspace = true } +blake3 = { workspace = true } +bytesize = { workspace = true } +clap = { workspace = true } +cld = { workspace = true } +committable = { workspace = true } 
+contract-bindings = { path = "../contract-bindings" } +derivative = { workspace = true } +derive_more = { workspace = true } +es-version = { workspace = true } +ethers = { workspace = true } +fluent-asserter = "0.1.9" +futures = { workspace = true } +hotshot = { workspace = true } +hotshot-orchestrator = { workspace = true } +hotshot-query-service = { workspace = true, features = ["testing"] } +hotshot-testing = { workspace = true, optional = true } +hotshot-types = { workspace = true } +itertools = { workspace = true } +jf-merkle-tree = { workspace = true } +jf-utils = { workspace = true } # TODO temporary: used only for test_rng() +jf-vid = { workspace = true } +num-traits = { workspace = true } +paste = { workspace = true } +pretty_assertions = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +rand_distr = { workspace = true } +sequencer-utils = { path = "../utils" } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = "0.10" # TODO temporary, used only for VID, should be set in hotshot +snafu = { workspace = true } +static_assertions = { workspace = true } +tagged-base64 = { workspace = true } +thiserror = { workspace = true } +time = { workspace = true } +tracing = { workspace = true } +trait-set = { workspace = true } +url = { workspace = true } +vbs = { workspace = true } diff --git a/types/README.md b/types/README.md new file mode 100644 index 0000000000..b3bf5016b2 --- /dev/null +++ b/types/README.md @@ -0,0 +1,120 @@ +# Espresso Types + +This crate provides the data types that make up the Espresso Sequencing Marketplace, along with +the logic that defines how these types interact and evolve as the network operates. It also provides +a versioning system that enables network upgrades, including changes to data types, that maintains +adequate compatibility with older types so that newer versions of the software are able to interpret +an Espresso blockchain all the way back to genesis. + +## Design Principles + +### Compatibility Within Reason + +Blockchains have the unique problem of needing to maintain backwards compatibility with every +previous version of the protocol, so that old data can be interpreted and replayed as necessary to +derive the current blockchain state. Thus, it is highly desirable to have one set of types at any +given time, which is backwards compatible with all older shipped versions, and to minimize +differences between versions so that we can avoid as much as possible enum types and conditional +logic to handle different versions. + +To the extent that differences between versions are minimized, it is practical to maintain one +codebase with full backwards compatibility, with only minor conditionals limited in scope. Due to +this strong compatibility, changes made in this manner -- that is, affecting some logic but +maintaining one coherent set of types and backwards serialization compatibility -- correspond to +minor version changes. + +Over time, it is possible that these minor changes will accumulate to the point where it is +infeasible to handle all the various cases in one set of code. Or, a significant protocol upgrade +might make it impractical to maintain backwards compatibility using a single set of types and logic. +In this case, a _major_ version increment may be necessary, where we create a new set of types and +logic with a fresh slate and no backwards compatibility burden. In such cases, applications that use +this crate (e.g. 
consensus, archival query service) will be responsible for switching between two
+sets of types (e.g. major versions 1 and 2) as necessary, depending on what part of the history of
+the blockchain they are dealing with.
+
+### Separation of Data from Code
+
+Due to the constraints of serialization, and specifically the desirability of maintaining `serde`
+compatibility as much as possible, the most practical way to handle different versions of data is to
+have independent, parallel definitions of the data types for each supported version. These
+definitions exist in their own namespaces within this crate, such as `v0_1::Header` for the `Header`
+type from version 0.1, `v0_2::Header`, etc.
+
+Code, on the other hand, benefits from being as unified as possible. Having entirely separate
+implementations for each version would make it harder to spot differences and similarities between
+versions visually, increase the burden of maintenance and testing, and lead to large amounts of
+duplicate code where logic hasn't changed between versions (or else a confusing mess of slightly
+customizable helper functions shared across versions).
+
+As such, for each _major_ version, there is one implementation of the network logic that encompasses
+all minor versions. Each major version defines top-level types like `v0::Header` which are
+compatible across that entire major version. For example, `v0::Header` implements
+`From<v0_1::Header>` and `From<v0_2::Header>`. Its serialization will output the appropriate minor
+version format depending on which minor version was used to construct the header, and it implements
+`deserialize_as(Version)` which interprets the input as the specified format version.
+
+This major version `Header` type implements all of the network logic for all minor versions within
+its major version; operations on headers and states follow the logic for the minor version that was
+used to construct the header.
+
+## Repository Structure
+
+The repository is divided into top-level modules for each supported major version. All types from
+the most recent major version are also re-exported from the top level of the crate. This allows
+applications which intend to stay up-to-date with the latest types to import directly from the top
+level, and then a simple `cargo update` is sufficient to bring in the latest types, at which point
+the application can be updated as necessary. Meanwhile, applications that intend to pin to a
+specific stable major version can import the versioned types from the appropriate module.
+
+The structure of each major version module mirrors the top level structure recursively. There are
+sub-modules for each minor version within that major version, and the latest types for that major
+version are re-exported from the major version module itself.
+
+Note that the minor version sub-modules _only_ define data structures and derivable trait
+implementations (such as `Debug` and `serde` traits). All operations on these data structures,
+including constructors and field accessors, are defined in the major version module. This upholds
+design principle 2, by separating the versioned data structure layouts from the version-agnostic
+Rust interfaces we use to deal with these data structures.
+
+Each major version module also contains a `traits` submodule containing implementations of HotShot
+traits for the types for that major version, allowing them to be used to instantiate HotShot
+consensus and related applications, like the query service.
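+
+To make this layout concrete, here is a deliberately simplified sketch of the pattern described
+above. The names and fields are hypothetical, not the actual definitions in this crate; the real
+`Header` has many more fields and custom `serde` implementations that emit the correct minor
+version format.
+
+```rust
+// Hypothetical minor version layouts: data only, no logic.
+mod v0_1 {
+    pub struct Header {
+        pub(crate) height: u64,
+    }
+}
+
+mod v0_2 {
+    pub struct Header {
+        pub(crate) height: u64,
+        pub(crate) timestamp: u64, // field added in minor version 0.2
+    }
+}
+
+/// Major version type, compatible with every minor version of v0.
+pub enum Header {
+    V1(v0_1::Header),
+    V2(v0_2::Header),
+}
+
+impl From<v0_1::Header> for Header {
+    fn from(h: v0_1::Header) -> Self {
+        Self::V1(h)
+    }
+}
+
+impl From<v0_2::Header> for Header {
+    fn from(h: v0_2::Header) -> Self {
+        Self::V2(h)
+    }
+}
+
+impl Header {
+    /// Version-agnostic accessor: callers never see the minor version layout.
+    pub fn height(&self) -> u64 {
+        match self {
+            Self::V1(h) => h.height,
+            Self::V2(h) => h.height,
+        }
+    }
+}
+```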
+ +## Conventions and Best Practices + +### Use re-exports to minimize duplicated data structures + +Data structures that have not changed from one minor version to the next can be re-exported from the +previous minor version. E.g. in `v0::v0_2`, we might have `pub use super::v0_1::ChainConfig` if the +`ChainConfig` type has not changed between these two versions. + +Data structures that have not changed across any minor version within a major version can be +re-exported in the major version module from the latest minor version, but a static assertion must +be present checking that the re-exported type is the same type as exported from each of the minor +version modules, e.g. + +```rust +pub use v0_2::ChainConfig; + +static_assert_unchanged_type!(ChainConfig); +``` + +### All fields are private + +The goal of each major version is to provide a consistent Rust interface that works regardless of +which minor version is being used for the underlying data structure. To achieve this while allowing +changes in the data layout, all fields should be private (or `pub(crate)`). All consumers of this +crate should access the data via public methods defined in the major version module, since the +implementation of these methods can often be changed without changing the interface in case the +data layout changes. + +### Unversioned types considered code + +The pain of maintaining parallel sets of versioned types means we should only do it when absolutely +necessary: for serializable types that are used either as consensus messages or persistent storage, +or for types used to define such types. + +Other types which are used only as part of the Rust API, as transient, in-memory types, should be +defined alongside implementations and treated as part of code, not data. An example is the +`EthKeyPair` type, which is only used as a convenient wrapper to hold a public and private key pair, +but does not appear as part of any serialized data structure. diff --git a/sequencer/src/eth_signature_key.rs b/types/src/eth_signature_key.rs similarity index 99% rename from sequencer/src/eth_signature_key.rs rename to types/src/eth_signature_key.rs index da822372bc..6843fd0b89 100644 --- a/sequencer/src/eth_signature_key.rs +++ b/types/src/eth_signature_key.rs @@ -1,3 +1,8 @@ +use std::{ + fmt::{Display, Formatter}, + hash::Hash, +}; + use ethers::{ core::k256::ecdsa::{SigningKey, VerifyingKey}, signers::{ @@ -10,12 +15,8 @@ use ethers::{ use hotshot_types::traits::signature_key::BuilderSignatureKey; use serde::{Deserialize, Serialize}; use snafu::Snafu; -use std::{ - fmt::{Display, Formatter}, - hash::Hash, -}; -use crate::state::FeeAccount; +use crate::FeeAccount; // Newtype because type doesn't implement Hash, Display, SerDe, Ord, PartialOrd #[derive(PartialEq, Eq, Clone)] @@ -144,9 +145,10 @@ impl BuilderSignatureKey for FeeAccount { #[cfg(test)] mod tests { - use super::*; use hotshot_types::traits::signature_key::BuilderSignatureKey; + use super::*; + impl EthKeyPair { pub fn for_test() -> Self { FeeAccount::generated_from_seed_indexed([0u8; 32], 0).1 diff --git a/types/src/lib.rs b/types/src/lib.rs new file mode 100644 index 0000000000..0ce6950965 --- /dev/null +++ b/types/src/lib.rs @@ -0,0 +1,7 @@ +pub mod v0; + +// Re-export the latest major version compatibility types. 
+pub use v0::*; + +pub mod eth_signature_key; +mod reference_tests; diff --git a/sequencer/src/reference_tests.rs b/types/src/reference_tests.rs similarity index 83% rename from sequencer/src/reference_tests.rs rename to types/src/reference_tests.rs index 0bc43bb0ba..5f904e7b16 100644 --- a/sequencer/src/reference_tests.rs +++ b/types/src/reference_tests.rs @@ -21,10 +21,8 @@ //! constant in this module with the "actual" value that can be found in the logs of the failing //! test. -use crate::{ - block::NsTable, state::FeeInfo, ChainConfig, FeeAccount, Header, L1BlockInfo, NamespaceId, - Payload, SeqTypes, Transaction, ValidatedState, -}; +use std::{fmt::Debug, path::Path, str::FromStr}; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use committable::Committable; use es_version::SequencerVersion; @@ -38,15 +36,26 @@ use rand::{Rng, RngCore}; use sequencer_utils::commitment_to_u256; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; -use std::{fmt::Debug, path::Path, str::FromStr}; use tagged_base64::TaggedBase64; -use vbs::BinarySerializer; +use vbs::{ + version::{StaticVersion, StaticVersionType, Version}, + BinarySerializer, +}; + +use crate::{ + ChainConfig, FeeAccount, FeeInfo, Header, L1BlockInfo, NamespaceId, NsTable, Payload, SeqTypes, + Transaction, ValidatedState, +}; type Serializer = vbs::Serializer; async fn reference_payload() -> Payload { const NUM_NS_IDS: usize = 3; - let ns_ids: [NamespaceId; NUM_NS_IDS] = [12648430.into(), 314159265.into(), 2718281828.into()]; + let ns_ids: [NamespaceId; NUM_NS_IDS] = [ + 12648430_u32.into(), + 314159265_u32.into(), + 2718281828_u32.into(), + ]; let mut rng = jf_utils::test_rng(); let txs = { @@ -105,7 +114,7 @@ fn reference_fee_info() -> FeeInfo { const REFERENCE_FEE_INFO_COMMITMENT: &str = "FEE_INFO~xCCeTjJClBtwtOUrnAmT65LNTQGceuyjSJHUFfX6VRXR"; -async fn reference_header() -> Header { +async fn reference_header(version: Version) -> Header { let builder_key = FeeAccount::generated_from_seed_indexed(Default::default(), 0).1; let fee_info = reference_fee_info(); let payload = reference_payload().await; @@ -122,23 +131,26 @@ async fn reference_header() -> Header { let state = ValidatedState::default(); - Header { - height: 42, - timestamp: 789, - l1_head: 124, - l1_finalized: Some(reference_l1_block()), + Header::create( + reference_chain_config().into(), + 42, + 789, + 124, + Some(reference_l1_block()), payload_commitment, builder_commitment, ns_table, - block_merkle_tree_root: state.block_merkle_tree.commitment(), - fee_merkle_tree_root: state.fee_merkle_tree.commitment(), + state.fee_merkle_tree.commitment(), + state.block_merkle_tree.commitment(), fee_info, - chain_config: reference_chain_config().into(), - builder_signature: Some(builder_signature), - } + Some(builder_signature), + version, + ) } -const REFERENCE_HEADER_COMMITMENT: &str = "BLOCK~dh1KpdvvxSvnnPpOi2yI3DOg8h6ltr2Kv13iRzbQvtN2"; +const REFERENCE_V1_HEADER_COMMITMENT: &str = "BLOCK~dh1KpdvvxSvnnPpOi2yI3DOg8h6ltr2Kv13iRzbQvtN2"; +const REFERENCE_V2_HEADER_COMMITMENT: &str = "BLOCK~V0GJjL19nCrlm9n1zZ6gaOKEekSMCT6uR5P-h7Gi6UJR"; +const REFERENCE_V3_HEADER_COMMITMENT: &str = "BLOCK~X3j5MJWJrye2dKJv5uRLk5-z3augpUDftecBFO6dYahF"; fn reference_transaction(ns_id: NamespaceId, rng: &mut R) -> Transaction where @@ -157,6 +169,7 @@ async fn reference_tx_index() -> >::Transa } fn reference_test_without_committable( + version: &str, name: &str, reference: &T, ) { @@ -164,8 +177,13 @@ fn reference_test_without_committable( + version: &str, 
name: &str, reference: T, commitment: &str, @@ -245,7 +264,7 @@ fn reference_test( setup_logging(); setup_backtrace(); - reference_test_without_committable(name, &reference); + reference_test_without_committable(version, name, &reference); // Print information about the commitment that might be useful in generating tests for other // languages. @@ -274,17 +293,18 @@ Actual: {actual} #[async_std::test] async fn test_reference_payload() { - reference_test_without_committable("payload", &reference_payload().await); + reference_test_without_committable("v1", "payload", &reference_payload().await); } #[async_std::test] async fn test_reference_tx_index() { - reference_test_without_committable("tx_index", &reference_tx_index().await); + reference_test_without_committable("v1", "tx_index", &reference_tx_index().await); } #[async_std::test] async fn test_reference_ns_table() { reference_test( + "v1", "ns_table", reference_ns_table().await, REFERENCE_NS_TABLE_COMMITMENT, @@ -294,6 +314,7 @@ async fn test_reference_ns_table() { #[test] fn test_reference_l1_block() { reference_test( + "v1", "l1_block", reference_l1_block(), REFERENCE_L1_BLOCK_COMMITMENT, @@ -303,6 +324,7 @@ fn test_reference_l1_block() { #[test] fn test_reference_chain_config() { reference_test( + "v1", "chain_config", reference_chain_config(), REFERENCE_CHAIN_CONFIG_COMMITMENT, @@ -312,6 +334,7 @@ fn test_reference_chain_config() { #[test] fn test_reference_fee_info() { reference_test( + "v1", "fee_info", reference_fee_info(), REFERENCE_FEE_INFO_COMMITMENT, @@ -321,17 +344,33 @@ fn test_reference_fee_info() { #[async_std::test] async fn test_reference_header() { reference_test( + "v1", + "header", + reference_header(StaticVersion::<0, 1>::version()).await, + REFERENCE_V1_HEADER_COMMITMENT, + ); + + reference_test( + "v2", + "header", + reference_header(StaticVersion::<0, 2>::version()).await, + REFERENCE_V2_HEADER_COMMITMENT, + ); + + reference_test( + "v3", "header", - reference_header().await, - REFERENCE_HEADER_COMMITMENT, + reference_header(StaticVersion::<0, 3>::version()).await, + REFERENCE_V3_HEADER_COMMITMENT, ); } #[test] fn test_reference_transaction() { reference_test( + "v1", "transaction", - reference_transaction(12648430.into(), &mut jf_utils::test_rng()), + reference_transaction(12648430_u32.into(), &mut jf_utils::test_rng()), REFERENCE_TRANSACTION_COMMITMENT, ); } diff --git a/types/src/v0/error.rs b/types/src/v0/error.rs new file mode 100644 index 0000000000..58dea8b588 --- /dev/null +++ b/types/src/v0/error.rs @@ -0,0 +1,31 @@ +use core::fmt::Debug; + +use serde::{Deserialize, Serialize}; +use snafu::Snafu; + +// TODO: Remove global error +// issue #1681 (https://github.com/EspressoSystems/espresso-sequencer/issues/1681) +#[derive(Clone, Debug, Snafu, Deserialize, Serialize)] +pub enum Error { + // TODO: Can we nest these errors in a `ValidationError` to group them? 
+
+    // Parent state commitment of block doesn't match current state commitment
+    IncorrectParent,
+
+    // New view number isn't strictly after current view
+    IncorrectView,
+
+    // Genesis block either has zero or more than one transaction
+    GenesisWrongSize,
+
+    // Genesis transaction not present in genesis block
+    MissingGenesis,
+
+    // Genesis transaction in non-genesis block
+    UnexpectedGenesis,
+
+    // Merkle tree error
+    MerkleTreeError { error: String },
+
+    BlockBuilding,
+}
diff --git a/types/src/v0/header.rs b/types/src/v0/header.rs
new file mode 100644
index 0000000000..0083edb50b
--- /dev/null
+++ b/types/src/v0/header.rs
@@ -0,0 +1,43 @@
+use committable::Commitment;
+use serde::{Deserialize, Serialize};
+use vbs::version::Version;
+
+use crate::{v0_1, v0_2, v0_3, ChainConfig};
+
+/// Each variant represents a specific minor version header.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub enum Header {
+    V1(v0_1::Header),
+    V2(v0_2::Header),
+    V3(v0_3::Header),
+}
+
+/// Enum to represent the first field of different versions of a header.
+///
+/// In v1 headers, the first field is a `ResolvableChainConfig`, which contains either the full chain config or its commitment.
+/// For versions > 0.1, the first field contains the version.
+///
+/// This enum has the same variant names and types in the same positions (0 and 1) as the `Either` enum,
+/// ensuring identical serialization and deserialization for the `Left` and `Right` variants.
+/// However, it will deserialize successfully in one additional case due to the `Version` variant.
+///
+/// Variants:
+/// - `Left`: Represents the `ChainConfig` variant in v1 headers.
+/// - `Right`: Represents the chain config commitment variant in v1 headers.
+/// - `Version`: Represents the versioned header for versions > 0.1.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub(crate) enum EitherOrVersion {
+    Left(ChainConfig),
+    Right(Commitment<ChainConfig>),
+    Version(Version),
+}
+
+/// Headers with versions greater than 0.1 are serialized as `VersionedHeader` with the version field as `EitherOrVersion`.
+/// This ensures that the first field is deserialized as the `EitherOrVersion::Version` variant.
+/// This approach is necessary because `serde_flatten()` cannot be used with bincode,
+/// as bincode does not support `deserialize_any()`.
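+///
+/// As a sketch (with `v0_2_fields` standing in for the concrete v0.2 field struct), a v0.2
+/// header is serialized as:
+///
+/// ```ignore
+/// VersionedHeader {
+///     version: EitherOrVersion::Version(Version { major: 0, minor: 2 }),
+///     fields: v0_2_fields,
+/// }
+/// ```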
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct VersionedHeader<Fields> {
+    pub(crate) version: EitherOrVersion,
+    pub(crate) fields: Fields,
+}
diff --git a/types/src/v0/impls/block/full_payload.rs b/types/src/v0/impls/block/full_payload.rs
new file mode 100644
index 0000000000..84e8d2b37b
--- /dev/null
+++ b/types/src/v0/impls/block/full_payload.rs
@@ -0,0 +1,3 @@
+mod ns_proof;
+mod ns_table;
+mod payload;
diff --git a/sequencer/src/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs
similarity index 91%
rename from sequencer/src/block/full_payload/ns_proof.rs
rename to types/src/v0/impls/block/full_payload/ns_proof.rs
index 104ca45f4f..e378bf422c 100644
--- a/sequencer/src/block/full_payload/ns_proof.rs
+++ b/types/src/v0/impls/block/full_payload/ns_proof.rs
@@ -1,27 +1,13 @@
-use crate::{
-    block::{
-        full_payload::{NsIndex, NsTable, Payload, PayloadByteLen},
-        namespace_payload::NsPayloadOwned,
-    },
-    NamespaceId, Transaction,
-};
 use hotshot_types::{
     traits::EncodeBytes,
-    vid::{vid_scheme, LargeRangeProofType, VidCommitment, VidCommon, VidSchemeType},
+    vid::{vid_scheme, VidCommitment, VidCommon, VidSchemeType},
 };
 use jf_vid::{
     payload_prover::{PayloadProver, Statement},
     VidScheme,
 };
-use serde::{Deserialize, Serialize};
 
-/// Proof of correctness for namespace payload bytes in a block.
-#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
-pub struct NsProof {
-    ns_index: NsIndex,
-    ns_payload: NsPayloadOwned,
-    ns_proof: Option<LargeRangeProofType>, // `None` if ns_payload is empty
-}
+use crate::{NamespaceId, NsIndex, NsProof, NsTable, Payload, PayloadByteLen, Transaction};
 
 impl NsProof {
     /// Returns the payload bytes for the `index`th namespace, along with a
diff --git a/sequencer/src/block/full_payload/ns_table.rs b/types/src/v0/impls/block/full_payload/ns_table.rs
similarity index 63%
rename from sequencer/src/block/full_payload/ns_table.rs
rename to types/src/v0/impls/block/full_payload/ns_table.rs
index d2d2290ef1..994d68710b 100644
--- a/sequencer/src/block/full_payload/ns_table.rs
+++ b/types/src/v0/impls/block/full_payload/ns_table.rs
@@ -5,134 +5,19 @@
 //!
 //! See [`NsTable`] for a full specification of the binary format of a namespace
 //! table.
-use crate::{
-    block::{
-        full_payload::payload::PayloadByteLen,
-        namespace_payload::NsPayloadRange,
-        uint_bytes::{
-            bytes_serde_impl, u32_from_bytes, u32_to_bytes, usize_from_bytes, usize_to_bytes,
-        },
-    },
-    NamespaceId,
-};
+use std::{collections::HashSet, sync::Arc};
+
 use committable::{Commitment, Committable, RawCommitmentBuilder};
-use derive_more::Display;
 use hotshot_types::traits::EncodeBytes;
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
-use std::{collections::HashSet, ops::Range, sync::Arc};
-use thiserror::Error;
-
-/// Byte lengths for the different items that could appear in a namespace table.
-const NUM_NSS_BYTE_LEN: usize = 4;
-const NS_OFFSET_BYTE_LEN: usize = 4;
-
-// TODO prefer [`NS_ID_BYTE_LEN`] set to `8` because [`NamespaceId`] is a `u64`
-// but we need to maintain serialization compatibility.
-// https://github.com/EspressoSystems/espresso-sequencer/issues/1574
-const NS_ID_BYTE_LEN: usize = 4;
-
-/// Raw binary data for a namespace table.
-///
-/// Any sequence of bytes is a valid [`NsTable`].
-///
-/// # Binary format of a namespace table
-///
-/// Byte lengths for the different items that could appear in a namespace table
-/// are specified in local private constants [`NUM_NSS_BYTE_LEN`],
-/// [`NS_OFFSET_BYTE_LEN`], [`NS_ID_BYTE_LEN`].
-/// -/// ## Number of entries in the namespace table -/// -/// The first [`NUM_NSS_BYTE_LEN`] bytes of the namespace table indicate the -/// number `n` of entries in the table as a little-endian unsigned integer. If -/// the entire table length is smaller than [`NUM_NSS_BYTE_LEN`] then the -/// missing bytes are zero-padded. -/// -/// The bytes in the namespace table beyond the first [`NUM_NSS_BYTE_LEN`] bytes -/// encode table entries. Each entry consumes exactly [`NS_ID_BYTE_LEN`] `+` -/// [`NS_OFFSET_BYTE_LEN`] bytes. -/// -/// The number `n` could be anything, including a number much larger than the -/// number of entries that could fit in the namespace table. As such, the actual -/// number of entries in the table is defined as the minimum of `n` and the -/// maximum number of whole entries that could fit in the table. -/// -/// See [`Self::in_bounds`] for clarification. -/// -/// ## Namespace table entry -/// -/// ### Namespace ID -/// -/// The first [`NS_ID_BYTE_LEN`] bytes of each table entry indicate the -/// [`NamespaceId`] for this namespace. Any table entry whose [`NamespaceId`] is -/// a duplicate of a previous entry is ignored. A correct count of the number of -/// *unique* (non-ignored) entries is given by `NsTable::iter().count()`. -/// -/// ### Namespace offset -/// -/// The next [`NS_OFFSET_BYTE_LEN`] bytes of each table entry indicate the -/// end-index of a namespace in the block payload bytes -/// [`Payload`](super::payload::Payload). This end-index is a little-endian -/// unsigned integer. -/// -/// # How to deduce a namespace's byte range -/// -/// In order to extract the payload bytes of a single namespace `N` from the -/// block payload one needs both the start- and end-indices for `N`. -/// -/// See [`Self::ns_range`] for clarification. What follows is a description of -/// what's implemented in [`Self::ns_range`]. -/// -/// If `N` occupies the `i`th entry in the namespace table for `i>0` then the -/// start-index for `N` is defined as the end-index of the `(i-1)`th entry in -/// the table. -/// -/// Even if the `(i-1)`the entry would otherwise be ignored (due to a duplicate -/// [`NamespaceId`] or any other reason), that entry's end-index still defines -/// the start-index of `N`. This rule guarantees that both start- and -/// end-indices for any namespace `N` can be read from a constant-size byte -/// range in the namespace table, and it eliminates the need to traverse an -/// unbounded number of previous entries of the namespace table looking for a -/// previous non-ignored entry. -/// -/// The start-index of the 0th entry in the table is implicitly defined to be -/// `0`. -/// -/// The start- and end-indices `(declared_start, declared_end)` declared in the -/// namespace table could be anything. As such, the actual start- and -/// end-indices `(start, end)` are defined so as to ensure that the byte range -/// is well-defined and in-bounds for the block payload: -/// ```ignore -/// end = min(declared_end, block_payload_byte_length) -/// start = min(declared_start, end) -/// ``` -/// -/// In a "honestly-prepared" namespace table the end-index of the final -/// namespace equals the byte length of the block payload. (Otherwise the block -/// payload might have bytes that are not included in any namespace.) -/// -/// It is possible that a namespace table could indicate two distinct namespaces -/// whose byte ranges overlap, though no "honestly-prepared" namespace table -/// would do this. 
-/// -/// TODO prefer [`NsTable`] to be a newtype like this -/// ```ignore -/// #[repr(transparent)] -/// #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -/// #[serde(transparent)] -/// pub struct NsTable(#[serde(with = "base64_bytes")] Vec); -/// ``` -/// but we need to maintain serialization compatibility. -/// -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -// Boilerplate: `#[serde(remote = "Self")]` needed to check invariants on -// deserialization. See -// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140 -#[serde(remote = "Self")] -pub struct NsTable { - #[serde(with = "base64_bytes")] - bytes: Vec, -} + +use crate::{ + v0::impls::block::uint_bytes::{ + bytes_serde_impl, u32_from_bytes, u32_to_bytes, usize_from_bytes, usize_to_bytes, + }, + NamespaceId, NsIndex, NsIter, NsPayloadRange, NsTable, NsTableBuilder, NsTableValidationError, + NumNss, PayloadByteLen, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, +}; // Boilerplate: `#[serde(remote = "Self")]` allows invariant checking on // deserialization via re-implementation of `Deserialize` in terms of default @@ -254,7 +139,7 @@ impl NsTable { /// Read subslice range for the `index`th namespace from the namespace /// table. - pub(in crate::block) fn ns_range( + pub(crate) fn ns_range( &self, index: &NsIndex, payload_byte_len: &PayloadByteLen, @@ -367,23 +252,9 @@ impl Committable for NsTable { } } -/// Return type for [`NsTable::validate`]. -#[derive(Error, Debug, Display, Eq, PartialEq)] -pub enum NsTableValidationError { - InvalidByteLen, - NonIncreasingEntries, - DuplicateNamespaceId, - InvalidHeader, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - InvalidFinalOffset, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604 - ExpectNonemptyNsTable, -} - -pub struct NsTableBuilder { - bytes: Vec, - num_entries: usize, -} - impl NsTableBuilder { + // >>>> change + #[allow(clippy::new_without_default)] pub fn new() -> Self { // pre-allocate space for the ns table header Self { @@ -422,9 +293,6 @@ impl NsTableBuilder { } } -/// Index for an entry in a ns table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct NsIndex(usize); bytes_serde_impl!(NsIndex, to_bytes, [u8; NUM_NSS_BYTE_LEN], from_bytes); impl NsIndex { @@ -436,18 +304,12 @@ impl NsIndex { } } -/// Number of entries in a namespace table. -pub struct NumNss(usize); - impl NumNss { pub fn in_bounds(&self, index: &NsIndex) -> bool { index.0 < self.0 } } -/// Return type for [`Payload::ns_iter`]. 
-pub(in crate::block) struct NsIter(Range); - impl NsIter { pub fn new(num_nss: &NumNss) -> Self { Self(0..num_nss.0) diff --git a/sequencer/src/block/full_payload/ns_table/test.rs b/types/src/v0/impls/block/full_payload/ns_table/test.rs similarity index 97% rename from sequencer/src/block/full_payload/ns_table/test.rs rename to types/src/v0/impls/block/full_payload/ns_table/test.rs index 6f02b74b4d..6379bff2a5 100644 --- a/sequencer/src/block/full_payload/ns_table/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_table/test.rs @@ -1,18 +1,19 @@ -use super::{ - NsTable, NsTableBuilder, NsTableValidationError, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, -}; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use hotshot::traits::BlockPayload; +use rand::{Rng, RngCore}; + use crate::{ - block::{ + v0::impls::block::{ test::ValidTest, uint_bytes::{u32_max_from_byte_len, usize_max_from_byte_len, usize_to_bytes}, }, - NamespaceId, Payload, + v0_1::{ + NsTableBuilder, + NsTableValidationError::{self, *}, + NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, + }, + NamespaceId, NsTable, Payload, }; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use hotshot::traits::BlockPayload; -use rand::{Rng, RngCore}; -use NsTableValidationError::*; #[test] fn random_valid() { diff --git a/sequencer/src/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs similarity index 84% rename from sequencer/src/block/full_payload/payload.rs rename to types/src/v0/impls/block/full_payload/payload.rs index b0078ff092..4785a0b36b 100644 --- a/sequencer/src/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -1,14 +1,7 @@ -use crate::{ - block::{ - full_payload::ns_table::{NsIndex, NsTable, NsTableBuilder}, - namespace_payload::{Index, Iter, NsPayload, NsPayloadBuilder, NsPayloadRange, TxProof}, - }, - ChainConfig, NamespaceId, NodeState, SeqTypes, Transaction, ValidatedState, -}; +use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; use committable::Committable; -use derive_more::Display; use hotshot_query_service::availability::QueryablePayload; use hotshot_types::{ traits::{BlockPayload, EncodeBytes}, @@ -16,40 +9,13 @@ use hotshot_types::{ vid::{VidCommon, VidSchemeType}, }; use jf_vid::VidScheme; -use serde::{Deserialize, Serialize}; use sha2::Digest; -use std::{collections::BTreeMap, sync::Arc}; -/// Raw payload data for an entire block. -/// -/// A block consists of two sequences of arbitrary bytes: -/// - `ns_table`: namespace table -/// - `ns_payloads`: namespace payloads -/// -/// Any sequence of bytes is a valid `ns_table`. Any sequence of bytes is a -/// valid `ns_payloads`. The contents of `ns_table` determine how to interpret -/// `ns_payload`. -/// -/// # Namespace table -/// -/// See [`NsTable`] for the format of a namespace table. -/// -/// # Namespace payloads -/// -/// A concatenation of payload bytes for multiple individual namespaces. -/// Namespace boundaries are dictated by `ns_table`. See [`NsPayload`] for the -/// format of a namespace payload. -#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] -pub struct Payload { - // Concatenated payload bytes for each namespace - // - // TODO want to rename thisfield to `ns_payloads`, but can't due to - // serialization compatibility. 
- #[serde(with = "base64_bytes")] - raw_payload: Vec, - - ns_table: NsTable, -} +use crate::{ + ChainConfig, Index, Iter, NamespaceId, NodeState, NsIndex, NsPayload, NsPayloadBuilder, + NsPayloadRange, NsTable, NsTableBuilder, Payload, PayloadByteLen, SeqTypes, Transaction, + TxProof, ValidatedState, +}; impl Payload { pub fn ns_table(&self) -> &NsTable { @@ -66,19 +32,19 @@ impl Payload { // CRATE-VISIBLE HELPERS START HERE - pub(in crate::block) fn read_ns_payload(&self, range: &NsPayloadRange) -> &NsPayload { + pub(crate) fn read_ns_payload(&self, range: &NsPayloadRange) -> &NsPayload { NsPayload::from_bytes_slice(&self.raw_payload[range.as_block_range()]) } /// Convenience wrapper for [`Self::read_ns_payload`]. /// /// `index` is not checked. Use `self.ns_table().in_bounds()` as needed. - pub(in crate::block) fn ns_payload(&self, index: &NsIndex) -> &NsPayload { + pub(crate) fn ns_payload(&self, index: &NsIndex) -> &NsPayload { let ns_payload_range = self.ns_table().ns_range(index, &self.byte_len()); self.read_ns_payload(&ns_payload_range) } - pub(in crate::block) fn byte_len(&self) -> PayloadByteLen { + pub(crate) fn byte_len(&self) -> PayloadByteLen { PayloadByteLen(self.raw_payload.len()) } @@ -185,6 +151,7 @@ impl BlockPayload for Payload { let payload = Self::from_transactions_sync(vec![], Default::default(), &Default::default()) .unwrap() .0; + let ns_table = payload.ns_table().clone(); (payload, ns_table) } @@ -266,17 +233,13 @@ impl EncodeBytes for Payload { } } -/// Byte length of a block payload, which includes all namespaces but *not* the -/// namespace table. -#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] -pub struct PayloadByteLen(usize); - impl PayloadByteLen { /// Extract payload byte length from a [`VidCommon`] and construct a new [`Self`] from it. pub fn from_vid_common(common: &VidCommon) -> Self { Self(usize::try_from(VidSchemeType::get_payload_byte_len(common)).unwrap()) } + #[allow(clippy::result_unit_err)] /// Is the payload byte length declared in a [`VidCommon`] equal [`Self`]? 
pub fn is_consistent(&self, common: &VidCommon) -> bool { // failure to convert to usize implies that `common` cannot be @@ -295,7 +258,7 @@ impl PayloadByteLen { self.0 == expected } - pub(in crate::block::full_payload) fn as_usize(&self) -> usize { + pub(in crate::v0::impls::block::full_payload) fn as_usize(&self) -> usize { self.0 } } diff --git a/types/src/v0/impls/block/mod.rs b/types/src/v0/impls/block/mod.rs new file mode 100644 index 0000000000..baca5b5e5c --- /dev/null +++ b/types/src/v0/impls/block/mod.rs @@ -0,0 +1,6 @@ +mod full_payload; +mod namespace_payload; +mod test; +mod uint_bytes; + +pub use uint_bytes::*; diff --git a/types/src/v0/impls/block/namespace_payload.rs b/types/src/v0/impls/block/namespace_payload.rs new file mode 100644 index 0000000000..b9bbf18305 --- /dev/null +++ b/types/src/v0/impls/block/namespace_payload.rs @@ -0,0 +1,5 @@ +mod iter; +mod ns_payload; +mod ns_payload_range; +mod tx_proof; +mod types; diff --git a/sequencer/src/block/namespace_payload/iter.rs b/types/src/v0/impls/block/namespace_payload/iter.rs similarity index 75% rename from sequencer/src/block/namespace_payload/iter.rs rename to types/src/v0/impls/block/namespace_payload/iter.rs index 09da31b9ad..3f460e09ab 100644 --- a/sequencer/src/block/namespace_payload/iter.rs +++ b/types/src/v0/impls/block/namespace_payload/iter.rs @@ -1,21 +1,10 @@ -use crate::block::{ - full_payload::{NsIndex, NsIter, Payload}, - namespace_payload::types::{TxIndex, TxIter}, -}; -use serde::{Deserialize, Serialize}; -use std::iter::Peekable; - -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct Index { - ns_index: NsIndex, - tx_index: TxIndex, -} +use crate::{Index, Iter, NsIndex, NsIter, Payload, TxIndex}; impl Index { pub fn ns(&self) -> &NsIndex { &self.ns_index } - pub(in crate::block) fn tx(&self) -> &TxIndex { + pub fn tx(&self) -> &TxIndex { &self.tx_index } } @@ -37,13 +26,6 @@ impl Ord for Index { } } -/// Cartesian product of [`NsIter`], [`TxIter`]. -pub struct Iter<'a> { - ns_iter: Peekable, - tx_iter: Option, - block: &'a Payload, -} - impl<'a> Iter<'a> { pub fn new(block: &'a Payload) -> Self { Self { diff --git a/sequencer/src/block/namespace_payload/ns_payload.rs b/types/src/v0/impls/block/namespace_payload/ns_payload.rs similarity index 82% rename from sequencer/src/block/namespace_payload/ns_payload.rs rename to types/src/v0/impls/block/namespace_payload/ns_payload.rs index e87876b64a..48b432859f 100644 --- a/sequencer/src/block/namespace_payload/ns_payload.rs +++ b/types/src/v0/impls/block/namespace_payload/ns_payload.rs @@ -1,19 +1,8 @@ use crate::{ - block::namespace_payload::types::{ - FromNsPayloadBytes, NsPayloadByteLen, NsPayloadBytesRange, NumTxs, NumTxsRange, - NumTxsUnchecked, TxIndex, TxIter, TxPayloadRange, TxTableEntriesRange, - }, - NamespaceId, Transaction, + v0::traits::{FromNsPayloadBytes, NsPayloadBytesRange}, + NamespaceId, NsPayload, NsPayloadByteLen, NumTxs, NumTxsRange, NumTxsUnchecked, Transaction, + TxIndex, TxIter, TxPayloadRange, TxTableEntriesRange, }; -use serde::{Deserialize, Serialize}; - -/// Raw binary data for a single namespace's payload. -/// -/// Any sequence of bytes is a valid [`NsPayload`]. -/// -/// See module-level documentation [`types`](super::types) for a full -/// specification of the binary format of a namespace. 
-pub(in crate::block) struct NsPayload([u8]); impl NsPayload { pub fn from_bytes_slice(bytes: &[u8]) -> &NsPayload { @@ -93,19 +82,14 @@ impl NsPayload { } } -#[repr(transparent)] -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -#[serde(transparent)] -pub(in crate::block) struct NsPayloadOwned(#[serde(with = "base64_bytes")] Vec); - /// Crazy boilerplate code to make it so that [`NsPayloadOwned`] is to /// [`NsPayload`] as [`Vec`] is to `[T]`. See [How can I create newtypes for /// an unsized type and its owned counterpart (like `str` and `String`) in safe /// Rust? - Stack Overflow](https://stackoverflow.com/q/64977525) mod ns_payload_owned { - use super::{NsPayload, NsPayloadOwned}; - use std::borrow::Borrow; - use std::ops::Deref; + use std::{borrow::Borrow, ops::Deref}; + + use crate::v0_1::{NsPayload, NsPayloadOwned}; impl NsPayload { // pub(super) because I want it visible everywhere in this file but I diff --git a/sequencer/src/block/namespace_payload/ns_payload/test.rs b/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs similarity index 97% rename from sequencer/src/block/namespace_payload/ns_payload/test.rs rename to types/src/v0/impls/block/namespace_payload/ns_payload/test.rs index 78f9fd8f1f..abc59b74f5 100644 --- a/sequencer/src/block/namespace_payload/ns_payload/test.rs +++ b/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs @@ -1,18 +1,15 @@ -use super::NsPayloadOwned; +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use crate::{ - block::{ - namespace_payload::NsPayloadBuilder, - uint_bytes::{usize_max_from_byte_len, usize_to_bytes}, - }, - NamespaceId, + v0::impls::block::{usize_max_from_byte_len, usize_to_bytes}, + NamespaceId, NsPayloadBuilder, NsPayloadOwned, }; -use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; #[test] fn ns_payload_len() { setup_logging(); setup_backtrace(); - let ns_id = NamespaceId::from(69); // dummy + let ns_id = NamespaceId::from(69_u32); // dummy // ordinary valid ns_payload { @@ -102,7 +99,7 @@ fn ns_payload_len() { fn negative_len_txs() { setup_logging(); setup_backtrace(); - let ns_id = NamespaceId::from(69); // dummy + let ns_id = NamespaceId::from(69_u32); // dummy // 1 negative-length tx at the end, no overlapping tx bytes { @@ -170,7 +167,7 @@ fn negative_len_txs() { fn tx_table_header() { setup_logging(); setup_backtrace(); - let ns_id = NamespaceId::from(69); // dummy + let ns_id = NamespaceId::from(69_u32); // dummy // header declares 1 fewer txs, tx table bytes appear in tx payloads, wasted // pßayload bytes diff --git a/sequencer/src/block/namespace_payload/ns_payload_range.rs b/types/src/v0/impls/block/namespace_payload/ns_payload_range.rs similarity index 78% rename from sequencer/src/block/namespace_payload/ns_payload_range.rs rename to types/src/v0/impls/block/namespace_payload/ns_payload_range.rs index f2812f6fd9..99d4bb719f 100644 --- a/sequencer/src/block/namespace_payload/ns_payload_range.rs +++ b/types/src/v0/impls/block/namespace_payload/ns_payload_range.rs @@ -1,9 +1,6 @@ -use super::types::{NsPayloadByteLen, NsPayloadBytesRange}; use std::ops::Range; -/// Index range for a namespace payload inside a block payload. -#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct NsPayloadRange(Range); +use crate::{v0::traits::NsPayloadBytesRange, NsPayloadByteLen, NsPayloadRange}; impl NsPayloadRange { /// TODO restrict visibility? 
diff --git a/sequencer/src/block/namespace_payload/tx_proof.rs b/types/src/v0/impls/block/namespace_payload/tx_proof.rs similarity index 86% rename from sequencer/src/block/namespace_payload/tx_proof.rs rename to types/src/v0/impls/block/namespace_payload/tx_proof.rs index ee025c0f4b..b6f527215c 100644 --- a/sequencer/src/block/namespace_payload/tx_proof.rs +++ b/types/src/v0/impls/block/namespace_payload/tx_proof.rs @@ -1,49 +1,17 @@ -use crate::{ - block::{ - full_payload::{ - NsTable, {Payload, PayloadByteLen}, - }, - namespace_payload::{ - iter::Index, - types::{ - NumTxs, NumTxsRange, NumTxsUnchecked, TxIndex, TxPayloadRange, TxTableEntries, - TxTableEntriesRange, - }, - }, - }, - Transaction, -}; use hotshot_query_service::{VidCommitment, VidCommon}; use hotshot_types::{ traits::EncodeBytes, - vid::{vid_scheme, SmallRangeProofType, VidSchemeType}, + vid::{vid_scheme, VidSchemeType}, }; use jf_vid::{ payload_prover::{PayloadProver, Statement}, VidScheme, }; -use serde::{Deserialize, Serialize}; - -/// Proof of correctness for transaction bytes in a block. -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] -pub struct TxProof { - // Naming conventions for this struct's fields: - // - `payload_x`: bytes from the payload - // - `payload_proof_x`: a proof of those bytes from the payload - tx_index: TxIndex, - - // Number of txs declared in the tx table - payload_num_txs: NumTxsUnchecked, - payload_proof_num_txs: SmallRangeProofType, - - // Tx table entries for this tx - payload_tx_table_entries: TxTableEntries, - payload_proof_tx_table_entries: SmallRangeProofType, - - // This tx's payload bytes. - // `None` if this tx has zero length. - payload_proof_tx: Option, -} + +use crate::{ + Index, NsTable, NumTxs, NumTxsRange, Payload, PayloadByteLen, Transaction, TxPayloadRange, + TxProof, TxTableEntriesRange, +}; impl TxProof { /// Returns the [`Transaction`] indicated by `index`, along with a proof of diff --git a/sequencer/src/block/namespace_payload/types.rs b/types/src/v0/impls/block/namespace_payload/types.rs similarity index 79% rename from sequencer/src/block/namespace_payload/types.rs rename to types/src/v0/impls/block/namespace_payload/types.rs index 09860f80bd..3cfb5bfc96 100644 --- a/sequencer/src/block/namespace_payload/types.rs +++ b/types/src/v0/impls/block/namespace_payload/types.rs @@ -104,39 +104,19 @@ //! It is possible that a `tx_table` table could indicate two distinct //! transactions whose byte ranges overlap, though no "honestly-prepared" //! `tx_table` would do this. -use crate::block::uint_bytes::{bytes_serde_impl, usize_from_bytes, usize_to_bytes}; -use crate::Transaction; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::ops::Range; -/// Byte lengths for the different items that could appear in a tx table. -const NUM_TXS_BYTE_LEN: usize = 4; -const TX_OFFSET_BYTE_LEN: usize = 4; - -/// Data that can be deserialized from a subslice of namespace payload bytes. -/// -/// Companion trait for [`NsPayloadBytesRange`], which specifies the subslice of -/// namespace payload bytes to read. -pub trait FromNsPayloadBytes<'a> { - /// Deserialize `Self` from namespace payload bytes. - fn from_payload_bytes(bytes: &'a [u8]) -> Self; -} - -/// Specifies a subslice of namespace payload bytes to read. -/// -/// Companion trait for [`FromNsPayloadBytes`], which holds data that can be -/// deserialized from that subslice of bytes. 
-pub trait NsPayloadBytesRange<'a> { - type Output: FromNsPayloadBytes<'a>; - - /// Range relative to this ns payload - fn ns_payload_range(&self) -> Range; -} +use serde::{Deserialize, Deserializer, Serialize, Serializer}; -/// Number of txs in a namespace. -/// -/// Like [`NumTxsUnchecked`] but checked against a [`NsPayloadByteLen`]. -pub struct NumTxs(usize); +use crate::{ + v0::{ + impls::block::{bytes_serde_impl, usize_from_bytes, usize_to_bytes}, + traits::{FromNsPayloadBytes, NsPayloadBytesRange}, + }, + NsPayloadBuilder, NsPayloadByteLen, NumTxs, NumTxsRange, NumTxsUnchecked, Transaction, TxIndex, + TxIter, TxPayload, TxPayloadRange, TxTableEntries, TxTableEntriesRange, NUM_TXS_BYTE_LEN, + TX_OFFSET_BYTE_LEN, +}; impl NumTxs { /// Returns the minimum of: @@ -157,9 +137,6 @@ impl NumTxs { } } -/// Byte length of a namespace payload. -pub struct NsPayloadByteLen(usize); - impl NsPayloadByteLen { // TODO restrict visibility? pub fn from_usize(n: usize) -> Self { @@ -167,21 +144,12 @@ impl NsPayloadByteLen { } } -/// The part of a tx table that declares the number of txs in the payload. -/// -/// "Unchecked" because this quantity might exceed the number of tx table -/// entries that could fit into the namespace that contains it. -/// -/// Use [`NumTxs`] for the actual number of txs in this namespace. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct NumTxsUnchecked(usize); bytes_serde_impl!( NumTxsUnchecked, to_payload_bytes, [u8; NUM_TXS_BYTE_LEN], from_payload_bytes ); - impl NumTxsUnchecked { pub fn to_payload_bytes(&self) -> [u8; NUM_TXS_BYTE_LEN] { usize_to_bytes::(self.0) @@ -194,10 +162,6 @@ impl FromNsPayloadBytes<'_> for NumTxsUnchecked { } } -/// Byte range for the part of a tx table that declares the number of txs in the -/// payload. -pub struct NumTxsRange(Range); - impl NumTxsRange { pub fn new(byte_len: &NsPayloadByteLen) -> Self { Self(0..NUM_TXS_BYTE_LEN.min(byte_len.0)) @@ -212,16 +176,6 @@ impl NsPayloadBytesRange<'_> for NumTxsRange { } } -/// Entries from a tx table in a namespace for use in a transaction proof. -/// -/// Contains either one or two entries according to whether it was derived from -/// the first transaction in the namespace. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct TxTableEntries { - cur: usize, - prev: Option, // `None` if derived from the first transaction -} - // This serde impl uses Vec. We could save space by using an array of // length `TWO_ENTRIES_BYTE_LEN`, but then we need a way to distinguish // `prev=Some(0)` from `prev=None`. @@ -268,12 +222,6 @@ impl FromNsPayloadBytes<'_> for TxTableEntries { } } -/// Byte range for entries from a tx table for use in a transaction proof. -/// -/// This range covers either one or two entries from a tx table according to -/// whether it was derived from the first transaction in the namespace. -pub struct TxTableEntriesRange(Range); - impl TxTableEntriesRange { pub fn new(index: &TxIndex) -> Self { let start = if index.0 == 0 { @@ -306,9 +254,6 @@ impl NsPayloadBytesRange<'_> for TxTableEntriesRange { } } -/// A transaction's payload data. -pub struct TxPayload<'a>(&'a [u8]); - impl<'a> TxPayload<'a> { pub fn to_payload_bytes(&self) -> &'a [u8] { self.0 @@ -321,9 +266,6 @@ impl<'a> FromNsPayloadBytes<'a> for TxPayload<'a> { } } -/// Byte range for a transaction's payload data. -pub struct TxPayloadRange(Range); - impl TxPayloadRange { pub fn new( num_txs: &NumTxsUnchecked, @@ -355,9 +297,6 @@ impl<'a> NsPayloadBytesRange<'a> for TxPayloadRange { } } -/// Index for an entry in a tx table. 
-#[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub(in crate::block) struct TxIndex(usize); bytes_serde_impl!(TxIndex, to_bytes, [u8; NUM_TXS_BYTE_LEN], from_bytes); impl TxIndex { @@ -369,8 +308,6 @@ impl TxIndex { } } -pub(in crate::block) struct TxIter(Range); - impl TxIter { pub fn new(num_txs: &NumTxs) -> Self { Self(0..num_txs.0) @@ -386,17 +323,6 @@ impl Iterator for TxIter { } } -/// Build an individual namespace payload one transaction at a time. -/// -/// Use [`Self::append_tx`] to add each transaction. Use [`Self::into_bytes`] -/// when you're done. The returned bytes include a well-formed tx table and all -/// tx payloads. -#[derive(Default)] -pub(in crate::block) struct NsPayloadBuilder { - tx_table_entries: Vec, - tx_bodies: Vec, -} - impl NsPayloadBuilder { /// Add a transaction's payload to this namespace pub fn append_tx(&mut self, tx: Transaction) { diff --git a/sequencer/src/block/test.rs b/types/src/v0/impls/block/test.rs similarity index 97% rename from sequencer/src/block/test.rs rename to types/src/v0/impls/block/test.rs index 7edf76f52c..f48a433b7d 100644 --- a/sequencer/src/block/test.rs +++ b/types/src/v0/impls/block/test.rs @@ -1,18 +1,17 @@ -use crate::{ - block::{ - full_payload::{NsProof, Payload}, - namespace_payload::TxProof, - }, - chain_config::BlockSize, - ChainConfig, NamespaceId, NodeState, Transaction, ValidatedState, -}; +#![cfg(test)] +use std::collections::BTreeMap; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use hotshot::traits::BlockPayload; use hotshot_query_service::availability::QueryablePayload; use hotshot_types::{traits::EncodeBytes, vid::vid_scheme}; use jf_vid::VidScheme; use rand::RngCore; -use std::collections::BTreeMap; + +use crate::{ + BlockSize, ChainConfig, NamespaceId, NodeState, NsProof, Payload, Transaction, TxProof, + ValidatedState, +}; #[async_std::test] async fn basic_correctness() { diff --git a/sequencer/src/block/uint_bytes.rs b/types/src/v0/impls/block/uint_bytes.rs similarity index 99% rename from sequencer/src/block/uint_bytes.rs rename to types/src/v0/impls/block/uint_bytes.rs index 2296a8182a..8a248b57cb 100644 --- a/sequencer/src/block/uint_bytes.rs +++ b/types/src/v0/impls/block/uint_bytes.rs @@ -1,9 +1,10 @@ //! Serialization (and deserialization) of primitive unsigned integer types to //! (and from) an arbitrary fixed-length byte array. //! -use paste::paste; use std::mem::size_of; +use paste::paste; + // Use an ugly macro because it's difficult or impossible to be generic over // primitive types such as `usize`, `u64`. macro_rules! uint_bytes_impl { @@ -107,9 +108,10 @@ pub(super) use bytes_serde_impl; #[cfg(test)] mod test { + use std::mem::size_of; + use fluent_asserter::prelude::*; use paste::paste; - use std::mem::size_of; macro_rules! 
uint_bytes_test_impl { ($T:ty) => { diff --git a/sequencer/src/chain_config.rs b/types/src/v0/impls/chain_config.rs similarity index 79% rename from sequencer/src/chain_config.rs rename to types/src/v0/impls/chain_config.rs index 5140d6ca56..754b2db5bd 100644 --- a/sequencer/src/chain_config.rs +++ b/types/src/v0/impls/chain_config.rs @@ -1,20 +1,16 @@ -use crate::{ - options::parse_size, - state::{FeeAccount, FeeAmount}, -}; +use std::str::FromStr; + +use bytesize::ByteSize; use committable::{Commitment, Committable}; -use derive_more::{Deref, Display, From, Into}; -use ethers::types::{Address, U256}; +use derive_more::From; +use ethers::types::U256; use itertools::Either; use sequencer_utils::{ impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, }; -use serde::{Deserialize, Serialize}; -use std::str::FromStr; +use snafu::Snafu; -#[derive(Default, Hash, Copy, Clone, Debug, Display, PartialEq, Eq, From, Into)] -#[display(fmt = "{_0}")] -pub struct ChainId(U256); +use crate::{BlockSize, ChainConfig, ChainId, ResolvableChainConfig}; impl_serde_from_string_or_integer!(ChainId); impl_to_fixed_bytes!(ChainId, U256); @@ -54,10 +50,6 @@ impl From for ChainId { } } -#[derive(Hash, Copy, Clone, Debug, Default, Display, PartialEq, Eq, From, Into, Deref)] -#[display(fmt = "{_0}")] -pub struct BlockSize(u64); - impl_serde_from_string_or_integer!(BlockSize); impl FromStringOrInteger for BlockSize { @@ -85,33 +77,6 @@ impl FromStringOrInteger for BlockSize { } } -/// Global variables for an Espresso blockchain. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct ChainConfig { - /// Espresso chain ID - pub chain_id: ChainId, - - /// Maximum size in bytes of a block - pub max_block_size: BlockSize, - - /// Minimum fee in WEI per byte of payload - pub base_fee: FeeAmount, - - /// Fee contract address on L1. - /// - /// This is optional so that fees can easily be toggled on/off, with no need to deploy a - /// contract when they are off. In a future release, after fees are switched on and thoroughly - /// tested, this may be made mandatory. - pub fee_contract: Option
, - - /// Account that receives sequencing fees. - /// - /// This account in the Espresso fee ledger will always receive every fee paid in Espresso, - /// regardless of whether or not their is a `fee_contract` deployed. Once deployed, the fee - /// contract can decide what to do with tokens locked in this account in Espresso. - pub fee_recipient: FeeAccount, -} - impl Default for ChainConfig { fn default() -> Self { Self { @@ -144,11 +109,6 @@ impl Committable for ChainConfig { } } -#[derive(Clone, Debug, Copy, PartialEq, Deserialize, Serialize, Eq, Hash)] -pub struct ResolvableChainConfig { - chain_config: Either>, -} - impl ResolvableChainConfig { pub fn commit(&self) -> Commitment { match self.chain_config { @@ -180,6 +140,15 @@ impl From for ResolvableChainConfig { } } +#[derive(Clone, Debug, From, Snafu)] +pub struct ParseSizeError { + msg: String, +} + +pub fn parse_size(s: &str) -> Result { + Ok(s.parse::()?.0) +} + #[cfg(test)] mod tests { use super::*; diff --git a/types/src/v0/impls/fee_info.rs b/types/src/v0/impls/fee_info.rs new file mode 100644 index 0000000000..f4fe0acf67 --- /dev/null +++ b/types/src/v0/impls/fee_info.rs @@ -0,0 +1,377 @@ +// use crate::SeqTypes; + +use std::str::FromStr; + +use anyhow::{bail, ensure, Context}; +use ark_serialize::{ + CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, +}; +use committable::{Commitment, Committable, RawCommitmentBuilder}; +use contract_bindings::fee_contract::DepositFilter; +use ethers::{ + prelude::{Address, U256}, + utils::{parse_units, ParseUnits}, +}; +use hotshot_query_service::explorer::MonetaryValue; +use hotshot_types::traits::block_contents::BuilderFee; +use jf_merkle_tree::{ + ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, LookupResult, + MerkleCommitment, MerkleTreeError, MerkleTreeScheme, ToTraversalPath, + UniversalMerkleTreeScheme, +}; +use num_traits::CheckedSub; +use sequencer_utils::{ + impl_serde_from_string_or_integer, impl_to_fixed_bytes, ser::FromStringOrInteger, +}; +use thiserror::Error; + +use crate::{ + eth_signature_key::EthKeyPair, AccountQueryData, FeeAccount, FeeAccountProof, FeeAmount, + FeeInfo, FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, SeqTypes, +}; + +/// Possible charge fee failures +#[derive(Error, Debug, Eq, PartialEq)] +pub enum FeeError { + #[error("Insuficcient Funds: have {balance:?}, required {amount:?}")] + InsufficientFunds { + balance: Option, + amount: FeeAmount, + }, + #[error("Merkle Tree Error: {0}")] + MerkleTreeError(MerkleTreeError), +} + +impl FeeInfo { + pub fn new(account: impl Into, amount: impl Into) -> Self { + Self { + account: account.into(), + amount: amount.into(), + } + } + /// The minimum fee paid by the given builder account for a proposed block. + // TODO this function should take the block size as an input, we need to get this information + // from HotShot. 
+    pub fn base_fee(account: FeeAccount) -> Self {
+        Self {
+            account,
+            amount: FeeAmount::default(),
+        }
+    }
+
+    pub fn genesis() -> Self {
+        Self {
+            account: Default::default(),
+            amount: Default::default(),
+        }
+    }
+
+    pub fn account(&self) -> FeeAccount {
+        self.account
+    }
+
+    pub fn amount(&self) -> FeeAmount {
+        self.amount
+    }
+}
+
+impl From<BuilderFee<SeqTypes>> for FeeInfo {
+    fn from(fee: BuilderFee<SeqTypes>) -> Self {
+        Self {
+            amount: fee.fee_amount.into(),
+            account: fee.fee_account,
+        }
+    }
+}
+
+impl From<DepositFilter> for FeeInfo {
+    fn from(item: DepositFilter) -> Self {
+        Self {
+            amount: item.amount.into(),
+            account: item.user.into(),
+        }
+    }
+}
+
+impl Committable for FeeInfo {
+    fn commit(&self) -> Commitment<Self> {
+        RawCommitmentBuilder::new(&Self::tag())
+            .fixed_size_field("account", &self.account.to_fixed_bytes())
+            .fixed_size_field("amount", &self.amount.to_fixed_bytes())
+            .finalize()
+    }
+    fn tag() -> String {
+        "FEE_INFO".into()
+    }
+}
+
+impl_serde_from_string_or_integer!(FeeAmount);
+impl_to_fixed_bytes!(FeeAmount, U256);
+
+impl From<u64> for FeeAmount {
+    fn from(amt: u64) -> Self {
+        Self(amt.into())
+    }
+}
+
+impl From<FeeAmount> for MonetaryValue {
+    fn from(value: FeeAmount) -> Self {
+        MonetaryValue::eth(value.0.as_u128() as i128)
+    }
+}
+
+impl CheckedSub for FeeAmount {
+    fn checked_sub(&self, v: &Self) -> Option<Self> {
+        self.0.checked_sub(v.0).map(FeeAmount)
+    }
+}
+
+impl FromStr for FeeAmount {
+    type Err = <U256 as FromStr>::Err;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self(s.parse()?))
+    }
+}
+
+impl FromStringOrInteger for FeeAmount {
+    type Binary = U256;
+    type Integer = u64;
+
+    fn from_binary(b: Self::Binary) -> anyhow::Result<Self> {
+        Ok(Self(b))
+    }
+
+    fn from_integer(i: Self::Integer) -> anyhow::Result<Self> {
+        Ok(i.into())
+    }
+
+    fn from_string(s: String) -> anyhow::Result<Self> {
+        // For backwards compatibility, we have an ad hoc parser for WEI amounts represented as hex
+        // strings.
+        if let Some(s) = s.strip_prefix("0x") {
+            return Ok(Self(s.parse()?));
+        }
+
+        // Strip an optional non-numeric suffix, which will be interpreted as a unit.
+        let (base, unit) = s
+            .split_once(char::is_whitespace)
+            .unwrap_or((s.as_str(), "wei"));
+        match parse_units(base, unit)?
{ + ParseUnits::U256(n) => Ok(Self(n)), + ParseUnits::I256(_) => bail!("amount cannot be negative"), + } + } + + fn to_binary(&self) -> anyhow::Result { + Ok(self.0) + } + + fn to_string(&self) -> anyhow::Result { + Ok(format!("{self}")) + } +} + +impl FeeAmount { + pub fn as_u64(&self) -> Option { + if self.0 <= u64::MAX.into() { + Some(self.0.as_u64()) + } else { + None + } + } +} +impl FeeAccount { + /// Return inner `Address` + pub fn address(&self) -> Address { + self.0 + } + /// Return byte slice representation of inner `Address` type + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + /// Return array containing underlying bytes of inner `Address` type + pub fn to_fixed_bytes(self) -> [u8; 20] { + self.0.to_fixed_bytes() + } + pub fn test_key_pair() -> EthKeyPair { + EthKeyPair::from_mnemonic( + "test test test test test test test test test test test junk", + 0u32, + ) + .unwrap() + } +} + +impl FromStr for FeeAccount { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(Self(s.parse()?)) + } +} + +impl Valid for FeeAmount { + fn check(&self) -> Result<(), SerializationError> { + Ok(()) + } +} + +impl Valid for FeeAccount { + fn check(&self) -> Result<(), SerializationError> { + Ok(()) + } +} + +impl CanonicalSerialize for FeeAmount { + fn serialize_with_mode( + &self, + mut writer: W, + _compress: Compress, + ) -> Result<(), SerializationError> { + Ok(writer.write_all(&self.to_fixed_bytes())?) + } + + fn serialized_size(&self, _compress: Compress) -> usize { + core::mem::size_of::() + } +} +impl CanonicalDeserialize for FeeAmount { + fn deserialize_with_mode( + mut reader: R, + _compress: Compress, + _validate: Validate, + ) -> Result { + let mut bytes = [0u8; core::mem::size_of::()]; + reader.read_exact(&mut bytes)?; + let value = U256::from_little_endian(&bytes); + Ok(Self(value)) + } +} +impl CanonicalSerialize for FeeAccount { + fn serialize_with_mode( + &self, + mut writer: W, + _compress: Compress, + ) -> Result<(), SerializationError> { + Ok(writer.write_all(&self.0.to_fixed_bytes())?) + } + + fn serialized_size(&self, _compress: Compress) -> usize { + core::mem::size_of::
() + } +} +impl CanonicalDeserialize for FeeAccount { + fn deserialize_with_mode( + mut reader: R, + _compress: Compress, + _validate: Validate, + ) -> Result { + let mut bytes = [0u8; core::mem::size_of::
()]; + reader.read_exact(&mut bytes)?; + let value = Address::from_slice(&bytes); + Ok(Self(value)) + } +} + +impl ToTraversalPath<256> for FeeAccount { + fn to_traversal_path(&self, height: usize) -> Vec { + self.0 + .to_fixed_bytes() + .into_iter() + .take(height) + .map(|i| i as usize) + .collect() + } +} + +#[allow(dead_code)] +impl FeeAccountProof { + pub fn presence( + pos: FeeAccount, + proof: ::MembershipProof, + ) -> Self { + Self { + account: pos.into(), + proof: FeeMerkleProof::Presence(proof), + } + } + + pub fn absence( + pos: FeeAccount, + proof: ::NonMembershipProof, + ) -> Self { + Self { + account: pos.into(), + proof: FeeMerkleProof::Absence(proof), + } + } + + pub fn prove(tree: &FeeMerkleTree, account: Address) -> Option<(Self, U256)> { + match tree.universal_lookup(FeeAccount(account)) { + LookupResult::Ok(balance, proof) => Some(( + Self { + account, + proof: FeeMerkleProof::Presence(proof), + }, + balance.0, + )), + LookupResult::NotFound(proof) => Some(( + Self { + account, + proof: FeeMerkleProof::Absence(proof), + }, + 0.into(), + )), + LookupResult::NotInMemory => None, + } + } + + pub fn verify(&self, comm: &FeeMerkleCommitment) -> anyhow::Result { + match &self.proof { + FeeMerkleProof::Presence(proof) => { + ensure!( + FeeMerkleTree::verify(comm.digest(), FeeAccount(self.account), proof)?.is_ok(), + "invalid proof" + ); + Ok(proof + .elem() + .context("presence proof is missing account balance")? + .0) + } + FeeMerkleProof::Absence(proof) => { + let tree = FeeMerkleTree::from_commitment(comm); + ensure!( + tree.non_membership_verify(FeeAccount(self.account), proof)?, + "invalid proof" + ); + Ok(0.into()) + } + } + } + + pub fn remember(&self, tree: &mut FeeMerkleTree) -> anyhow::Result<()> { + match &self.proof { + FeeMerkleProof::Presence(proof) => { + tree.remember( + FeeAccount(self.account), + proof + .elem() + .context("presence proof is missing account balance")?, + proof, + )?; + Ok(()) + } + FeeMerkleProof::Absence(proof) => { + tree.non_membership_remember(FeeAccount(self.account), proof)?; + Ok(()) + } + } + } +} + +impl From<(FeeAccountProof, U256)> for AccountQueryData { + fn from((proof, balance): (FeeAccountProof, U256)) -> Self { + Self { balance, proof } + } +} diff --git a/sequencer/src/header.rs b/types/src/v0/impls/header.rs similarity index 56% rename from sequencer/src/header.rs rename to types/src/v0/impls/header.rs index 8b89f215c4..ca5ae1f733 100644 --- a/sequencer/src/header.rs +++ b/types/src/v0/impls/header.rs @@ -1,104 +1,78 @@ -use crate::{ - block::NsTable, - chain_config::ResolvableChainConfig, - eth_signature_key::BuilderSignature, - genesis::UpgradeType, - l1_client::L1Snapshot, - state::{BlockMerkleCommitment, FeeAccount, FeeAmount, FeeInfo, FeeMerkleCommitment}, - ChainConfig, L1BlockInfo, Leaf, NamespaceId, NodeState, SeqTypes, ValidatedState, -}; +use std::fmt; + use anyhow::{ensure, Context}; use ark_serialize::CanonicalSerialize; use committable::{Commitment, Committable, RawCommitmentBuilder}; -use hotshot_query_service::{availability::QueryableHeader, explorer::ExplorerHeader, Resolvable}; +use hotshot_query_service::{availability::QueryableHeader, explorer::ExplorerHeader}; use hotshot_types::{ traits::{ - block_contents::{BlockHeader, BlockPayload, BuilderFee}, + block_contents::{BlockHeader, BuilderFee}, node_implementation::NodeType, signature_key::BuilderSignatureKey, - ValidatedState as HotShotState, + BlockPayload, ValidatedState as _, }, utils::BuilderCommitment, vid::{VidCommitment, VidCommon}, }; -use 
jf_merkle_tree::prelude::*; -use serde::{Deserialize, Serialize}; +use jf_merkle_tree::{AppendableMerkleTreeScheme, MerkleTreeScheme}; +use serde::{ + de::{self, MapAccess, SeqAccess, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use serde_json::{Map, Value}; use snafu::Snafu; +use thiserror::Error; use time::OffsetDateTime; use vbs::version::Version; -/// A header is like a [`Block`] with the body replaced by a digest. -#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)] -pub struct Header { - /// A commitment to a ChainConfig or a full ChainConfig. - pub chain_config: ResolvableChainConfig, - - pub height: u64, - pub timestamp: u64, - - /// The Espresso block header includes a reference to the current head of the L1 chain. - /// - /// Rollups can use this to facilitate bridging between the L1 and L2 in a deterministic way. - /// This field deterministically associates an L2 block with a recent L1 block the instant the - /// L2 block is sequenced. Rollups can then define the L2 state after this block as the state - /// obtained by executing all the transactions in this block _plus_ all the L1 deposits up to - /// the given L1 block number. Since there is no need to wait for the L2 block to be reflected - /// on the L1, this bridge design retains the low confirmation latency of HotShot. - /// - /// This block number indicates the unsafe head of the L1 chain, so it is subject to reorgs. For - /// this reason, the Espresso header does not include any information that might change in a - /// reorg, such as the L1 block timestamp or hash. It includes only the L1 block number, which - /// will always refer to _some_ block after a reorg: if the L1 head at the time this block was - /// sequenced gets reorged out, the L1 chain will eventually (and probably quickly) grow to the - /// same height once again, and a different block will exist with the same height. In this way, - /// Espresso does not have to handle L1 reorgs, and the Espresso blockchain will always be - /// reflective of the current state of the L1 blockchain. Rollups that use this block number - /// _do_ have to handle L1 reorgs, but each rollup and each rollup client can decide how many - /// confirmations they want to wait for on top of this `l1_head` before they consider an L2 - /// block finalized. This offers a tradeoff between low-latency L1-L2 bridges and finality. - /// - /// Rollups that want a stronger guarantee of finality, or that want Espresso to attest to data - /// from the L1 block that might change in reorgs, can instead use the latest L1 _finalized_ - /// block at the time this L2 block was sequenced: `l1_finalized`. - pub l1_head: u64, +use crate::{ + v0::header::{EitherOrVersion, VersionedHeader}, + v0_1, v0_2, v0_3, BlockMerkleCommitment, BlockSize, BuilderSignature, ChainConfig, FeeAccount, + FeeAmount, FeeInfo, FeeMerkleCommitment, Header, L1BlockInfo, L1Snapshot, Leaf, NamespaceId, + NodeState, NsTable, NsTableValidationError, ResolvableChainConfig, SeqTypes, UpgradeType, + ValidatedState, +}; - /// The Espresso block header includes information a bout the latest finalized L1 block. - /// - /// Similar to `l1_head`, rollups can use this information to implement a bridge between the L1 - /// and L2 while retaining the finality of low-latency block confirmations from HotShot. Since - /// this information describes the finalized L1 block, a bridge using this L1 block will have - /// much higher latency than a bridge using `l1_head`. 
In exchange, rollups that use the - /// finalized block do not have to worry about L1 reorgs, and can inject verifiable attestations - /// to the L1 block metadata (such as its timestamp or hash) into their execution layers, since - /// Espresso replicas will sign this information for the finalized L1 block. - /// - /// This block may be `None` in the rare case where Espresso has started shortly after the - /// genesis of the L1, and the L1 has yet to finalize a block. In all other cases it will be - /// `Some`. - pub l1_finalized: Option<L1BlockInfo>, +/// Possible proposal validation failures +#[derive(Error, Debug, Eq, PartialEq)] +pub enum ProposalValidationError { + #[error("Invalid ChainConfig: expected={expected}, proposal={proposal}")] + InvalidChainConfig { expected: String, proposal: String }, - pub payload_commitment: VidCommitment, - pub builder_commitment: BuilderCommitment, - pub ns_table: NsTable, - /// Root Commitment of Block Merkle Tree - pub block_merkle_tree_root: BlockMerkleCommitment, - /// Root Commitment of `FeeMerkleTree` - pub fee_merkle_tree_root: FeeMerkleCommitment, - /// Fee paid by the block builder - pub fee_info: FeeInfo, - /// Account (etheruem address) of builder - /// - /// This signature is not considered formally part of the header; it is just evidence proving - /// that other parts of the header (`fee_info`) are correct. It exists in the header so that it - /// is available to all nodes to be used during validation. But since it is checked during - /// consensus, any downstream client who has a proof of consensus finality of a header can trust - /// that `fee_info` is correct without relying on the signature. Thus, this signature is not - /// included in the header commitment. - pub builder_signature: Option<BuilderSignature>, + #[error( + "Invalid Payload Size: (max_block_size={max_block_size}, proposed_block_size={block_size})" + )] + MaxBlockSizeExceeded { + max_block_size: BlockSize, + block_size: BlockSize, + }, + #[error("Insufficient Fee: block_size={max_block_size}, base_fee={base_fee}, proposed_fee={proposed_fee}")] + InsufficientFee { + max_block_size: BlockSize, + base_fee: FeeAmount, + proposed_fee: FeeAmount, + }, + #[error("Invalid Height: parent_height={parent_height}, proposal_height={proposal_height}")] + InvalidHeight { + parent_height: u64, + proposal_height: u64, + }, + #[error("Invalid Block Root Error: expected={expected_root}, proposal={proposal_root}")] + InvalidBlockRoot { + expected_root: BlockMerkleCommitment, + proposal_root: BlockMerkleCommitment, + }, + #[error("Invalid Fee Root Error: expected={expected_root}, proposal={proposal_root}")] + InvalidFeeRoot { + expected_root: FeeMerkleCommitment, + proposal_root: FeeMerkleCommitment, + }, + #[error("Invalid namespace table: {err}")] + InvalidNsTable { err: NsTableValidationError }, } -impl Committable for Header { - fn commit(&self) -> Commitment<Self> { +impl v0_1::Header { + pub(crate) fn commit(&self) -> Commitment<Self>
{ + let mut bmt_bytes = vec![]; self.block_merkle_tree_root .serialize_with_mode(&mut bmt_bytes, ark_serialize::Compress::Yes) @@ -124,6 +98,24 @@ impl Committable for Header { .field("fee_info", self.fee_info.commit()) .finalize() } +} + +impl Committable for Header { + fn commit(&self) -> Commitment<Self> { + match self { + Self::V1(header) => header.commit(), + Self::V2(fields) => RawCommitmentBuilder::new(&Self::tag()) + .u64_field("version_major", 0) + .u64_field("version_minor", 2) + .field("fields", fields.commit()) + .finalize(), + Self::V3(fields) => RawCommitmentBuilder::new(&Self::tag()) + .u64_field("version_major", 0) + .u64_field("version_minor", 3) + .field("fields", fields.commit()) + .finalize(), + } + } fn tag() -> String { // We use the tag "BLOCK" since blocks are identified by the hash of their header. This will @@ -132,6 +124,254 @@ impl Committable for Header { } } +impl Header { + pub fn version(&self) -> Version { + match self { + Self::V1(_) => Version { major: 0, minor: 1 }, + Self::V2(_) => Version { major: 0, minor: 2 }, + Self::V3(_) => Version { major: 0, minor: 3 }, + } + } +} + +impl Serialize for Header { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + match self { + Self::V1(header) => header.serialize(serializer), + Self::V2(fields) => VersionedHeader { + version: EitherOrVersion::Version(Version { major: 0, minor: 2 }), + fields: fields.clone(), + } + .serialize(serializer), + Self::V3(fields) => VersionedHeader { + version: EitherOrVersion::Version(Version { major: 0, minor: 3 }), + fields: fields.clone(), + } + .serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for Header { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + struct HeaderVisitor; + + impl<'de> Visitor<'de> for HeaderVisitor { + type Value = Header; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("Header") + } + + fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> + where + V: SeqAccess<'de>, + { + let chain_config_or_version: EitherOrVersion = seq + .next_element()? + .ok_or_else(|| de::Error::missing_field("chain_config"))?; + + match chain_config_or_version { + // For v0.1, the first field in the sequence of fields is the first field of the struct, so we call a function to get the rest of + // the fields from the sequence and pack them into the struct. + EitherOrVersion::Left(cfg) => Ok(Header::V1( + v0_1::Header::deserialize_with_chain_config(cfg.into(), seq)?, + )), + EitherOrVersion::Right(commit) => Ok(Header::V1( + v0_1::Header::deserialize_with_chain_config(commit.into(), seq)?, + )), + // For all versions > 0.1, the first "field" is not actually part of the `Header` struct. + // We just delegate directly to the derived deserialization impl for the appropriate version. + EitherOrVersion::Version(Version { major: 0, minor: 2 }) => Ok(Header::V2( + seq.next_element()? + .ok_or_else(|| de::Error::missing_field("fields"))?, + )), + EitherOrVersion::Version(Version { major: 0, minor: 3 }) => Ok(Header::V3( + seq.next_element()? + .ok_or_else(|| de::Error::missing_field("fields"))?, + )), + EitherOrVersion::Version(v) => { + Err(serde::de::Error::custom(format!("invalid version {v:?}"))) + } + } + } + + fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> + where + V: MapAccess<'de>, + { + // Insert all the fields into the serde_map, as the map may have out-of-order fields. + let mut serde_map: Map<String, Value> = Map::new(); + + while let Some(key) = map.next_key::<String>()? 
{ + serde_map.insert(key.trim().to_owned(), map.next_value()?); + } + + if let Some(v) = serde_map.get("version") { + let fields = serde_map + .get("fields") + .ok_or_else(|| de::Error::missing_field("fields"))?; + + let version = serde_json::from_value::<EitherOrVersion>(v.clone()) + .map_err(de::Error::custom)?; + let result = match version { + EitherOrVersion::Version(Version { major: 0, minor: 2 }) => Ok(Header::V2( + serde_json::from_value(fields.clone()).map_err(de::Error::custom)?, + )), + EitherOrVersion::Version(Version { major: 0, minor: 3 }) => Ok(Header::V3( + serde_json::from_value(fields.clone()).map_err(de::Error::custom)?, + )), + EitherOrVersion::Version(v) => { + Err(de::Error::custom(format!("invalid version {v:?}"))) + } + chain_config => Err(de::Error::custom(format!( + "expected version, found chain_config {chain_config:?}" + ))), + }; + return result; + } + + Ok(Header::V1( + serde_json::from_value(serde_map.into()).map_err(de::Error::custom)?, + )) + } + } + + // List of all possible fields of all versions of the `Header`. + // serde's `deserialize_struct` works by deserializing to a struct with a specific list of fields. + // The length of the fields list we provide is always going to be greater than the length of the target struct. + // In our case, we are deserializing to either a V1 Header or a VersionedHeader for versions > 0.1. + // We use serde_json and bincode serialization in the sequencer. + // Fortunately, serde_json ignores the fields parameter and only cares about our Visitor implementation. + // - https://docs.rs/serde_json/1.0.120/serde_json/struct.Deserializer.html#method.deserialize_struct + // Bincode uses the length of the fields list, but the bincode deserialization only cares that the length of the fields + // is an upper bound of the target struct's fields length. + // - https://docs.rs/bincode/1.3.3/src/bincode/de/mod.rs.html#313 + // This works because the bincode deserializer only consumes the next field when `next_element` is called, + // and our visitor calls it the correct number of times. + // This would, however, break if the bincode deserializer implementation required an exact match of the fields list's length, + // consuming one element for each field. 
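+ // For illustration only (a sketch, not normative): given the `Serialize`
+ // impl above, the JSON shapes this visitor must accept are, schematically,
+ //     v0.1:  { "chain_config": ..., "height": ..., ..., "builder_signature": ... }
+ //     v0.2+: { "version": /* EitherOrVersion::Version */, "fields": { ... } }
+ // where the exact encoding of `EitherOrVersion` follows its serde
+ // representation in `v0/header.rs` (not shown in this hunk).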
+ let fields: &[&str] = &[ + "fields", + "chain_config", + "version", + "height", + "timestamp", + "l1_head", + "l1_finalized", + "payload_commitment", + "builder_commitment", + "ns_table", + "block_merkle_tree_root", + "fee_merkle_tree_root", + "fee_info", + "builder_signature", + ]; + + deserializer.deserialize_struct("Header", fields, HeaderVisitor) + } +} + +impl Header { + #[allow(clippy::too_many_arguments)] + pub(crate) fn create( + chain_config: ResolvableChainConfig, + height: u64, + timestamp: u64, + l1_head: u64, + l1_finalized: Option<L1BlockInfo>, + payload_commitment: VidCommitment, + builder_commitment: BuilderCommitment, + ns_table: NsTable, + fee_merkle_tree_root: FeeMerkleCommitment, + block_merkle_tree_root: BlockMerkleCommitment, + fee_info: FeeInfo, + builder_signature: Option<BuilderSignature>, + version: Version, + ) -> Self { + let Version { major, minor } = version; + + // Ensure the major version is 0, otherwise panic + assert!(major == 0, "Invalid major version {major}"); + + match minor { + 1 => Self::V1(v0_1::Header { + chain_config, + height, + timestamp, + l1_head, + l1_finalized, + payload_commitment, + builder_commitment, + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info, + builder_signature, + }), + 2 => Self::V2(v0_2::Header { + chain_config, + height, + timestamp, + l1_head, + l1_finalized, + payload_commitment, + builder_commitment, + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info, + builder_signature, + }), + 3 => Self::V3(v0_3::Header { + chain_config, + height, + timestamp, + l1_head, + l1_finalized, + payload_commitment, + builder_commitment, + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info, + builder_signature, + }), + // This case should never occur + // but if it does, we must panic + // because we don't have the versioned types for this version + _ => panic!("invalid version: {version}"), + } + } +} + +// Getter for a field which is the same across all versions. +macro_rules! field { + ($obj:ident.$name:ident) => { + match $obj { + Self::V1(data) => &data.$name, + Self::V2(data) => &data.$name, + Self::V3(data) => &data.$name, + } + }; +} + +macro_rules! field_mut { + ($obj:ident.$name:ident) => { + match $obj { + Self::V1(data) => &mut data.$name, + Self::V2(data) => &mut data.$name, + Self::V3(data) => &mut data.$name, + } + }; +} + impl Header { #[allow(clippy::too_many_arguments)] fn from_info( @@ -145,39 +385,46 @@ impl Header { mut timestamp: u64, mut state: ValidatedState, chain_config: ChainConfig, + version: Version, ) -> anyhow::Result<Self> { + ensure!( + version.major == 0, + "Invalid major version {}", + version.major + ); + // Increment height. let parent_header = parent_leaf.block_header(); - let height = parent_header.height + 1; + let height = parent_header.height() + 1; // Ensure the timestamp does not decrease. We can trust `parent.timestamp` because `parent` // has already been voted on by consensus. If our timestamp is behind, either f + 1 nodes // are lying about the current time, or our clock is just lagging. - if timestamp < parent_header.timestamp { + if timestamp < parent_header.timestamp() { tracing::warn!( "Espresso timestamp {timestamp} behind parent {}, local clock may be out of sync", - parent_header.timestamp + parent_header.timestamp() ); - timestamp = parent_header.timestamp; + timestamp = parent_header.timestamp(); } // Ensure the L1 block references don't decrease. Again, we can trust `parent.l1_*` are // accurate. 
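+ // (Sketch of the invariant enforced below: for every child header `h` with
+ // parent `p`,
+ //     h.timestamp()    >= p.timestamp()
+ //     h.l1_head()      >= p.l1_head()
+ //     h.l1_finalized() >= p.l1_finalized()
+ // so a lagging local clock or L1 client degrades freshness, never
+ // monotonicity.)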
- if l1.head < parent_header.l1_head { + if l1.head < parent_header.l1_head() { tracing::warn!( "L1 head {} behind parent {}, L1 client may be lagging", l1.head, - parent_header.l1_head + parent_header.l1_head() ); - l1.head = parent_header.l1_head; + l1.head = parent_header.l1_head(); } - if l1.finalized < parent_header.l1_finalized { + if l1.finalized < parent_header.l1_finalized() { tracing::warn!( "L1 finalized {:?} behind parent {:?}, L1 client may be lagging", l1.finalized, - parent_header.l1_finalized + parent_header.l1_finalized() ); - l1.finalized = parent_header.l1_finalized; + l1.finalized = parent_header.l1_finalized(); } // Enforce that the sequencer block timestamp is not behind the L1 block timestamp. This can @@ -192,7 +439,7 @@ impl Header { state .block_merkle_tree - .push(parent_header.commit()) + .push(parent_header.commit().as_ref()) .context("missing blocks frontier")?; let block_merkle_tree_root = state.block_merkle_tree.commitment(); @@ -220,12 +467,13 @@ impl Header { .context(format!("invalid builder fee {fee_info:?}"))?; let fee_merkle_tree_root = state.fee_merkle_tree.commitment(); - Ok(Self { - chain_config: chain_config.commit().into(), + + Ok(Self::create( + chain_config.commit().into(), height, timestamp, - l1_head: l1.head, - l1_finalized: l1.finalized, + l1.head, + l1.finalized, payload_commitment, builder_commitment, ns_table, @@ -233,7 +481,8 @@ impl Header { block_merkle_tree_root, fee_info, builder_signature, - }) + version, + )) } async fn get_chain_config( @@ -243,7 +492,7 @@ impl Header { let validated_cf = validated_state.chain_config; let instance_cf = instance_state.chain_config; - if validated_cf.commit() == instance_cf.commitment() { + if validated_cf.commit() == instance_cf.commit() { return instance_cf; } @@ -262,6 +511,138 @@ impl Header { } } +impl Header { + /// A commitment to a ChainConfig or a full ChainConfig. + pub fn chain_config(&self) -> &ResolvableChainConfig { + field!(self.chain_config) + } + + pub fn height(&self) -> u64 { + *field!(self.height) + } + + pub fn height_mut(&mut self) -> &mut u64 { + &mut *field_mut!(self.height) + } + + pub fn timestamp(&self) -> u64 { + *field!(self.timestamp) + } + + pub fn timestamp_mut(&mut self) -> &mut u64 { + &mut *field_mut!(self.timestamp) + } + + /// The Espresso block header includes a reference to the current head of the L1 chain. + /// + /// Rollups can use this to facilitate bridging between the L1 and L2 in a deterministic way. + /// This field deterministically associates an L2 block with a recent L1 block the instant the + /// L2 block is sequenced. Rollups can then define the L2 state after this block as the state + /// obtained by executing all the transactions in this block _plus_ all the L1 deposits up to + /// the given L1 block number. Since there is no need to wait for the L2 block to be reflected + /// on the L1, this bridge design retains the low confirmation latency of HotShot. + /// + /// This block number indicates the unsafe head of the L1 chain, so it is subject to reorgs. For + /// this reason, the Espresso header does not include any information that might change in a + /// reorg, such as the L1 block timestamp or hash. It includes only the L1 block number, which + /// will always refer to _some_ block after a reorg: if the L1 head at the time this block was + /// sequenced gets reorged out, the L1 chain will eventually (and probably quickly) grow to the + /// same height once again, and a different block will exist with the same height. 
In this way, + /// Espresso does not have to handle L1 reorgs, and the Espresso blockchain will always be + /// reflective of the current state of the L1 blockchain. Rollups that use this block number + /// _do_ have to handle L1 reorgs, but each rollup and each rollup client can decide how many + /// confirmations they want to wait for on top of this `l1_head` before they consider an L2 + /// block finalized. This offers a tradeoff between low-latency L1-L2 bridges and finality. + /// + /// Rollups that want a stronger guarantee of finality, or that want Espresso to attest to data + /// from the L1 block that might change in reorgs, can instead use the latest L1 _finalized_ + /// block at the time this L2 block was sequenced: [`Self::l1_finalized`]. + pub fn l1_head(&self) -> u64 { + *field!(self.l1_head) + } + + pub fn l1_head_mut(&mut self) -> &mut u64 { + &mut *field_mut!(self.l1_head) + } + + /// The Espresso block header includes information about the latest finalized L1 block. + /// + /// Similar to [`l1_head`](Self::l1_head), rollups can use this information to implement a + /// bridge between the L1 and L2 while retaining the finality of low-latency block confirmations + /// from HotShot. Since this information describes the finalized L1 block, a bridge using this + /// L1 block will have much higher latency than a bridge using [`l1_head`](Self::l1_head). In + /// exchange, rollups that use the finalized block do not have to worry about L1 reorgs, and can + /// inject verifiable attestations to the L1 block metadata (such as its timestamp or hash) into + /// their execution layers, since Espresso replicas will sign this information for the finalized + /// L1 block. + /// + /// This block may be `None` in the rare case where Espresso has started shortly after the + /// genesis of the L1, and the L1 has yet to finalize a block. In all other cases it will be + /// `Some`. + pub fn l1_finalized(&self) -> Option<L1BlockInfo> { + *field!(self.l1_finalized) + } + + pub fn l1_finalized_mut(&mut self) -> &mut Option<L1BlockInfo> { + &mut *field_mut!(self.l1_finalized) + } + + pub fn payload_commitment(&self) -> VidCommitment { + *field!(self.payload_commitment) + } + + pub fn payload_commitment_mut(&mut self) -> &mut VidCommitment { + &mut *field_mut!(self.payload_commitment) + } + + pub fn builder_commitment(&self) -> &BuilderCommitment { + field!(self.builder_commitment) + } + + pub fn builder_commitment_mut(&mut self) -> &mut BuilderCommitment { + &mut *field_mut!(self.builder_commitment) + } + + pub fn ns_table(&self) -> &NsTable { + field!(self.ns_table) + } + + /// Root Commitment of Block Merkle Tree + pub fn block_merkle_tree_root(&self) -> BlockMerkleCommitment { + *field!(self.block_merkle_tree_root) + } + + pub fn block_merkle_tree_root_mut(&mut self) -> &mut BlockMerkleCommitment { + &mut *field_mut!(self.block_merkle_tree_root) + } + + /// Root Commitment of `FeeMerkleTree` + pub fn fee_merkle_tree_root(&self) -> FeeMerkleCommitment { + *field!(self.fee_merkle_tree_root) + } + + pub fn fee_merkle_tree_root_mut(&mut self) -> &mut FeeMerkleCommitment { + &mut *field_mut!(self.fee_merkle_tree_root) + } + + /// Fee paid by the block builder + pub fn fee_info(&self) -> FeeInfo { + *field!(self.fee_info) + } + + /// Account (ethereum address) of builder + /// + /// This signature is not considered formally part of the header; it is just evidence proving + /// that other parts of the header ([`fee_info`](Self::fee_info)) are correct. 
It exists in the + /// header so that it is available to all nodes to be used during validation. But since it is + /// checked during consensus, any downstream client who has a proof of consensus finality of a + /// header can trust that [`fee_info`](Self::fee_info) is correct without relying on the + /// signature. Thus, this signature is not included in the header commitment. + pub fn builder_signature(&self) -> Option { + *field!(self.builder_signature) + } +} + #[derive(Debug, Snafu)] #[snafu(display("Invalid Block Header {msg}"))] pub struct InvalidBlockHeader { @@ -287,7 +668,7 @@ impl BlockHeader for Header { fields( node_id = instance_state.node_id, view = ?parent_leaf.view_number(), - height = parent_leaf.block_header().height, + height = parent_leaf.block_header().height(), ), )] @@ -335,7 +716,7 @@ impl BlockHeader for Header { addr, parent_leaf .block_header() - .l1_finalized + .l1_finalized() .map(|block_info| block_info.number), block_info.number, ) @@ -402,6 +783,7 @@ impl BlockHeader for Header { OffsetDateTime::now_utc().unix_timestamp() as u64, validated_state, chain_config, + version, )?) } @@ -419,51 +801,51 @@ impl BlockHeader for Header { let block_merkle_tree_root = block_merkle_tree.commitment(); let fee_merkle_tree_root = fee_merkle_tree.commitment(); - Self { - // The genesis header needs to be completely deterministic, so we can't sample real - // timestamps or L1 values. - chain_config: instance_state.chain_config.into(), - height: 0, - timestamp: instance_state.genesis_header.timestamp.unix_timestamp(), - l1_finalized: instance_state.l1_genesis, - // Make sure the L1 head is not behind the finalized block. - l1_head: instance_state + // The Header is versioned, + // so we create the genesis header for the current version of the sequencer. 
+ Self::create( + instance_state.chain_config.into(), + 0, + instance_state.genesis_header.timestamp.unix_timestamp(), + instance_state .l1_genesis .map(|block| block.number) .unwrap_or_default(), + instance_state.l1_genesis, payload_commitment, - builder_commitment, - ns_table, - block_merkle_tree_root, + builder_commitment.clone(), + ns_table.clone(), fee_merkle_tree_root, - fee_info: FeeInfo::genesis(), - builder_signature: None, - } + block_merkle_tree_root, + FeeInfo::genesis(), + None, + instance_state.current_version, + ) } fn block_number(&self) -> u64 { - self.height + self.height() } fn payload_commitment(&self) -> VidCommitment { - self.payload_commitment + self.payload_commitment() } fn metadata( &self, ) -> &<::BlockPayload as BlockPayload>::Metadata { - &self.ns_table + self.ns_table() } /// Commit over fee_amount, payload_commitment and metadata fn builder_commitment(&self) -> BuilderCommitment { - self.builder_commitment.clone() + self.builder_commitment().clone() } } impl QueryableHeader for Header { fn timestamp(&self) -> u64 { - self.timestamp + self.timestamp() } } @@ -474,15 +856,15 @@ impl ExplorerHeader for Header { type NamespaceId = NamespaceId; fn proposer_id(&self) -> Self::ProposerId { - self.fee_info.account() + self.fee_info().account() } fn fee_info_account(&self) -> Self::WalletAddress { - self.fee_info.account() + self.fee_info().account() } fn fee_info_balance(&self) -> Self::BalanceAmount { - self.fee_info.amount() + self.fee_info().amount() } /// reward_balance at the moment is only implemented as a stub, as block @@ -495,9 +877,9 @@ impl ExplorerHeader for Header { } fn namespace_ids(&self) -> Vec { - self.ns_table + self.ns_table() .iter() - .map(|i| self.ns_table.read_ns_id_unchecked(&i)) + .map(|i| self.ns_table().read_ns_id_unchecked(&i)) .collect() } } @@ -506,16 +888,6 @@ impl ExplorerHeader for Header { mod test_headers { use std::sync::Arc; - use super::*; - use crate::{ - catchup::mock::MockStateCatchup, - eth_signature_key::EthKeyPair, - l1_client::L1Client, - state::{ - validate_proposal, BlockMerkleTree, FeeAccount, FeeMerkleTree, ProposalValidationError, - }, - NodeState, - }; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use ethers::{ types::{Address, U256}, @@ -523,7 +895,18 @@ mod test_headers { }; use hotshot_types::{traits::signature_key::BuilderSignatureKey, vid::vid_scheme}; use jf_vid::VidScheme; - use vbs::version::{StaticVersion, StaticVersionType}; + use v0_1::{BlockMerkleTree, FeeMerkleTree, L1Client}; + use vbs::{ + bincode_serializer::BincodeSerializer, + version::{StaticVersion, StaticVersionType}, + BinarySerializer, + }; + + use super::*; + use crate::{ + eth_signature_key::EthKeyPair, v0::impls::instance_state::mock::MockStateCatchup, + validate_proposal, NodeState, + }; #[derive(Debug, Default)] #[must_use] @@ -557,15 +940,15 @@ mod test_headers { let genesis = GenesisForTest::default().await; let mut parent = genesis.header.clone(); - parent.timestamp = self.parent_timestamp; - parent.l1_head = self.parent_l1_head; - parent.l1_finalized = self.parent_l1_finalized; + *parent.timestamp_mut() = self.parent_timestamp; + *parent.l1_head_mut() = self.parent_l1_head; + *parent.l1_finalized_mut() = self.parent_l1_finalized; let mut parent_leaf = genesis.leaf.clone(); *parent_leaf.block_header_mut() = parent.clone(); let block_merkle_tree = - BlockMerkleTree::from_elems(Some(32), Vec::>::new()).unwrap(); + BlockMerkleTree::from_elems(Some(32), Vec::<[u8; 32]>::new()).unwrap(); let fee_info = 
FeeInfo::genesis(); let fee_merkle_tree = FeeMerkleTree::from_kv_set( @@ -585,13 +968,13 @@ mod test_headers { &fee_key, fee_amount, &genesis.ns_table, - &genesis.header.payload_commitment, + &genesis.header.payload_commitment(), ) .unwrap(); let header = Header::from_info( - genesis.header.payload_commitment, - genesis.header.builder_commitment, + genesis.header.payload_commitment(), + genesis.header.builder_commitment().clone(), genesis.ns_table, &parent_leaf, L1Snapshot { @@ -607,12 +990,13 @@ mod test_headers { self.timestamp, validated_state.clone(), genesis.instance_state.chain_config, + Version { major: 0, minor: 1 }, ) .unwrap(); - assert_eq!(header.height, parent.height + 1); - assert_eq!(header.timestamp, self.expected_timestamp); - assert_eq!(header.l1_head, self.expected_l1_head); - assert_eq!(header.l1_finalized, self.expected_l1_finalized); + assert_eq!(header.height(), parent.height() + 1); + assert_eq!(header.timestamp(), self.expected_timestamp); + assert_eq!(header.l1_head(), self.expected_l1_head); + assert_eq!(header.l1_finalized(), self.expected_l1_finalized); // Check deposits were inserted before computing the fee merkle tree root. for fee_info in self.l1_deposits { @@ -620,12 +1004,12 @@ mod test_headers { } assert_eq!( validated_state.fee_merkle_tree.commitment(), - header.fee_merkle_tree_root, + header.fee_merkle_tree_root(), ); assert_eq!( block_merkle_tree, - BlockMerkleTree::from_elems(Some(32), Vec::>::new()).unwrap() + BlockMerkleTree::from_elems(Some(32), Vec::<[u8; 32]>::new()).unwrap() ); } } @@ -822,10 +1206,12 @@ mod test_headers { *parent_leaf.block_header_mut() = parent_header.clone(); // Populate the tree with an initial `push`. - block_merkle_tree.push(genesis.header.commit()).unwrap(); + block_merkle_tree + .push(genesis.header.commit().as_ref()) + .unwrap(); let block_merkle_tree_root = block_merkle_tree.commitment(); validated_state.block_merkle_tree = block_merkle_tree.clone(); - parent_header.block_merkle_tree_root = block_merkle_tree_root; + *parent_header.block_merkle_tree_root_mut() = block_merkle_tree_root; let mut proposal = parent_header.clone(); let ver = StaticVersion::<1, 0>::version(); @@ -847,7 +1233,7 @@ mod test_headers { assert_eq!( ProposalValidationError::InvalidChainConfig { expected: format!("{:?}", chain_config), - proposal: format!("{:?}", proposal.chain_config) + proposal: format!("{:?}", proposal.chain_config()) }, err ); @@ -876,7 +1262,7 @@ mod test_headers { ); // proposed `Header` root should include parent + parent.commit - proposal.height += 1; + *proposal.height_mut() += 1; let validated_state = validated_state .apply_header(&genesis.instance_state, &parent_leaf, &proposal, ver) @@ -896,7 +1282,7 @@ mod test_headers { assert_eq!( ProposalValidationError::InvalidBlockRoot { expected_root: validated_state.block_merkle_tree.commitment(), - proposal_root: proposal.block_merkle_tree_root + proposal_root: proposal.block_merkle_tree_root() }, err ); @@ -920,15 +1306,17 @@ mod test_headers { let fee_merkle_tree = parent_state.fee_merkle_tree.clone(); // Populate the tree with an initial `push`. 
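+ // (Sketch: `BlockMerkleTree` now stores `[u8; 32]` elements, so a header is
+ // pushed as the raw bytes of its commitment,
+ //     block_merkle_tree.push(header.commit().as_ref()).unwrap();
+ // hence the `.as_ref()` added to the calls below.)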
- block_merkle_tree.push(genesis.header.commit()).unwrap(); + block_merkle_tree + .push(genesis.header.commit().as_ref()) + .unwrap(); let block_merkle_tree_root = block_merkle_tree.commitment(); let fee_merkle_tree_root = fee_merkle_tree.commitment(); parent_state.block_merkle_tree = block_merkle_tree.clone(); parent_state.fee_merkle_tree = fee_merkle_tree.clone(); let mut parent_header = genesis.header.clone(); - parent_header.block_merkle_tree_root = block_merkle_tree_root; - parent_header.fee_merkle_tree_root = fee_merkle_tree_root; + *parent_header.block_merkle_tree_root_mut() = block_merkle_tree_root; + *parent_header.fee_merkle_tree_root_mut() = fee_merkle_tree_root; let mut parent_leaf = genesis.leaf.clone(); *parent_leaf.block_header_mut() = parent_header.clone(); @@ -945,7 +1333,7 @@ mod test_headers { // the element (header commitment) does not match the one in the proof. let key_pair = EthKeyPair::for_test(); let fee_amount = 0u64; - let payload_commitment = parent_header.payload_commitment; + let payload_commitment = parent_header.payload_commitment(); let builder_commitment = parent_header.builder_commitment(); let ns_table = genesis.ns_table; let fee_signature = @@ -960,7 +1348,7 @@ mod test_headers { &genesis_state, &parent_leaf, payload_commitment, - builder_commitment, + builder_commitment.clone(), ns_table, builder_fee, vid_common.clone(), @@ -979,7 +1367,7 @@ mod test_headers { } let mut block_merkle_tree = proposal_state.block_merkle_tree.clone(); - block_merkle_tree.push(proposal.commit()).unwrap(); + block_merkle_tree.push(proposal.commit().as_ref()).unwrap(); let proposal_state = proposal_state .apply_header( @@ -1002,7 +1390,7 @@ mod test_headers { assert_eq!( proposal_state.block_merkle_tree.commitment(), - proposal.block_merkle_tree_root + proposal.block_merkle_tree_root() ); } @@ -1019,4 +1407,100 @@ mod test_headers { .fee_account() .validate_builder_signature(&signature, &commitment)); } + + #[async_std::test] + async fn test_versioned_header_serialization() { + setup_logging(); + setup_backtrace(); + + let genesis = GenesisForTest::default().await; + let header = genesis.header.clone(); + let ns_table = genesis.ns_table; + + let (fee_account, _) = FeeAccount::generated_from_seed_indexed([0; 32], 0); + + let v1_header = Header::create( + genesis.instance_state.chain_config.into(), + 1, + 2, + 3, + Default::default(), + header.payload_commitment(), + header.builder_commitment().clone(), + ns_table.clone(), + header.fee_merkle_tree_root(), + header.block_merkle_tree_root(), + FeeInfo { + amount: 0.into(), + account: fee_account, + }, + Default::default(), + Version { major: 0, minor: 1 }, + ); + + let serialized = serde_json::to_string(&v1_header).unwrap(); + let deserialized: Header = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v1_header, deserialized); + + let v2_header = Header::create( + genesis.instance_state.chain_config.into(), + 1, + 2, + 3, + Default::default(), + header.payload_commitment(), + header.builder_commitment().clone(), + ns_table.clone(), + header.fee_merkle_tree_root(), + header.block_merkle_tree_root(), + FeeInfo { + amount: 0.into(), + account: fee_account, + }, + Default::default(), + Version { major: 0, minor: 2 }, + ); + + let serialized = serde_json::to_string(&v2_header).unwrap(); + let deserialized: Header = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v2_header, deserialized); + + let v3_header = Header::create( + genesis.instance_state.chain_config.into(), + 1, + 2, + 3, + Default::default(), + 
header.payload_commitment(), + header.builder_commitment().clone(), + ns_table.clone(), + header.fee_merkle_tree_root(), + header.block_merkle_tree_root(), + FeeInfo { + amount: 0.into(), + account: fee_account, + }, + Default::default(), + Version { major: 0, minor: 3 }, + ); + + let serialized = serde_json::to_string(&v3_header).unwrap(); + let deserialized: Header = serde_json::from_str(&serialized).unwrap(); + assert_eq!(v3_header, deserialized); + + let v1_bytes = BincodeSerializer::>::serialize(&v1_header).unwrap(); + let deserialized: Header = + BincodeSerializer::>::deserialize(&v1_bytes).unwrap(); + assert_eq!(v1_header, deserialized); + + let v2_bytes = BincodeSerializer::>::serialize(&v2_header).unwrap(); + let deserialized: Header = + BincodeSerializer::>::deserialize(&v2_bytes).unwrap(); + assert_eq!(v2_header, deserialized); + + let v3_bytes = BincodeSerializer::>::serialize(&v3_header).unwrap(); + let deserialized: Header = + BincodeSerializer::>::deserialize(&v3_bytes).unwrap(); + assert_eq!(v3_header, deserialized); + } } diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs new file mode 100644 index 0000000000..c47abcda14 --- /dev/null +++ b/types/src/v0/impls/instance_state.rs @@ -0,0 +1,162 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use hotshot_types::traits::{node_implementation::NodeType, states::InstanceState}; +use vbs::version::{StaticVersionType, Version}; + +use crate::{ + v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, SeqTypes, Upgrade, ValidatedState, +}; + +impl NodeState { + pub fn new( + node_id: u64, + chain_config: ChainConfig, + l1_client: L1Client, + catchup: impl StateCatchup + 'static, + ) -> Self { + Self { + node_id, + chain_config, + l1_client, + peers: Arc::new(catchup), + genesis_header: Default::default(), + genesis_state: ValidatedState { + chain_config: chain_config.into(), + ..Default::default() + }, + l1_genesis: None, + upgrades: Default::default(), + current_version: ::Base::version(), + } + } + + #[cfg(any(test, feature = "testing"))] + pub fn mock() -> Self { + Self::new( + 0, + ChainConfig::default(), + L1Client::new("http://localhost:3331".parse().unwrap(), 10000), + mock::MockStateCatchup::default(), + ) + } + + pub fn with_l1(mut self, l1_client: L1Client) -> Self { + self.l1_client = l1_client; + self + } + + pub fn with_genesis(mut self, state: ValidatedState) -> Self { + self.genesis_state = state; + self + } + + pub fn with_chain_config(mut self, cfg: ChainConfig) -> Self { + self.chain_config = cfg; + self + } + + pub fn with_upgrades(mut self, upgrades: BTreeMap) -> Self { + self.upgrades = upgrades; + self + } +} + +// This allows us to turn on `Default` on InstanceState trait +// which is used in `HotShot` by `TestBuilderImplementation`. 
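+// A usage sketch (hypothetical endpoint and values, not part of this API):
+//     let node_state = NodeState::new(
+//         0,
+//         ChainConfig::default(),
+//         L1Client::new("http://localhost:8545".parse().unwrap(), 10_000),
+//         mock::MockStateCatchup::default(),
+//     )
+//     .with_chain_config(ChainConfig { base_fee: 1.into(), ..Default::default() })
+//     .with_genesis(ValidatedState::default());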
+#[cfg(any(test, feature = "testing"))] +impl Default for NodeState { + fn default() -> Self { + Self::new( + 1u64, + ChainConfig::default(), + L1Client::new("http://localhost:3331".parse().unwrap(), 10000), + mock::MockStateCatchup::default(), + ) + } +} + +impl InstanceState for NodeState {} + +#[cfg(any(test, feature = "testing"))] +pub mod mock { + use std::collections::HashMap; + + use async_trait::async_trait; + use committable::Commitment; + use hotshot_types::data::ViewNumber; + use jf_merkle_tree::{ForgetableMerkleTreeScheme, MerkleTreeScheme}; + + use super::*; + use crate::{ + v0_1::{AccountQueryData, FeeAccountProof}, + BackoffParams, BlockMerkleTree, FeeAccount, FeeMerkleCommitment, + }; + + #[derive(Debug, Clone, Default)] + pub struct MockStateCatchup { + backoff: BackoffParams, + state: HashMap>, + } + + impl FromIterator<(ViewNumber, Arc)> for MockStateCatchup { + fn from_iter)>>(iter: I) -> Self { + Self { + backoff: Default::default(), + state: iter.into_iter().collect(), + } + } + } + + #[async_trait] + impl StateCatchup for MockStateCatchup { + async fn try_fetch_account( + &self, + _height: u64, + view: ViewNumber, + fee_merkle_tree_root: FeeMerkleCommitment, + account: FeeAccount, + ) -> anyhow::Result { + let src = &self.state[&view].fee_merkle_tree; + assert_eq!(src.commitment(), fee_merkle_tree_root); + + tracing::info!("catchup: fetching account {account:?} for view {view:?}"); + Ok(FeeAccountProof::prove(src, account.into()) + .unwrap_or_else(|| panic!("Account {account:?} not in memory")) + .into()) + } + + async fn try_remember_blocks_merkle_tree( + &self, + _height: u64, + view: ViewNumber, + mt: &mut BlockMerkleTree, + ) -> anyhow::Result<()> { + tracing::info!("catchup: fetching frontier for view {view:?}"); + let src = &self.state[&view].block_merkle_tree; + + assert_eq!(src.commitment(), mt.commitment()); + assert!( + src.num_leaves() > 0, + "catchup should not be triggered when blocks tree is empty" + ); + + let index = src.num_leaves() - 1; + let (elem, proof) = src.lookup(index).expect_ok().unwrap(); + mt.remember(index, elem, proof.clone()) + .expect("Proof verifies"); + + Ok(()) + } + + async fn try_fetch_chain_config( + &self, + _commitment: Commitment, + ) -> anyhow::Result { + Ok(ChainConfig::default()) + } + + fn backoff(&self) -> &BackoffParams { + &self.backoff + } + } +} diff --git a/sequencer/src/l1_client.rs b/types/src/v0/impls/l1.rs similarity index 85% rename from sequencer/src/l1_client.rs rename to types/src/v0/impls/l1.rs index f88d7a5443..771160abb2 100644 --- a/sequencer/src/l1_client.rs +++ b/types/src/v0/impls/l1.rs @@ -1,44 +1,21 @@ -//! L1 Client -//! -//! [`L1Client`] defines the interface that Espresso Sequencer nodes use to interact with the L1. -//! Sequencer nodes must read from the L1 to populate Espresso block metadata such as the L1 chain -//! height, which is used to facilitate bridging between the L1 and any rollups which are running on -//! the sequencer. -//! -//! This client runs asynchronously, updating an in-memory snapshot of the relevant L1 information -//! each time a new L1 block is published. This design as a few advantages: -//! * The L1 client is not synchronized with or triggered by HotShot consensus. It can run in pace -//! with the L1, which makes it easy to use a subscription instead of polling for new blocks, -//! vastly reducing the number of L1 RPC calls we make. -//! * HotShot block building does not interact directly with the L1; it simply reads the latest -//! snapshot from the client's memory. 
This means that block production is instant and infallible. -//! Any failures or delays in interacting with the L1 will just slow the updating of the L1 -//! snapshot, which will cause the block builder to propose with a slightly old snapshot, but they -//! will still be able to propose on time. - -use crate::state::FeeInfo; +use std::{ + cmp::{min, Ordering}, + sync::Arc, + time::Duration, +}; + use async_std::task::sleep; use committable::{Commitment, Committable, RawCommitmentBuilder}; use contract_bindings::fee_contract::FeeContract; -use ethers::prelude::*; +use ethers::prelude::{H256, U256, *}; use futures::{ join, stream::{self, StreamExt}, }; -use serde::{Deserialize, Serialize}; -use std::{ - cmp::{min, Ordering}, - sync::Arc, - time::Duration, -}; use url::Url; -#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq)] -pub struct L1BlockInfo { - pub number: u64, - pub timestamp: U256, - pub hash: H256, -} +use super::L1BlockInfo; +use crate::{FeeInfo, L1Client, L1Snapshot}; impl PartialOrd for L1BlockInfo { fn partial_cmp(&self, other: &Self) -> Option { @@ -52,27 +29,6 @@ impl Ord for L1BlockInfo { } } -#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq)] -pub struct L1Snapshot { - /// The relevant snapshot of the L1 includes a reference to the current head of the L1 chain. - /// - /// Note that the L1 head is subject to changing due to a reorg. However, no reorg will change - /// the _number_ of this block in the chain: L1 block numbers will always be sequentially - /// increasing. Therefore, the sequencer does not have to worry about reorgs invalidating this - /// snapshot. - pub head: u64, - - /// The snapshot also includes information about the latest finalized L1 block. - /// - /// Since this block is finalized (ie cannot be reorged) we can include specific information - /// about the particular block, such as its hash and timestamp. - /// - /// This block may be `None` in the rare case where Espresso has started shortly after the - /// genesis of the L1, and the L1 has yet to finalize a block. In all other cases it will be - /// `Some`. - pub finalized: Option, -} - impl Committable for L1BlockInfo { fn commit(&self) -> Commitment { let mut timestamp = [0u8; 32]; @@ -93,14 +49,18 @@ impl Committable for L1BlockInfo { } } -#[derive(Clone, Debug)] -/// An Http Provider and configuration to interact with the L1. -pub struct L1Client { - retry_delay: Duration, - /// `Provider` from `ethers-provider`. - provider: Arc>, - /// Maximum number of L1 blocks that can be scanned for events in a single query. 
- events_max_block_range: u64, +impl L1BlockInfo { + pub fn number(&self) -> u64 { + self.number + } + + pub fn timestamp(&self) -> U256 { + self.timestamp + } + + pub fn hash(&self) -> H256 { + self.hash + } } impl L1Client { @@ -294,12 +254,14 @@ async fn get_finalized_block( #[cfg(test)] mod test { - use super::*; - use crate::NodeState; + use std::ops::Add; + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use contract_bindings::fee_contract::FeeContract; use ethers::utils::{parse_ether, Anvil}; - use std::ops::Add; + + use super::*; + use crate::NodeState; #[async_std::test] async fn test_l1_block_fetching() -> anyhow::Result<()> { diff --git a/types/src/v0/impls/mod.rs b/types/src/v0/impls/mod.rs new file mode 100644 index 0000000000..3940fb0097 --- /dev/null +++ b/types/src/v0/impls/mod.rs @@ -0,0 +1,15 @@ +pub use super::*; + +mod block; +mod chain_config; +mod fee_info; +mod header; +mod instance_state; +mod l1; +mod state; +mod transaction; + +pub use fee_info::FeeError; +pub use header::ProposalValidationError; +pub use instance_state::mock; +pub use state::{validate_proposal, BuilderValidationError, StateValidationError}; diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs new file mode 100644 index 0000000000..681fd92307 --- /dev/null +++ b/types/src/v0/impls/state.rs @@ -0,0 +1,918 @@ +use std::ops::Add; + +use anyhow::bail; +use committable::Committable; +use ethers::types::Address; +use hotshot_query_service::merklized_state::MerklizedState; +use hotshot_types::{ + data::{BlockError, ViewNumber}, + traits::{ + block_contents::BlockHeader, node_implementation::ConsensusTime, + signature_key::BuilderSignatureKey, states::StateDelta, ValidatedState as HotShotState, + }, + vid::{VidCommon, VidSchemeType}, +}; +use itertools::Itertools; +use jf_merkle_tree::{ + prelude::{MerkleProof, Sha3Digest, Sha3Node}, + AppendableMerkleTreeScheme, ForgetableMerkleTreeScheme, ForgetableUniversalMerkleTreeScheme, + LookupResult, MerkleCommitment, MerkleTreeError, MerkleTreeScheme, + PersistentUniversalMerkleTreeScheme, UniversalMerkleTreeScheme, +}; +use jf_vid::VidScheme; +use num_traits::CheckedSub; +use thiserror::Error; +use vbs::version::Version; + +use super::{fee_info::FeeError, header::ProposalValidationError}; +use crate::{ + BlockMerkleTree, ChainConfig, Delta, FeeAccount, FeeAmount, FeeInfo, FeeMerkleTree, Header, + Leaf, NodeState, NsTableValidationError, PayloadByteLen, ResolvableChainConfig, SeqTypes, + UpgradeType, ValidatedState, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, +}; + +/// Possible builder validation failures +#[derive(Error, Debug, Eq, PartialEq)] +pub enum BuilderValidationError { + #[error("Builder signature not found")] + SignatureNotFound, + #[error("Fee amount out of range: {0}")] + FeeAmountOutOfRange(FeeAmount), + #[error("Invalid Builder Signature")] + InvalidBuilderSignature, +} + +/// This enum is not used in code but functions as an index of +/// possible validation errors. 
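+// (Sketch of the intended correspondence, not used at runtime:
+//     StateValidationError::ProposalValidation(_) // from `validate_proposal`
+//     StateValidationError::BuilderValidation(_)  // from `validate_builder_fee`
+//     StateValidationError::Fee(_)                // from `charge_fee`
+// )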
+#[allow(dead_code)] +pub enum StateValidationError { + ProposalValidation(ProposalValidationError), + BuilderValidation(BuilderValidationError), + Fee(FeeError), +} + +impl StateDelta for Delta {} + +impl Default for ValidatedState { + fn default() -> Self { + let block_merkle_tree = + BlockMerkleTree::from_elems(Some(BLOCK_MERKLE_TREE_HEIGHT), Vec::<[u8; 32]>::new()) + .unwrap(); + + // Words of wisdom from @mrain: "capacity = arity^height" + // "For index space 2^160, arity 256 (2^8), + // you should set the height as 160/8=20" + let fee_merkle_tree = FeeMerkleTree::from_kv_set( + FEE_MERKLE_TREE_HEIGHT, + Vec::<(FeeAccount, FeeAmount)>::new(), + ) + .unwrap(); + + let chain_config = ResolvableChainConfig::from(ChainConfig::default()); + + Self { + block_merkle_tree, + fee_merkle_tree, + chain_config, + } + } +} + +impl ValidatedState { + /// Prefund an account with a given amount. Only for demo purposes. + pub fn prefund_account(&mut self, account: FeeAccount, amount: FeeAmount) { + self.fee_merkle_tree.update(account, amount).unwrap(); + } + + pub fn balance(&mut self, account: FeeAccount) -> Option { + match self.fee_merkle_tree.lookup(account) { + LookupResult::Ok(balance, _) => Some(*balance), + LookupResult::NotFound(_) => Some(0.into()), + LookupResult::NotInMemory => None, + } + } + + /// Find accounts that are not in memory. + /// + /// As an optimization we could try to apply updates and return the + /// forgotten accounts to be fetched from peers and update them later. + pub fn forgotten_accounts( + &self, + accounts: impl IntoIterator, + ) -> Vec { + accounts + .into_iter() + .unique() + .filter(|account| { + self.fee_merkle_tree + .lookup(*account) + .expect_not_in_memory() + .is_ok() + }) + .collect() + } + + /// Check if the merkle tree is available + pub fn need_to_fetch_blocks_mt_frontier(&self) -> bool { + let num_leaves = self.block_merkle_tree.num_leaves(); + if num_leaves == 0 { + false + } else { + self.block_merkle_tree + .lookup(num_leaves - 1) + .expect_ok() + .is_err() + } + } + + /// Insert a fee deposit receipt + pub fn insert_fee_deposit( + &mut self, + fee_info: FeeInfo, + ) -> anyhow::Result> { + Ok(self + .fee_merkle_tree + .update_with(fee_info.account, |balance| { + Some(balance.cloned().unwrap_or_default().add(fee_info.amount)) + })?) + } + + /// Charge a fee to an account, transferring the funds to the fee recipient account. + pub fn charge_fee(&mut self, fee_info: FeeInfo, recipient: FeeAccount) -> Result<(), FeeError> { + let fee_state = self.fee_merkle_tree.clone(); + + // Deduct the fee from the paying account. + let FeeInfo { account, amount } = fee_info; + let mut err = None; + let fee_state = fee_state.persistent_update_with(account, |balance| { + let balance = balance.copied(); + let Some(updated) = balance.unwrap_or_default().checked_sub(&amount) else { + // Return an error without updating the account. + err = Some(FeeError::InsufficientFunds { balance, amount }); + return balance; + }; + if updated == FeeAmount::default() { + // Delete the account from the tree if its balance ended up at 0; this saves some + // space since the account is no longer carrying any information. + None + } else { + // Otherwise store the updated balance. + Some(updated) + } + })?; + + // Fail if there was an error during `persistent_update_with` (e.g. insufficient balance). + if let Some(err) = err { + return Err(err); + } + + // If we successfully deducted the fee from the source account, increment the balance of the + // recipient account. 
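+ // (Behavioral sketch, assuming a prefunded builder:
+ //     state.prefund_account(builder, 100.into());
+ //     state.charge_fee(FeeInfo { account: builder, amount: 40.into() }, recipient)?;
+ // leaves the builder at 60 and the recipient at 40, while an over-charge
+ // returns `FeeError::InsufficientFunds` and updates neither balance.)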
+ let fee_state = fee_state.persistent_update_with(recipient, |balance| { + Some(balance.copied().unwrap_or_default() + amount) + })?; + + // If the whole update was successful, update the original state. + self.fee_merkle_tree = fee_state; + Ok(()) + } +} + +#[cfg(any(test, feature = "testing"))] +impl ValidatedState { + pub fn forget(&self) -> Self { + Self { + fee_merkle_tree: FeeMerkleTree::from_commitment(self.fee_merkle_tree.commitment()), + block_merkle_tree: BlockMerkleTree::from_commitment( + self.block_merkle_tree.commitment(), + ), + chain_config: ResolvableChainConfig::from(self.chain_config.commit()), + } + } +} + +impl From for ProposalValidationError { + fn from(err: NsTableValidationError) -> Self { + Self::InvalidNsTable { err } + } +} + +pub fn validate_proposal( + state: &ValidatedState, + expected_chain_config: ChainConfig, + parent_leaf: &Leaf, + proposal: &Header, + vid_common: &VidCommon, +) -> Result<(), ProposalValidationError> { + let parent_header = parent_leaf.block_header(); + + // validate `ChainConfig` + if proposal.chain_config().commit() != expected_chain_config.commit() { + return Err(ProposalValidationError::InvalidChainConfig { + expected: format!("{:?}", expected_chain_config), + proposal: format!("{:?}", proposal.chain_config()), + }); + } + + // validate block size and fee + let block_size = VidSchemeType::get_payload_byte_len(vid_common) as u64; + if block_size > *expected_chain_config.max_block_size { + return Err(ProposalValidationError::MaxBlockSizeExceeded { + max_block_size: expected_chain_config.max_block_size, + block_size: block_size.into(), + }); + } + + if proposal.fee_info().amount() < expected_chain_config.base_fee * block_size { + return Err(ProposalValidationError::InsufficientFee { + max_block_size: expected_chain_config.max_block_size, + base_fee: expected_chain_config.base_fee, + proposed_fee: proposal.fee_info().amount(), + }); + } + + // validate height + if proposal.height() != parent_header.height() + 1 { + return Err(ProposalValidationError::InvalidHeight { + parent_height: parent_header.height(), + proposal_height: proposal.height(), + }); + } + + let ValidatedState { + block_merkle_tree, + fee_merkle_tree, + .. + } = state; + + let block_merkle_tree_root = block_merkle_tree.commitment(); + if proposal.block_merkle_tree_root() != block_merkle_tree_root { + return Err(ProposalValidationError::InvalidBlockRoot { + expected_root: block_merkle_tree_root, + proposal_root: proposal.block_merkle_tree_root(), + }); + } + + let fee_merkle_tree_root = fee_merkle_tree.commitment(); + if proposal.fee_merkle_tree_root() != fee_merkle_tree_root { + return Err(ProposalValidationError::InvalidFeeRoot { + expected_root: fee_merkle_tree_root, + proposal_root: proposal.fee_merkle_tree_root(), + }); + } + + proposal + .ns_table() + .validate(&PayloadByteLen::from_vid_common(vid_common))?; + + Ok(()) +} + +impl From for FeeError { + fn from(item: MerkleTreeError) -> Self { + Self::MerkleTreeError(item) + } +} + +fn charge_fee( + state: &mut ValidatedState, + delta: &mut Delta, + fee_info: FeeInfo, + recipient: FeeAccount, +) -> Result<(), FeeError> { + state.charge_fee(fee_info, recipient)?; + delta.fees_delta.extend([fee_info.account, recipient]); + Ok(()) +} + +/// Validate builder account by verifying signature +fn validate_builder_fee(proposed_header: &Header) -> Result<(), BuilderValidationError> { + // Beware of Malice! 
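+ // (Builder-side sketch of what is checked here: the builder signs over the
+ // fee amount, the namespace table, and the payload commitment, as
+ // exercised elsewhere in this patch:
+ //     let sig = FeeAccount::sign_fee(&key_pair, fee_amount, &ns_table, &payload_commitment)?;
+ // Validation below recomputes that tuple from the proposed header itself.)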
+ let signature = proposed_header + .builder_signature() + .ok_or(BuilderValidationError::SignatureNotFound)?; + let fee_amount = proposed_header.fee_info().amount().as_u64().ok_or( + BuilderValidationError::FeeAmountOutOfRange(proposed_header.fee_info().amount()), + )?; + + // verify signature + if !proposed_header.fee_info().account.validate_fee_signature( + &signature, + fee_amount, + proposed_header.metadata(), + &proposed_header.payload_commitment(), + ) { + return Err(BuilderValidationError::InvalidBuilderSignature); + } + + Ok(()) +} + +impl ValidatedState { + pub async fn apply_header( + &self, + instance: &NodeState, + parent_leaf: &Leaf, + proposed_header: &Header, + version: Version, + ) -> anyhow::Result<(Self, Delta)> { + // Clone the state to avoid mutation; the consumer can take the update + // from the returned value. + + let mut validated_state = self.clone(); + validated_state.apply_upgrade(instance, version); + + let chain_config = validated_state + .get_chain_config(instance, proposed_header.chain_config()) + .await?; + + if Some(chain_config) != validated_state.chain_config.resolve() { + validated_state.chain_config = chain_config.into(); + } + + let l1_deposits = get_l1_deposits( + instance, + proposed_header, + parent_leaf, + chain_config.fee_contract, + ) + .await; + + // Find missing fee state entries. We will need to use the builder account which is paying a + // fee and the recipient account which is receiving it, plus any accounts receiving deposits + // in this block. + let missing_accounts = self.forgotten_accounts( + [ + proposed_header.fee_info().account, + chain_config.fee_recipient, + ] + .into_iter() + .chain(l1_deposits.iter().map(|fee_info| fee_info.account)), + ); + + let parent_height = parent_leaf.height(); + let parent_view = parent_leaf.view_number(); + + // Ensure merkle tree has frontier + if self.need_to_fetch_blocks_mt_frontier() { + tracing::info!( + parent_height, + ?parent_view, + "fetching block frontier from peers" + ); + instance + .peers + .as_ref() + .remember_blocks_merkle_tree( + parent_height, + parent_view, + &mut validated_state.block_merkle_tree, + ) + .await?; + } + + // Fetch missing fee state entries + if !missing_accounts.is_empty() { + tracing::info!( + parent_height, + ?parent_view, + ?missing_accounts, + "fetching missing accounts from peers" + ); + + let missing_account_proofs = instance + .peers + .as_ref() + .fetch_accounts( + parent_height, + parent_view, + validated_state.fee_merkle_tree.commitment(), + missing_accounts, + ) + .await?; + + // Remember the fee state entries + for account in missing_account_proofs.iter() { + account + .proof + .remember(&mut validated_state.fee_merkle_tree) + .expect("proof previously verified"); + } + } + + let mut delta = Delta::default(); + + let mut validated_state = + apply_proposal(&validated_state, &mut delta, parent_leaf, l1_deposits); + + charge_fee( + &mut validated_state, + &mut delta, + proposed_header.fee_info(), + chain_config.fee_recipient, + )?; + + Ok((validated_state, delta)) + } + + /// Updates the `ValidatedState` if a protocol upgrade has occurred. 
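+ // (Gating sketch: an upgrade registered in `instance.upgrades` under, say,
+ // `Version { major: 0, minor: 2 }` takes effect only once proposals carry
+ // `version > instance.current_version`; otherwise `apply_upgrade` returns
+ // early and the state is left unchanged.)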
+ pub(crate) fn apply_upgrade(&mut self, instance: &NodeState, version: Version) { + // Check for protocol upgrade based on sequencer version + if version <= instance.current_version { + return; + } + + let Some(upgrade) = instance.upgrades.get(&version) else { + return; + }; + + match upgrade.upgrade_type { + UpgradeType::ChainConfig { chain_config } => { + self.chain_config = chain_config.into(); + } + } + } + + /// Retrieves the `ChainConfig`. + /// + /// Returns the `NodeState` `ChainConfig` if the `ValidatedState` `ChainConfig` commitment matches the `NodeState` `ChainConfig` commitment. + /// If the commitments do not match, it returns the `ChainConfig` available in either the `ValidatedState` or the proposed header. + /// If neither has the `ChainConfig`, it fetches the config from the peers. + /// + /// Returns an error if it fails to fetch the `ChainConfig` from the peers. + pub(crate) async fn get_chain_config( + &self, + instance: &NodeState, + header_cf: &ResolvableChainConfig, + ) -> anyhow::Result<ChainConfig> { + let state_cf = self.chain_config; + + if state_cf.commit() == instance.chain_config.commit() { + return Ok(instance.chain_config); + } + + let cf = match (state_cf.resolve(), header_cf.resolve()) { + (Some(cf), _) => cf, + (_, Some(cf)) if cf.commit() == state_cf.commit() => cf, + (_, Some(_)) | (None, None) => { + instance + .peers + .as_ref() + .fetch_chain_config(state_cf.commit()) + .await + } + }; + + Ok(cf) + } +} + +pub async fn get_l1_deposits( + instance: &NodeState, + header: &Header, + parent_leaf: &Leaf, + fee_contract_address: Option<Address>
, +) -> Vec<FeeInfo> { + if let (Some(addr), Some(block_info)) = (fee_contract_address, header.l1_finalized()) { + instance + .l1_client + .get_finalized_deposits( + addr, + parent_leaf + .block_header() + .l1_finalized() + .map(|block_info| block_info.number), + block_info.number, + ) + .await + } else { + vec![] + } +} + +#[must_use] +fn apply_proposal( + validated_state: &ValidatedState, + delta: &mut Delta, + parent_leaf: &Leaf, + l1_deposits: Vec<FeeInfo>, +) -> ValidatedState { + let mut validated_state = validated_state.clone(); + // pushing a block into merkle tree shouldn't fail + validated_state + .block_merkle_tree + .push(parent_leaf.block_header().commit().as_ref()) + .unwrap(); + + for FeeInfo { account, amount } in l1_deposits.iter() { + validated_state + .fee_merkle_tree + .update_with(account, |balance| { + Some(balance.cloned().unwrap_or_default().add(*amount)) + }) + .expect("update_with succeeds"); + delta.fees_delta.insert(*account); + } + + validated_state +} + +impl HotShotState<SeqTypes> for ValidatedState { + type Error = BlockError; + type Instance = NodeState; + + type Time = ViewNumber; + + type Delta = Delta; + fn on_commit(&self) {} + /// Validate parent against known values (from state) and validate + /// proposal descends from parent. Returns updated `ValidatedState`. + #[tracing::instrument( + skip_all, + fields( + node_id = instance.node_id, + view = ?parent_leaf.view_number(), + height = parent_leaf.height(), + ), + )] + async fn validate_and_apply_header( + &self, + instance: &Self::Instance, + parent_leaf: &Leaf, + proposed_header: &Header, + vid_common: VidCommon, + version: Version, + ) -> Result<(Self, Self::Delta), Self::Error> { + // validate builder fee + if let Err(err) = validate_builder_fee(proposed_header) { + tracing::error!("invalid builder fee: {err:#}"); + return Err(BlockError::InvalidBlockHeader); + } + + // Unwrapping here is okay: we retry in a loop, so we either get a validated + // state or keep retrying until HotShot cancels the task. + let (validated_state, delta) = self + .apply_header(instance, parent_leaf, proposed_header, version) + .await + .unwrap(); + + let chain_config = validated_state + .chain_config + .resolve() + .expect("Chain Config not found in validated state"); + + // validate the proposal + if let Err(err) = validate_proposal( + &validated_state, + chain_config, + parent_leaf, + proposed_header, + &vid_common, + ) { + tracing::error!("invalid proposal: {err:#}"); + return Err(BlockError::InvalidBlockHeader); + } + + // log successful progress about once in 10 - 20 seconds, + // TODO: we may want to make this configurable + if parent_leaf.view_number().u64() % 10 == 0 { + tracing::info!("validated and applied new header"); + } + Ok((validated_state, delta)) + } + /// Construct the state with the given block header. + /// + /// This can also be used to rebuild the state for catchup. + fn from_header(block_header: &Header) -> Self { + let fee_merkle_tree = if block_header.fee_merkle_tree_root().size() == 0 { + // If the commitment tells us that the tree is supposed to be empty, it is convenient to + // just create an empty tree, rather than a commitment-only tree. + FeeMerkleTree::new(FEE_MERKLE_TREE_HEIGHT) + } else { + FeeMerkleTree::from_commitment(block_header.fee_merkle_tree_root()) + }; + let block_merkle_tree = if block_header.block_merkle_tree_root().size() == 0 { + // If the commitment tells us that the tree is supposed to be empty, it is convenient to + // just create an empty tree, rather than a commitment-only tree. 
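+ // (Sketch of the sparse alternative used in the else-branches: a
+ // commitment-only tree answers every query with
+ // `LookupResult::NotInMemory` until paths are remembered,
+ //     let sparse = FeeMerkleTree::from_commitment(block_header.fee_merkle_tree_root());
+ // which is exactly the gap that catchup repairs via
+ // `FeeAccountProof::remember` and `remember_blocks_merkle_tree`.)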
+ BlockMerkleTree::new(BLOCK_MERKLE_TREE_HEIGHT) + } else { + BlockMerkleTree::from_commitment(block_header.block_merkle_tree_root()) + }; + Self { + fee_merkle_tree, + block_merkle_tree, + chain_config: *block_header.chain_config(), + } + } + /// Construct a genesis validated state. + #[must_use] + fn genesis(instance: &Self::Instance) -> (Self, Self::Delta) { + (instance.genesis_state.clone(), Delta::default()) + } +} + +// Required for TestableState +#[cfg(any(test, feature = "testing"))] +impl std::fmt::Display for ValidatedState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{self:#?}") + } +} + +#[cfg(any(test, feature = "testing"))] +impl hotshot_types::traits::states::TestableState for ValidatedState { + fn create_random_transaction( + _state: Option<&Self>, + rng: &mut dyn rand::RngCore, + _padding: u64, + ) -> crate::Transaction { + crate::Transaction::random(rng) + } +} + +impl MerklizedState for BlockMerkleTree { + type Key = Self::Index; + type Entry = [u8; 32]; + type T = Sha3Node; + type Commit = Self::Commitment; + type Digest = Sha3Digest; + + fn state_type() -> &'static str { + "block_merkle_tree" + } + + fn header_state_commitment_field() -> &'static str { + "block_merkle_tree_root" + } + + fn tree_height() -> usize { + BLOCK_MERKLE_TREE_HEIGHT + } + + fn insert_path( + &mut self, + key: Self::Key, + proof: &MerkleProof, + ) -> anyhow::Result<()> { + let Some(elem) = proof.elem() else { + bail!("BlockMerkleTree does not support non-membership proofs"); + }; + self.remember(key, elem, proof)?; + Ok(()) + } +} + +impl MerklizedState for FeeMerkleTree { + type Key = Self::Index; + type Entry = Self::Element; + type T = Sha3Node; + type Commit = Self::Commitment; + type Digest = Sha3Digest; + + fn state_type() -> &'static str { + "fee_merkle_tree" + } + + fn header_state_commitment_field() -> &'static str { + "fee_merkle_tree_root" + } + + fn tree_height() -> usize { + FEE_MERKLE_TREE_HEIGHT + } + + fn insert_path( + &mut self, + key: Self::Key, + proof: &MerkleProof, + ) -> anyhow::Result<()> { + match proof.elem() { + Some(elem) => self.remember(key, elem, proof)?, + None => self.non_membership_remember(key, proof)?, + } + Ok(()) + } +} + +#[cfg(test)] +mod test { + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use ethers::types::U256; + use hotshot_types::vid::vid_scheme; + use jf_vid::VidScheme; + use sequencer_utils::ser::FromStringOrInteger; + + use super::*; + use crate::{BlockSize, FeeAccountProof, FeeMerkleProof}; + + #[test] + fn test_fee_proofs() { + setup_logging(); + setup_backtrace(); + + let mut tree = ValidatedState::default().fee_merkle_tree; + let account1 = Address::random(); + let account2 = Address::default(); + tracing::info!(%account1, %account2); + + let balance1 = U256::from(100); + tree.update(FeeAccount(account1), FeeAmount(balance1)) + .unwrap(); + + // Membership proof. + let (proof1, balance) = FeeAccountProof::prove(&tree, account1).unwrap(); + tracing::info!(?proof1, %balance); + assert_eq!(balance, balance1); + assert!(matches!(proof1.proof, FeeMerkleProof::Presence(_))); + assert_eq!(proof1.verify(&tree.commitment()).unwrap(), balance1); + + // Non-membership proof. + let (proof2, balance) = FeeAccountProof::prove(&tree, account2).unwrap(); + tracing::info!(?proof2, %balance); + assert_eq!(balance, 0.into()); + assert!(matches!(proof2.proof, FeeMerkleProof::Absence(_))); + assert_eq!(proof2.verify(&tree.commitment()).unwrap(), 0.into()); + + // Test forget/remember. 
We cannot generate proofs in a completely sparse tree: + let mut tree = FeeMerkleTree::from_commitment(tree.commitment()); + assert!(FeeAccountProof::prove(&tree, account1).is_none()); + assert!(FeeAccountProof::prove(&tree, account2).is_none()); + // After remembering the proofs, we can generate proofs again: + proof1.remember(&mut tree).unwrap(); + proof2.remember(&mut tree).unwrap(); + FeeAccountProof::prove(&tree, account1).unwrap(); + FeeAccountProof::prove(&tree, account2).unwrap(); + } + + #[async_std::test] + async fn test_validation_max_block_size() { + setup_logging(); + setup_backtrace(); + + const MAX_BLOCK_SIZE: usize = 10; + let payload = [0; 2 * MAX_BLOCK_SIZE]; + let vid_common = vid_scheme(1).disperse(payload).unwrap().common; + + let state = ValidatedState::default(); + let instance = NodeState::mock().with_chain_config(ChainConfig { + max_block_size: (MAX_BLOCK_SIZE as u64).into(), + base_fee: 0.into(), + ..Default::default() + }); + let parent = Leaf::genesis(&instance.genesis_state, &instance).await; + let header = parent.block_header(); + + // Validation fails because the proposed block exceeds the maximum block size. + let err = validate_proposal(&state, instance.chain_config, &parent, header, &vid_common) + .unwrap_err(); + + tracing::info!(%err, "task failed successfully"); + assert_eq!( + ProposalValidationError::MaxBlockSizeExceeded { + max_block_size: instance.chain_config.max_block_size, + block_size: BlockSize::from_integer( + VidSchemeType::get_payload_byte_len(&vid_common).into() + ) + .unwrap() + }, + err + ); + } + + #[async_std::test] + async fn test_validation_base_fee() { + setup_logging(); + setup_backtrace(); + + let max_block_size = 10; + let payload = [0; 1]; + let vid_common = vid_scheme(1).disperse(payload).unwrap().common; + + let state = ValidatedState::default(); + let instance = NodeState::mock().with_chain_config(ChainConfig { + base_fee: 1000.into(), // High base fee + max_block_size: max_block_size.into(), + ..Default::default() + }); + let parent = Leaf::genesis(&instance.genesis_state, &instance).await; + let header = parent.block_header(); + + // Validation fails because the genesis fee (0) is too low. 
+ let err = validate_proposal(&state, instance.chain_config, &parent, header, &vid_common) + .unwrap_err(); + + tracing::info!(%err, "task failed successfully"); + assert_eq!( + ProposalValidationError::InsufficientFee { + max_block_size: instance.chain_config.max_block_size, + base_fee: instance.chain_config.base_fee, + proposed_fee: header.fee_info().amount() + }, + err + ); + } + + #[test] + fn test_charge_fee() { + setup_logging(); + setup_backtrace(); + + let src = FeeAccount::generated_from_seed_indexed([0; 32], 0).0; + let dst = FeeAccount::generated_from_seed_indexed([0; 32], 1).0; + let amt = FeeAmount::from(1); + + let fee_info = FeeInfo::new(src, amt); + + let new_state = || { + let mut state = ValidatedState::default(); + state.prefund_account(src, amt); + state + }; + + tracing::info!("test successful fee"); + let mut state = new_state(); + state.charge_fee(fee_info, dst).unwrap(); + assert_eq!(state.balance(src), Some(0.into())); + assert_eq!(state.balance(dst), Some(amt)); + + tracing::info!("test insufficient balance"); + let err = state.charge_fee(fee_info, dst).unwrap_err(); + assert_eq!(state.balance(src), Some(0.into())); + assert_eq!(state.balance(dst), Some(amt)); + assert_eq!( + FeeError::InsufficientFunds { + balance: None, + amount: amt + }, + err + ); + + tracing::info!("test src not in memory"); + let mut state = new_state(); + state.fee_merkle_tree.forget(src).expect_ok().unwrap(); + assert_eq!( + FeeError::MerkleTreeError(MerkleTreeError::ForgottenLeaf), + state.charge_fee(fee_info, dst).unwrap_err() + ); + + tracing::info!("test dst not in memory"); + let mut state = new_state(); + state.prefund_account(dst, amt); + state.fee_merkle_tree.forget(dst).expect_ok().unwrap(); + assert_eq!( + FeeError::MerkleTreeError(MerkleTreeError::ForgottenLeaf), + state.charge_fee(fee_info, dst).unwrap_err() + ); + } + + #[test] + fn test_fee_amount_serde_json_as_decimal() { + let amt = FeeAmount::from(123); + let serialized = serde_json::to_string(&amt).unwrap(); + + // The value is serialized as a decimal string. + assert_eq!(serialized, "\"123\""); + + // Deserialization produces the original value + let deserialized: FeeAmount = serde_json::from_str(&serialized).unwrap(); + assert_eq!(deserialized, amt); + } + + #[test] + fn test_fee_amount_from_units() { + for (unit, multiplier) in [ + ("wei", 1), + ("gwei", 1_000_000_000), + ("eth", 1_000_000_000_000_000_000), + ] { + let amt: FeeAmount = serde_json::from_str(&format!("\"1 {unit}\"")).unwrap(); + assert_eq!(amt, multiplier.into()); + } + } + + #[test] + fn test_fee_amount_serde_json_from_hex() { + // For backwards compatibility, fee amounts can also be deserialized from a 0x-prefixed hex + // string. + let amt: FeeAmount = serde_json::from_str("\"0x123\"").unwrap(); + assert_eq!(amt, FeeAmount::from(0x123)); + } + + #[test] + fn test_fee_amount_serde_json_from_number() { + // For convenience, fee amounts can also be deserialized from a JSON number. + let amt: FeeAmount = serde_json::from_str("123").unwrap(); + assert_eq!(amt, FeeAmount::from(123)); + } + + #[test] + fn test_fee_amount_serde_bincode_unchanged() { + // For non-human-readable formats, FeeAmount just serializes as the underlying U256. 
+ let n = U256::from(123); + let amt = FeeAmount(n); + assert_eq!( + bincode::serialize(&n).unwrap(), + bincode::serialize(&amt).unwrap(), + ); + } +} diff --git a/sequencer/src/transaction.rs b/types/src/v0/impls/transaction.rs similarity index 59% rename from sequencer/src/transaction.rs rename to types/src/v0/impls/transaction.rs index 1df957abe9..32c2cc0c0d 100644 --- a/sequencer/src/transaction.rs +++ b/types/src/v0/impls/transaction.rs @@ -1,41 +1,9 @@ -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use committable::{Commitment, Committable}; -use derive_more::Display; use hotshot_query_service::explorer::ExplorerTransaction; use hotshot_types::traits::block_contents::Transaction as HotShotTransaction; -use serde::{de::Error, Deserialize, Deserializer, Serialize}; +use serde::{de::Error, Deserialize, Deserializer}; -/// TODO [`NamespaceId`] has historical debt to repay: -/// - -/// - It must fit into 4 bytes in order to maintain serialization compatibility -/// for [`crate::block::NsTable`], yet it currently occupies 8 bytes in order -/// to maintain [`serde`] serialization compatibility with [`Transaction`]. -/// - Thus, it's a newtype for `u64` that impls `From` and has a manual -/// impl for [`serde::Deserialize`] that deserializes a `u64` but then returns -/// an error if the value cannot fit into a `u32`. This is ugly. In the future -/// we need to break serialization compatibility so that `NsTable` and -/// `Transaction` can agree on the byte length for `NamespaceId` and all this -/// cruft should be removed. -/// - We should move [`NamespaceId`] to `crate::block::full_payload::ns_table` -/// module because that's where it's byte length is dictated, so that's where -/// it makes the most sense to put serialization. See -/// -#[derive( - Clone, - Copy, - Serialize, - Debug, - Display, - PartialEq, - Eq, - Hash, - CanonicalDeserialize, - CanonicalSerialize, - PartialOrd, - Ord, -)] -#[display(fmt = "{_0}")] -pub struct NamespaceId(u64); +use crate::{NamespaceId, Transaction}; impl From for NamespaceId { fn from(value: u32) -> Self { @@ -75,23 +43,6 @@ impl NamespaceId { } } -#[derive( - Clone, - Serialize, - Deserialize, - Debug, - PartialEq, - Eq, - Hash, - CanonicalSerialize, - CanonicalDeserialize, -)] -pub struct Transaction { - namespace: NamespaceId, - #[serde(with = "base64_bytes")] - payload: Vec, -} - impl Transaction { pub fn new(namespace: NamespaceId, payload: Vec) -> Self { Self { namespace, payload } diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs new file mode 100644 index 0000000000..bd9bb77473 --- /dev/null +++ b/types/src/v0/mod.rs @@ -0,0 +1,154 @@ +use hotshot::traits::election::static_committee::GeneralStaticCommittee; +use hotshot_types::{ + data::ViewNumber, + signature_key::BLSPubKey, + traits::{node_implementation::NodeType, signature_key::SignatureKey}, +}; +use serde::{Deserialize, Serialize}; + +mod error; +mod header; +mod impls; +pub mod traits; +mod utils; +pub use error::*; +pub use header::Header; +pub use impls::{ + mock, validate_proposal, BuilderValidationError, FeeError, ProposalValidationError, + StateValidationError, +}; +pub use utils::*; +use vbs::version::StaticVersion; + +// This is the single source of truth for minor versions supported by this major version. +// +// It is written as a higher-level macro which takes a macro invocation as an argument and appends +// the comma-separated list of minor version identifiers to the arguments of the given invocation. 
+// This is to get around Rust's lazy macro expansion: this macro forces expansion of the given +// invocation. We would rather write something like `some_macro!(args, minor_versions!())`, but the +// `minor_versions!()` argument would not be expanded for pattern-matching in `some_macro!`, so +// instead we write `with_minor_versions!(some_macro!(args))`. +macro_rules! with_minor_versions { + ($m:ident!($($arg:tt),*)) => { + $m!($($arg,)* v0_1, v0_2, v0_3); + }; +} + +// Define sub-modules for each supported minor version. +macro_rules! define_modules { + ($($m:ident),+) => { + $(pub mod $m;)+ + }; +} +with_minor_versions!(define_modules!()); + +macro_rules! assert_eq_all_versions_of_type { + ($t:ident, $($m:ident),+) => { + static_assertions::assert_type_eq_all!($($m::$t),+); + }; +} + +macro_rules! reexport_latest_version_of_type { + ($t:ident, $m:ident) => { pub use $m::$t; }; + ($t:ident, $m1:ident, $($m:ident),+) => { + reexport_latest_version_of_type!($t, $($m),+); + } +} + +/// Re-export types which have not changed across any minor version. +macro_rules! reexport_unchanged_types { + ($($t:ident),+ $(,)?) => { + $( + with_minor_versions!(assert_eq_all_versions_of_type!($t)); + with_minor_versions!(reexport_latest_version_of_type!($t)); + )+ + } +} +reexport_unchanged_types!( + AccountQueryData, + BlockMerkleCommitment, + BlockMerkleTree, + BuilderSignature, + ChainConfig, + ChainId, + Delta, + FeeAccount, + FeeAccountProof, + FeeAmount, + FeeInfo, + FeeMerkleCommitment, + FeeMerkleProof, + FeeMerkleTree, + Index, + Iter, + L1BlockInfo, + L1Client, + L1Snapshot, + NamespaceId, + NodeState, + NsIndex, + NsIter, + NsPayload, + NsPayloadBuilder, + NsPayloadByteLen, + NsPayloadOwned, + NsPayloadRange, + NsProof, + NsTable, + NsTableBuilder, + NsTableValidationError, + NumNss, + NumTxs, + NumTxsRange, + NumTxsUnchecked, + Payload, + PayloadByteLen, + ResolvableChainConfig, + Transaction, + TxIndex, + TxIter, + TxPayload, + TxPayloadRange, + TxProof, + TxTableEntries, + TxTableEntriesRange, + Upgrade, + UpgradeType, + ValidatedState, + BlockSize, +); + +#[derive( + Clone, Copy, Debug, Default, Hash, Eq, PartialEq, PartialOrd, Ord, Deserialize, Serialize, +)] +pub struct SeqTypes; + +impl NodeType for SeqTypes { + type Time = ViewNumber; + type BlockHeader = Header; + type BlockPayload = Payload; + type SignatureKey = PubKey; + type Transaction = Transaction; + type InstanceState = NodeState; + type ValidatedState = ValidatedState; + type Membership = GeneralStaticCommittee; + type BuilderSignatureKey = FeeAccount; + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, + ]; +} +pub type Leaf = hotshot_types::data::Leaf; +pub type Event = hotshot::types::Event; + +pub type PubKey = BLSPubKey; +pub type PrivKey = ::PrivateKey; + +pub type NetworkConfig = hotshot_orchestrator::config::NetworkConfig; + +pub use crate::v0_1::{ + BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, + NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, +}; diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs new file mode 100644 index 0000000000..ee976453ad --- /dev/null +++ b/types/src/v0/traits.rs @@ -0,0 +1,610 @@ +//! This module contains all the traits used for building the sequencer types. +//! It also includes some trait implementations that cannot be implemented in an external crate. 
+use std::{cmp::max, collections::BTreeMap, ops::Range, sync::Arc};
+
+use anyhow::{bail, ensure, Context};
+use async_std::sync::RwLock;
+use async_trait::async_trait;
+use committable::{Commitment, Committable};
+use futures::{FutureExt, TryFutureExt};
+use hotshot::{types::EventType, HotShotInitializer};
+use hotshot_types::{
+    consensus::CommitmentMap,
+    data::{DaProposal, QuorumProposal, VidDisperseShare, ViewNumber},
+    event::{HotShotAction, LeafInfo},
+    message::Proposal,
+    simple_certificate::QuorumCertificate,
+    traits::{
+        node_implementation::ConsensusTime, storage::Storage, ValidatedState as HotShotState,
+    },
+    utils::View,
+};
+use serde::{de::DeserializeOwned, Serialize};
+
+use crate::{
+    AccountQueryData, BackoffParams, BlockMerkleTree, ChainConfig, Event, FeeAccount,
+    FeeMerkleCommitment, Leaf, NetworkConfig, NodeState, SeqTypes, ValidatedState,
+};
+
+#[async_trait]
+pub trait StateCatchup: Send + Sync + std::fmt::Debug {
+    /// Try to fetch the given account state, failing without retrying if unable.
+    async fn try_fetch_account(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        account: FeeAccount,
+    ) -> anyhow::Result<AccountQueryData>;
+
+    /// Fetch the given list of accounts, retrying on transient errors.
+    async fn fetch_accounts(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        accounts: Vec<FeeAccount>,
+    ) -> anyhow::Result<Vec<AccountQueryData>> {
+        let mut ret = vec![];
+        for account in accounts {
+            let account = self
+                .backoff()
+                .retry(self, |provider| {
+                    provider
+                        .try_fetch_account(height, view, fee_merkle_tree_root, account)
+                        .map_err(|err| err.context(format!("fetching account {account}")))
+                        .boxed()
+                })
+                .await;
+            ret.push(account);
+        }
+        Ok(ret)
+    }
+
+    /// Try to fetch and remember the blocks frontier, failing without retrying if unable.
+    async fn try_remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()>;
+
+    /// Fetch and remember the blocks frontier, retrying on transient errors.
+    async fn remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        self.backoff()
+            .retry(mt, |mt| {
+                self.try_remember_blocks_merkle_tree(height, view, mt)
+                    .map_err(|err| err.context("fetching frontier"))
+                    .boxed()
+            })
+            .await;
+        Ok(())
+    }
+
+    async fn try_fetch_chain_config(
+        &self,
+        commitment: Commitment<ChainConfig>,
+    ) -> anyhow::Result<ChainConfig>;
+
+    async fn fetch_chain_config(&self, commitment: Commitment<ChainConfig>) -> ChainConfig {
+        self.backoff()
+            .retry(self, |provider| {
+                provider
+                    .try_fetch_chain_config(commitment)
+                    .map_err(|err| err.context("fetching chain config"))
+                    .boxed()
+            })
+            .await
+    }
+
+    fn backoff(&self) -> &BackoffParams;
+}
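For illustration, a minimal provider only has to supply the three `try_*` methods and `backoff()`; the retrying `fetch_*` wrappers come for free from the default methods above. The sketch below assumes the surrounding module's imports; `HashMapCatchup` is a hypothetical example type, not part of this patch.

```rust
// Sketch of a trivial in-memory StateCatchup provider (hypothetical type).
use std::collections::HashMap;

#[derive(Debug)]
struct HashMapCatchup {
    chain_configs: HashMap<Commitment<ChainConfig>, ChainConfig>,
    backoff: BackoffParams,
}

#[async_trait]
impl StateCatchup for HashMapCatchup {
    async fn try_fetch_account(
        &self,
        _height: u64,
        _view: ViewNumber,
        _fee_merkle_tree_root: FeeMerkleCommitment,
        account: FeeAccount,
    ) -> anyhow::Result<AccountQueryData> {
        // This sketch only serves chain configs.
        bail!("account {account} not available from this provider")
    }

    async fn try_remember_blocks_merkle_tree(
        &self,
        _height: u64,
        _view: ViewNumber,
        _mt: &mut BlockMerkleTree,
    ) -> anyhow::Result<()> {
        bail!("frontier not available from this provider")
    }

    async fn try_fetch_chain_config(
        &self,
        commitment: Commitment<ChainConfig>,
    ) -> anyhow::Result<ChainConfig> {
        // Look up the full config by its commitment.
        self.chain_configs
            .get(&commitment)
            .copied()
            .context("unknown chain config")
    }

    fn backoff(&self) -> &BackoffParams {
        &self.backoff
    }
}
```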
+#[async_trait]
+impl<T: StateCatchup + ?Sized> StateCatchup for Box<T> {
+    async fn try_fetch_account(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        account: FeeAccount,
+    ) -> anyhow::Result<AccountQueryData> {
+        (**self)
+            .try_fetch_account(height, view, fee_merkle_tree_root, account)
+            .await
+    }
+
+    async fn fetch_accounts(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        accounts: Vec<FeeAccount>,
+    ) -> anyhow::Result<Vec<AccountQueryData>> {
+        (**self)
+            .fetch_accounts(height, view, fee_merkle_tree_root, accounts)
+            .await
+    }
+
+    async fn try_remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        (**self)
+            .try_remember_blocks_merkle_tree(height, view, mt)
+            .await
+    }
+
+    async fn remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        (**self).remember_blocks_merkle_tree(height, view, mt).await
+    }
+
+    async fn try_fetch_chain_config(
+        &self,
+        commitment: Commitment<ChainConfig>,
+    ) -> anyhow::Result<ChainConfig> {
+        (**self).try_fetch_chain_config(commitment).await
+    }
+
+    async fn fetch_chain_config(&self, commitment: Commitment<ChainConfig>) -> ChainConfig {
+        (**self).fetch_chain_config(commitment).await
+    }
+
+    fn backoff(&self) -> &BackoffParams {
+        (**self).backoff()
+    }
+}
+
+#[async_trait]
+impl<T: StateCatchup + ?Sized> StateCatchup for Arc<T> {
+    async fn try_fetch_account(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        account: FeeAccount,
+    ) -> anyhow::Result<AccountQueryData> {
+        (**self)
+            .try_fetch_account(height, view, fee_merkle_tree_root, account)
+            .await
+    }
+
+    async fn fetch_accounts(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        accounts: Vec<FeeAccount>,
+    ) -> anyhow::Result<Vec<AccountQueryData>> {
+        (**self)
+            .fetch_accounts(height, view, fee_merkle_tree_root, accounts)
+            .await
+    }
+
+    async fn try_remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        (**self)
+            .try_remember_blocks_merkle_tree(height, view, mt)
+            .await
+    }
+
+    async fn remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        (**self).remember_blocks_merkle_tree(height, view, mt).await
+    }
+
+    async fn try_fetch_chain_config(
+        &self,
+        commitment: Commitment<ChainConfig>,
+    ) -> anyhow::Result<ChainConfig> {
+        (**self).try_fetch_chain_config(commitment).await
+    }
+
+    async fn fetch_chain_config(&self, commitment: Commitment<ChainConfig>) -> ChainConfig {
+        (**self).fetch_chain_config(commitment).await
+    }
+
+    fn backoff(&self) -> &BackoffParams {
+        (**self).backoff()
+    }
+}
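Because the blanket impls above cover `Box` and `Arc`, heterogeneous providers can be mixed behind trait objects and combined via the `Vec` impl just below, which tries each provider in turn. A usage sketch, with hypothetical `peer_a`/`peer_b` values:

```rust
// Sketch: failover across two providers (illustrative, not part of this patch).
async fn demo(
    peer_a: Box<dyn StateCatchup>,
    peer_b: Box<dyn StateCatchup>,
    commitment: Commitment<ChainConfig>,
) -> ChainConfig {
    let providers: Vec<Box<dyn StateCatchup>> = vec![peer_a, peer_b];
    // Tries peer_a first, falls back to peer_b, retrying with the most
    // conservative backoff of the two (see `backoff()` below).
    providers.fetch_chain_config(commitment).await
}
```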
+/// Catchup from multiple providers tries each provider in a round robin fashion until it succeeds.
+#[async_trait]
+impl<T: StateCatchup> StateCatchup for Vec<T> {
+    #[tracing::instrument(skip(self))]
+    async fn try_fetch_account(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        fee_merkle_tree_root: FeeMerkleCommitment,
+        account: FeeAccount,
+    ) -> anyhow::Result<AccountQueryData> {
+        for provider in self {
+            match provider
+                .try_fetch_account(height, view, fee_merkle_tree_root, account)
+                .await
+            {
+                Ok(account) => return Ok(account),
+                Err(err) => {
+                    tracing::warn!(%account, ?provider, "failed to fetch account: {err:#}");
+                }
+            }
+        }
+
+        bail!("could not fetch account from any provider");
+    }
+
+    #[tracing::instrument(skip(self, mt))]
+    async fn try_remember_blocks_merkle_tree(
+        &self,
+        height: u64,
+        view: ViewNumber,
+        mt: &mut BlockMerkleTree,
+    ) -> anyhow::Result<()> {
+        for provider in self {
+            match provider
+                .try_remember_blocks_merkle_tree(height, view, mt)
+                .await
+            {
+                Ok(()) => return Ok(()),
+                Err(err) => {
+                    tracing::warn!(?provider, "failed to fetch frontier: {err:#}");
+                }
+            }
+        }
+
+        bail!("could not fetch frontier from any provider");
+    }
+
+    async fn try_fetch_chain_config(
+        &self,
+        commitment: Commitment<ChainConfig>,
+    ) -> anyhow::Result<ChainConfig> {
+        for provider in self {
+            match provider.try_fetch_chain_config(commitment).await {
+                Ok(cf) => return Ok(cf),
+                Err(err) => {
+                    tracing::warn!(?provider, "failed to fetch chain config: {err:#}");
+                }
+            }
+        }
+
+        bail!("could not fetch chain config from any provider");
+    }
+
+    fn backoff(&self) -> &BackoffParams {
+        // Use whichever provider's backoff is most conservative.
+        self.iter()
+            .map(|p| p.backoff())
+            .max()
+            .expect("provider list not empty")
+    }
+}
+
+#[async_trait]
+pub trait PersistenceOptions: Clone + Send + Sync + 'static {
+    type Persistence: SequencerPersistence;
+
+    async fn create(self) -> anyhow::Result<Self::Persistence>;
+    async fn reset(self) -> anyhow::Result<()>;
+
+    async fn create_catchup_provider(
+        self,
+        backoff: BackoffParams,
+    ) -> anyhow::Result<Arc<dyn StateCatchup>> {
+        self.create().await?.into_catchup_provider(backoff)
+    }
+}
+
+#[async_trait]
+pub trait SequencerPersistence: Sized + Send + Sync + 'static {
+    /// Use this storage as a state catchup backend, if supported.
+    fn into_catchup_provider(
+        self,
+        _backoff: BackoffParams,
+    ) -> anyhow::Result<Arc<dyn StateCatchup>> {
+        bail!("state catchup is not implemented for this persistence type");
+    }
+
+    /// Load the orchestrator config from storage.
+    ///
+    /// Returns `None` if no config exists (we are joining a network for the first time). Fails with
+    /// `Err` if it could not be determined whether a config exists or not.
+    async fn load_config(&self) -> anyhow::Result<Option<NetworkConfig>>;
+
+    /// Save the orchestrator config to storage.
+    async fn save_config(&mut self, cfg: &NetworkConfig) -> anyhow::Result<()>;
+
+    async fn collect_garbage(&mut self, view: ViewNumber) -> anyhow::Result<()>;
+
+    /// Saves the latest decided leaf.
+    ///
+    /// If the height of the new leaf is not greater than the height of the previous decided leaf,
+    /// storage is not updated.
+    async fn save_anchor_leaf(
+        &mut self,
+        leaf: &Leaf,
+        qc: &QuorumCertificate<SeqTypes>,
+    ) -> anyhow::Result<()>;
+
+    /// Load the highest view saved with [`save_voted_view`](Self::save_voted_view).
+    async fn load_latest_acted_view(&self) -> anyhow::Result<Option<ViewNumber>>;
+
+    /// Load the latest leaf saved with [`save_anchor_leaf`](Self::save_anchor_leaf).
+    async fn load_anchor_leaf(&self)
+        -> anyhow::Result<Option<(Leaf, QuorumCertificate<SeqTypes>)>>;
+
+    /// Load undecided state saved by consensus before we shut down.
+ async fn load_undecided_state( + &self, + ) -> anyhow::Result, BTreeMap>)>>; + + /// Load the proposals saved by consensus + async fn load_quorum_proposals( + &self, + ) -> anyhow::Result>>>>; + + async fn load_vid_share( + &self, + view: ViewNumber, + ) -> anyhow::Result>>>; + async fn load_da_proposal( + &self, + view: ViewNumber, + ) -> anyhow::Result>>>; + + /// Load the latest known consensus state. + /// + /// Returns an initializer to resume HotShot from the latest saved state (or start from genesis, + /// if there is no saved state). + async fn load_consensus_state( + &self, + state: NodeState, + ) -> anyhow::Result> { + let genesis_validated_state = ValidatedState::genesis(&state).0; + let highest_voted_view = match self + .load_latest_acted_view() + .await + .context("loading last voted view")? + { + Some(view) => { + tracing::info!(?view, "starting from saved view"); + view + } + None => { + tracing::info!("no saved view, starting from genesis"); + ViewNumber::genesis() + } + }; + let (leaf, high_qc) = match self + .load_anchor_leaf() + .await + .context("loading anchor leaf")? + { + Some((leaf, high_qc)) => { + tracing::info!(?leaf, ?high_qc, "starting from saved leaf"); + ensure!( + leaf.view_number() == high_qc.view_number, + format!( + "loaded anchor leaf from view {:?}, but high QC is from view {:?}", + leaf.view_number(), + high_qc.view_number + ) + ); + (leaf, high_qc) + } + None => { + tracing::info!("no saved leaf, starting from genesis leaf"); + ( + Leaf::genesis(&genesis_validated_state, &state).await, + QuorumCertificate::genesis(&genesis_validated_state, &state).await, + ) + } + }; + let validated_state = if leaf.block_header().height() == 0 { + // If we are starting from genesis, we can provide the full state. + Some(Arc::new(genesis_validated_state)) + } else { + // Otherwise, we will have to construct a sparse state and fetch missing data during + // catchup. + None + }; + + // If we are not starting from genesis, we start from the view following the maximum view + // between `highest_voted_view` and `leaf.view_number`. This prevents double votes from + // starting in a view in which we had already voted before the restart, and prevents + // unnecessary catchup from starting in a view earlier than the anchor leaf. + let mut view = max(highest_voted_view, leaf.view_number()); + if view != ViewNumber::genesis() { + view += 1; + } + + let (undecided_leaves, undecided_state) = self + .load_undecided_state() + .await + .context("loading undecided state")? + .unwrap_or_default(); + + let saved_proposals = self + .load_quorum_proposals() + .await + .context("loading saved proposals") + .unwrap_or_default() + .unwrap_or_default(); + + tracing::info!( + ?leaf, + ?view, + ?high_qc, + ?validated_state, + ?undecided_leaves, + ?undecided_state, + ?saved_proposals, + "loaded consensus state" + ); + Ok(HotShotInitializer::from_reload( + leaf, + state, + validated_state, + view, + saved_proposals, + high_qc, + undecided_leaves.into_values().collect(), + undecided_state, + )) + } + + /// Update storage based on an event from consensus. + async fn handle_event(&mut self, event: &Event) { + if let EventType::Decide { leaf_chain, qc, .. } = &event.event { + if let Some(LeafInfo { leaf, .. 
}) = leaf_chain.first() { + if qc.view_number != leaf.view_number() { + tracing::error!( + leaf_view = ?leaf.view_number(), + qc_view = ?qc.view_number, + "latest leaf and QC are from different views!", + ); + return; + } + if let Err(err) = self.save_anchor_leaf(leaf, qc).await { + tracing::error!( + ?leaf, + hash = %leaf.commit(), + "Failed to save anchor leaf. When restarting make sure anchor leaf is at least as recent as this leaf. {err:#}", + ); + } + + if let Err(err) = self.collect_garbage(leaf.view_number()).await { + tracing::error!("Failed to garbage collect. {err:#}",); + } + } + } + } + + async fn append_vid( + &mut self, + proposal: &Proposal>, + ) -> anyhow::Result<()>; + async fn append_da( + &mut self, + proposal: &Proposal>, + ) -> anyhow::Result<()>; + async fn record_action( + &mut self, + view: ViewNumber, + action: HotShotAction, + ) -> anyhow::Result<()>; + async fn update_undecided_state( + &mut self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()>; + async fn append_quorum_proposal( + &mut self, + proposal: &Proposal>, + ) -> anyhow::Result<()>; +} + +#[async_trait] +impl Storage for Arc> { + async fn append_vid( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.write().await.append_vid(proposal).await + } + + async fn append_da( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.write().await.append_da(proposal).await + } + async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()> { + self.write().await.record_action(view, action).await + } + async fn update_high_qc(&self, _high_qc: QuorumCertificate) -> anyhow::Result<()> { + Ok(()) + } + + async fn update_undecided_state( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + self.write() + .await + .update_undecided_state(leaves, state) + .await + } + + async fn append_proposal( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.write().await.append_quorum_proposal(proposal).await + } +} + +/// Data that can be deserialized from a subslice of namespace payload bytes. +/// +/// Companion trait for [`NsPayloadBytesRange`], which specifies the subslice of +/// namespace payload bytes to read. +pub trait FromNsPayloadBytes<'a> { + /// Deserialize `Self` from namespace payload bytes. + fn from_payload_bytes(bytes: &'a [u8]) -> Self; +} + +/// Specifies a subslice of namespace payload bytes to read. +/// +/// Companion trait for [`FromNsPayloadBytes`], which holds data that can be +/// deserialized from that subslice of bytes. +pub trait NsPayloadBytesRange<'a> { + type Output: FromNsPayloadBytes<'a>; + + /// Range relative to this ns payload + fn ns_payload_range(&self) -> Range; +} + +/// Types which can be deserialized from either integers or strings. +/// +/// Some types can be represented as an integer or a string in human-readable formats like JSON or +/// TOML. For example, 1 GWEI might be represented by the integer `1000000000` or the string `"1 +/// gwei"`. Such types can implement `FromStringOrInteger` and then use [`impl_string_or_integer`] +/// to derive this user-friendly serialization. +/// +/// These types are assumed to have an efficient representation as an integral type in Rust -- +/// [`Self::Binary`] -- and will be serialized to and from this type when using a non-human-readable +/// encoding. With human readable encodings, serialization is always to a string. 
+pub trait FromStringOrInteger: Sized {
+    type Binary: Serialize + DeserializeOwned;
+    type Integer: Serialize + DeserializeOwned;
+
+    fn from_binary(b: Self::Binary) -> anyhow::Result<Self>;
+    fn from_string(s: String) -> anyhow::Result<Self>;
+    fn from_integer(i: Self::Integer) -> anyhow::Result<Self>;
+
+    fn to_binary(&self) -> anyhow::Result<Self::Binary>;
+    fn to_string(&self) -> anyhow::Result<String>;
+}
diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs
new file mode 100644
index 0000000000..530f6230c2
--- /dev/null
+++ b/types/src/v0/utils.rs
@@ -0,0 +1,237 @@
+use std::{
+    cmp::{min, Ordering},
+    fmt::{self, Debug, Display, Formatter},
+    num::ParseIntError,
+    str::FromStr,
+    time::Duration,
+};
+
+use anyhow::Context;
+use async_std::task::sleep;
+use clap::Parser;
+use derive_more::{Display, From, Into};
+use futures::future::BoxFuture;
+use rand::Rng;
+use sequencer_utils::{impl_serde_from_string_or_integer, ser::FromStringOrInteger};
+use serde::{Deserialize, Serialize};
+use snafu::Snafu;
+use time::{format_description::well_known::Rfc3339 as TimestampFormat, OffsetDateTime};
+
+/// Information about the genesis state which feeds into the genesis block header.
+#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
+pub struct GenesisHeader {
+    pub timestamp: Timestamp,
+}
+
+#[derive(Hash, Copy, Clone, Debug, Display, PartialEq, Eq, From, Into)]
+#[display(fmt = "{}", "_0.format(&TimestampFormat).unwrap()")]
+pub struct Timestamp(OffsetDateTime);
+
+impl_serde_from_string_or_integer!(Timestamp);
+
+impl Default for Timestamp {
+    fn default() -> Self {
+        Self::from_integer(0).unwrap()
+    }
+}
+
+impl Timestamp {
+    pub fn unix_timestamp(&self) -> u64 {
+        self.0.unix_timestamp() as u64
+    }
+}
+
+impl FromStringOrInteger for Timestamp {
+    type Binary = u64;
+    type Integer = u64;
+
+    fn from_binary(b: Self::Binary) -> anyhow::Result<Self> {
+        Self::from_integer(b)
+    }
+
+    fn from_integer(i: Self::Integer) -> anyhow::Result<Self> {
+        let unix = i.try_into().context("timestamp out of range")?;
+        Ok(Self(
+            OffsetDateTime::from_unix_timestamp(unix).context("invalid timestamp")?,
+        ))
+    }
+
+    fn from_string(s: String) -> anyhow::Result<Self> {
+        Ok(Self(
+            OffsetDateTime::parse(&s, &TimestampFormat).context("invalid timestamp")?,
+        ))
+    }
+
+    fn to_binary(&self) -> anyhow::Result<Self::Binary> {
+        Ok(self.unix_timestamp())
+    }
+
+    fn to_string(&self) -> anyhow::Result<String> {
+        Ok(format!("{self}"))
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct Ratio {
+    pub numerator: u64,
+    pub denominator: u64,
+}
+
+impl From<Ratio> for (u64, u64) {
+    fn from(r: Ratio) -> Self {
+        (r.numerator, r.denominator)
+    }
+}
+
+impl Display for Ratio {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{}:{}", self.numerator, self.denominator)
+    }
+}
+
+impl PartialOrd for Ratio {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for Ratio {
+    fn cmp(&self, other: &Self) -> Ordering {
+        (self.numerator * other.denominator).cmp(&(other.numerator * self.denominator))
+    }
+}
+
+#[derive(Debug, Snafu)]
+pub enum ParseRatioError {
+    #[snafu(display("numerator and denominator must be separated by :"))]
+    MissingDelimiter,
+    InvalidNumerator {
+        err: ParseIntError,
+    },
+    InvalidDenominator {
+        err: ParseIntError,
+    },
+}
+
+impl FromStr for Ratio {
+    type Err = ParseRatioError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let (num, den) = s.split_once(':').ok_or(ParseRatioError::MissingDelimiter)?;
+        Ok(Self {
+            numerator: num
+                .parse()
+                .map_err(|err| ParseRatioError::InvalidNumerator { err })?,
+            denominator: den
+                .parse()
+                .map_err(|err| ParseRatioError::InvalidDenominator { err })?,
+        })
+    }
+}
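A quick, illustrative sanity check of `Ratio` (not part of the patch): it parses from the `num:den` syntax used by the jitter CLI flag below, and it orders by cross-multiplication, i.e. a/b < c/d iff a·d < c·b.

```rust
// Illustrative check of Ratio parsing and ordering.
fn main() {
    let jitter: Ratio = "1:10".parse().unwrap();
    let pair: (u64, u64) = jitter.into(); // uses the From<Ratio> impl above
    assert_eq!(pair, (1, 10));

    let larger: Ratio = "1:4".parse().unwrap();
    // 1*4 < 1*10, i.e. 0.1 < 0.25, so 1:10 orders below 1:4.
    assert!(jitter < larger);
}
```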
+#[derive(Clone, Debug, Snafu)]
+pub struct ParseDurationError {
+    reason: String,
+}
+
+pub fn parse_duration(s: &str) -> Result<Duration, ParseDurationError> {
+    cld::ClDuration::from_str(s)
+        .map(Duration::from)
+        .map_err(|err| ParseDurationError {
+            reason: err.to_string(),
+        })
+}
+
+pub const MIN_RETRY_DELAY: Duration = Duration::from_millis(500);
+pub const MAX_RETRY_DELAY: Duration = Duration::from_secs(5);
+pub const BACKOFF_FACTOR: u32 = 2;
+// Exponential backoff jitter as a fraction of the backoff delay, (numerator, denominator).
+pub const BACKOFF_JITTER: (u64, u64) = (1, 10);
+
+#[derive(Clone, Copy, Debug, Parser, PartialEq, Eq, PartialOrd, Ord)]
+pub struct BackoffParams {
+    /// Exponential backoff exponent.
+    #[clap(
+        long = "catchup-backoff-factor",
+        env = "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_FACTOR",
+        default_value = "4"
+    )]
+    factor: u32,
+
+    /// Exponential backoff base delay.
+    #[clap(
+        long = "catchup-base-retry-delay",
+        env = "ESPRESSO_SEQUENCER_CATCHUP_BASE_RETRY_DELAY",
+        default_value = "20ms",
+        value_parser = parse_duration
+    )]
+    base: Duration,
+
+    /// Exponential backoff maximum delay.
+    #[clap(
+        long = "catchup-max-retry-delay",
+        env = "ESPRESSO_SEQUENCER_CATCHUP_MAX_RETRY_DELAY",
+        default_value = "5s",
+        value_parser = parse_duration
+    )]
+    max: Duration,
+
+    /// Exponential backoff jitter as a ratio of the backoff delay, numerator:denominator.
+    #[clap(
+        long = "catchup-backoff-jitter",
+        env = "ESPRESSO_SEQUENCER_CATCHUP_BACKOFF_JITTER",
+        default_value = "1:10"
+    )]
+    jitter: Ratio,
+}
+
+impl Default for BackoffParams {
+    fn default() -> Self {
+        Self::parse_from(std::iter::empty::<String>())
+    }
+}
+
+impl BackoffParams {
+    pub async fn retry<S, T>(
+        &self,
+        mut state: S,
+        f: impl for<'a> Fn(&'a mut S) -> BoxFuture<'a, anyhow::Result<T>>,
+    ) -> T {
+        let mut delay = self.base;
+        loop {
+            match f(&mut state).await {
+                Ok(res) => break res,
+                Err(err) => {
+                    tracing::warn!(
+                        "Retryable operation failed, will retry after {delay:?}: {err:#}"
+                    );
+                    sleep(delay).await;
+                    delay = self.backoff(delay);
+                }
+            }
+        }
+    }
+
+    #[must_use]
+    pub fn backoff(&self, delay: Duration) -> Duration {
+        if delay >= self.max {
+            return self.max;
+        }
+
+        let mut rng = rand::thread_rng();
+
+        // Increase the backoff by the backoff factor.
+        let ms = (delay * self.factor).as_millis() as u64;
+
+        // Sample a random jitter factor in the range [0, self.jitter].
+        let jitter_num = rng.gen_range(0..self.jitter.numerator);
+        let jitter_den = self.jitter.denominator;
+
+        // Increase the delay by the jitter factor.
+        let jitter = ms * jitter_num / jitter_den;
+        let delay = Duration::from_millis(ms + jitter);
+
+        // Bound the delay by the maximum.
+        min(delay, self.max)
+    }
+}
diff --git a/types/src/v0/v0_1/block.rs b/types/src/v0/v0_1/block.rs
new file mode 100644
index 0000000000..c81eb6dba3
--- /dev/null
+++ b/types/src/v0/v0_1/block.rs
@@ -0,0 +1,307 @@
+use serde::{Deserialize, Serialize};
+use std::iter::Peekable;
+
+use derive_more::Display;
+use std::ops::Range;
+use thiserror::Error;
+
+use hotshot_types::vid::{LargeRangeProofType, SmallRangeProofType};
+
+use std::default::Default;
+
+/// Proof of correctness for namespace payload bytes in a block.
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct NsProof {
+    pub(crate) ns_index: NsIndex,
+    pub(crate) ns_payload: NsPayloadOwned,
+    pub(crate) ns_proof: Option<LargeRangeProofType>, // `None` if ns_payload is empty
+}
+
+/// Byte lengths for the different items that could appear in a namespace table.
+pub const NUM_NSS_BYTE_LEN: usize = 4;
+pub const NS_OFFSET_BYTE_LEN: usize = 4;
+
+// TODO prefer [`NS_ID_BYTE_LEN`] set to `8` because [`NamespaceId`] is a `u64`
+// but we need to maintain serialization compatibility.
+// https://github.com/EspressoSystems/espresso-sequencer/issues/1574
+pub const NS_ID_BYTE_LEN: usize = 4;
+
+/// Raw binary data for a namespace table.
+///
+/// Any sequence of bytes is a valid [`NsTable`].
+///
+/// # Binary format of a namespace table
+///
+/// Byte lengths for the different items that could appear in a namespace table
+/// are specified in local private constants [`NUM_NSS_BYTE_LEN`],
+/// [`NS_OFFSET_BYTE_LEN`], [`NS_ID_BYTE_LEN`].
+///
+/// ## Number of entries in the namespace table
+///
+/// The first [`NUM_NSS_BYTE_LEN`] bytes of the namespace table indicate the
+/// number `n` of entries in the table as a little-endian unsigned integer. If
+/// the entire table length is smaller than [`NUM_NSS_BYTE_LEN`] then the
+/// missing bytes are zero-padded.
+///
+/// The bytes in the namespace table beyond the first [`NUM_NSS_BYTE_LEN`] bytes
+/// encode table entries. Each entry consumes exactly [`NS_ID_BYTE_LEN`] `+`
+/// [`NS_OFFSET_BYTE_LEN`] bytes.
+///
+/// The number `n` could be anything, including a number much larger than the
+/// number of entries that could fit in the namespace table. As such, the actual
+/// number of entries in the table is defined as the minimum of `n` and the
+/// maximum number of whole entries that could fit in the table.
+///
+/// See [`Self::in_bounds`] for clarification.
+///
+/// ## Namespace table entry
+///
+/// ### Namespace ID
+///
+/// The first [`NS_ID_BYTE_LEN`] bytes of each table entry indicate the
+/// [`NamespaceId`] for this namespace. Any table entry whose [`NamespaceId`] is
+/// a duplicate of a previous entry is ignored. A correct count of the number of
+/// *unique* (non-ignored) entries is given by `NsTable::iter().count()`.
+///
+/// ### Namespace offset
+///
+/// The next [`NS_OFFSET_BYTE_LEN`] bytes of each table entry indicate the
+/// end-index of a namespace in the block payload bytes
+/// [`Payload`](super::payload::Payload). This end-index is a little-endian
+/// unsigned integer.
+///
+/// # How to deduce a namespace's byte range
+///
+/// In order to extract the payload bytes of a single namespace `N` from the
+/// block payload one needs both the start- and end-indices for `N`.
+///
+/// See [`Self::ns_range`] for clarification. What follows is a description of
+/// what's implemented in [`Self::ns_range`].
+///
+/// If `N` occupies the `i`th entry in the namespace table for `i>0` then the
+/// start-index for `N` is defined as the end-index of the `(i-1)`th entry in
+/// the table.
+///
+/// Even if the `(i-1)`th entry would otherwise be ignored (due to a duplicate
+/// [`NamespaceId`] or any other reason), that entry's end-index still defines
+/// the start-index of `N`. This rule guarantees that both start- and
+/// end-indices for any namespace `N` can be read from a constant-size byte
+/// range in the namespace table, and it eliminates the need to traverse an
+/// unbounded number of previous entries of the namespace table looking for a
+/// previous non-ignored entry.
+///
+/// The start-index of the 0th entry in the table is implicitly defined to be
+/// `0`.
+///
+/// The start- and end-indices `(declared_start, declared_end)` declared in the
+/// namespace table could be anything. As such, the actual start- and
+/// end-indices `(start, end)` are defined so as to ensure that the byte range
+/// is well-defined and in-bounds for the block payload:
+/// ```ignore
+/// end = min(declared_end, block_payload_byte_length)
+/// start = min(declared_start, end)
+/// ```
+///
+/// In an "honestly-prepared" namespace table the end-index of the final
+/// namespace equals the byte length of the block payload. (Otherwise the block
+/// payload might have bytes that are not included in any namespace.)
+///
+/// It is possible that a namespace table could indicate two distinct namespaces
+/// whose byte ranges overlap, though no "honestly-prepared" namespace table
+/// would do this.
+///
+/// TODO prefer [`NsTable`] to be a newtype like this
+/// ```ignore
+/// #[repr(transparent)]
+/// #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+/// #[serde(transparent)]
+/// pub struct NsTable(#[serde(with = "base64_bytes")] Vec<u8>);
+/// ```
+/// but we need to maintain serialization compatibility.
+///
+#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+// Boilerplate: `#[serde(remote = "Self")]` needed to check invariants on
+// deserialization. See
+// https://github.com/serde-rs/serde/issues/1220#issuecomment-382589140
+#[serde(remote = "Self")]
+pub struct NsTable {
+    #[serde(with = "base64_bytes")]
+    pub(crate) bytes: Vec<u8>,
+}
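A worked example of the byte format just specified (illustrative only, not code from this patch): a table with a single entry for namespace id 42 whose payload ends at offset 5, all fields 4-byte little-endian.

```rust
// Illustrative encoding and decoding of a one-entry namespace table.
fn main() {
    let mut table = Vec::new();
    table.extend(1u32.to_le_bytes()); // number of entries `n`
    table.extend(42u32.to_le_bytes()); // entry 0: namespace id
    table.extend(5u32.to_le_bytes()); // entry 0: end-index into the payload

    let n = u32::from_le_bytes(table[0..4].try_into().unwrap());
    let ns_id = u32::from_le_bytes(table[4..8].try_into().unwrap());
    let end = u32::from_le_bytes(table[8..12].try_into().unwrap());
    let start = 0u32; // the 0th entry's start-index is implicitly 0

    assert_eq!((n, ns_id), (1, 42));
    assert_eq!(start..end, 0..5); // namespace 42 owns payload bytes [0, 5)
}
```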
+/// Return type for [`NsTable::validate`].
+#[derive(Error, Debug, Display, Eq, PartialEq)]
+pub enum NsTableValidationError {
+    InvalidByteLen,
+    NonIncreasingEntries,
+    DuplicateNamespaceId,
+    InvalidHeader, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604
+    InvalidFinalOffset, // TODO this variant obsolete after https://github.com/EspressoSystems/espresso-sequencer/issues/1604
+    ExpectNonemptyNsTable,
+}
+
+pub struct NsTableBuilder {
+    pub(crate) bytes: Vec<u8>,
+    pub(crate) num_entries: usize,
+}
+
+/// Index for an entry in a ns table.
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct NsIndex(pub(crate) usize);
+
+/// Number of entries in a namespace table.
+pub struct NumNss(pub(crate) usize);
+
+/// Return type for [`Payload::ns_iter`].
+pub struct NsIter(pub(crate) Range<usize>);
+
+/// Raw payload data for an entire block.
+///
+/// A block consists of two sequences of arbitrary bytes:
+/// - `ns_table`: namespace table
+/// - `ns_payloads`: namespace payloads
+///
+/// Any sequence of bytes is a valid `ns_table`. Any sequence of bytes is a
+/// valid `ns_payloads`. The contents of `ns_table` determine how to interpret
+/// `ns_payload`.
+///
+/// # Namespace table
+///
+/// See [`NsTable`] for the format of a namespace table.
+///
+/// # Namespace payloads
+///
+/// A concatenation of payload bytes for multiple individual namespaces.
+/// Namespace boundaries are dictated by `ns_table`. See [`NsPayload`] for the
+/// format of a namespace payload.
+#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+pub struct Payload {
+    // Concatenated payload bytes for each namespace
+    //
+    // TODO want to rename this field to `ns_payloads`, but can't due to
+    // serialization compatibility.
+ #[serde(with = "base64_bytes")] + pub(crate) raw_payload: Vec, + + pub(crate) ns_table: NsTable, +} + +/// Byte length of a block payload, which includes all namespaces but *not* the +/// namespace table. +#[derive(Clone, Debug, Display, Eq, Hash, PartialEq)] +pub struct PayloadByteLen(pub(crate) usize); + +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct Index { + pub(crate) ns_index: NsIndex, + pub(crate) tx_index: TxIndex, +} + +/// Cartesian product of [`NsIter`], [`TxIter`]. +pub struct Iter<'a> { + pub(crate) ns_iter: Peekable, + pub(crate) tx_iter: Option, + pub(crate) block: &'a Payload, +} + +/// Index range for a namespace payload inside a block payload. +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct NsPayloadRange(pub(crate) Range); + +/// Raw binary data for a single namespace's payload. +/// +/// Any sequence of bytes is a valid [`NsPayload`]. +/// +/// See module-level documentation [`types`](super::types) for a full +/// specification of the binary format of a namespace. +pub struct NsPayload(pub(crate) [u8]); + +#[repr(transparent)] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[serde(transparent)] +pub struct NsPayloadOwned(#[serde(with = "base64_bytes")] pub(crate) Vec); + +/// Proof of correctness for transaction bytes in a block. +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct TxProof { + // Naming conventions for this struct's fields: + // - `payload_x`: bytes from the payload + // - `payload_proof_x`: a proof of those bytes from the payload + pub(crate) tx_index: TxIndex, + + // Number of txs declared in the tx table + pub(crate) payload_num_txs: NumTxsUnchecked, + pub(crate) payload_proof_num_txs: SmallRangeProofType, + + // Tx table entries for this tx + pub(crate) payload_tx_table_entries: TxTableEntries, + pub(crate) payload_proof_tx_table_entries: SmallRangeProofType, + + // This tx's payload bytes. + // `None` if this tx has zero length. + pub(crate) payload_proof_tx: Option, +} + +/// Byte lengths for the different items that could appear in a tx table. +pub const NUM_TXS_BYTE_LEN: usize = 4; +pub const TX_OFFSET_BYTE_LEN: usize = 4; + +/// Number of txs in a namespace. +/// +/// Like [`NumTxsUnchecked`] but checked against a [`NsPayloadByteLen`]. +pub struct NumTxs(pub(crate) usize); + +/// Byte length of a namespace payload. +pub struct NsPayloadByteLen(pub(crate) usize); + +/// The part of a tx table that declares the number of txs in the payload. +/// +/// "Unchecked" because this quantity might exceed the number of tx table +/// entries that could fit into the namespace that contains it. +/// +/// Use [`NumTxs`] for the actual number of txs in this namespace. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct NumTxsUnchecked(pub(crate) usize); + +/// Byte range for the part of a tx table that declares the number of txs in the +/// payload. +pub struct NumTxsRange(pub(crate) Range); + +/// Entries from a tx table in a namespace for use in a transaction proof. +/// +/// Contains either one or two entries according to whether it was derived from +/// the first transaction in the namespace. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct TxTableEntries { + pub(crate) cur: usize, + pub(crate) prev: Option, // `None` if derived from the first transaction +} + +/// Byte range for entries from a tx table for use in a transaction proof. 
+///
+/// This range covers either one or two entries from a tx table according to
+/// whether it was derived from the first transaction in the namespace.
+pub struct TxTableEntriesRange(pub(crate) Range<usize>);
+
+/// A transaction's payload data.
+pub struct TxPayload<'a>(pub(crate) &'a [u8]);
+
+/// Byte range for a transaction's payload data.
+pub struct TxPayloadRange(pub(crate) Range<usize>);
+
+/// Index for an entry in a tx table.
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct TxIndex(pub(crate) usize);
+
+pub struct TxIter(pub(crate) Range<usize>);
+
+/// Build an individual namespace payload one transaction at a time.
+///
+/// Use [`Self::append_tx`] to add each transaction. Use [`Self::into_bytes`]
+/// when you're done. The returned bytes include a well-formed tx table and all
+/// tx payloads.
+#[derive(Default)]
+pub struct NsPayloadBuilder {
+    pub(crate) tx_table_entries: Vec<u8>,
+    pub(crate) tx_bodies: Vec<u8>,
+}
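A sketch of the builder's intended use, following its doc comment. `append_tx` and `into_bytes` are the methods named above (their definitions live elsewhere in this patch, so the exact signatures are assumed), and the transactions are hypothetical.

```rust
// Illustrative usage of NsPayloadBuilder, assuming append_tx(Transaction)
// and into_bytes() -> Vec<u8> as named in the doc comment above.
fn build_ns_payload(tx1: Transaction, tx2: Transaction) -> Vec<u8> {
    let mut builder = NsPayloadBuilder::default();
    builder.append_tx(tx1);
    builder.append_tx(tx2);
    // Result: a well-formed tx table followed by both tx payloads.
    builder.into_bytes()
}
```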
diff --git a/types/src/v0/v0_1/chain_config.rs b/types/src/v0/v0_1/chain_config.rs
new file mode 100644
index 0000000000..e74e36fda0
--- /dev/null
+++ b/types/src/v0/v0_1/chain_config.rs
@@ -0,0 +1,48 @@
+use committable::Commitment;
+use derive_more::{Deref, Display, From, Into};
+use ethers::types::{Address, U256};
+use itertools::Either;
+
+use serde::{Deserialize, Serialize};
+
+use crate::{FeeAccount, FeeAmount};
+
+#[derive(Default, Hash, Copy, Clone, Debug, Display, PartialEq, Eq, From, Into)]
+#[display(fmt = "{_0}")]
+pub struct ChainId(pub U256);
+
+#[derive(Hash, Copy, Clone, Debug, Default, Display, PartialEq, Eq, From, Into, Deref)]
+#[display(fmt = "{_0}")]
+pub struct BlockSize(pub(crate) u64);
+
+/// Global variables for an Espresso blockchain.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub struct ChainConfig {
+    /// Espresso chain ID
+    pub chain_id: ChainId,
+
+    /// Maximum size in bytes of a block
+    pub max_block_size: BlockSize,
+
+    /// Minimum fee in WEI per byte of payload
+    pub base_fee: FeeAmount,
+
+    /// Fee contract address on L1.
+    ///
+    /// This is optional so that fees can easily be toggled on/off, with no need to deploy a
+    /// contract when they are off. In a future release, after fees are switched on and thoroughly
+    /// tested, this may be made mandatory.
+    pub fee_contract: Option<Address>,
+
+    /// Account that receives sequencing fees.
+    ///
+    /// This account in the Espresso fee ledger will always receive every fee paid in Espresso,
+    /// regardless of whether or not there is a `fee_contract` deployed. Once deployed, the fee
+    /// contract can decide what to do with tokens locked in this account in Espresso.
+    pub fee_recipient: FeeAccount,
+}
+
+#[derive(Clone, Debug, Copy, PartialEq, Deserialize, Serialize, Eq, Hash)]
+pub struct ResolvableChainConfig {
+    pub(crate) chain_config: Either<ChainConfig, Commitment<ChainConfig>>,
+}
diff --git a/types/src/v0/v0_1/fee_info.rs b/types/src/v0/v0_1/fee_info.rs
new file mode 100644
index 0000000000..d4354e2845
--- /dev/null
+++ b/types/src/v0/v0_1/fee_info.rs
@@ -0,0 +1,90 @@
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use derive_more::{Add, Display, From, Into, Mul, Sub};
+use ethers::{abi::Address, types::U256};
+use jf_merkle_tree::{MerkleTreeScheme, UniversalMerkleTreeScheme};
+use serde::{Deserialize, Serialize};
+
+use crate::FeeMerkleTree;
+
+// New Type for `U256` in order to implement `CanonicalSerialize` and
+// `CanonicalDeserialize`
+#[derive(
+    Default,
+    Hash,
+    Copy,
+    Clone,
+    Debug,
+    Display,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Add,
+    Sub,
+    Mul,
+    From,
+    Into,
+)]
+#[display(fmt = "{_0}")]
+pub struct FeeAmount(pub U256);
+
+// New Type for `Address` in order to implement `CanonicalSerialize` and
+// `CanonicalDeserialize`
+#[derive(
+    Default,
+    Hash,
+    Copy,
+    Clone,
+    Debug,
+    Display,
+    Deserialize,
+    Serialize,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    From,
+    Into,
+)]
+#[display(fmt = "{_0:x}")]
+pub struct FeeAccount(pub Address);
+
+#[derive(
+    Hash,
+    Copy,
+    Clone,
+    Debug,
+    Deserialize,
+    Serialize,
+    PartialEq,
+    Eq,
+    CanonicalSerialize,
+    CanonicalDeserialize,
+)]
+/// `FeeInfo` holds data related to builder fees.
+pub struct FeeInfo {
+    pub account: FeeAccount,
+    pub amount: FeeAmount,
+}
+
+/// A proof of the balance of an account in the fee ledger.
+///
+/// If the account of interest does not exist in the fee state, this is a Merkle non-membership
+/// proof, and the balance is implicitly zero. Otherwise, this is a normal Merkle membership proof.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct FeeAccountProof {
+    pub account: Address,
+    pub proof: FeeMerkleProof,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub enum FeeMerkleProof {
+    Presence(<FeeMerkleTree as MerkleTreeScheme>::MembershipProof),
+    Absence(<FeeMerkleTree as UniversalMerkleTreeScheme>::NonMembershipProof),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct AccountQueryData {
+    pub balance: U256,
+    pub proof: FeeAccountProof,
+}
diff --git a/types/src/v0/v0_1/header.rs b/types/src/v0/v0_1/header.rs
new file mode 100644
index 0000000000..550d10b3d5
--- /dev/null
+++ b/types/src/v0/v0_1/header.rs
@@ -0,0 +1,109 @@
+use crate::NsTable;
+
+use super::{
+    BlockMerkleCommitment, BuilderSignature, FeeInfo, FeeMerkleCommitment, L1BlockInfo,
+    ResolvableChainConfig,
+};
+use ark_serialize::CanonicalSerialize;
+use committable::{Commitment, Committable, RawCommitmentBuilder};
+use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment};
+use serde::{
+    de::{self, SeqAccess},
+    Deserialize, Serialize,
+};
+/// A header is like a [`Block`] with the body replaced by a digest.
+#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
+pub struct Header {
+    /// A commitment to a ChainConfig or a full ChainConfig.
+ pub(crate) chain_config: ResolvableChainConfig, + pub(crate) height: u64, + pub(crate) timestamp: u64, + pub(crate) l1_head: u64, + pub(crate) l1_finalized: Option, + pub(crate) payload_commitment: VidCommitment, + pub(crate) builder_commitment: BuilderCommitment, + pub(crate) ns_table: NsTable, + pub(crate) block_merkle_tree_root: BlockMerkleCommitment, + pub(crate) fee_merkle_tree_root: FeeMerkleCommitment, + pub(crate) fee_info: FeeInfo, + pub(crate) builder_signature: Option, +} + +macro_rules! element { + ($seq:expr, $field:ident) => { + $seq.next_element()? + .ok_or_else(|| de::Error::missing_field(stringify!($field)))? + }; +} + +impl Committable for Header { + fn commit(&self) -> Commitment { + let mut bmt_bytes = vec![]; + self.block_merkle_tree_root + .serialize_with_mode(&mut bmt_bytes, ark_serialize::Compress::Yes) + .unwrap(); + let mut fmt_bytes = vec![]; + self.fee_merkle_tree_root + .serialize_with_mode(&mut fmt_bytes, ark_serialize::Compress::Yes) + .unwrap(); + + RawCommitmentBuilder::new(&Self::tag()) + .field("chain_config", self.chain_config.commit()) + .u64_field("height", self.height) + .u64_field("timestamp", self.timestamp) + .u64_field("l1_head", self.l1_head) + .optional("l1_finalized", &self.l1_finalized) + .constant_str("payload_commitment") + .fixed_size_bytes(self.payload_commitment.as_ref().as_ref()) + .constant_str("builder_commitment") + .fixed_size_bytes(self.builder_commitment.as_ref()) + .field("ns_table", self.ns_table.commit()) + .var_size_field("block_merkle_tree_root", &bmt_bytes) + .var_size_field("fee_merkle_tree_root", &fmt_bytes) + .field("fee_info", self.fee_info.commit()) + .finalize() + } + + fn tag() -> String { + // We use the tag "BLOCK" since blocks are identified by the hash of their header. This will + // thus be more intuitive to users than "HEADER". 
+ "BLOCK".into() + } +} + +impl Header { + pub fn deserialize_with_chain_config<'de, A>( + chain_config: ResolvableChainConfig, + mut seq: A, + ) -> Result + where + A: SeqAccess<'de>, + { + let height = element!(seq, height); + let timestamp = element!(seq, timestamp); + let l1_head = element!(seq, l1_head); + let l1_finalized = element!(seq, l1_finalized); + let payload_commitment = element!(seq, payload_commitment); + let builder_commitment = element!(seq, builder_commitment); + let ns_table = element!(seq, ns_table); + let block_merkle_tree_root = element!(seq, block_merkle_tree_root); + let fee_merkle_tree_root = element!(seq, fee_merkle_tree_root); + let fee_info = element!(seq, fee_info); + let builder_signature = element!(seq, builder_signature); + + Ok(Self { + chain_config, + height, + timestamp, + l1_head, + l1_finalized, + payload_commitment, + builder_commitment, + ns_table, + block_merkle_tree_root, + fee_merkle_tree_root, + fee_info, + builder_signature, + }) + } +} diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs new file mode 100644 index 0000000000..feb2f50052 --- /dev/null +++ b/types/src/v0/v0_1/instance_state.rs @@ -0,0 +1,41 @@ +use std::collections::BTreeMap; + +use std::sync::Arc; + +use serde::{Deserialize, Serialize}; +use std::fmt::Debug; + +use crate::{v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, ValidatedState}; +use vbs::version::Version; + +use super::l1::L1Client; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(untagged)] +#[serde(rename_all = "snake_case")] +pub enum UpgradeType { + // Note: Wrapping this in a tuple variant causes deserialization to fail because + // the 'chain_config' name is also provided in the TOML input. + ChainConfig { chain_config: ChainConfig }, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Upgrade { + pub view: u64, + pub propose_window: u64, + #[serde(flatten)] + pub upgrade_type: UpgradeType, +} + +#[derive(Debug, Clone)] +pub struct NodeState { + pub node_id: u64, + pub chain_config: ChainConfig, + pub l1_client: L1Client, + pub peers: Arc, + pub genesis_header: GenesisHeader, + pub genesis_state: ValidatedState, + pub l1_genesis: Option, + pub upgrades: BTreeMap, + pub current_version: Version, +} diff --git a/types/src/v0/v0_1/l1.rs b/types/src/v0/v0_1/l1.rs new file mode 100644 index 0000000000..e4029c48f3 --- /dev/null +++ b/types/src/v0/v0_1/l1.rs @@ -0,0 +1,44 @@ +use ethers::{ + prelude::{H256, U256}, + providers::{Http, Provider}, +}; +use serde::{Deserialize, Serialize}; +use std::{sync::Arc, time::Duration}; + +#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct L1BlockInfo { + pub number: u64, + pub timestamp: U256, + pub hash: H256, +} + +#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Hash, PartialEq, Eq)] +pub struct L1Snapshot { + /// The relevant snapshot of the L1 includes a reference to the current head of the L1 chain. + /// + /// Note that the L1 head is subject to changing due to a reorg. However, no reorg will change + /// the _number_ of this block in the chain: L1 block numbers will always be sequentially + /// increasing. Therefore, the sequencer does not have to worry about reorgs invalidating this + /// snapshot. + pub head: u64, + + /// The snapshot also includes information about the latest finalized L1 block. 
+    ///
+    /// Since this block is finalized (ie cannot be reorged) we can include specific information
+    /// about the particular block, such as its hash and timestamp.
+    ///
+    /// This block may be `None` in the rare case where Espresso has started shortly after the
+    /// genesis of the L1, and the L1 has yet to finalize a block. In all other cases it will be
+    /// `Some`.
+    pub finalized: Option<L1BlockInfo>,
+}
+
+#[derive(Clone, Debug)]
+/// An Http Provider and configuration to interact with the L1.
+pub struct L1Client {
+    pub retry_delay: Duration,
+    /// `Provider` from `ethers-provider`.
+    pub provider: Arc<Provider<Http>>,
+    /// Maximum number of L1 blocks that can be scanned for events in a single query.
+    pub events_max_block_range: u64,
+}
diff --git a/types/src/v0/v0_1/mod.rs b/types/src/v0/v0_1/mod.rs
new file mode 100644
index 0000000000..7115342c14
--- /dev/null
+++ b/types/src/v0/v0_1/mod.rs
@@ -0,0 +1,23 @@
+use vbs::version::Version;
+
+pub const VERSION: Version = Version { major: 0, minor: 1 };
+
+mod block;
+mod chain_config;
+mod fee_info;
+mod header;
+mod instance_state;
+mod l1;
+mod signature;
+mod state;
+mod transaction;
+
+pub use block::*;
+pub use chain_config::*;
+pub use fee_info::*;
+pub use header::Header;
+pub use instance_state::*;
+pub use l1::*;
+pub use signature::BuilderSignature;
+pub use state::*;
+pub use transaction::*;
diff --git a/types/src/v0/v0_1/signature.rs b/types/src/v0/v0_1/signature.rs
new file mode 100644
index 0000000000..1b6c16fb28
--- /dev/null
+++ b/types/src/v0/v0_1/signature.rs
@@ -0,0 +1 @@
+pub type BuilderSignature = ethers::prelude::Signature;
diff --git a/types/src/v0/v0_1/state.rs b/types/src/v0/v0_1/state.rs
new file mode 100644
index 0000000000..9e2df8089c
--- /dev/null
+++ b/types/src/v0/v0_1/state.rs
@@ -0,0 +1,40 @@
+use crate::ResolvableChainConfig;
+
+use super::{FeeAccount, FeeAmount};
+
+use jf_merkle_tree::{
+    prelude::{LightWeightSHA3MerkleTree, Sha3Digest, Sha3Node},
+    universal_merkle_tree::UniversalMerkleTree,
+    MerkleTreeScheme,
+};
+use serde::{Deserialize, Serialize};
+
+// The block merkle tree accumulates header commitments. However, since the underlying
+// representation of the commitment type remains the same even while the header itself changes,
+// using the underlying type `[u8; 32]` allows us to use the same state type across minor versions.
+pub type BlockMerkleTree = LightWeightSHA3MerkleTree<[u8; 32]>;
+pub type BlockMerkleCommitment = <BlockMerkleTree as MerkleTreeScheme>::Commitment;
+
+pub type FeeMerkleTree = UniversalMerkleTree<FeeAmount, Sha3Digest, FeeAccount, 256, Sha3Node>;
+pub type FeeMerkleCommitment = <FeeMerkleTree as MerkleTreeScheme>::Commitment;
+
+use core::fmt::Debug;
+
+use std::collections::HashSet;
+
+pub const BLOCK_MERKLE_TREE_HEIGHT: usize = 32;
+pub const FEE_MERKLE_TREE_HEIGHT: usize = 20;
+
+#[derive(Hash, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
+pub struct ValidatedState {
+    /// Frontier of Block Merkle Tree
+    pub block_merkle_tree: BlockMerkleTree,
+    /// Fee Merkle Tree
+    pub fee_merkle_tree: FeeMerkleTree,
+    pub chain_config: ResolvableChainConfig,
+}
+
+#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
+pub struct Delta {
+    pub fees_delta: HashSet<FeeAccount>,
+}
diff --git a/types/src/v0/v0_1/transaction.rs b/types/src/v0/v0_1/transaction.rs
new file mode 100644
index 0000000000..cbdc301c0a
--- /dev/null
+++ b/types/src/v0/v0_1/transaction.rs
@@ -0,0 +1,41 @@
+use derive_more::{Display, From, Into};
+
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
+use serde::{Deserialize, Serialize};
+
+#[derive(
+    Clone,
+    Serialize,
+    Deserialize,
+    Debug,
+    PartialEq,
+    Eq,
+    Hash,
+    CanonicalSerialize,
+    CanonicalDeserialize,
+)]
+pub struct Transaction {
+    pub(crate) namespace: NamespaceId,
+    #[serde(with = "base64_bytes")]
+    pub(crate) payload: Vec<u8>,
+}
+
+#[derive(
+    Clone,
+    Copy,
+    Serialize,
+    Debug,
+    Display,
+    PartialEq,
+    Eq,
+    Hash,
+    Into,
+    From,
+    Default,
+    CanonicalDeserialize,
+    CanonicalSerialize,
+    PartialOrd,
+    Ord,
+)]
+#[display(fmt = "{_0}")]
+pub struct NamespaceId(pub(crate) u64);
diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs
new file mode 100644
index 0000000000..9e745f21ff
--- /dev/null
+++ b/types/src/v0/v0_2/mod.rs
@@ -0,0 +1,17 @@
+use vbs::version::Version;
+
+// Re-export types which haven't changed since the last minor version.
+pub use super::v0_1::{
+    AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature,
+    ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo,
+    FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client,
+    L1Snapshot, NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder,
+    NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder,
+    NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen,
+    ResolvableChainConfig, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof,
+    TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeType, ValidatedState,
+    BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN,
+    NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
+};
+
+pub const VERSION: Version = Version { major: 0, minor: 2 };
diff --git a/types/src/v0/v0_3/header.rs b/types/src/v0/v0_3/header.rs
new file mode 100644
index 0000000000..29ca7d26d2
--- /dev/null
+++ b/types/src/v0/v0_3/header.rs
@@ -0,0 +1,60 @@
+use crate::NsTable;
+
+use super::{
+    BlockMerkleCommitment, BuilderSignature, FeeInfo, FeeMerkleCommitment, L1BlockInfo,
+    ResolvableChainConfig,
+};
+use ark_serialize::CanonicalSerialize;
+use committable::{Commitment, Committable, RawCommitmentBuilder};
+use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment};
+use serde::{Deserialize, Serialize};
+
+// TODO : marketplace header
+#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
+pub struct Header {
+    pub(crate) chain_config: ResolvableChainConfig,
+    pub(crate) height: u64,
+    pub(crate) timestamp: u64,
+    pub(crate) l1_head: u64,
+    pub(crate) l1_finalized: Option<L1BlockInfo>,
+    pub(crate) payload_commitment: VidCommitment,
+    pub(crate) builder_commitment: BuilderCommitment,
+    pub(crate) ns_table: NsTable,
+    pub(crate) block_merkle_tree_root: BlockMerkleCommitment,
+    pub(crate) fee_merkle_tree_root: FeeMerkleCommitment,
+    pub(crate) fee_info: FeeInfo,
+    pub(crate) builder_signature: Option<BuilderSignature>,
+}
+
+impl Committable for Header {
+    fn commit(&self) -> Commitment<Self> {
+        let mut bmt_bytes = vec![];
+        self.block_merkle_tree_root
+            .serialize_with_mode(&mut bmt_bytes, ark_serialize::Compress::Yes)
+            .unwrap();
+        let mut fmt_bytes = vec![];
+        self.fee_merkle_tree_root
+            .serialize_with_mode(&mut fmt_bytes, ark_serialize::Compress::Yes)
+            .unwrap();
+
+        RawCommitmentBuilder::new(&Self::tag())
+            .field("chain_config", self.chain_config.commit())
+            .u64_field("height", self.height)
+            .u64_field("timestamp", self.timestamp)
+            .u64_field("l1_head", self.l1_head)
+            .optional("l1_finalized", &self.l1_finalized)
+            .constant_str("payload_commitment")
+            .fixed_size_bytes(self.payload_commitment.as_ref().as_ref())
+            .constant_str("builder_commitment")
+            .fixed_size_bytes(self.builder_commitment.as_ref())
+            .field("ns_table", self.ns_table.commit())
+            .var_size_field("block_merkle_tree_root", &bmt_bytes)
+            .var_size_field("fee_merkle_tree_root", &fmt_bytes)
+            .field("fee_info", self.fee_info.commit())
+            .finalize()
+    }
+
+    fn tag() -> String {
+        "BLOCK".into()
+    }
+}
diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs
new file mode 100644
index 0000000000..1bf37d80ba
--- /dev/null
+++ b/types/src/v0/v0_3/mod.rs
@@ -0,0 +1,21 @@
+use vbs::version::Version;
+
+// Re-export types which haven't changed since the last minor version.
+pub use super::v0_1::{ + AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, + ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, + FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, + L1Snapshot, NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder, + NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, + NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, + ResolvableChainConfig, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, + TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeType, ValidatedState, + BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, + NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, +}; + +pub const VERSION: Version = Version { major: 0, minor: 3 }; + +mod header; + +pub use header::Header; diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index 57be73dc65..ef3cd8b52c 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -1,3 +1,5 @@ +use std::{collections::HashMap, io::Write, ops::Deref}; + use anyhow::{ensure, Context}; use async_std::sync::Arc; use clap::{builder::OsStr, Parser, ValueEnum}; @@ -16,7 +18,6 @@ use derive_more::Display; use ethers::{prelude::*, signers::coins_bip39::English, solc::artifacts::BytecodeObject}; use futures::future::{BoxFuture, FutureExt}; use hotshot_contract_adapter::light_client::ParsedLightClientState; -use std::{collections::HashMap, io::Write, ops::Deref}; use url::Url; /// Set of predeployed contracts. diff --git a/utils/src/lib.rs b/utils/src/lib.rs index b3ecadee49..f1e038830c 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -1,3 +1,10 @@ +use std::{ + fmt::Debug, + path::{Path, PathBuf}, + process::{Child, Command}, + time::Duration, +}; + use anyhow::anyhow; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; use async_std::task::sleep; @@ -6,17 +13,10 @@ use ethers::{ abi::Detokenize, contract::builders::ContractCall, prelude::*, - providers::Middleware, - providers::{Http, Provider}, + providers::{Http, Middleware, Provider}, signers::{coins_bip39::English, Signer as _}, types::U256, }; -use std::path::{Path, PathBuf}; -use std::time::Duration; -use std::{ - fmt::Debug, - process::{Child, Command}, -}; use tempfile::TempDir; use url::Url; @@ -439,9 +439,10 @@ async fn wait_for_transaction_to_be_mined( #[cfg(test)] mod test { - use super::*; use committable::RawCommitmentBuilder; + use super::*; + struct TestCommittable; impl Committable for TestCommittable { diff --git a/utils/src/test_utils.rs b/utils/src/test_utils.rs index 0d6ad4b748..8b0033a971 100644 --- a/utils/src/test_utils.rs +++ b/utils/src/test_utils.rs @@ -1,4 +1,5 @@ -use crate::Signer; +use std::sync::Arc; + use anyhow::Result; use contract_bindings::hot_shot::HotShot; use ethers::{ @@ -6,7 +7,8 @@ use ethers::{ providers::{Http, Middleware, Provider}, signers::{coins_bip39::English, MnemonicBuilder, Signer as _}, }; -use std::sync::Arc; + +use crate::Signer; #[derive(Debug, Clone)] pub struct TestClient { From c318bf2ef3f8243cdb50ed218cfb494ee458dda5 Mon Sep 17 00:00:00 2001 From: rob-maron <132852777+rob-maron@users.noreply.github.com> Date: Wed, 10 Jul 2024 16:46:13 -0400 Subject: [PATCH 19/65] Fix `rustls` issue (#1690) fix `rustls` issue --- Cargo.lock | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index 07b411c6bd..9822c77996 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1586,7 +1586,7 @@ dependencies = [ "rcgen 0.13.1", "redis", "rkyv", - "rustls 0.23.11", + "rustls 0.23.10", "rustls-pki-types", "sqlx", "thiserror", @@ -3620,7 +3620,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.11", + "rustls 0.23.10", "rustls-pki-types", ] @@ -4746,7 +4746,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.11", + "rustls 0.23.10", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -5919,7 +5919,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.11", + "rustls 0.23.10", "socket2 0.5.7", "thiserror", "tokio", @@ -6064,7 +6064,7 @@ dependencies = [ "libp2p-identity", "rcgen 0.11.3", "ring 0.17.8", - "rustls 0.23.11", + "rustls 0.23.10", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -7648,7 +7648,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.11", + "rustls 0.23.10", "thiserror", "tokio", "tracing", @@ -7664,7 +7664,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash", - "rustls 0.23.11", + "rustls 0.23.10", "slab", "thiserror", "tinyvec", @@ -8351,9 +8351,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.11" +version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" +checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ "log", "once_cell", @@ -10156,7 +10156,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.11", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -10664,7 +10664,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.11", + "rustls 0.23.10", "rustls-pki-types", "url", "webpki-roots 0.26.3", From 18f0035d2fd4655cd9b5a1f3c5cc75c5da4788d3 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Thu, 11 Jul 2024 15:59:54 +0500 Subject: [PATCH 20/65] markdown for upgrade docs --- data/genesis/demo.toml | 2 +- doc/upgrades.md | 62 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 doc/upgrades.md diff --git a/data/genesis/demo.toml b/data/genesis/demo.toml index 5c52dc3861..73e3498951 100644 --- a/data/genesis/demo.toml +++ b/data/genesis/demo.toml @@ -12,7 +12,7 @@ fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' timestamp = "1970-01-01T00:00:00Z" [[upgrade]] -version = "0.2" +version = "0.1" view = 5 propose_window = 10 diff --git a/doc/upgrades.md b/doc/upgrades.md new file mode 100644 index 0000000000..627d664cc9 --- /dev/null +++ b/doc/upgrades.md @@ -0,0 +1,62 @@ + +# Upgrades + +Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Upgrade proposal is broadcast separately from the `QuorumProposal`, typically several views in advance of its attachment. The goal is to ensure ample time for nodes to receive and prepare for the upgrade process. + +Voting for the `UpgradeProposal` begins before its proposal. Sufficient votes are gathered to form an upgrade certificate. Once obtained, the proposal is broadcasted, and any node that receives it accepts and attaches it to its own `QuorumProposal`. 
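The voting and proposing behavior described in this paragraph comes down to pure view-range checks. A minimal sketch of that gating, using hypothetical field names that mirror the config parameters this document introduces below (this is not HotShot's actual API):

```rust
/// Stand-in for the four view parameters configured later in this document.
/// Hypothetical helper type, not HotShot's real config.
struct UpgradeWindow {
    start_voting_view: u64,
    stop_voting_view: u64,
    start_proposing_view: u64,
    stop_proposing_view: u64,
}

impl UpgradeWindow {
    /// A node may vote for the upgrade only while the voting window is open.
    fn can_vote(&self, view: u64) -> bool {
        (self.start_voting_view..=self.stop_voting_view).contains(&view)
    }

    /// A node may broadcast an `UpgradeProposal` only inside the proposing window.
    fn can_propose(&self, view: u64) -> bool {
        (self.start_proposing_view..=self.stop_proposing_view).contains(&view)
    }
}
```

Note that setting `stop_voting_view` below `start_voting_view` makes `can_vote` always return false, which is exactly how the document below describes disabling an upgrade.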
+
+## Enabling an Upgrade
+
+To enable an upgrade in Hotshot protocol:
+
+When preparing for an upgrade, it's essential to define the base version, the upgrade version, and an upgrade hash:
+
+- **Base Version:** Represents the current version of the protocol (`0.1` in this example).
+- **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in this example).
+- **Upgrade Hash:** Acts as a unique identifier for the specific upgrade nodes are voting on. It distinguishes between different proposals of the same version upgrade, ensuring nodes vote and execute the correct one. It consists of a sequence of 32 bytes.
+
+These are defined in [NodeType implementation](../types/src/v0/mod.rs) for the Types (`SeqTypes` in our case).
+```rust
+impl NodeType for SeqTypes {
+    type Base = StaticVersion<0, 1>;
+    type Upgrade = StaticVersion<0, 2>;
+    const UPGRADE_HASH: [u8; 32] = [
+        1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
+    ],
+    ..
+}
+```
+
+These parameters are fetched from the genesis TOML file and set in Hotshot config:
+
+- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting begins as soon as the node is bootup.
+- **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
+- **start_proposing_view:** view at which the node proposes an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node proposes as soon as `UpgradeCertificate` is formed.
+- **stop_proposing_view:** The view after which the upgrade proposal is no longer valid. If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again.
+
+The window between `start_proposing_view` and `stop_proposing_view` should provide sufficient time for nodes to continue proposing the upgrade until successful.
+
+Ensure that the `ESPRESSO_SEQUENCER_GENESIS_FILE` environment variable is defined to point to the path of the genesis TOML file. For an example with upgrades enabled, refer to [`data/genesis/demo.toml`](../data/genesis/demo.toml).
+
+### Example TOML Configuration
+
+```toml
+[[upgrade]]
+version = "0.1"
+view = 5
+propose_window = 10
+
+[upgrade.chain_config]
+chain_id = 999999999
+base_fee = '2 wei'
+max_block_size = '1mb'
+fee_recipient = '0x0000000000000000000000000000000000000000'
+fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468'
+```
+In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade parameters such as version, view, and propose window.
+
+- **Version:** Indicates the current version targeted for the upgrade.
+- **View:** Represents the `start_proposing_view` value, marking when the upgrade proposal initiates.
+- **Propose Window:** Refers to the view window between `start_proposing_view` and `stop_proposing_view`.
+
+We currently support only chain config upgrades. The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example, to enable protocol fees or modify other parameters during an upgrade.
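A sketch of how the `view` and `propose_window` fields from the genesis TOML translate into the four view parameters listed above. It mirrors the `init_node` logic that appears in a later patch in this series; `ConsensusConfig` is a stand-in type, and leaving the voting window open-ended via `u64::MAX` is an assumption, not the sequencer's confirmed behavior:

```rust
/// Stand-in for the relevant slice of the HotShot config (hypothetical type).
struct ConsensusConfig {
    start_proposing_view: u64,
    stop_proposing_view: u64,
    start_voting_view: u64,
    stop_voting_view: u64,
}

/// Map a genesis `[[upgrade]]` entry onto the consensus config.
fn apply_upgrade(config: &mut ConsensusConfig, view: u64, propose_window: u64) {
    config.start_proposing_view = view;
    config.stop_proposing_view = view + propose_window;
    // Vote from the first view, so voting is enabled as soon as the node starts.
    config.start_voting_view = 1;
    // Assumption: keep the voting window open-ended.
    config.stop_voting_view = u64::MAX;
}
```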
From 41c113fe854f9e50fa1cc8cc3eebac065540949e Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 16:03:26 +0500
Subject: [PATCH 21/65] comments for upgrade struct

---
 sequencer/src/api.rs                |  8 +++----
 sequencer/src/genesis.rs            |  4 ++--
 sequencer/src/lib.rs                |  2 +-
 types/src/v0/v0_1/instance_state.rs | 36 ++++++++++++++++++++++++++++-
 4 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index 9f1beb57d7..3674abf4fd 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -1447,12 +1447,12 @@ mod test {
             ..Default::default()
         };
         let mut map = std::collections::BTreeMap::new();
-        let view = 5;
+        let start_proposing_view = 5;
         let propose_window = 10;
         map.insert(
             Version { major: 0, minor: 2 },
             Upgrade {
-                view,
+                start_proposing_view,
                 propose_window,
                 upgrade_type: UpgradeType::ChainConfig {
                     chain_config: chain_config_upgrade,
@@ -1463,8 +1463,8 @@ mod test {
         let stop_voting_view = 100;
         let upgrades = TestNetworkUpgrades {
             upgrades: map,
-            start_proposing_view: view,
-            stop_proposing_view: view + propose_window,
+            start_proposing_view,
+            stop_proposing_view: start_proposing_view + propose_window,
             start_voting_view: 1,
             stop_voting_view,
         };
diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs
index 03c278b163..8ad111a66a 100644
--- a/sequencer/src/genesis.rs
+++ b/sequencer/src/genesis.rs
@@ -67,7 +67,7 @@ mod upgrade_serialization {
             for (version, upgrade) in map {
                 seq.serialize_element(&(
                     version.to_string(),
-                    upgrade.view,
+                    upgrade.start_proposing_view,
                     upgrade.propose_window,
                     upgrade.upgrade_type.clone(),
                 ))?;
@@ -115,7 +115,7 @@ mod upgrade_serialization {
                     map.insert(
                         version,
                         Upgrade {
-                            view: fields.view,
+                            start_proposing_view: fields.view,
                             propose_window: fields.propose_window,
                             upgrade_type: fields.upgrade_type,
                         },
diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs
index eb0cfac639..81a18bf172 100644
--- a/sequencer/src/lib.rs
+++ b/sequencer/src/lib.rs
@@ -202,7 +202,7 @@ pub async fn init_node(
 
     let version = Ver::version();
     if let Some(upgrade) = genesis.upgrades.get(&version) {
-        let view = upgrade.view;
+        let view = upgrade.start_proposing_view;
         config.config.start_proposing_view = view;
         config.config.stop_proposing_view = view + upgrade.propose_window;
         config.config.start_voting_view = 1;
diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs
index feb2f50052..2b0a5a1569 100644
--- a/types/src/v0/v0_1/instance_state.rs
+++ b/types/src/v0/v0_1/instance_state.rs
@@ -10,6 +10,7 @@ use vbs::version::Version;
 
 use super::l1::L1Client;
 
+/// Represents the specific type of upgrade.
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[serde(untagged)]
 #[serde(rename_all = "snake_case")]
@@ -19,14 +20,33 @@ pub enum UpgradeType {
     ChainConfig { chain_config: ChainConfig },
 }
 
+/// Represents the upgrade config including the type of upgrade and upgrade parameters for hotshot config.
 #[derive(Clone, Debug, Deserialize, Serialize)]
 pub struct Upgrade {
-    pub view: u64,
+    /// The view at which the upgrade is proposed.
+    ///
+    /// Note: Voting for the proposal begins before the upgrade is formally proposed.
+    /// In our implementation, `start_voting_view` is set to `1` for all upgrades,
+    /// so if an upgrade is planned then the voting starts as soon as the node is started.
+    #[serde(rename = "view")]
+    pub start_proposing_view: u64,
+
+    /// The time window during which the upgrade can be proposed.
+    ///
+    /// This parameter is used for setting the `stop_proposing_view`.
+    /// `stop_proposing_view` is calculated as `start_proposing_view + propose_window`.
     pub propose_window: u64,
+
+    /// The specific type of upgrade configuration.
+    ///
+    /// Currently, we only support chain configuration upgrades (`upgrade.chain_config` in genesis toml file).
     #[serde(flatten)]
     pub upgrade_type: UpgradeType,
 }
 
+/// Represents the immutable state of a node.
+///
+/// For mutable state, use `ValidatedState`.
 #[derive(Debug, Clone)]
 pub struct NodeState {
     pub node_id: u64,
@@ -36,6 +56,20 @@ pub struct NodeState {
     pub genesis_header: GenesisHeader,
     pub genesis_state: ValidatedState,
     pub l1_genesis: Option<L1BlockInfo>,
+
+    /// Map containing all planned and executed upgrades.
+    ///
+    /// Currently, only one upgrade can be executed at a time.
+    /// For multiple upgrades, the node needs to be restarted after each upgrade.
+    ///
+    /// This field serves as a record for planned and past upgrades,
+    /// listed in the genesis TOML file. It will be very useful if multiple upgrades
+    /// are supported in the future.
     pub upgrades: BTreeMap<Version, Upgrade>,
+    /// Current version of the sequencer.
+    ///
+    /// This version is checked to determine if an upgrade is planned,
+    /// and which version variant for versioned types (e.g., build V2 header is version is 0,2) to use
+    /// in functions such as genesis.
     pub current_version: Version,
 }

From d98d2a4b098e07d4319ed871e63e456f752bdc22 Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 16:11:07 +0500
Subject: [PATCH 22/65] typos

---
 doc/upgrades.md                     | 8 ++++----
 types/src/v0/v0_1/instance_state.rs | 5 +++--
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index 627d664cc9..3c35a9b233 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -9,7 +9,7 @@ Voting for the `UpgradeProposal` begins before its proposal. Sufficient votes ar
 
 To enable an upgrade in Hotshot protocol:
 
-When preparing for an upgrade, it's essential to define the base version, the upgrade version, and an upgrade hash:
+When preparing for an upgrade, it is essential to define the base version, the upgrade version, and an upgrade hash:
 
 - **Base Version:** Represents the current version of the protocol (`0.1` in this example).
 - **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in this example).
@@ -29,7 +29,7 @@ These parameters are fetched from the genesis TOML file and set in Hotshot confi
 
-- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting begins as soon as the node is bootup.
+- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting begins as soon as the node is started.
 - **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
 - **start_proposing_view:** view at which the node proposes an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node proposes as soon as `UpgradeCertificate` is formed.
 - **stop_proposing_view:** The view after which the upgrade proposal is no longer valid.
@@ -55,8 +55,8 @@ fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' ``` In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade parameters such as version, view, and propose window. -- **Version:** Indicates the current version targeted for the upgrade. -- **View:** Represents the `start_proposing_view` value, marking when the upgrade proposal initiates. +- **Version:** the current version targeted for the upgrade. +- **View:** Represents the `start_proposing_view` value at which the upgrade is proposed. - **Propose Window:** Refers to the view window between `start_proposing_view` and `stop_proposing_view`. We currently support only chain config upgrades. The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example, to enable protocol fees or modify other parameters during an upgrade. diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index 2b0a5a1569..82f75ba4a7 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -69,7 +69,8 @@ pub struct NodeState { /// Current version of the sequencer. /// /// This version is checked to determine if an upgrade is planned, - /// and which version variant for versioned types (e.g., build V2 header is version is 0,2) to use - /// in functions such as genesis. + /// and which version variant for versioned types + /// to use in functions such as genesis. + /// (example: genesis returns V2 Header if version is 0.2) pub current_version: Version, } From 170160531021aa92f2041ed0f620f5b91f862e7d Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Thu, 11 Jul 2024 16:52:39 +0500 Subject: [PATCH 23/65] fee upgrade docs --- doc/upgrades.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/doc/upgrades.md b/doc/upgrades.md index 3c35a9b233..96bf9230ac 100644 --- a/doc/upgrades.md +++ b/doc/upgrades.md @@ -38,6 +38,7 @@ The window between `start_proposing_view` and `stop_proposing_view` should provi Ensure that the `ESPRESSO_SEQUENCER_GENESIS_FILE` environment variable is defined to point to the path of the genesis TOML file. For an example with upgrades enabled, refer to [`data/genesis/demo.toml`](../data/genesis/demo.toml). +Note: We currently support only chain config upgrade. ### Example TOML Configuration ```toml @@ -53,10 +54,19 @@ max_block_size = '1mb' fee_recipient = '0x0000000000000000000000000000000000000000' fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' ``` -In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade parameters such as version, view, and propose window. +In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade parameters: - **Version:** the current version targeted for the upgrade. - **View:** Represents the `start_proposing_view` value at which the upgrade is proposed. - **Propose Window:** Refers to the view window between `start_proposing_view` and `stop_proposing_view`. -We currently support only chain config upgrades. The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example, to enable protocol fees or modify other parameters during an upgrade. +The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example, to enable protocol fees or modify other parameters. 
+ + +## Fee upgrade + +A successful Hotshot upgrade results in a new version, which allows us to update the chain config and execute the upgrade if there exists any. Chainconfig includes the fee parameters. The sequencer node has two states: NodeState and ValidatedState. NodeState is an immutable state that contains ResolvableChainConfig, whereas ValidatedState is a mutable state. To make updates to the chain config post-upgrade possible, ResolvableChainConfig is also added to ValidatedState. + +NodeState also includes two additional fields: upgrades and current_version. Functions like Header::new() and ValidatedState::apply_header() include a version parameter, which is used to apply upgrades by comparing this version with current_version in NodeState and fetching the upgrade if available from the upgrades BTreeMap in NodeState. + +In scenarios where nodes join the network or restart, missing the upgrade window may result in their validated_state having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the updated full chain config. \ No newline at end of file From 4d4a8d901696bda5983a3ab62395172dae47a107 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Thu, 11 Jul 2024 16:57:58 +0500 Subject: [PATCH 24/65] highlight types --- doc/upgrades.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/upgrades.md b/doc/upgrades.md index 96bf9230ac..75e281d20f 100644 --- a/doc/upgrades.md +++ b/doc/upgrades.md @@ -65,8 +65,8 @@ The `upgrade.chain_config` table contains the complete set of chain config param ## Fee upgrade -A successful Hotshot upgrade results in a new version, which allows us to update the chain config and execute the upgrade if there exists any. Chainconfig includes the fee parameters. The sequencer node has two states: NodeState and ValidatedState. NodeState is an immutable state that contains ResolvableChainConfig, whereas ValidatedState is a mutable state. To make updates to the chain config post-upgrade possible, ResolvableChainConfig is also added to ValidatedState. +A successful Hotshot upgrade results in a new version, which allows us to update the `ChainConfig` and execute the upgrade if there exists any. `Chainconfig` includes the fee parameters. The sequencer node has two states: `NodeState` and `ValidatedState`. `NodeState` is an immutable state that contains `ResolvableChainConfig` (Enum of `ChainConfig`'s commitment and full `ChainConfig`), whereas `ValidatedState` is a mutable state. To make updates to the chain config post-upgrade possible, `ResolvableChainConfig` is also added to `ValidatedState`. -NodeState also includes two additional fields: upgrades and current_version. Functions like Header::new() and ValidatedState::apply_header() include a version parameter, which is used to apply upgrades by comparing this version with current_version in NodeState and fetching the upgrade if available from the upgrades BTreeMap in NodeState. +`NodeState` also includes two additional fields: `upgrades` and `current_version`. Functions like `Header::new()` and `ValidatedState::apply_header()` include a version parameter, which is used to apply upgrades by comparing this version with `current_version` in NodeState and fetching the upgrade if available from the upgrades BTreeMap in NodeState. In scenarios where nodes join the network or restart, missing the upgrade window may result in their validated_state having only a chain config commitment. 
In such cases, nodes need to catch up from their peers to get the updated full chain config.
\ No newline at end of file

From 649710ec621892fb3d3aca6f672af10064f7519e Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 18:51:54 +0500
Subject: [PATCH 25/65] edit enabling upgrade section

---
 doc/upgrades.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index 75e281d20f..103844bb2a 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -7,9 +7,7 @@ Voting for the `UpgradeProposal` begins before its proposal. Sufficient votes ar
 
 ## Enabling an Upgrade
 
-To enable an upgrade in Hotshot protocol:
-
-When preparing for an upgrade, it is essential to define the base version, the upgrade version, and an upgrade hash:
+To enable an upgrade in Hotshot protocol, it is essential to define the base version, the upgrade version, and an upgrade hash:
 
 - **Base Version:** Represents the current version of the protocol (`0.1` in this example).
 - **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in this example).

From f187c4f6bf0aecad89d2cec6700177bda871accd Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 19:28:23 +0500
Subject: [PATCH 26/65] suggestions

---
 doc/upgrades.md | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index 103844bb2a..b1ee48ca0e 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -3,7 +3,7 @@
 
 Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Upgrade proposal is broadcast separately from the `QuorumProposal`, typically several views in advance of its attachment. The goal is to ensure ample time for nodes to receive and prepare for the upgrade process.
 
-Voting for the `UpgradeProposal` begins before its proposal. Sufficient votes are gathered to form an upgrade certificate. Once obtained, the proposal is broadcasted, and any node that receives it accepts and attaches it to its own `QuorumProposal`.
+After enough votes have been collected on the `UpgradeProposal`, an `UpgradeCertificate` is formed. This is attached to the next `QuorumProposal`, and any node that receives an `UpgradeCertificate` in this way re-attaches it to its own `QuorumProposal` until the network has upgraded, or (in rare cases) we failed to reach consensus on the `UpgradeCertificate`.
 
 ## Enabling an Upgrade
 
@@ -27,10 +27,10 @@ These parameters are fetched from the genesis TOML file and set in Hotshot confi
 
-- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting begins as soon as the node is started.
+- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting is enabled as soon as the node is started.
 - **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
-- **start_proposing_view:** view at which the node proposes an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node proposes as soon as `UpgradeCertificate` is formed.
-- **stop_proposing_view:** The view after which the upgrade proposal is no longer valid. If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again.
+- **start_proposing_view:** the earliest view in which the node can propose an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node will send out an `UpgradeProposal`.
+- **stop_proposing_view:** view after which the node stops proposing an upgrade.
If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again. +- **start_proposing_view:** the earliest view in which the node can propose an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node will send out an `UpgradeProposal`. +- **stop_proposing_view:** view after which the node stops proposing an upgrade. If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again. The window between `start_proposing_view` and `stop_proposing_view` should provide sufficient time for nodes to continue proposing the upgrade until successful. @@ -65,6 +65,8 @@ The `upgrade.chain_config` table contains the complete set of chain config param A successful Hotshot upgrade results in a new version, which allows us to update the `ChainConfig` and execute the upgrade if there exists any. `Chainconfig` includes the fee parameters. The sequencer node has two states: `NodeState` and `ValidatedState`. `NodeState` is an immutable state that contains `ResolvableChainConfig` (Enum of `ChainConfig`'s commitment and full `ChainConfig`), whereas `ValidatedState` is a mutable state. To make updates to the chain config post-upgrade possible, `ResolvableChainConfig` is also added to `ValidatedState`. -`NodeState` also includes two additional fields: `upgrades` and `current_version`. Functions like `Header::new()` and `ValidatedState::apply_header()` include a version parameter, which is used to apply upgrades by comparing this version with `current_version` in NodeState and fetching the upgrade if available from the upgrades BTreeMap in NodeState. +`NodeState` also includes two additional fields: `upgrades` and `current_version`. Functions like `Header::new()` and `ValidatedState::apply_header()` include a version parameter, which is used to apply upgrades by checking if this version is greater than `current_version` in NodeState and fetching the upgrade, if available, from the upgrades BTreeMap in NodeState. -In scenarios where nodes join the network or restart, missing the upgrade window may result in their validated_state having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the updated full chain config. \ No newline at end of file +In scenarios where nodes join the network or restart, missing the upgrade window may result in their ValidatedState having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the full chain config for this chain config commitment. + +Note: For the fee upgrade to work, the builder must have sufficient funds to cover the fees. The Espresso bridge can be used to fund the builder. 
\ No newline at end of file

From 866038c6aa5264e9ae549d860f875eb67901f891 Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 19:54:47 +0500
Subject: [PATCH 27/65] check upgrade version for enabling an upgrade instead
 of curr version

---
 data/genesis/demo.toml | 2 +-
 sequencer/src/lib.rs   | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/data/genesis/demo.toml b/data/genesis/demo.toml
index 73e3498951..5c52dc3861 100644
--- a/data/genesis/demo.toml
+++ b/data/genesis/demo.toml
@@ -12,7 +12,7 @@ fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468'
 timestamp = "1970-01-01T00:00:00Z"
 
 [[upgrade]]
-version = "0.1"
+version = "0.2"
 view = 5
 propose_window = 10
 
diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs
index 81a18bf172..e9206cdd76 100644
--- a/sequencer/src/lib.rs
+++ b/sequencer/src/lib.rs
@@ -200,8 +200,10 @@ pub async fn init_node(
         }
     };
 
-    let version = Ver::version();
-    if let Some(upgrade) = genesis.upgrades.get(&version) {
+    if let Some(upgrade) = genesis
+        .upgrades
+        .get(&<SeqTypes as NodeType>::Upgrade::VERSION)
+    {
         let view = upgrade.start_proposing_view;
         config.config.start_proposing_view = view;
         config.config.stop_proposing_view = view + upgrade.propose_window;

From a527857b1f2a1a804057f21d8f0a4aefc045eb81 Mon Sep 17 00:00:00 2001
From: Abdul Basit 
Date: Thu, 11 Jul 2024 19:56:43 +0500
Subject: [PATCH 28/65] update docs for upgrade::version

---
 doc/upgrades.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index b1ee48ca0e..c01bef0d4f 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -41,7 +41,7 @@ Note: We currently support only chain config upgrade.
 
 ```toml
 [[upgrade]]
-version = "0.1"
+version = "0.2"
 view = 5
 propose_window = 10
 
@@ -54,7 +54,7 @@ fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468'
 ```
 In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade parameters:
 
-- **Version:** the current version targeted for the upgrade.
+- **Version:** the new version after an upgrade is successful.
 - **View:** Represents the `start_proposing_view` value at which the upgrade is proposed.
 - **Propose Window:** Refers to the view window between `start_proposing_view` and `stop_proposing_view`.
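The lookup change above is subtle: the genesis `upgrades` map is keyed by the version being upgraded *to*, so looking it up with the node's current version (`Ver::version()`) can never match a pending upgrade. A minimal sketch of the corrected lookup, with stand-in types rather than the sequencer's real definitions:

```rust
use std::collections::BTreeMap;

// Hypothetical stand-ins for illustration only.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Version { major: u16, minor: u16 }

struct Upgrade { start_proposing_view: u64, propose_window: u64 }

/// Find the pending upgrade by its *target* version, mirroring
/// `genesis.upgrades.get(&<SeqTypes as NodeType>::Upgrade::VERSION)`.
fn pending_upgrade(upgrades: &BTreeMap<Version, Upgrade>, target: Version) -> Option<&Upgrade> {
    upgrades.get(&target)
}

fn main() {
    let mut upgrades = BTreeMap::new();
    upgrades.insert(
        Version { major: 0, minor: 2 },
        Upgrade { start_proposing_view: 5, propose_window: 10 },
    );
    // Looking up the *current* version (0.1) misses the entry; the target
    // version (0.2) finds it, which is what this patch fixes.
    assert!(pending_upgrade(&upgrades, Version { major: 0, minor: 1 }).is_none());
    assert!(pending_upgrade(&upgrades, Version { major: 0, minor: 2 }).is_some());
}
```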
From 6181610b5b5231551aaf427a521c78bbf581d295 Mon Sep 17 00:00:00 2001
From: Gus Gutoski 
Date: Thu, 11 Jul 2024 16:48:02 -0400
Subject: [PATCH 29/65] test: tampered namespace proofs (#1691)

* stub for new tests ns_proof/tests.rs

* ns_proof test: wrong inputs, refactor test

* rename new file after rebase

* add tests for hacked namespace proofs, tidy test

* ns_payload test for negative len txs with abnormal payload byte len as per
  https://github.com/EspressoSystems/espresso-sequencer/pull/1675#issuecomment-2215475611

* add empty namespace, block to test as per
  https://github.com/EspressoSystems/espresso-sequencer/pull/1691#discussion_r1674402812

---
 .../v0/impls/block/full_payload/ns_proof.rs   |   3 +
 .../impls/block/full_payload/ns_proof/test.rs | 158 ++++++++++++++++++
 .../namespace_payload/ns_payload/test.rs      |  77 ++++++++-
 types/src/v0/impls/block/test.rs              |   2 +-
 4 files changed, 238 insertions(+), 2 deletions(-)
 create mode 100644 types/src/v0/impls/block/full_payload/ns_proof/test.rs

diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs
index e378bf422c..75e052cd0e 100644
--- a/types/src/v0/impls/block/full_payload/ns_proof.rs
+++ b/types/src/v0/impls/block/full_payload/ns_proof.rs
@@ -156,3 +156,6 @@ impl NsProof {
         self.ns_payload.export_all_txs(ns_id)
     }
 }
+
+#[cfg(test)]
+mod test;
diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs
new file mode 100644
index 0000000000..d18831a91f
--- /dev/null
+++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs
@@ -0,0 +1,158 @@
+use crate::{v0::impls::block::test::ValidTest, NsProof, Payload};
+use async_compatibility_layer::logging::{setup_backtrace, setup_logging};
+use futures::future;
+use hotshot::traits::BlockPayload;
+use hotshot_types::{
+    traits::EncodeBytes,
+    vid::{vid_scheme, VidSchemeType},
+};
+use jf_vid::{VidDisperse, VidScheme};
+
+#[async_std::test]
+async fn ns_proof() {
+    let test_cases = vec![
+        vec![
+            vec![5, 8, 8],
+            vec![7, 9, 11],
+            vec![10, 5, 8],
+            vec![7, 8, 9],
+            vec![],
+        ],
+        vec![vec![1, 2, 3], vec![4, 5, 6]],
+        vec![],
+    ];
+
+    setup_logging();
+    setup_backtrace();
+
+    let mut rng = jf_utils::test_rng();
+    let mut tests = ValidTest::many_from_tx_lengths(test_cases, &mut rng);
+
+    struct BlockInfo {
+        block: Payload,
+        vid: VidDisperse<VidSchemeType>,
+        ns_proofs: Vec<NsProof>,
+    }
+
+    let blocks: Vec<BlockInfo> = {
+        // compute blocks separately to avoid async error `captured variable
+        // cannot escape `FnMut` closure body` caused by mutable variable `vid`
+        // below.
+        let blocks_only = future::join_all(tests.iter().map(|t| async {
+            Payload::from_transactions(t.all_txs(), &Default::default(), &Default::default())
+                .await
+                .unwrap()
+                .0
+        }))
+        .await;
+
+        let mut vid = vid_scheme(10);
+        blocks_only
+            .into_iter()
+            .map(|block| {
+                let vid = vid.disperse(block.encode()).unwrap();
+                let ns_proofs: Vec<NsProof> = block
+                    .ns_table()
+                    .iter()
+                    .map(|ns_index| NsProof::new(&block, &ns_index, &vid.common).unwrap())
+                    .collect();
+                BlockInfo {
+                    block,
+                    vid,
+                    ns_proofs,
+                }
+            })
+            .collect()
+    };
+
+    // sanity: verify all valid namespace proofs
+    for (
+        BlockInfo {
+            block,
+            vid,
+            ns_proofs,
+        },
+        test,
+    ) in blocks.iter().zip(tests.iter_mut())
+    {
+        for ns_proof in ns_proofs.iter() {
+            let ns_id = block.ns_table().read_ns_id(&ns_proof.ns_index).unwrap();
+            let txs = test
+                .nss
+                .remove(&ns_id)
+                .unwrap_or_else(|| panic!("namespace {} missing from test", ns_id));
+
+            // verify ns_proof
+            let (ns_proof_txs, ns_proof_ns_id) = ns_proof
+                .verify(block.ns_table(), &vid.commit, &vid.common)
+                .unwrap_or_else(|| panic!("namespace {} proof verification failure", ns_id));
+
+            assert_eq!(ns_proof_ns_id, ns_id);
+            assert_eq!(ns_proof_txs, txs);
+        }
+    }
+
+    assert!(blocks.len() >= 2, "need at least 2 test_cases");
+
+    let ns_proof_0_0 = &blocks[0].ns_proofs[0];
+    let ns_table_0 = blocks[0].block.ns_table();
+    let ns_table_1 = blocks[1].block.ns_table();
+    let vid_commit_0 = &blocks[0].vid.commit;
+    let vid_commit_1 = &blocks[1].vid.commit;
+    let vid_common_0 = &blocks[0].vid.common;
+    let vid_common_1 = &blocks[1].vid.common;
+
+    // mix and match ns_table, vid_commit, vid_common
+    {
+        // wrong ns_table
+        assert!(ns_proof_0_0
+            .verify(ns_table_1, vid_commit_0, vid_common_0)
+            .is_none());
+
+        // wrong vid commitment
+        assert!(ns_proof_0_0
+            .verify(ns_table_0, vid_commit_1, vid_common_0)
+            .is_none());
+
+        // wrong vid common
+        assert!(ns_proof_0_0
+            .verify(ns_table_0, vid_commit_0, vid_common_1)
+            .is_none());
+
+        // wrong ns_proof
+        assert!(ns_proof_0_0
+            .verify(ns_table_1, vid_commit_1, vid_common_1)
+            .is_none());
+    }
+
+    // hack the proof
+    {
+        ns_proof_0_0
+            .verify(ns_table_0, vid_commit_0, vid_common_0)
+            .expect("sanity: correct proof should succeed");
+
+        let wrong_ns_index_ns_proof_0_0 = NsProof {
+            ns_index: blocks[0].ns_proofs[1].ns_index.clone(),
+            ..ns_proof_0_0.clone()
+        };
+        assert!(wrong_ns_index_ns_proof_0_0
+            .verify(ns_table_0, vid_commit_0, vid_common_0)
+            .is_none());
+
+        let wrong_ns_payload_ns_proof_0_0 = NsProof {
+            ns_payload: blocks[0].ns_proofs[1].ns_payload.clone(),
+            ..ns_proof_0_0.clone()
+        };
+        assert!(wrong_ns_payload_ns_proof_0_0
+            .verify(ns_table_0, vid_commit_0, vid_common_0)
+            .is_none());
+
+        let wrong_proof_ns_proof_0_0 = NsProof {
+            ns_proof: blocks[0].ns_proofs[1].ns_proof.clone(),
+            ..ns_proof_0_0.clone()
+        };
+        assert!(wrong_proof_ns_proof_0_0
+            .verify(ns_table_0, vid_commit_0, vid_common_0)
+            .is_none());
+    }
+}
diff --git a/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs b/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs
index abc59b74f5..d84c830e2e 100644
--- a/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs
+++ b/types/src/v0/impls/block/namespace_payload/ns_payload/test.rs
@@ -163,6 +163,81 @@ fn negative_len_txs() {
     }
 }
 
+#[test]
+fn negative_len_txs_and_abnormal_payload_len() {
+    setup_logging();
+    setup_backtrace();
+    let ns_id = NamespaceId::from(69_u32); // dummy
+
+    // 1 negative-length tx in the middle, overlapping tx bytes
+    // final tx partly truncated by short
payload + { + let ns_payload = NsPayloadOwned::entries_body(&[20, 10, 30], 25); + let txs = ns_payload.export_all_txs(&ns_id); + assert_eq!(txs.len(), 3); + assert_eq!( + txs[0].payload(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ); + assert!(txs[1].payload().is_empty()); + assert_eq!( + txs[2].payload(), + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + ); + } + + // 1 negative-length tx in the middle, overlapping tx bytes + // large payload has wasted space + { + let ns_payload = NsPayloadOwned::entries_body(&[20, 10, 30], 40); + let txs = ns_payload.export_all_txs(&ns_id); + assert_eq!(txs.len(), 3); + assert_eq!( + txs[0].payload(), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ); + assert!(txs[1].payload().is_empty()); + assert_eq!( + txs[2].payload(), + [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29] + ); + } + + // all txs negative-length except the first + // tx partly truncated by short payload + { + let ns_payload = NsPayloadOwned::entries_body(&[30, 20, 10], 25); + let txs = ns_payload.export_all_txs(&ns_id); + assert_eq!(txs.len(), 3); + assert_eq!( + txs[0].payload(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24 + ] + ); + assert!(txs[1].payload().is_empty()); + assert!(txs[2].payload().is_empty()); + } + + // all txs negative-length except the first + // large payload has wasted space + { + let ns_payload = NsPayloadOwned::entries_body(&[30, 20, 10], 40); + let txs = ns_payload.export_all_txs(&ns_id); + assert_eq!(txs.len(), 3); + assert_eq!( + txs[0].payload(), + [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29 + ] + ); + assert!(txs[1].payload().is_empty()); + assert!(txs[2].payload().is_empty()); + } +} + #[test] fn tx_table_header() { setup_logging(); @@ -170,7 +245,7 @@ fn tx_table_header() { let ns_id = NamespaceId::from(69_u32); // dummy // header declares 1 fewer txs, tx table bytes appear in tx payloads, wasted - // pßayload bytes + // payload bytes { let ns_payload = NsPayloadOwned::header_entries_body(2, &[10, 20, 30], 30); let txs = ns_payload.export_all_txs(&ns_id); diff --git a/types/src/v0/impls/block/test.rs b/types/src/v0/impls/block/test.rs index f48a433b7d..d919c44340 100644 --- a/types/src/v0/impls/block/test.rs +++ b/types/src/v0/impls/block/test.rs @@ -165,7 +165,7 @@ async fn enforce_max_block_size() { // TODO lots of infra here that could be reused in other tests. 
 pub struct ValidTest {
-    nss: BTreeMap<NamespaceId, Vec<Transaction>>,
+    pub nss: BTreeMap<NamespaceId, Vec<Transaction>>,
 }
 
 impl ValidTest {

From f58a443d2cdda0c52262310bf160c21b8c68c00c Mon Sep 17 00:00:00 2001
From: Sishan Long 
Date: Thu, 11 Jul 2024 15:12:38 -0700
Subject: [PATCH 30/65] add transactions_per_batch and avg_transaction_size

---
 sequencer/src/bin/submit-transactions.rs | 51 +++++++++++++-----------
 1 file changed, 28 insertions(+), 23 deletions(-)

diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs
index fa3c366ac0..4aa23b8999 100644
--- a/sequencer/src/bin/submit-transactions.rs
+++ b/sequencer/src/bin/submit-transactions.rs
@@ -223,6 +223,7 @@ async fn main() {
     #[cfg(feature = "benchmarking")]
     {
         num_successful_commits += 1;
+        println!("current num_successful_commits = {num_successful_commits}");
         if !has_started && num_successful_commits >= start_round {
             has_started = true;
             start = Instant::now();
@@ -248,9 +249,7 @@ async fn main() {
         tracing::info!("average latency: {:?}", total_latency / total_transactions);
         #[cfg(feature = "benchmarking")]
         {
-            if !benchmark_finish
-                && (start_round..=end_round).contains(&num_successful_commits)
-            {
+            if has_started && !benchmark_finish {
                 benchmark_minimum_latency = if total_transactions == 0 {
                     latency
                 } else {
@@ -277,10 +276,16 @@ async fn main() {
 
     #[cfg(feature = "benchmarking")]
     if !benchmark_finish && num_successful_commits > end_round {
+        let transactions_per_batch = format!(
+            "{}~{}",
+            (opt.jobs as u64 * opt.min_batch_size),
+            (opt.jobs as u64 * opt.max_batch_size),
+        );
         let benchmark_average_latency = benchmark_total_latency / benchmark_total_transactions;
-        let total_time_elapsed = start.elapsed(); // in seconds
-        let throughput_bytes_per_sec =
-            (total_throughput as u64) / std::cmp::max(total_time_elapsed.as_secs(), 1u64);
+        let avg_transaction_size = total_throughput as u32 / benchmark_total_transactions;
+        let total_time_elapsed_in_sec = start.elapsed(); // in seconds
+        let avg_throughput_bytes_per_sec = (total_throughput as u64)
+            / std::cmp::max(total_time_elapsed_in_sec.as_secs(), 1u64);
         // Open the CSV file in append mode
         let results_csv_file = OpenOptions::new()
             .create(true)
@@ -289,29 +294,29 @@ async fn main() {
             .unwrap();
         // Open a file for writing
         let mut wtr = Writer::from_writer(results_csv_file);
+        let mut pub_or_priv_pool = "private_pool_avg_latency_in_sec";
         if opt.use_public_mempool() {
-            let _ = wtr.write_record([
-                "public_pool_avg_latency_in_sec",
-                "minimum_latency_in_sec",
-                "maximum_latency_in_sec",
-                "throughput_bytes_per_sec",
-                "total_time_elapsed",
-            ]);
-        } else {
-            let _ = wtr.write_record([
-                "private_pool_avg_latency_in_sec",
-                "minimum_latency_in_sec",
-                "maximum_latency_in_sec",
-                "throughput_bytes_per_sec",
-                "total_time_elapsed",
-            ]);
+            pub_or_priv_pool = "public_pool_avg_latency_in_sec";
         }
+        let _ = wtr.write_record([
+            "transaction_per_batch",
+            pub_or_priv_pool,
+            "minimum_latency_in_sec",
+            "maximum_latency_in_sec",
+            "avg_throughput_bytes_per_sec",
+            "total_transactions",
+            "avg_transaction_size",
+            "total_time_elapsed_in_sec",
+        ]);
         let _ = wtr.write_record(&[
+            transactions_per_batch,
             benchmark_average_latency.as_secs().to_string(),
             benchmark_minimum_latency.as_secs().to_string(),
             benchmark_maximum_latency.as_secs().to_string(),
-            throughput_bytes_per_sec.to_string(),
-            total_time_elapsed.as_secs().to_string(),
+            avg_throughput_bytes_per_sec.to_string(),
+            benchmark_total_transactions.to_string(),
+            avg_transaction_size.to_string(),
+            total_time_elapsed_in_sec.as_secs().to_string(),
         ]);
         let _ =
wtr.flush(); println!( From 8dfe8933a5a21331ead1fd296e2ddb5d0a2609d9 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 11 Jul 2024 15:40:11 -0700 Subject: [PATCH 31/65] add block_range --- sequencer/src/bin/submit-transactions.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 7de488fcc4..2e2988b13d 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -187,11 +187,11 @@ async fn main() { // Keep track of the latency after warm up for benchmarking #[cfg(feature = "benchmarking")] - let mut num_successful_commits = 0; + let mut num_block = 0; #[cfg(feature = "benchmarking")] - let start_round = 20; + let start_round = 50; #[cfg(feature = "benchmarking")] - let end_round = 120; + let end_round = 150; #[cfg(feature = "benchmarking")] let mut benchmark_total_latency = Duration::default(); #[cfg(feature = "benchmarking")] @@ -221,8 +221,8 @@ async fn main() { tracing::debug!("got block {}", block.height()); #[cfg(feature = "benchmarking")] { - num_successful_commits += 1; - if !has_started && num_successful_commits >= start_round { + num_block += 1; + if !has_started && num_block >= start_round { has_started = true; start = Instant::now(); } @@ -273,7 +273,8 @@ async fn main() { } #[cfg(feature = "benchmarking")] - if !benchmark_finish && num_successful_commits > end_round { + if !benchmark_finish && num_block > end_round { + let block_range = format!("{}~{}", start_round, end_round,); let transaction_size_range = format!("{}~{}", opt.min_size, opt.max_size,); let transactions_per_batch_range = format!( "{}~{}", @@ -298,6 +299,7 @@ async fn main() { pub_or_priv_pool = "public_pool_avg_latency_in_sec"; } let _ = wtr.write_record([ + "block_range", "transaction_size_range", "transaction_per_batch_range", pub_or_priv_pool, @@ -309,6 +311,7 @@ async fn main() { "total_time_elapsed_in_sec", ]); let _ = wtr.write_record(&[ + block_range, transaction_size_range, transactions_per_batch_range, benchmark_average_latency.as_secs().to_string(), From 0194246423464903621349ce06d788ff90801a35 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 11 Jul 2024 16:37:51 -0700 Subject: [PATCH 32/65] add total_nodes --- docker-compose.yaml | 2 ++ sequencer/src/bin/submit-transactions.rs | 16 +++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index ec1339e59a..a3084d4e5c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -446,6 +446,7 @@ services: ports: - "$ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT:8080" environment: + - ESPRESSO_ORCHESTRATOR_NUM_NODES - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080 - ESPRESSO_SUBMIT_TRANSACTIONS_DELAY - ESPRESSO_SEQUENCER_URL @@ -465,6 +466,7 @@ services: ports: - "$ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT:8080" environment: + - ESPRESSO_ORCHESTRATOR_NUM_NODES - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080 - ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL=http://permissionless-builder:$ESPRESSO_BUILDER_SERVER_PORT/txn_submit - ESPRESSO_SUBMIT_TRANSACTIONS_DELAY diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 2e2988b13d..67b5603c19 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -29,6 +29,8 @@ use vbs::version::StaticVersionType; use csv::Writer; #[cfg(feature = "benchmarking")] use std::fs::OpenOptions; +#[cfg(feature = "benchmarking")] 
+use std::num::NonZeroUsize; /// Submit random transactions to an Espresso Sequencer. #[derive(Clone, Debug, Parser)] @@ -128,6 +130,10 @@ struct Options { /// URL of the query service. #[clap(env = "ESPRESSO_SEQUENCER_URL")] url: Url, + + #[cfg(feature = "benchmarking")] + #[clap(short, long, env = "ESPRESSO_ORCHESTRATOR_NUM_NODES")] + num_nodes: NonZeroUsize, } impl Options { @@ -294,15 +300,17 @@ async fn main() { .unwrap(); // Open a file for writing let mut wtr = Writer::from_writer(results_csv_file); - let mut pub_or_priv_pool = "private_pool_avg_latency_in_sec"; + let mut pub_or_priv_pool = "private"; if opt.use_public_mempool() { - pub_or_priv_pool = "public_pool_avg_latency_in_sec"; + pub_or_priv_pool = "public"; } let _ = wtr.write_record([ + "total_nodes", "block_range", "transaction_size_range", "transaction_per_batch_range", - pub_or_priv_pool, + "pub_or_priv_pool", + "avg_latency_in_sec", "minimum_latency_in_sec", "maximum_latency_in_sec", "avg_throughput_bytes_per_sec", @@ -311,9 +319,11 @@ async fn main() { "total_time_elapsed_in_sec", ]); let _ = wtr.write_record(&[ + opt.num_nodes.to_string(), block_range, transaction_size_range, transactions_per_batch_range, + pub_or_priv_pool.to_string(), benchmark_average_latency.as_secs().to_string(), benchmark_minimum_latency.as_secs().to_string(), benchmark_maximum_latency.as_secs().to_string(), From a8a4a893337bdaec9b6dc84bb2e1f41b47977d96 Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 11 Jul 2024 17:20:28 -0700 Subject: [PATCH 33/65] parameterize start_block and end_block --- .env | 4 ++++ docker-compose.yaml | 4 ++++ sequencer/api/public-env-vars.toml | 2 ++ sequencer/src/bin/submit-transactions.rs | 23 ++++++++++++++++------- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/.env b/.env index 63e027764a..07dd85a9c1 100644 --- a/.env +++ b/.env @@ -114,6 +114,10 @@ ESPRESSO_SUBMIT_TRANSACTIONS_DELAY=2s ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT=24010 ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT=24020 +# Benchmarks +ESPRESSO_BENCH_START_BLOCK=50 +ESPRESSO_BENCH_END_BLOCK=150 + # Query service fetch requests rate limit ESPRESSO_SEQUENCER_FETCH_RATE_LIMIT=25 diff --git a/docker-compose.yaml b/docker-compose.yaml index a3084d4e5c..936db67f69 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -449,6 +449,8 @@ services: - ESPRESSO_ORCHESTRATOR_NUM_NODES - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080 - ESPRESSO_SUBMIT_TRANSACTIONS_DELAY + - ESPRESSO_BENCH_START_BLOCK + - ESPRESSO_BENCH_END_BLOCK - ESPRESSO_SEQUENCER_URL - RUST_LOG - RUST_LOG_FORMAT @@ -470,6 +472,8 @@ services: - ESPRESSO_SUBMIT_TRANSACTIONS_PORT=8080 - ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL=http://permissionless-builder:$ESPRESSO_BUILDER_SERVER_PORT/txn_submit - ESPRESSO_SUBMIT_TRANSACTIONS_DELAY + - ESPRESSO_BENCH_START_BLOCK + - ESPRESSO_BENCH_END_BLOCK - ESPRESSO_SEQUENCER_URL - RUST_LOG - RUST_LOG_FORMAT diff --git a/sequencer/api/public-env-vars.toml b/sequencer/api/public-env-vars.toml index e37fcb1d6f..85ab45d161 100644 --- a/sequencer/api/public-env-vars.toml +++ b/sequencer/api/public-env-vars.toml @@ -102,6 +102,8 @@ variables = [ "ESPRESSO_SUBMIT_TRANSACTIONS_PORT", "ESPRESSO_SUBMIT_TRANSACTIONS_SLOW_TRANSACTION_WARNING_THRESHOLD", "ESPRESSO_SUBMIT_TRANSACTIONS_SUBMIT_URL", + "ESPRESSO_BENCH_START_BLOCK", + "ESPRESSO_BENCH_END_BLOCK", "FROM", "TO" ] diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 67b5603c19..fc26d0ef75 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ 
b/sequencer/src/bin/submit-transactions.rs @@ -131,9 +131,20 @@ struct Options { #[clap(env = "ESPRESSO_SEQUENCER_URL")] url: Url, + /// Relay num_nodes for benchmark results output #[cfg(feature = "benchmarking")] #[clap(short, long, env = "ESPRESSO_ORCHESTRATOR_NUM_NODES")] num_nodes: NonZeroUsize, + + /// The first block that benchmark starts counting in + #[cfg(feature = "benchmarking")] + #[clap(short, long, env = "ESPRESSO_BENCH_START_BLOCK")] + benchmark_start_block: NonZeroUsize, + + /// The final block that benchmark counts in + #[cfg(feature = "benchmarking")] + #[clap(short, long, env = "ESPRESSO_BENCH_END_BLOCK")] + benchmark_end_block: NonZeroUsize, } impl Options { @@ -195,10 +206,6 @@ async fn main() { #[cfg(feature = "benchmarking")] let mut num_block = 0; #[cfg(feature = "benchmarking")] - let start_round = 50; - #[cfg(feature = "benchmarking")] - let end_round = 150; - #[cfg(feature = "benchmarking")] let mut benchmark_total_latency = Duration::default(); #[cfg(feature = "benchmarking")] let mut benchmark_minimum_latency = Duration::default(); @@ -228,7 +235,7 @@ async fn main() { #[cfg(feature = "benchmarking")] { num_block += 1; - if !has_started && num_block >= start_round { + if !has_started && (num_block as usize) >= opt.benchmark_start_block.into() { has_started = true; start = Instant::now(); } @@ -279,8 +286,8 @@ async fn main() { } #[cfg(feature = "benchmarking")] - if !benchmark_finish && num_block > end_round { - let block_range = format!("{}~{}", start_round, end_round,); + if !benchmark_finish && (num_block as usize) >= opt.benchmark_end_block.into() { + let block_range = format!("{}~{}", opt.benchmark_start_block, opt.benchmark_end_block,); let transaction_size_range = format!("{}~{}", opt.min_size, opt.max_size,); let transactions_per_batch_range = format!( "{}~{}", @@ -306,6 +313,7 @@ async fn main() { } let _ = wtr.write_record([ "total_nodes", + "da_committee_size", "block_range", "transaction_size_range", "transaction_per_batch_range", @@ -319,6 +327,7 @@ async fn main() { "total_time_elapsed_in_sec", ]); let _ = wtr.write_record(&[ + opt.num_nodes.to_string(), opt.num_nodes.to_string(), block_range, transaction_size_range, From 7cbdaabccddcd63881879fc48342c01aa4f42dda Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 05:46:52 +0500 Subject: [PATCH 34/65] fix upgrade: set time parameters --- data/genesis/demo.toml | 5 +- sequencer/src/api.rs | 52 +++---- sequencer/src/genesis.rs | 204 ++++++++++++++++++++++------ sequencer/src/lib.rs | 45 +++--- types/src/v0/v0_1/instance_state.rs | 34 ++--- 5 files changed, 231 insertions(+), 109 deletions(-) diff --git a/data/genesis/demo.toml b/data/genesis/demo.toml index 5c52dc3861..ec85c2bf65 100644 --- a/data/genesis/demo.toml +++ b/data/genesis/demo.toml @@ -13,9 +13,8 @@ timestamp = "1970-01-01T00:00:00Z" [[upgrade]] version = "0.2" -view = 5 -propose_window = 10 - +start_proposing_view = 5 +stop_proposing_view = 15 [upgrade.chain_config] chain_id = 999999999 diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 3674abf4fd..2408e007cc 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -351,7 +351,7 @@ impl, Ver: StaticVersionType + 'static, P: Sequencer #[cfg(any(test, feature = "testing"))] pub mod test_helpers { - use std::{collections::BTreeMap, time::Duration}; + use std::time::Duration; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use async_std::task::sleep; @@ -360,7 +360,7 @@ pub mod test_helpers { use espresso_types::{ 
mock::MockStateCatchup, v0::traits::{PersistenceOptions, StateCatchup}, - NamespaceId, Upgrade, ValidatedState, + NamespaceId, ValidatedState, }; use ethers::{prelude::Address, utils::Anvil}; use futures::{ @@ -378,7 +378,6 @@ pub mod test_helpers { use portpicker::pick_unused_port; use surf_disco::Client; use tide_disco::error::ServerError; - use vbs::version::Version; use super::*; use crate::{ @@ -503,15 +502,6 @@ pub mod test_helpers { } } - #[derive(Clone, Debug)] - pub struct TestNetworkUpgrades { - pub upgrades: BTreeMap, - pub start_proposing_view: u64, - pub stop_proposing_view: u64, - pub start_voting_view: u64, - pub stop_voting_view: u64, - } - impl TestNetwork { pub async fn new( cfg: TestNetworkConfig<{ NUM_NODES }, P, C>, @@ -529,7 +519,7 @@ pub mod test_helpers { .map(|(i, (state, persistence, catchup))| { let opt = cfg.api_config.clone(); let cfg = &cfg.network_config; - let upgrades_map = cfg.upgrades().map(|e| e.upgrades).unwrap_or_default(); + let upgrades_map = cfg.upgrades(); async move { if i == 0 { opt.serve( @@ -1049,7 +1039,8 @@ mod test { use committable::{Commitment, Committable}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; use espresso_types::{ - mock::MockStateCatchup, FeeAccount, FeeAmount, Header, Upgrade, UpgradeType, ValidatedState, + mock::MockStateCatchup, v0_1::UpgradeMode, FeeAccount, FeeAmount, Header, Upgrade, + UpgradeType, ValidatedState, }; use ethers::utils::Anvil; use futures::{ @@ -1063,14 +1054,14 @@ mod test { }; use hotshot_types::{ event::LeafInfo, - traits::{metrics::NoMetrics, node_implementation::ConsensusTime}, + traits::{metrics::NoMetrics, node_implementation::{ConsensusTime, NodeType}}, }; use jf_merkle_tree::prelude::{MerkleProof, Sha3Node}; use portpicker::pick_unused_port; use surf_disco::Client; use test_helpers::{ catchup_test_helper, state_signature_test_helper, status_test_helper, submit_test_helper, - TestNetwork, TestNetworkConfigBuilder, TestNetworkUpgrades, + TestNetwork, TestNetworkConfigBuilder, }; use tide_disco::{app::AppHealth, error::ServerError, healthcheck::HealthStatus}; use vbs::version::Version; @@ -1446,28 +1437,27 @@ mod test { base_fee: 1.into(), ..Default::default() }; - let mut map = std::collections::BTreeMap::new(); - let start_proposing_view = 5; - let propose_window = 10; - map.insert( - Version { major: 0, minor: 2 }, + let mut upgrades = std::collections::BTreeMap::new(); + + upgrades.insert( + ::Upgrade::VERSION, Upgrade { - start_proposing_view, - propose_window, + start_voting_time: None, + stop_voting_time: None, + start_proposing_time: 0, + stop_proposing_time: u64::MAX, + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + mode: UpgradeMode::View, upgrade_type: UpgradeType::ChainConfig { chain_config: chain_config_upgrade, }, }, ); - let stop_voting_view = 100; - let upgrades = TestNetworkUpgrades { - upgrades: map, - start_proposing_view, - stop_proposing_view: start_proposing_view + propose_window, - start_voting_view: 1, - stop_voting_view, - }; + let stop_voting_view = u64::MAX; const NUM_NODES: usize = 5; let config = TestNetworkConfigBuilder::::with_num_nodes() diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 8ad111a66a..5d21622da1 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -51,26 +51,71 @@ mod upgrade_serialization { use std::{collections::BTreeMap, fmt}; - use espresso_types::{Upgrade, UpgradeType}; + use espresso_types::{v0_1::UpgradeMode, Upgrade, UpgradeType}; use 
serde::{ de::{SeqAccess, Visitor}, ser::SerializeSeq, - Deserialize, Deserializer, Serializer, + Deserialize, Deserializer, Serialize, Serializer, }; use vbs::version::Version; + #[derive(Clone, Debug, Deserialize, Serialize)] + pub struct UpgradeTimeParams { + pub start_proposing_time: u64, + pub stop_proposing_time: u64, + pub start_voting_time: Option, + pub stop_voting_time: Option, + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub struct UpgradeViewParams { + pub start_proposing_view: u64, + pub stop_proposing_view: u64, + pub start_voting_view: Option, + pub stop_voting_view: Option, + } + + /// Represents the specific type of upgrade. + #[derive(Clone, Debug, Deserialize, Serialize)] + #[serde(untagged)] + pub enum UpgradeParameters { + Time(UpgradeTimeParams), + View(UpgradeViewParams), + } + + #[derive(Deserialize)] + struct UpgradeFields { + version: String, + #[serde(flatten)] + params: UpgradeParameters, + #[serde(flatten)] + upgrade_type: UpgradeType, + } + pub fn serialize(map: &BTreeMap, serializer: S) -> Result where S: Serializer, { let mut seq = serializer.serialize_seq(Some(map.len()))?; for (version, upgrade) in map { - seq.serialize_element(&( - version.to_string(), - upgrade.start_proposing_view, - upgrade.propose_window, - upgrade.upgrade_type.clone(), - ))?; + match upgrade.mode { + UpgradeMode::View => seq.serialize_element(&( + version.to_string(), + upgrade.start_proposing_view, + upgrade.stop_proposing_view, + upgrade.start_voting_view, + upgrade.stop_voting_view, + upgrade.upgrade_type.clone(), + ))?, + UpgradeMode::Time => seq.serialize_element(&( + version.to_string(), + upgrade.start_proposing_time, + upgrade.stop_proposing_time, + upgrade.start_voting_time, + upgrade.stop_voting_time, + upgrade.upgrade_type.clone(), + ))?, + } } seq.end() } @@ -94,15 +139,6 @@ mod upgrade_serialization { { let mut map = BTreeMap::new(); - #[derive(Deserialize)] - struct UpgradeFields { - version: String, - view: u64, - propose_window: u64, - #[serde(flatten)] - upgrade_type: UpgradeType, - } - while let Some(fields) = seq.next_element::()? 
{ // add try_from in Version let version: Vec<_> = fields.version.split('.').collect(); @@ -112,14 +148,38 @@ mod upgrade_serialization { minor: version[1].parse().expect("invalid version"), }; - map.insert( - version, - Upgrade { - start_proposing_view: fields.view, - propose_window: fields.propose_window, - upgrade_type: fields.upgrade_type, - }, - ); + match fields.params { + UpgradeParameters::Time(t) => map.insert( + version, + Upgrade { + start_voting_time: t.start_voting_time, + stop_voting_time: t.stop_voting_time, + start_proposing_time: t.start_proposing_time, + stop_proposing_time: t.stop_proposing_time, + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 0, + stop_proposing_view: u64::MAX, + mode: UpgradeMode::Time, + upgrade_type: fields.upgrade_type, + }, + ), + UpgradeParameters::View(v) => map.insert( + version, + Upgrade { + start_voting_time: None, + stop_voting_time: None, + start_proposing_time: 0, + stop_proposing_time: u64::MAX, + start_voting_view: v.start_voting_view, + stop_voting_view: v.stop_voting_view, + start_proposing_view: v.start_proposing_view, + stop_proposing_view: v.stop_proposing_view, + mode: UpgradeMode::View, + upgrade_type: fields.upgrade_type, + }, + ), + }; } Ok(map) @@ -148,7 +208,7 @@ impl Genesis { #[cfg(test)] mod test { - use espresso_types::{L1BlockInfo, Timestamp}; + use espresso_types::{v0_1::UpgradeMode, L1BlockInfo, Timestamp, UpgradeType}; use ethers::prelude::{Address, H160, H256}; use sequencer_utils::ser::FromStringOrInteger; use toml::toml; @@ -179,18 +239,6 @@ mod test { number = 64 timestamp = "0x123def" hash = "0x80f5dd11f2bdda2814cb1ad94ef30a47de02cf28ad68c89e104c00c4e51bb7a5" - - [[upgrade]] - version = "1.0" - view = 1 - propose_window = 10 - - [upgrade.chain_config] - chain_id = 12345 - max_block_size = 30000 - base_fee = 1 - fee_recipient = "0x0000000000000000000000000000000000000000" - fee_contract = "0x0000000000000000000000000000000000000000" } .to_string(); @@ -335,4 +383,84 @@ mod test { } ) } + + #[test] + fn test_genesis_toml_upgrade() { + // without optional fields + // with view settings + let toml = toml! 
{ + [stake_table] + capacity = 10 + + [chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + + [header] + timestamp = 123456 + + [accounts] + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f" = 100000 + "0x0000000000000000000000000000000000000000" = 42 + + [l1_finalized] + number = 64 + timestamp = "0x123def" + hash = "0x80f5dd11f2bdda2814cb1ad94ef30a47de02cf28ad68c89e104c00c4e51bb7a5" + + [[upgrade]] + version = "0.2" + start_proposing_view = 1 + stop_proposing_view = 10 + + [upgrade.chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + } + .to_string(); + + let genesis: Genesis = toml::from_str(&toml).unwrap_or_else(|err| panic!("{err:#}")); + + let (version, genesis_upgrade) = genesis.upgrades.last_key_value().unwrap(); + + assert_eq!(*version, Version { major: 0, minor: 2 }); + + let upgrade = Upgrade { + start_voting_time: None, + stop_voting_time: None, + start_proposing_time: 0, + stop_proposing_time: u64::MAX, + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + mode: UpgradeMode::View, + upgrade_type: UpgradeType::ChainConfig { + chain_config: genesis.chain_config, + }, + }; + + assert_eq!(*genesis_upgrade, upgrade); + + let mut upgrades = BTreeMap::new(); + upgrades.insert(Version { major: 0, minor: 2 }, upgrade); + + let genesis = Genesis { + chain_config: genesis.chain_config, + stake_table: genesis.stake_table, + accounts: genesis.accounts, + l1_finalized: genesis.l1_finalized, + header: genesis.header, + upgrades, + }; + + let toml_from_genesis = toml::to_string(&genesis).unwrap(); + assert_eq!(toml, toml_from_genesis); + } } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index e9206cdd76..f50092bf20 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -204,11 +204,15 @@ pub async fn init_node( .upgrades .get(&::Upgrade::VERSION) { - let view = upgrade.start_proposing_view; - config.config.start_proposing_view = view; - config.config.stop_proposing_view = view + upgrade.propose_window; - config.config.start_voting_view = 1; - config.config.stop_voting_view = u64::MAX; + config.config.start_proposing_view = upgrade.start_proposing_view; + config.config.stop_proposing_view = upgrade.stop_proposing_view; + config.config.start_voting_view = upgrade.start_voting_view.unwrap_or(0); + config.config.stop_voting_view = upgrade.stop_voting_view.unwrap_or(u64::MAX); + + config.config.start_proposing_time = upgrade.start_proposing_time; + config.config.stop_proposing_time = upgrade.stop_proposing_time; + config.config.start_voting_time = upgrade.start_voting_time.unwrap_or(0); + config.config.stop_voting_time = upgrade.stop_voting_time.unwrap_or(u64::MAX); } // If the `Libp2p` bootstrap nodes were supplied via the command line, override those @@ -356,7 +360,6 @@ pub fn empty_builder_commitment() -> BuilderCommitment { pub mod testing { use std::{collections::HashMap, time::Duration}; - use api::test_helpers::TestNetworkUpgrades; use committable::Committable; use espresso_types::{ eth_signature_key::EthKeyPair, @@ -423,7 +426,7 @@ pub mod testing { l1_url: Url, state_relay_url: Option, builder_port: Option, - upgrades: Option, + upgrades: BTreeMap, } impl TestConfigBuilder { @@ -442,21 +445,21 @@ pub mod testing { self } - pub fn 
upgrades(mut self, upgrades: TestNetworkUpgrades) -> Self { - self.upgrades = Some(upgrades); + pub fn upgrades(mut self, upgrades: BTreeMap) -> Self { + self.upgrades = upgrades; self } pub fn build(mut self) -> TestConfig { - if let Some(upgrades) = &self.upgrades { - self.config.start_proposing_view = upgrades.start_proposing_view; - self.config.stop_proposing_view = upgrades.stop_proposing_view; - self.config.start_voting_view = upgrades.start_voting_view; - self.config.stop_voting_view = upgrades.stop_voting_view; - self.config.start_proposing_time = 0; - self.config.stop_proposing_time = u64::MAX; - self.config.start_voting_time = 0; - self.config.stop_voting_time = u64::MAX; + if let Some(upgrade) = self.upgrades.get(&::Upgrade::VERSION) { + self.config.start_proposing_view = upgrade.start_proposing_view; + self.config.stop_proposing_view = upgrade.stop_proposing_view; + self.config.start_voting_view = upgrade.start_voting_view.unwrap_or(0); + self.config.stop_voting_view = upgrade.stop_voting_view.unwrap_or(u64::MAX); + self.config.start_proposing_time = upgrade.start_proposing_time; + self.config.stop_proposing_time = upgrade.stop_proposing_time; + self.config.start_voting_time = upgrade.start_voting_time.unwrap_or(0); + self.config.stop_voting_time = upgrade.stop_voting_time.unwrap_or(u64::MAX); } TestConfig { @@ -541,7 +544,7 @@ pub mod testing { l1_url: "http://localhost:8545".parse().unwrap(), state_relay_url: None, builder_port: None, - upgrades: None, + upgrades: Default::default(), } } } @@ -555,7 +558,7 @@ pub mod testing { l1_url: Url, state_relay_url: Option, builder_port: Option, - upgrades: Option, + upgrades: BTreeMap, } impl TestConfig { @@ -579,7 +582,7 @@ pub mod testing { self.l1_url.clone() } - pub fn upgrades(&self) -> Option { + pub fn upgrades(&self) -> BTreeMap { self.upgrades.clone() } diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index 82f75ba4a7..facf203721 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -11,7 +11,7 @@ use vbs::version::Version; use super::l1::L1Client; /// Represents the specific type of upgrade. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(untagged)] #[serde(rename_all = "snake_case")] pub enum UpgradeType { @@ -21,22 +21,18 @@ pub enum UpgradeType { } /// Represents the upgrade config including the type of upgrade and upgrade parameters for hotshot config. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct Upgrade { - /// The view at which the upgrade is proposed. - /// - /// Note: Voting for the proposal begins before the upgrade is formally proposed. - /// In our implementation, `start_proposing_view` is set to `1`` for all upgrades, - /// so if an upgrade is planned then the voting starts as soon as node is started. - #[serde(rename = "view")] + pub start_voting_time: Option, + pub stop_voting_time: Option, + pub start_proposing_time: u64, + pub stop_proposing_time: u64, + pub start_voting_view: Option, + pub stop_voting_view: Option, pub start_proposing_view: u64, - - /// The time window during which the upgrade can be proposed. - /// - /// This parameter is used for setting the `stop_propose_window_view`. - /// `stop_proposing_view` is calculated as `start_proposing_view + propose_window`. 
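The fields removed in this hunk expressed the proposal period as a start view plus a `propose_window`, with the stop view derived as `start_proposing_view + propose_window`; the replacement schema stores explicit start/stop bounds for both proposing and voting. Translating an old-style config into the new bounds is a one-liner; a sketch with a hypothetical helper, using saturating arithmetic so a window ending past `u64::MAX` clamps rather than overflows:

    /// Convert the legacy (view, propose_window) pair into explicit bounds.
    fn migrate_propose_window(start_proposing_view: u64, propose_window: u64) -> (u64, u64) {
        (
            start_proposing_view,
            start_proposing_view.saturating_add(propose_window),
        )
    }
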
- pub propose_window: u64, - + pub stop_proposing_view: u64, + // View or time based + pub mode: UpgradeMode, /// The specific type of upgrade configuration. /// /// Currently, we only support chain configuration upgrades (`upgrade.chain_config` in genesis toml file). @@ -44,6 +40,12 @@ pub struct Upgrade { pub upgrade_type: UpgradeType, } +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum UpgradeMode { + View, + Time, +} + /// Represents the immutable state of a node. /// /// For mutable state, use `ValidatedState`. @@ -71,6 +73,6 @@ pub struct NodeState { /// This version is checked to determine if an upgrade is planned, /// and which version variant for versioned types /// to use in functions such as genesis. - /// (example: genesis returns V2 Header if version is 0.2) + /// (example: genesis returns V2 Header if version is 0.2) pub current_version: Version, } From 6634e37065fe59d76f40929fb348526259d0d28d Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Thu, 11 Jul 2024 17:50:43 -0700 Subject: [PATCH 35/65] remove benchmark calc from orchestrator --- sequencer/src/bin/submit-transactions.rs | 8 +- sequencer/src/context.rs | 136 ----------------------- 2 files changed, 4 insertions(+), 140 deletions(-) diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index fc26d0ef75..60b7280118 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -288,7 +288,7 @@ async fn main() { #[cfg(feature = "benchmarking")] if !benchmark_finish && (num_block as usize) >= opt.benchmark_end_block.into() { let block_range = format!("{}~{}", opt.benchmark_start_block, opt.benchmark_end_block,); - let transaction_size_range = format!("{}~{}", opt.min_size, opt.max_size,); + let transaction_size_range_in_bytes = format!("{}~{}", opt.min_size, opt.max_size,); let transactions_per_batch_range = format!( "{}~{}", (opt.jobs as u64 * opt.min_batch_size), @@ -315,7 +315,7 @@ async fn main() { "total_nodes", "da_committee_size", "block_range", - "transaction_size_range", + "transaction_size_range_in_bytes", "transaction_per_batch_range", "pub_or_priv_pool", "avg_latency_in_sec", @@ -323,14 +323,14 @@ async fn main() { "maximum_latency_in_sec", "avg_throughput_bytes_per_sec", "total_transactions", - "avg_transaction_size_bytes", + "avg_transaction_size_in_bytes", "total_time_elapsed_in_sec", ]); let _ = wtr.write_record(&[ opt.num_nodes.to_string(), opt.num_nodes.to_string(), block_range, - transaction_size_range, + transaction_size_range_in_bytes, transactions_per_batch_range, pub_or_priv_pool.to_string(), benchmark_average_latency.as_secs().to_string(), diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 83cbe6df72..560a7d6bdb 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -31,17 +31,6 @@ use hotshot_types::{ use url::Url; use vbs::version::StaticVersionType; -#[cfg(feature = "benchmarking")] -use hotshot::{traits::BlockPayload, types::EventType}; -#[cfg(feature = "benchmarking")] -use hotshot_orchestrator::client::BenchResults; -#[cfg(feature = "benchmarking")] -use hotshot_orchestrator::config::NetworkConfig; -#[cfg(feature = "benchmarking")] -use hotshot_types::traits::{block_contents::BlockHeader, node_implementation::ConsensusTime}; -#[cfg(feature = "benchmarking")] -use std::time::Instant; - use crate::{state_signature::StateSigner, static_stake_table_commitment, Node, SeqTypes}; /// The consensus handle pub type Consensus = SystemContextHandle>; @@ 
-258,141 +247,16 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp /// Start participating in consensus. pub async fn start_consensus(&self) { - #[cfg(feature = "benchmarking")] - let mut has_orchestrator_client = false; - #[cfg(feature = "benchmarking")] - let mut network_config: NetworkConfig = Default::default(); if let Some(orchestrator_client) = &self.wait_for_orchestrator { tracing::warn!("waiting for orchestrated start"); orchestrator_client .wait_for_all_nodes_ready(self.node_state.node_id) .await; - #[cfg(feature = "benchmarking")] - { - has_orchestrator_client = true; - network_config = orchestrator_client.get_config_after_collection().await; - } } else { tracing::error!("Cannot get info from orchestrator client"); } tracing::warn!("starting consensus"); self.handle.read().await.hotshot.start_consensus().await; - - #[cfg(feature = "benchmarking")] - if has_orchestrator_client { - // start_round is the number of rounds for warm up, which will not be counted in for benchmarking phase - let start_round: usize = 20; - let end_round: usize = start_round + network_config.rounds; - let mut event_stream = self.event_stream().await; - let mut num_successful_commits = 0; - let mut total_transactions_committed = 0; - let mut total_throughput = 0; - let node_index: u64 = self.node_state().node_id; - let mut start: Instant = Instant::now(); // will be re-assign once has_started turned to true - let mut has_started: bool = false; - loop { - match event_stream.next().await { - None => { - tracing::error!( - "Error in Benchmarking! Event stream completed before consensus ended." - ); - } - Some(Event { event, .. }) => { - match event { - EventType::Error { error } => { - tracing::error!("Error in consensus: {:?}", error); - } - EventType::Decide { - leaf_chain, - qc: _, - block_size, - } => { - if let Some(leaf_info) = leaf_chain.first() { - let leaf = &leaf_info.leaf; - tracing::info!( - "Decide event for leaf: {}", - *leaf.view_number() - ); - num_successful_commits += leaf_chain.len(); - - // only count in the info after warm up - if num_successful_commits >= start_round { - if !has_started { - start = Instant::now(); - has_started = true; - } - - // iterate all the decided transactions - if let Some(block_payload) = &leaf.block_payload() { - for tx in block_payload - .transactions(leaf.block_header().metadata()) - { - let payload_length = tx.into_payload().len(); - // Transaction = NamespaceId(u64) + payload(Vec) - let tx_sz = payload_length * std::mem::size_of::() // size of payload - + std::mem::size_of::() // size of the namespace - + std::mem::size_of::(); // size of the struct wrapper - total_throughput += tx_sz; - } - } - } - } - - if num_successful_commits >= start_round { - if let Some(size) = block_size { - total_transactions_committed += size; - } - } - - if num_successful_commits >= end_round { - let total_time_elapsed = start.elapsed(); // in seconds - let consensus_lock = - self.handle.read().await.hotshot.consensus(); - let consensus = consensus_lock.read().await; - let total_num_views = - usize::try_from(consensus.locked_view().u64()).unwrap(); - let failed_num_views = total_num_views - num_successful_commits; - let bench_results = if total_transactions_committed != 0 { - let throughput_bytes_per_sec = (total_throughput as u64) - / std::cmp::max(total_time_elapsed.as_secs(), 1u64); - BenchResults { - partial_results: "Unset".to_string(), - // latency will be reported in another struct - avg_latency_in_sec: 0, - num_latency: 1, - minimum_latency_in_sec: 0, - 
maximum_latency_in_sec: 0, - throughput_bytes_per_sec, - total_transactions_committed, - transaction_size_in_bytes: (total_throughput as u64) - / total_transactions_committed, // refer to `submit-transactions.rs` for the range of transaction size - total_time_elapsed_in_sec: total_time_elapsed.as_secs(), - total_num_views, - failed_num_views, - } - } else { - BenchResults::default() - }; - tracing::info!("[{node_index}]: {total_transactions_committed} committed from round {start_round} to {end_round} in {total_time_elapsed:?}, total number of views = {total_num_views}."); - if let Some(orchestrator_client) = &self.wait_for_orchestrator { - orchestrator_client.post_bench_results(bench_results).await; - } - break; - } - - if leaf_chain.len() > 1 { - tracing::warn!( - "Leaf chain is greater than 1 with len {}", - leaf_chain.len() - ); - } - } - _ => {} // mostly DA proposal - } - } - } - } - } } /// Spawn a background task attached to this context. From 76c2441ddcb18b9ec413a02b7185547cc626ad20 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 05:51:10 +0500 Subject: [PATCH 36/65] lint --- sequencer/src/api.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 2408e007cc..f6c9632260 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1054,7 +1054,10 @@ mod test { }; use hotshot_types::{ event::LeafInfo, - traits::{metrics::NoMetrics, node_implementation::{ConsensusTime, NodeType}}, + traits::{ + metrics::NoMetrics, + node_implementation::{ConsensusTime, NodeType}, + }, }; use jf_merkle_tree::prelude::{MerkleProof, Sha3Node}; use portpicker::pick_unused_port; From 9fec6c3ed82c3d988bd34ef0d05c0be91ad42bf0 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 07:09:51 +0500 Subject: [PATCH 37/65] fix serialization --- sequencer/src/genesis.rs | 81 +++++++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 17 deletions(-) diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 5d21622da1..f911d76064 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -83,7 +83,7 @@ mod upgrade_serialization { View(UpgradeViewParams), } - #[derive(Deserialize)] + #[derive(Serialize, Deserialize)] struct UpgradeFields { version: String, #[serde(flatten)] @@ -99,22 +99,26 @@ mod upgrade_serialization { let mut seq = serializer.serialize_seq(Some(map.len()))?; for (version, upgrade) in map { match upgrade.mode { - UpgradeMode::View => seq.serialize_element(&( - version.to_string(), - upgrade.start_proposing_view, - upgrade.stop_proposing_view, - upgrade.start_voting_view, - upgrade.stop_voting_view, - upgrade.upgrade_type.clone(), - ))?, - UpgradeMode::Time => seq.serialize_element(&( - version.to_string(), - upgrade.start_proposing_time, - upgrade.stop_proposing_time, - upgrade.start_voting_time, - upgrade.stop_voting_time, - upgrade.upgrade_type.clone(), - ))?, + UpgradeMode::View => seq.serialize_element(&UpgradeFields { + version: version.to_string(), + params: UpgradeParameters::View(UpgradeViewParams { + start_proposing_view: upgrade.start_proposing_view, + stop_proposing_view: upgrade.stop_proposing_view, + start_voting_view: upgrade.start_voting_view, + stop_voting_view: upgrade.stop_voting_view, + }), + upgrade_type: upgrade.upgrade_type.clone(), + })?, + UpgradeMode::Time => seq.serialize_element(&UpgradeFields { + version: version.to_string(), + params: UpgradeParameters::Time(UpgradeTimeParams { + start_proposing_time: upgrade.start_proposing_time, 
+ stop_proposing_time: upgrade.stop_proposing_time, + start_voting_time: upgrade.start_voting_time, + stop_voting_time: upgrade.stop_voting_time, + }), + upgrade_type: upgrade.upgrade_type.clone(), + })?, } } seq.end() @@ -462,5 +466,48 @@ mod test { let toml_from_genesis = toml::to_string(&genesis).unwrap(); assert_eq!(toml, toml_from_genesis); + + // set both time and view parameters + // this should err + let toml = toml! { + [stake_table] + capacity = 10 + + [chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + + [header] + timestamp = 123456 + + [accounts] + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f" = 100000 + "0x0000000000000000000000000000000000000000" = 42 + + [l1_finalized] + number = 64 + timestamp = "0x123def" + hash = "0x80f5dd11f2bdda2814cb1ad94ef30a47de02cf28ad68c89e104c00c4e51bb7a5" + + [[upgrade]] + version = "0.2" + start_proposing_view = 1 + stop_proposing_view = 10 + start_proposing_time = 1 + stop_proposing_time = 10 + + [upgrade.chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + } + .to_string(); + + toml::from_str::(&toml).unwrap_err(); } } From 3fddd7b8b884ab8038098082968f09b1f03bb881 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 08:33:53 +0500 Subject: [PATCH 38/65] separate out time based and view based upgrade fields --- sequencer/src/api.rs | 20 ++- sequencer/src/genesis.rs | 193 ++++++++++++---------------- sequencer/src/lib.rs | 19 +-- types/src/v0/v0_1/instance_state.rs | 66 +++++++--- 4 files changed, 140 insertions(+), 158 deletions(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index f6c9632260..221418f5cb 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1039,8 +1039,9 @@ mod test { use committable::{Commitment, Committable}; use es_version::{SequencerVersion, SEQUENCER_VERSION}; use espresso_types::{ - mock::MockStateCatchup, v0_1::UpgradeMode, FeeAccount, FeeAmount, Header, Upgrade, - UpgradeType, ValidatedState, + mock::MockStateCatchup, + v0_1::{UpgradeMode, ViewBasedUpgrade}, + FeeAccount, FeeAmount, Header, Upgrade, UpgradeType, ValidatedState, }; use ethers::utils::Anvil; use futures::{ @@ -1445,15 +1446,12 @@ mod test { upgrades.insert( ::Upgrade::VERSION, Upgrade { - start_voting_time: None, - stop_voting_time: None, - start_proposing_time: 0, - stop_proposing_time: u64::MAX, - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 1, - stop_proposing_view: 10, - mode: UpgradeMode::View, + mode: UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }), upgrade_type: UpgradeType::ChainConfig { chain_config: chain_config_upgrade, }, diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index f911d76064..9cd66fb866 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -4,7 +4,10 @@ use std::{ }; use anyhow::Context; -use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade}; + +use espresso_types::Upgrade; +use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo}; + use serde::{Deserialize, Serialize}; use vbs::version::Version; @@ -51,75 +54,37 @@ mod upgrade_serialization { use std::{collections::BTreeMap, 
fmt}; - use espresso_types::{v0_1::UpgradeMode, Upgrade, UpgradeType}; + use espresso_types::{ + v0_1::{TimeBasedUpgrade, UpgradeMode, ViewBasedUpgrade}, + Upgrade, UpgradeType, + }; use serde::{ - de::{SeqAccess, Visitor}, + de::{self, SeqAccess, Visitor}, ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer, }; use vbs::version::Version; - #[derive(Clone, Debug, Deserialize, Serialize)] - pub struct UpgradeTimeParams { - pub start_proposing_time: u64, - pub stop_proposing_time: u64, - pub start_voting_time: Option, - pub stop_voting_time: Option, - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub struct UpgradeViewParams { - pub start_proposing_view: u64, - pub stop_proposing_view: u64, - pub start_voting_view: Option, - pub stop_voting_view: Option, - } - - /// Represents the specific type of upgrade. - #[derive(Clone, Debug, Deserialize, Serialize)] - #[serde(untagged)] - pub enum UpgradeParameters { - Time(UpgradeTimeParams), - View(UpgradeViewParams), - } - - #[derive(Serialize, Deserialize)] - struct UpgradeFields { - version: String, - #[serde(flatten)] - params: UpgradeParameters, - #[serde(flatten)] - upgrade_type: UpgradeType, - } - pub fn serialize(map: &BTreeMap, serializer: S) -> Result where S: Serializer, { + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct Fields { + pub version: String, + #[serde(flatten)] + pub mode: UpgradeMode, + #[serde(flatten)] + pub upgrade_type: UpgradeType, + } + let mut seq = serializer.serialize_seq(Some(map.len()))?; for (version, upgrade) in map { - match upgrade.mode { - UpgradeMode::View => seq.serialize_element(&UpgradeFields { - version: version.to_string(), - params: UpgradeParameters::View(UpgradeViewParams { - start_proposing_view: upgrade.start_proposing_view, - stop_proposing_view: upgrade.stop_proposing_view, - start_voting_view: upgrade.start_voting_view, - stop_voting_view: upgrade.stop_voting_view, - }), - upgrade_type: upgrade.upgrade_type.clone(), - })?, - UpgradeMode::Time => seq.serialize_element(&UpgradeFields { - version: version.to_string(), - params: UpgradeParameters::Time(UpgradeTimeParams { - start_proposing_time: upgrade.start_proposing_time, - stop_proposing_time: upgrade.stop_proposing_time, - start_voting_time: upgrade.start_voting_time, - stop_voting_time: upgrade.stop_voting_time, - }), - upgrade_type: upgrade.upgrade_type.clone(), - })?, - } + seq.serialize_element(&Fields { + version: version.to_string(), + mode: upgrade.mode.clone(), + upgrade_type: upgrade.upgrade_type.clone(), + })? } seq.end() } @@ -130,6 +95,20 @@ mod upgrade_serialization { { struct VecToHashMap; + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct Fields { + pub version: String, + // If both `time_based` and `view_based` fields are provided + // and we use an enum for deserialization, then one of the variant fields will be ignored. + // We want to raise an error in such a case to avoid ambiguity + #[serde(flatten)] + pub time_based: Option, + #[serde(flatten)] + pub view_based: Option, + #[serde(flatten)] + pub upgrade_type: UpgradeType, + } + impl<'de> Visitor<'de> for VecToHashMap { type Value = BTreeMap; @@ -143,7 +122,7 @@ mod upgrade_serialization { { let mut map = BTreeMap::new(); - while let Some(fields) = seq.next_element::()? { + while let Some(fields) = seq.next_element::()? 
{ // add try_from in Version let version: Vec<_> = fields.version.split('.').collect(); @@ -152,38 +131,36 @@ mod upgrade_serialization { minor: version[1].parse().expect("invalid version"), }; - match fields.params { - UpgradeParameters::Time(t) => map.insert( - version, - Upgrade { - start_voting_time: t.start_voting_time, - stop_voting_time: t.stop_voting_time, - start_proposing_time: t.start_proposing_time, - stop_proposing_time: t.stop_proposing_time, - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 0, - stop_proposing_view: u64::MAX, - mode: UpgradeMode::Time, - upgrade_type: fields.upgrade_type, - }, - ), - UpgradeParameters::View(v) => map.insert( - version, - Upgrade { - start_voting_time: None, - stop_voting_time: None, - start_proposing_time: 0, - stop_proposing_time: u64::MAX, - start_voting_view: v.start_voting_view, - stop_voting_view: v.stop_voting_view, - start_proposing_view: v.start_proposing_view, - stop_proposing_view: v.stop_proposing_view, - mode: UpgradeMode::View, - upgrade_type: fields.upgrade_type, - }, - ), - }; + match (fields.time_based, fields.view_based) { + (Some(_), Some(_)) => { + return Err(de::Error::custom( + "both view and time mode parameters are set", + )) + } + (None, None) => { + return Err(de::Error::custom( + "no view or time mode parameters provided", + )) + } + (None, Some(v)) => { + map.insert( + version, + Upgrade { + mode: UpgradeMode::View(v), + upgrade_type: fields.upgrade_type, + }, + ); + } + (Some(t), None) => { + map.insert( + version, + Upgrade { + mode: UpgradeMode::Time(t), + upgrade_type: fields.upgrade_type.clone(), + }, + ); + } + } } Ok(map) @@ -212,7 +189,10 @@ impl Genesis { #[cfg(test)] mod test { - use espresso_types::{v0_1::UpgradeMode, L1BlockInfo, Timestamp, UpgradeType}; + use espresso_types::{ + v0_1::{UpgradeMode, ViewBasedUpgrade}, + L1BlockInfo, Timestamp, UpgradeType, + }; use ethers::prelude::{Address, H160, H256}; use sequencer_utils::ser::FromStringOrInteger; use toml::toml; @@ -418,7 +398,7 @@ mod test { [[upgrade]] version = "0.2" start_proposing_view = 1 - stop_proposing_view = 10 + stop_proposing_view = 15 [upgrade.chain_config] chain_id = 12345 @@ -436,15 +416,12 @@ mod test { assert_eq!(*version, Version { major: 0, minor: 2 }); let upgrade = Upgrade { - start_voting_time: None, - stop_voting_time: None, - start_proposing_time: 0, - stop_proposing_time: u64::MAX, - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 1, - stop_proposing_view: 10, - mode: UpgradeMode::View, + mode: UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 15, + }), upgrade_type: UpgradeType::ChainConfig { chain_config: genesis.chain_config, }, @@ -455,18 +432,6 @@ mod test { let mut upgrades = BTreeMap::new(); upgrades.insert(Version { major: 0, minor: 2 }, upgrade); - let genesis = Genesis { - chain_config: genesis.chain_config, - stake_table: genesis.stake_table, - accounts: genesis.accounts, - l1_finalized: genesis.l1_finalized, - header: genesis.header, - upgrades, - }; - - let toml_from_genesis = toml::to_string(&genesis).unwrap(); - assert_eq!(toml, toml_from_genesis); - // set both time and view parameters // this should err let toml = toml! 
{ diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index f50092bf20..ca05884ba6 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -204,15 +204,7 @@ pub async fn init_node( .upgrades .get(&::Upgrade::VERSION) { - config.config.start_proposing_view = upgrade.start_proposing_view; - config.config.stop_proposing_view = upgrade.stop_proposing_view; - config.config.start_voting_view = upgrade.start_voting_view.unwrap_or(0); - config.config.stop_voting_view = upgrade.stop_voting_view.unwrap_or(u64::MAX); - - config.config.start_proposing_time = upgrade.start_proposing_time; - config.config.stop_proposing_time = upgrade.stop_proposing_time; - config.config.start_voting_time = upgrade.start_voting_time.unwrap_or(0); - config.config.stop_voting_time = upgrade.stop_voting_time.unwrap_or(u64::MAX); + upgrade.set_hotshot_config(&mut config.config); } // If the `Libp2p` bootstrap nodes were supplied via the command line, override those @@ -452,14 +444,7 @@ pub mod testing { pub fn build(mut self) -> TestConfig { if let Some(upgrade) = self.upgrades.get(&::Upgrade::VERSION) { - self.config.start_proposing_view = upgrade.start_proposing_view; - self.config.stop_proposing_view = upgrade.stop_proposing_view; - self.config.start_voting_view = upgrade.start_voting_view.unwrap_or(0); - self.config.stop_voting_view = upgrade.stop_voting_view.unwrap_or(u64::MAX); - self.config.start_proposing_time = upgrade.start_proposing_time; - self.config.stop_proposing_time = upgrade.stop_proposing_time; - self.config.start_voting_time = upgrade.start_voting_time.unwrap_or(0); - self.config.stop_voting_time = upgrade.stop_voting_time.unwrap_or(u64::MAX); + upgrade.set_hotshot_config(&mut self.config) } TestConfig { diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index facf203721..bb839880d8 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -2,10 +2,13 @@ use std::collections::BTreeMap; use std::sync::Arc; +use hotshot_types::HotShotConfig; use serde::{Deserialize, Serialize}; use std::fmt::Debug; -use crate::{v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, ValidatedState}; +use crate::{ + v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, PubKey, ValidatedState, +}; use vbs::version::Version; use super::l1::L1Client; @@ -20,30 +23,61 @@ pub enum UpgradeType { ChainConfig { chain_config: ChainConfig }, } -/// Represents the upgrade config including the type of upgrade and upgrade parameters for hotshot config. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct Upgrade { - pub start_voting_time: Option, - pub stop_voting_time: Option, +pub struct TimeBasedUpgrade { pub start_proposing_time: u64, pub stop_proposing_time: u64, - pub start_voting_view: Option, - pub stop_voting_view: Option, + pub start_voting_time: Option, + pub stop_voting_time: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct ViewBasedUpgrade { pub start_proposing_view: u64, pub stop_proposing_view: u64, - // View or time based - pub mode: UpgradeMode, - /// The specific type of upgrade configuration. - /// - /// Currently, we only support chain configuration upgrades (`upgrade.chain_config` in genesis toml file). - #[serde(flatten)] - pub upgrade_type: UpgradeType, + pub start_voting_view: Option, + pub stop_voting_view: Option, } +/// Represents the specific type of upgrade. 
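The reworked `UpgradeMode` relies on `#[serde(untagged)]`: serde tries each variant in declaration order and picks the first whose fields deserialize, so the presence of `start_proposing_time` versus `start_proposing_view` selects the mode without an explicit tag. A self-contained illustration of the pattern with simplified stand-in types (not the real `TimeBasedUpgrade`/`ViewBasedUpgrade`), assuming the `serde` (with `derive`) and `toml` crates:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct TimeParams {
        start_proposing_time: u64,
        stop_proposing_time: u64,
    }

    #[derive(Debug, Deserialize)]
    struct ViewParams {
        start_proposing_view: u64,
        stop_proposing_view: u64,
    }

    #[derive(Debug, Deserialize)]
    #[serde(untagged)]
    enum Mode {
        Time(TimeParams),
        View(ViewParams),
    }

    fn main() {
        let m: Mode =
            toml::from_str("start_proposing_view = 1\nstop_proposing_view = 10").unwrap();
        println!("{m:?}"); // prints the View variant
    }

This first-match behavior is also why the deserializer in genesis.rs flattens both parameter sets as `Option`s and rejects inputs that set both: an untagged enum alone would silently ignore the losing variant's fields.
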
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[serde(untagged)] pub enum UpgradeMode { - View, - Time, + Time(TimeBasedUpgrade), + View(ViewBasedUpgrade), +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Upgrade { + pub mode: UpgradeMode, + pub upgrade_type: UpgradeType, +} + +impl Upgrade { + pub fn set_hotshot_config(&self, config: &mut HotShotConfig) { + match &self.mode { + UpgradeMode::View(v) => { + config.start_proposing_view = v.start_proposing_view; + config.stop_proposing_view = v.stop_proposing_view; + config.start_voting_view = v.start_voting_view.unwrap_or(0); + config.stop_voting_view = v.stop_voting_view.unwrap_or(u64::MAX); + config.start_proposing_time = 0; + config.stop_proposing_time = u64::MAX; + config.start_voting_time = 0; + config.stop_voting_time = u64::MAX; + } + UpgradeMode::Time(t) => { + config.start_proposing_time = t.start_proposing_time; + config.stop_proposing_time = t.stop_proposing_time; + config.start_voting_time = t.start_voting_time.unwrap_or(0); + config.stop_voting_time = t.stop_voting_time.unwrap_or(u64::MAX); + config.start_proposing_view = 0; + config.stop_proposing_view = u64::MAX; + config.start_voting_view = 0; + config.stop_voting_view = u64::MAX; + } + } + } } /// Represents the immutable state of a node. From 1aca30fbc7853d15d6a2ca2b6af0ce3248175e9e Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Fri, 12 Jul 2024 07:58:07 -0700 Subject: [PATCH 39/65] update results --- scripts/benchmarks_results/upload_results.csv | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/scripts/benchmarks_results/upload_results.csv b/scripts/benchmarks_results/upload_results.csv index 2ce7857883..0c43181fe2 100644 --- a/scripts/benchmarks_results/upload_results.csv +++ b/scripts/benchmarks_results/upload_results.csv @@ -1,9 +1,8 @@ -Note: actual rounds = 100 (usually calculate info between round 20 ~ 120), actual transactions_per_round is a distribution between 1~20, transaction_size is the avg size of all txs -commit_sha,total_nodes,da_committee_size,transactions_per_round,transaction_size,rounds,leader_election_type,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,throughput_bytes_per_sec,total_transactions_committed,total_time_elapsed_in_sec,total_num_views,failed_num_views -20-120-no-latency,5,5,10,542,10,static-leader-selection,0,0,0,2022,149,40,121,1 -commit_sha,total_nodes,da_committee_size,transactions_per_round,transaction_size,rounds,leader_election_type,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,throughput_bytes_per_sec,total_transactions_committed,total_time_elapsed_in_sec,total_num_views,failed_num_views -20-120,5,5,10,578,10,static-leader-selection,0,0,0,2826,303,62,121,1 -private_pool_avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec -3,0,7 -public_pool_avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec -2,0,7 \ No newline at end of file +total_nodes,da_committee_size,block_range,transaction_size_range,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_bytes,total_time_elapsed_in_sec +5,5,20~120,1~1000,1~20,public,1,0,4,209,22,503,53 
+total_nodes,da_committee_size,block_range,transaction_size_range,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_bytes,total_time_elapsed_in_sec +5,5,20~120,1~1000,1~20,private,3,0,6,1267,115,584,53 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~20,public,1,0,2,260,23,566,50 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~20,private,2,0,2,1277,118,541,50 \ No newline at end of file From 7a9c113f1c16f1c54589d355fa77479775034094 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 20:36:08 +0500 Subject: [PATCH 40/65] change u64 to timestamp for time based upgrade and add tests --- sequencer/src/genesis.rs | 78 ++++++++++++++++--- .../impls/block/full_payload/ns_proof/test.rs | 3 +- types/src/v0/mod.rs | 3 + types/src/v0/v0_1/instance_state.rs | 38 ++++++--- types/src/v0/v0_2/mod.rs | 9 ++- types/src/v0/v0_3/mod.rs | 9 ++- 6 files changed, 113 insertions(+), 27 deletions(-) diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 9cd66fb866..08d0184df6 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -4,10 +4,7 @@ use std::{ }; use anyhow::Context; - -use espresso_types::Upgrade; -use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo}; - +use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade}; use serde::{Deserialize, Serialize}; use vbs::version::Version; @@ -190,8 +187,7 @@ impl Genesis { #[cfg(test)] mod test { use espresso_types::{ - v0_1::{UpgradeMode, ViewBasedUpgrade}, - L1BlockInfo, Timestamp, UpgradeType, + L1BlockInfo, TimeBasedUpgrade, Timestamp, UpgradeMode, UpgradeType, ViewBasedUpgrade, }; use ethers::prelude::{Address, H160, H256}; use sequencer_utils::ser::FromStringOrInteger; @@ -369,7 +365,7 @@ mod test { } #[test] - fn test_genesis_toml_upgrade() { + fn test_genesis_toml_upgrade_view_mode() { // without optional fields // with view settings let toml = toml! { @@ -428,10 +424,74 @@ mod test { }; assert_eq!(*genesis_upgrade, upgrade); + } + + #[test] + fn test_genesis_toml_upgrade_time_mode() { + // without optional fields + // with time settings + let toml = toml! 
{ + [stake_table] + capacity = 10 + + [chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + + [header] + timestamp = 123456 + + [accounts] + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f" = 100000 + "0x0000000000000000000000000000000000000000" = 42 + + [l1_finalized] + number = 64 + timestamp = "0x123def" + hash = "0x80f5dd11f2bdda2814cb1ad94ef30a47de02cf28ad68c89e104c00c4e51bb7a5" - let mut upgrades = BTreeMap::new(); - upgrades.insert(Version { major: 0, minor: 2 }, upgrade); + [[upgrade]] + version = "0.2" + start_proposing_time = "2024-01-01T00:00:00Z" + stop_proposing_time = "2024-01-02T00:00:00Z" + + [upgrade.chain_config] + chain_id = 12345 + max_block_size = 30000 + base_fee = 1 + fee_recipient = "0x0000000000000000000000000000000000000000" + fee_contract = "0x0000000000000000000000000000000000000000" + } + .to_string(); + + let genesis: Genesis = toml::from_str(&toml).unwrap_or_else(|err| panic!("{err:#}")); + + let (version, genesis_upgrade) = genesis.upgrades.last_key_value().unwrap(); + + assert_eq!(*version, Version { major: 0, minor: 2 }); + + let upgrade = Upgrade { + mode: UpgradeMode::Time(TimeBasedUpgrade { + start_voting_time: None, + stop_voting_time: None, + start_proposing_time: Timestamp::from_string("2024-01-01T00:00:00Z".to_string()) + .unwrap(), + stop_proposing_time: Timestamp::from_string("2024-01-02T00:00:00Z".to_string()) + .unwrap(), + }), + upgrade_type: UpgradeType::ChainConfig { + chain_config: genesis.chain_config, + }, + }; + assert_eq!(*genesis_upgrade, upgrade); + } + + #[test] + fn test_genesis_toml_upgrade_view_and_time_mode() { // set both time and view parameters // this should err let toml = toml! 
{ diff --git a/types/src/v0/impls/block/full_payload/ns_proof/test.rs b/types/src/v0/impls/block/full_payload/ns_proof/test.rs index d18831a91f..39b0b434c5 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof/test.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof/test.rs @@ -1,4 +1,3 @@ -use crate::{v0::impls::block::test::ValidTest, NsProof, Payload}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use futures::future; use hotshot::traits::BlockPayload; @@ -8,6 +7,8 @@ use hotshot_types::{ }; use jf_vid::{VidDisperse, VidScheme}; +use crate::{v0::impls::block::test::ValidTest, NsProof, Payload}; + #[async_std::test] async fn ns_proof() { let test_cases = vec![ diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs index bd9bb77473..e683ac7145 100644 --- a/types/src/v0/mod.rs +++ b/types/src/v0/mod.rs @@ -114,6 +114,9 @@ reexport_unchanged_types!( TxTableEntriesRange, Upgrade, UpgradeType, + UpgradeMode, + TimeBasedUpgrade, + ViewBasedUpgrade, ValidatedState, BlockSize, ); diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index bb839880d8..9502bd5f78 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -3,11 +3,13 @@ use std::collections::BTreeMap; use std::sync::Arc; use hotshot_types::HotShotConfig; +use sequencer_utils::ser::FromStringOrInteger; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use crate::{ - v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, PubKey, ValidatedState, + v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, PubKey, Timestamp, + ValidatedState, }; use vbs::version::Version; @@ -23,19 +25,29 @@ pub enum UpgradeType { ChainConfig { chain_config: ChainConfig }, } +/// Represents an upgrade based on time (unix timestamp). #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct TimeBasedUpgrade { - pub start_proposing_time: u64, - pub stop_proposing_time: u64, - pub start_voting_time: Option, - pub stop_voting_time: Option, + /// the earliest unix timestamp in which the node can propose an upgrade + pub start_proposing_time: Timestamp, + /// timestamp after which the node stops proposing an upgrade + pub stop_proposing_time: Timestamp, + /// The timestamp at which voting for the upgrade proposal starts + pub start_voting_time: Option, + /// The timestamp at which voting for the upgrade proposal stops + pub stop_voting_time: Option, } +/// Represents an upgrade based on view. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct ViewBasedUpgrade { + /// the earliest view in which the node can propose an upgrade pub start_proposing_view: u64, + /// view after which the node stops proposing an upgrade pub stop_proposing_view: u64, + /// The view at which voting for the upgrade proposal starts pub start_voting_view: Option, + /// The view at which voting for the upgrade proposal stops pub stop_voting_view: Option, } @@ -43,13 +55,18 @@ pub struct ViewBasedUpgrade { #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(untagged)] pub enum UpgradeMode { + /// Upgrade based on unix timestamp. Time(TimeBasedUpgrade), + /// Upgrade based on view. View(ViewBasedUpgrade), } +/// Represents a general upgrade with mode and type. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Upgrade { + /// The mode of the upgrade (time-based or view-based). pub mode: UpgradeMode, + /// The type of the upgrade. 
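With this change the proposing/voting bounds are `Timestamp`s, so genesis files can spell them as RFC 3339 strings (as in the `"2024-01-01T00:00:00Z"` test above) while HotShot still receives plain unix seconds through `unix_timestamp()`. What that conversion boils down to, assuming the `time` crate (which backs `Timestamp` via `OffsetDateTime`) with its `parsing` feature enabled:

    use time::{format_description::well_known::Rfc3339, OffsetDateTime};

    /// Parse an RFC 3339 string and return whole unix seconds.
    fn unix_seconds(rfc3339: &str) -> Result<u64, time::error::Parse> {
        let dt = OffsetDateTime::parse(rfc3339, &Rfc3339)?;
        Ok(dt.unix_timestamp() as u64)
    }

    fn main() {
        assert_eq!(unix_seconds("2024-01-01T00:00:00Z").unwrap(), 1_704_067_200);
    }
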
pub upgrade_type: UpgradeType, } @@ -67,10 +84,13 @@ impl Upgrade { config.stop_voting_time = u64::MAX; } UpgradeMode::Time(t) => { - config.start_proposing_time = t.start_proposing_time; - config.stop_proposing_time = t.stop_proposing_time; - config.start_voting_time = t.start_voting_time.unwrap_or(0); - config.stop_voting_time = t.stop_voting_time.unwrap_or(u64::MAX); + config.start_proposing_time = t.start_proposing_time.unix_timestamp(); + config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); + config.start_voting_time = t.start_voting_time.unwrap_or_default().unix_timestamp(); + config.stop_voting_time = t + .stop_voting_time + .unwrap_or(Timestamp::from_integer(u64::MAX).unwrap()) + .unix_timestamp(); config.start_proposing_view = 0; config.stop_proposing_view = u64::MAX; config.start_voting_view = 0; diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs index 9e745f21ff..f71f78fb9d 100644 --- a/types/src/v0/v0_2/mod.rs +++ b/types/src/v0/v0_2/mod.rs @@ -8,10 +8,11 @@ pub use super::v0_1::{ L1Snapshot, NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, - ResolvableChainConfig, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, - TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeType, ValidatedState, - BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, + ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, + TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + UpgradeType, ValidatedState, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, + FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, + TX_OFFSET_BYTE_LEN, }; pub const VERSION: Version = Version { major: 0, minor: 2 }; diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs index 1bf37d80ba..a3f38283c8 100644 --- a/types/src/v0/v0_3/mod.rs +++ b/types/src/v0/v0_3/mod.rs @@ -8,10 +8,11 @@ pub use super::v0_1::{ L1Snapshot, NamespaceId, NodeState, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, - ResolvableChainConfig, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, - TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeType, ValidatedState, - BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, - NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, + ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, + TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + UpgradeType, ValidatedState, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, + FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, + TX_OFFSET_BYTE_LEN, }; pub const VERSION: Version = Version { major: 0, minor: 3 }; From 324719b402e90c8538bbe9989c25341a02ea1842 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 20:48:08 +0500 Subject: [PATCH 41/65] add timestamp::max() --- types/src/v0/utils.rs | 6 ++++++ types/src/v0/v0_1/instance_state.rs | 5 +++-- 2 files changed, 9 insertions(+), 2 deletions(-) 
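The patch below gives `Timestamp` an explicit `max()` pinned to `i64::MAX` (per its own comment, the widest unix timestamp `OffsetDateTime` can express), replacing the `from_integer(u64::MAX).unwrap()` fallback. Either way, the intent of the `unwrap_or` defaults in `set_hotshot_config_parameters` is that absent bounds widen to the full range rather than disabling voting, mirroring the view-mode defaults of `0` and `u64::MAX`. Reduced to plain integers, the widening rule is just (hypothetical helper):

    /// Absent voting bounds widen to the full range instead of disabling voting.
    fn voting_window(start: Option<u64>, stop: Option<u64>) -> (u64, u64) {
        (start.unwrap_or(0), stop.unwrap_or(u64::MAX))
    }

    fn main() {
        assert_eq!(voting_window(None, None), (0, u64::MAX));
        assert_eq!(voting_window(Some(5), Some(100)), (5, 100));
    }
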
diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs
index 530f6230c2..69a56b2080 100644
--- a/types/src/v0/utils.rs
+++ b/types/src/v0/utils.rs
@@ -39,6 +39,12 @@ impl Timestamp {
     pub fn unix_timestamp(&self) -> u64 {
         self.0.unix_timestamp() as u64
     }
+
+    pub fn max() -> anyhow::Result<Self> {
+        Ok(Self(
+            OffsetDateTime::from_unix_timestamp(i64::MAX).context("overflow")?,
+        ))
+    }
 }
 
 impl FromStringOrInteger for Timestamp {

diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs
index 9502bd5f78..7e52eac6c4 100644
--- a/types/src/v0/v0_1/instance_state.rs
+++ b/types/src/v0/v0_1/instance_state.rs
@@ -3,7 +3,6 @@ use std::collections::BTreeMap;
 use std::sync::Arc;
 
 use hotshot_types::HotShotConfig;
-use sequencer_utils::ser::FromStringOrInteger;
 use serde::{Deserialize, Serialize};
 use std::fmt::Debug;
 
@@ -87,9 +86,11 @@ impl Upgrade {
             config.start_proposing_time = t.start_proposing_time.unix_timestamp();
             config.stop_proposing_time = t.stop_proposing_time.unix_timestamp();
             config.start_voting_time = t.start_voting_time.unwrap_or_default().unix_timestamp();
+            // this should not panmic because Timestamp::max() constructs the maximum possible Unix timestamp
+            // using i64::MAX
             config.stop_voting_time = t
                 .stop_voting_time
-                .unwrap_or(Timestamp::from_integer(u64::MAX).unwrap())
+                .unwrap_or(Timestamp::max().expect("overflow"))
                 .unix_timestamp();
             config.start_proposing_view = 0;
             config.stop_proposing_view = u64::MAX;

From b9c123d04b7635323748e92c598bb7dab59f834d Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Fri, 12 Jul 2024 20:55:10 +0500
Subject: [PATCH 42/65] add check for propose view/time

---
 sequencer/src/genesis.rs            | 14 ++++++++++++++
 types/src/v0/v0_1/instance_state.rs |  2 +-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs
index 08d0184df6..f809bc49b1 100644
--- a/sequencer/src/genesis.rs
+++ b/sequencer/src/genesis.rs
@@ -140,6 +140,12 @@ mod upgrade_serialization {
                     ))
                 }
                 (None, Some(v)) => {
+                    if v.start_proposing_view < v.stop_proposing_view {
+                        return Err(de::Error::custom(
+                            "start_proposing_view is less than stop_proposing_view",
+                        ));
+                    }
+
                     map.insert(
                         version,
                         Upgrade {
@@ -149,6 +155,14 @@ mod upgrade_serialization {
                     );
                 }
                 (Some(t), None) => {
+                    if t.start_proposing_time.unix_timestamp()
+                        < t.stop_proposing_time.unix_timestamp()
+                    {
+                        return Err(de::Error::custom(
+                            "start_proposing_time is less than stop_proposing_time",
+                        ));
+                    }
+
                     map.insert(
                         version,
                         Upgrade {

diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs
index 7e52eac6c4..c54860474c 100644
--- a/types/src/v0/v0_1/instance_state.rs
+++ b/types/src/v0/v0_1/instance_state.rs
@@ -86,7 +86,7 @@ impl Upgrade {
             config.start_proposing_time = t.start_proposing_time.unix_timestamp();
             config.stop_proposing_time = t.stop_proposing_time.unix_timestamp();
             config.start_voting_time = t.start_voting_time.unwrap_or_default().unix_timestamp();
-            // this should not panmic because Timestamp::max() constructs the maximum possible Unix timestamp
+            // this should not panic because Timestamp::max() constructs the maximum possible Unix timestamp
             // using i64::MAX
             config.stop_voting_time = t
                 .stop_voting_time

From 644a487271def741491e996ad77737528fb1a4c3 Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Fri, 12 Jul 2024 21:17:20 +0500
Subject: [PATCH 43/65] fix check

---
 sequencer/src/genesis.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/sequencer/src/genesis.rs
b/sequencer/src/genesis.rs index f809bc49b1..59941ae595 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -140,9 +140,9 @@ mod upgrade_serialization { )) } (None, Some(v)) => { - if v.start_proposing_view < v.stop_proposing_view { + if v.start_proposing_view > v.stop_proposing_view { return Err(de::Error::custom( - "start_proposing_view is less than stop_proposing_view", + "stop_proposing_view is less than start_proposing_view", )); } @@ -156,10 +156,10 @@ mod upgrade_serialization { } (Some(t), None) => { if t.start_proposing_time.unix_timestamp() - < t.stop_proposing_time.unix_timestamp() + > t.stop_proposing_time.unix_timestamp() { return Err(de::Error::custom( - "start_proposing_time is less than stop_proposing_time", + "stop_proposing_time is less than start_proposing_time", )); } From 2bdb1b9fa205a83270ce0c25d68e32001625bdf7 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 12 Jul 2024 23:31:30 +0500 Subject: [PATCH 44/65] move impl Upgrade from types to impls module --- sequencer/src/lib.rs | 4 ++-- types/src/v0/impls/instance_state.rs | 36 ++++++++++++++++++++++++++-- types/src/v0/v0_1/instance_state.rs | 36 +--------------------------- 3 files changed, 37 insertions(+), 39 deletions(-) diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index ca05884ba6..7cfefdb5ae 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -204,7 +204,7 @@ pub async fn init_node( .upgrades .get(&::Upgrade::VERSION) { - upgrade.set_hotshot_config(&mut config.config); + upgrade.set_hotshot_config_parameters(&mut config.config); } // If the `Libp2p` bootstrap nodes were supplied via the command line, override those @@ -444,7 +444,7 @@ pub mod testing { pub fn build(mut self) -> TestConfig { if let Some(upgrade) = self.upgrades.get(&::Upgrade::VERSION) { - upgrade.set_hotshot_config(&mut self.config) + upgrade.set_hotshot_config_parameters(&mut self.config) } TestConfig { diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index c47abcda14..575a5da16e 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -1,10 +1,10 @@ use std::{collections::BTreeMap, sync::Arc}; -use hotshot_types::traits::{node_implementation::NodeType, states::InstanceState}; +use hotshot_types::{traits::{node_implementation::NodeType, states::InstanceState}, HotShotConfig}; use vbs::version::{StaticVersionType, Version}; use crate::{ - v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, SeqTypes, Upgrade, ValidatedState, + v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, PubKey, SeqTypes, Timestamp, Upgrade, UpgradeMode, ValidatedState }; impl NodeState { @@ -77,6 +77,38 @@ impl Default for NodeState { impl InstanceState for NodeState {} +impl Upgrade { + pub fn set_hotshot_config_parameters(&self, config: &mut HotShotConfig) { + match &self.mode { + UpgradeMode::View(v) => { + config.start_proposing_view = v.start_proposing_view; + config.stop_proposing_view = v.stop_proposing_view; + config.start_voting_view = v.start_voting_view.unwrap_or(0); + config.stop_voting_view = v.stop_voting_view.unwrap_or(u64::MAX); + config.start_proposing_time = 0; + config.stop_proposing_time = u64::MAX; + config.start_voting_time = 0; + config.stop_voting_time = u64::MAX; + } + UpgradeMode::Time(t) => { + config.start_proposing_time = t.start_proposing_time.unix_timestamp(); + config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); + config.start_voting_time = 
t.start_voting_time.unwrap_or_default().unix_timestamp(); + // this should not panic because Timestamp::max() constructs the maximum possible Unix timestamp + // using i64::MAX + config.stop_voting_time = t + .stop_voting_time + .unwrap_or(Timestamp::max().expect("overflow")) + .unix_timestamp(); + config.start_proposing_view = 0; + config.stop_proposing_view = u64::MAX; + config.start_voting_view = 0; + config.stop_voting_view = u64::MAX; + } + } + } +} + #[cfg(any(test, feature = "testing"))] pub mod mock { use std::collections::HashMap; diff --git a/types/src/v0/v0_1/instance_state.rs b/types/src/v0/v0_1/instance_state.rs index c54860474c..6007bed21e 100644 --- a/types/src/v0/v0_1/instance_state.rs +++ b/types/src/v0/v0_1/instance_state.rs @@ -2,13 +2,11 @@ use std::collections::BTreeMap; use std::sync::Arc; -use hotshot_types::HotShotConfig; use serde::{Deserialize, Serialize}; use std::fmt::Debug; use crate::{ - v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, PubKey, Timestamp, - ValidatedState, + v0::traits::StateCatchup, ChainConfig, GenesisHeader, L1BlockInfo, Timestamp, ValidatedState, }; use vbs::version::Version; @@ -69,38 +67,6 @@ pub struct Upgrade { pub upgrade_type: UpgradeType, } -impl Upgrade { - pub fn set_hotshot_config(&self, config: &mut HotShotConfig) { - match &self.mode { - UpgradeMode::View(v) => { - config.start_proposing_view = v.start_proposing_view; - config.stop_proposing_view = v.stop_proposing_view; - config.start_voting_view = v.start_voting_view.unwrap_or(0); - config.stop_voting_view = v.stop_voting_view.unwrap_or(u64::MAX); - config.start_proposing_time = 0; - config.stop_proposing_time = u64::MAX; - config.start_voting_time = 0; - config.stop_voting_time = u64::MAX; - } - UpgradeMode::Time(t) => { - config.start_proposing_time = t.start_proposing_time.unix_timestamp(); - config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); - config.start_voting_time = t.start_voting_time.unwrap_or_default().unix_timestamp(); - // this should not panic because Timestamp::max() constructs the maximum possible Unix timestamp - // using i64::MAX - config.stop_voting_time = t - .stop_voting_time - .unwrap_or(Timestamp::max().expect("overflow")) - .unix_timestamp(); - config.start_proposing_view = 0; - config.stop_proposing_view = u64::MAX; - config.start_voting_view = 0; - config.stop_voting_view = u64::MAX; - } - } - } -} - /// Represents the immutable state of a node. /// /// For mutable state, use `ValidatedState`. 
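Both branches of the `set_hotshot_config_parameters` method moved above follow the same masking idea: the window for the configured upgrade mode is copied into the config, and the other mode's window is widened to `(0, u64::MAX)` so it never gates proposing or voting. Below is a self-contained sketch of that pattern; `Config` and `Mode` here are simplified stand-ins for illustration, not the real `HotShotConfig` and `UpgradeMode`.

```rust
// Sketch of the view/time "masking" pattern, using stand-in types.
#[derive(Debug, Default)]
struct Config {
    start_proposing_view: u64,
    stop_proposing_view: u64,
    start_proposing_time: u64,
    stop_proposing_time: u64,
}

enum Mode {
    View { start: u64, stop: u64 },
    Time { start: u64, stop: u64 },
}

fn apply(mode: &Mode, config: &mut Config) {
    match mode {
        Mode::View { start, stop } => {
            // Copy the active (view-based) window...
            config.start_proposing_view = *start;
            config.stop_proposing_view = *stop;
            // ...and widen the inactive (time-based) window so it never gates.
            config.start_proposing_time = 0;
            config.stop_proposing_time = u64::MAX;
        }
        Mode::Time { start, stop } => {
            config.start_proposing_time = *start;
            config.stop_proposing_time = *stop;
            config.start_proposing_view = 0;
            config.stop_proposing_view = u64::MAX;
        }
    }
}

fn main() {
    let mut config = Config::default();
    apply(&Mode::View { start: 10, stop: 100 }, &mut config);
    assert_eq!(config.start_proposing_view, 10);
    assert_eq!(config.stop_proposing_view, 100);
    // The time-based window is wide open, so it never blocks an upgrade.
    assert_eq!(config.stop_proposing_time, u64::MAX);
}
```

Keeping both windows populated means downstream consensus code can compare the current view and time against the bounds unconditionally, without branching on which mode was configured.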
From 02728913cd1ae8cc3cf7cbde5420628741956d42 Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Fri, 12 Jul 2024 23:33:24 +0500
Subject: [PATCH 45/65] lint

---
 types/src/v0/impls/instance_state.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs
index 575a5da16e..adf2c15e70 100644
--- a/types/src/v0/impls/instance_state.rs
+++ b/types/src/v0/impls/instance_state.rs
@@ -1,10 +1,14 @@
 use std::{collections::BTreeMap, sync::Arc};
 
-use hotshot_types::{traits::{node_implementation::NodeType, states::InstanceState}, HotShotConfig};
+use hotshot_types::{
+    traits::{node_implementation::NodeType, states::InstanceState},
+    HotShotConfig,
+};
 use vbs::version::{StaticVersionType, Version};
 
 use crate::{
-    v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, PubKey, SeqTypes, Timestamp, Upgrade, UpgradeMode, ValidatedState
+    v0::traits::StateCatchup, ChainConfig, L1Client, NodeState, PubKey, SeqTypes, Timestamp,
+    Upgrade, UpgradeMode, ValidatedState,
 };
 
 impl NodeState {

From 0d8f9273131052dc8d9b976b4fcae52cacb0d5d4 Mon Sep 17 00:00:00 2001
From: Jeb Bearer
Date: Fri, 12 Jul 2024 16:39:32 -0400
Subject: [PATCH 46/65] Migrate to version-aware expression for generated merkle root columns

---
 ...6__alter_merkle_root_column_expressions.sql | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 sequencer/api/migrations/V36__alter_merkle_root_column_expressions.sql

diff --git a/sequencer/api/migrations/V36__alter_merkle_root_column_expressions.sql b/sequencer/api/migrations/V36__alter_merkle_root_column_expressions.sql
new file mode 100644
index 0000000000..ff5bcf8eea
--- /dev/null
+++ b/sequencer/api/migrations/V36__alter_merkle_root_column_expressions.sql
@@ -0,0 +1,18 @@
+-- The generated columns for header merkle roots were originally created by extracting fields
+-- `block_merkle_tree_root` and `fee_merkle_tree_root` from the header JSON. Post 0.1, though, the
+-- header serialization changed so that these fields are now nested one level deeper:
+-- `fields.block_merkle_tree_root` and `fields.fee_merkle_tree_root`. This migration alters the
+-- generated column expression to use NULL coalescing to extract the value from either of these
+-- paths depending on which version of the header we have.
+--
+-- Pre 17.x (we target Postgres >= 16.x), there is no explicit instruction for changing the
+-- expression of a generated column, so the best we can do is drop and re-add the column with a
+-- different expression.
+ +ALTER TABLE header + DROP column block_merkle_tree_root, + ADD column block_merkle_tree_root text + GENERATED ALWAYS AS (coalesce(data->'fields'->>'block_merkle_tree_root', data->>'block_merkle_tree_root')) STORED NOT NULL, + DROP column fee_merkle_tree_root, + ADD column fee_merkle_tree_root text + GENERATED ALWAYS AS (coalesce(data->'fields'->>'fee_merkle_tree_root', data->>'fee_merkle_tree_root')) STORED NOT NULL; From 7184e3cce876866ef6a3ae7e59dd1fb7ff957d17 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Mon, 15 Jul 2024 20:30:03 +0800 Subject: [PATCH 47/65] Add anvil and postgres to dev-node image (#1707) * Add anvil and postgres to dev-node image * Fix the build-docker-images scripts --- docker/espresso-dev-node.Dockerfile | 8 +++++++- scripts/build-docker-images | 10 +++++----- scripts/build-docker-images-native | 11 +++++------ 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/docker/espresso-dev-node.Dockerfile b/docker/espresso-dev-node.Dockerfile index 7b59828572..5da893bac4 100644 --- a/docker/espresso-dev-node.Dockerfile +++ b/docker/espresso-dev-node.Dockerfile @@ -14,6 +14,12 @@ RUN curl -LO https://github.com/EspressoSystems/ark-srs/releases/download/v0.2.0 COPY target/$TARGETARCH/release/espresso-dev-node /bin/espresso-dev-node RUN chmod +x /bin/espresso-dev-node +# Download the anvil binary +RUN curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${TARGETARCH}.tar.gz --output -| tar -xzvf - -C /bin/ anvil + +COPY scripts/launch-dev-node-with-postgres /bin/launch-dev-node-with-postgres +RUN chmod +x /bin/launch-dev-node-with-postgres + # When running as a Docker service, we always want a healthcheck endpoint, so set a default for the # port that the HTTP server will run on. This can be overridden in any given deployment environment. ENV ESPRESSO_SEQUENCER_API_PORT=8770 @@ -23,4 +29,4 @@ EXPOSE 8770 EXPOSE 8771 EXPOSE 8772 -CMD [ "/bin/espresso-dev-node"] +CMD [ "/bin/launch-dev-node-with-postgres"] diff --git a/scripts/build-docker-images b/scripts/build-docker-images index 2b31d20d46..d2c779bc1d 100755 --- a/scripts/build-docker-images +++ b/scripts/build-docker-images @@ -38,14 +38,14 @@ for ARCH in "amd64" "arm64"; do for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node"; do cp -v "${CARGO_TARGET_DIR}/${TARGET}/release/$binary" ${WORKDIR}/target/$ARCH/release done - - # Download the latest foundry binary and extract anvil for the dev-node docker image. 
- curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${ARCH}.tar.gz -o ${WORKDIR}/foundry.tar.gz - tar -xzvf ${WORKDIR}/foundry.tar.gz -C ${WORKDIR}/target/$ARCH/release anvil done +mkdir -p ${WORKDIR}/docker/scripts +cp -v docker/scripts/sequencer-awssecretsmanager.sh ${WORKDIR}/docker/scripts + # Copy the dev-node launch script -cp -v scripts/launch-dev-node-with-postgres ${WORKDIR} +mkdir -p ${WORKDIR}/scripts +cp -v scripts/launch-dev-node-with-postgres ${WORKDIR}/scripts export DOCKER_BUILDKIT=1 docker build -t ghcr.io/espressosystems/espresso-sequencer/orchestrator:main -f docker/orchestrator.Dockerfile ${WORKDIR} diff --git a/scripts/build-docker-images-native b/scripts/build-docker-images-native index 10f659681d..bb8d927d7c 100755 --- a/scripts/build-docker-images-native +++ b/scripts/build-docker-images-native @@ -95,13 +95,12 @@ for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequenc fi done -# Copy the dev-node launch script -cp -v scripts/launch-dev-node-with-postgres ${WORKDIR} - -# Download the latest foundry binary and extract anvil for the dev-node docker image. -curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${ARCH}.tar.gz -o ${WORKDIR}/foundry.tar.gz -tar -xzvf ${WORKDIR}/foundry.tar.gz -C ${WORKDIR}/target/$ARCH/release anvil +mkdir -p ${WORKDIR}/docker/scripts +cp -v docker/scripts/sequencer-awssecretsmanager.sh ${WORKDIR}/docker/scripts +# Copy the dev-node launch script +mkdir -p ${WORKDIR}/scripts +cp -v scripts/launch-dev-node-with-postgres ${WORKDIR}/scripts export DOCKER_BUILDKIT=1 docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/orchestrator:main -f docker/orchestrator.Dockerfile ${WORKDIR} From 94eaec37c99cd3141d00572bdd76d926d042d85e Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 15 Jul 2024 09:57:07 -0400 Subject: [PATCH 48/65] Have config/hotshot endpoint return full network config --- sequencer/src/api.rs | 28 ++-- sequencer/src/api/data_source.rs | 217 +++++++++++++++++++++++++------ sequencer/src/context.rs | 17 ++- sequencer/src/lib.rs | 9 +- 4 files changed, 206 insertions(+), 65 deletions(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 221418f5cb..436ff4e528 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -18,16 +18,16 @@ use futures::{ }; use hotshot::types::{Event, SystemContextHandle}; use hotshot_events_service::events_source::{BuilderEvent, EventsSource, EventsStreamer}; +use hotshot_orchestrator::config::NetworkConfig; use hotshot_query_service::data_source::ExtensibleDataSource; use hotshot_state_prover::service::light_client_genesis_from_stake_table; use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, traits::network::ConnectedNetwork, - HotShotConfig, }; use jf_merkle_tree::MerkleTreeScheme; use vbs::version::StaticVersionType; -use self::data_source::{HotShotConfigDataSource, PublicHotShotConfig, StateSignatureDataSource}; +use self::data_source::{HotShotConfigDataSource, PublicNetworkConfig, StateSignatureDataSource}; use crate::{ network, persistence::ChainConfigPersistence, state_signature::StateSigner, Node, SeqTypes, SequencerContext, @@ -53,6 +53,7 @@ struct ConsensusState, P: SequencerPersistence, Ver: state_signer: Arc>, event_streamer: Arc>>, node_state: NodeState, + config: NetworkConfig, #[derivative(Debug = "ignore")] handle: Arc>>>, @@ -66,6 +67,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp state_signer: 
ctx.state_signer(), event_streamer: ctx.event_streamer(), node_state: ctx.node_state(), + config: ctx.config(), handle: ctx.consensus(), } } @@ -114,18 +116,8 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp &self.consensus.as_ref().get().await.get_ref().node_state } - async fn hotshot_config(&self) -> HotShotConfig { - self.consensus - .as_ref() - .get() - .await - .get_ref() - .handle - .read() - .await - .hotshot - .config - .clone() + async fn network_config(&self) -> NetworkConfig { + self.consensus.as_ref().get().await.get_ref().config.clone() } } @@ -314,16 +306,16 @@ impl< P: SequencerPersistence, > HotShotConfigDataSource for StorageState { - async fn get_config(&self) -> PublicHotShotConfig { - self.as_ref().hotshot_config().await.into() + async fn get_config(&self) -> PublicNetworkConfig { + self.as_ref().network_config().await.into() } } impl, Ver: StaticVersionType + 'static, P: SequencerPersistence> HotShotConfigDataSource for ApiState { - async fn get_config(&self) -> PublicHotShotConfig { - self.hotshot_config().await.into() + async fn get_config(&self) -> PublicNetworkConfig { + self.network_config().await.into() } } diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index 8b374b7521..0755b63a40 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -1,6 +1,6 @@ use std::{num::NonZeroUsize, time::Duration}; -use anyhow::bail; +use anyhow::{bail, Context}; use async_trait::async_trait; use committable::Commitment; use espresso_types::{ @@ -9,6 +9,9 @@ use espresso_types::{ }; use ethers::prelude::Address; use futures::future::Future; +use hotshot_orchestrator::config::{ + BuilderType, CombinedNetworkConfig, Libp2pConfig, NetworkConfig, RandomBuilderConfig, +}; use hotshot_query_service::{ availability::AvailabilityDataSource, data_source::{MetricsDataSource, UpdateDataSource, VersionedDataSource}, @@ -20,7 +23,7 @@ use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, traits::network::ConnectedNetwork, ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, }; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use tide_disco::Url; use vbs::version::StaticVersionType; use vec1::Vec1; @@ -97,7 +100,7 @@ pub(crate) trait SubmitDataSource, P: SequencerPersi } pub(crate) trait HotShotConfigDataSource { - fn get_config(&self) -> impl Send + Future; + fn get_config(&self) -> impl Send + Future; } #[async_trait] @@ -162,14 +165,14 @@ impl CatchupDataSource for MetricsDataSource {} /// This struct defines the public Hotshot validator configuration. /// Private key and state key pairs are excluded for security reasons. -#[derive(Debug, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct PublicValidatorConfig { - pub public_key: PubKey, - pub stake_value: u64, - pub is_da: bool, - pub private_key: &'static str, - pub state_public_key: String, - pub state_key_pair: &'static str, + public_key: PubKey, + stake_value: u64, + is_da: bool, + private_key: String, + state_public_key: String, + state_key_pair: String, } impl From> for PublicValidatorConfig { @@ -189,8 +192,8 @@ impl From> for PublicValidatorConfig { stake_value, is_da, state_public_key: state_public_key.to_string(), - private_key: "*****", - state_key_pair: "*****", + private_key: "*****".into(), + state_key_pair: "*****".into(), } } } @@ -198,36 +201,36 @@ impl From> for PublicValidatorConfig { /// This struct defines the public Hotshot configuration parameters. 
/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters. /// Hotshot config has sensitive information like private keys and such fields are excluded from this struct. -#[derive(Debug, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct PublicHotShotConfig { - pub execution_type: ExecutionType, - pub start_threshold: (u64, u64), - pub num_nodes_with_stake: NonZeroUsize, - pub num_nodes_without_stake: usize, - pub known_nodes_with_stake: Vec>, - pub known_da_nodes: Vec>, - pub known_nodes_without_stake: Vec, - pub my_own_validator_config: PublicValidatorConfig, - pub da_staked_committee_size: usize, - pub da_non_staked_committee_size: usize, - pub fixed_leader_for_gpuvid: usize, - pub next_view_timeout: u64, - pub view_sync_timeout: Duration, - pub timeout_ratio: (u64, u64), - pub round_start_delay: u64, - pub start_delay: u64, - pub num_bootstrap: usize, - pub builder_timeout: Duration, - pub data_request_delay: Duration, - pub builder_urls: Vec1, - pub start_proposing_view: u64, - pub stop_proposing_view: u64, - pub start_voting_view: u64, - pub stop_voting_view: u64, - pub start_proposing_time: u64, - pub stop_proposing_time: u64, - pub start_voting_time: u64, - pub stop_voting_time: u64, + execution_type: ExecutionType, + start_threshold: (u64, u64), + num_nodes_with_stake: NonZeroUsize, + num_nodes_without_stake: usize, + known_nodes_with_stake: Vec>, + known_da_nodes: Vec>, + known_nodes_without_stake: Vec, + my_own_validator_config: PublicValidatorConfig, + da_staked_committee_size: usize, + da_non_staked_committee_size: usize, + fixed_leader_for_gpuvid: usize, + next_view_timeout: u64, + view_sync_timeout: Duration, + timeout_ratio: (u64, u64), + round_start_delay: u64, + start_delay: u64, + num_bootstrap: usize, + builder_timeout: Duration, + data_request_delay: Duration, + builder_urls: Vec1, + start_proposing_view: u64, + stop_proposing_view: u64, + start_voting_view: u64, + stop_voting_view: u64, + start_proposing_time: u64, + stop_proposing_time: u64, + start_voting_time: u64, + stop_voting_time: u64, } impl From> for PublicHotShotConfig { @@ -299,6 +302,138 @@ impl From> for PublicHotShotConfig { } } +impl PublicHotShotConfig { + pub fn into_hotshot_config( + self, + my_own_validator_config: ValidatorConfig, + ) -> HotShotConfig { + HotShotConfig { + execution_type: self.execution_type, + start_threshold: self.start_threshold, + num_nodes_with_stake: self.num_nodes_with_stake, + num_nodes_without_stake: self.num_nodes_without_stake, + known_nodes_with_stake: self.known_nodes_with_stake, + known_da_nodes: self.known_da_nodes, + known_nodes_without_stake: self.known_nodes_without_stake, + my_own_validator_config, + da_staked_committee_size: self.da_staked_committee_size, + da_non_staked_committee_size: self.da_non_staked_committee_size, + fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid, + next_view_timeout: self.next_view_timeout, + view_sync_timeout: self.view_sync_timeout, + timeout_ratio: self.timeout_ratio, + round_start_delay: self.round_start_delay, + start_delay: self.start_delay, + num_bootstrap: self.num_bootstrap, + builder_timeout: self.builder_timeout, + data_request_delay: self.data_request_delay, + builder_urls: self.builder_urls, + start_proposing_view: self.start_proposing_view, + stop_proposing_view: self.stop_proposing_view, + start_voting_view: self.start_voting_view, + stop_voting_view: self.stop_voting_view, + start_proposing_time: self.start_proposing_time, + 
stop_proposing_time: self.stop_proposing_time, + start_voting_time: self.start_voting_time, + stop_voting_time: self.stop_voting_time, + } + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct PublicNetworkConfig { + rounds: usize, + indexed_da: bool, + transactions_per_round: usize, + manual_start_password: Option, + num_bootrap: usize, + next_view_timeout: u64, + view_sync_timeout: Duration, + builder_timeout: Duration, + data_request_delay: Duration, + node_index: u64, + seed: [u8; 32], + transaction_size: usize, + start_delay_seconds: u64, + key_type_name: String, + libp2p_config: Option, + config: PublicHotShotConfig, + cdn_marshal_address: Option, + combined_network_config: Option, + commit_sha: String, + builder: BuilderType, + random_builder: Option, +} + +impl From> for PublicNetworkConfig { + fn from(cfg: NetworkConfig) -> Self { + Self { + rounds: cfg.rounds, + indexed_da: cfg.indexed_da, + transactions_per_round: cfg.transactions_per_round, + manual_start_password: cfg.manual_start_password, + num_bootrap: cfg.num_bootrap, + next_view_timeout: cfg.next_view_timeout, + view_sync_timeout: cfg.view_sync_timeout, + builder_timeout: cfg.builder_timeout, + data_request_delay: cfg.data_request_delay, + node_index: cfg.node_index, + seed: cfg.seed, + transaction_size: cfg.transaction_size, + start_delay_seconds: cfg.start_delay_seconds, + key_type_name: cfg.key_type_name, + libp2p_config: cfg.libp2p_config, + config: cfg.config.into(), + cdn_marshal_address: cfg.cdn_marshal_address, + combined_network_config: cfg.combined_network_config, + commit_sha: cfg.commit_sha, + builder: cfg.builder, + random_builder: cfg.random_builder, + } + } +} + +impl PublicNetworkConfig { + pub fn into_network_config( + self, + my_own_validator_config: ValidatorConfig, + ) -> anyhow::Result> { + let node_index = self + .config + .known_nodes_with_stake + .iter() + .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key) + .context(format!( + "the node {} is not in the stake table", + my_own_validator_config.public_key + ))? 
as u64; + + Ok(NetworkConfig { + rounds: self.rounds, + indexed_da: self.indexed_da, + transactions_per_round: self.transactions_per_round, + manual_start_password: self.manual_start_password, + num_bootrap: self.num_bootrap, + next_view_timeout: self.next_view_timeout, + view_sync_timeout: self.view_sync_timeout, + builder_timeout: self.builder_timeout, + data_request_delay: self.data_request_delay, + node_index, + seed: self.seed, + transaction_size: self.transaction_size, + start_delay_seconds: self.start_delay_seconds, + key_type_name: self.key_type_name, + libp2p_config: self.libp2p_config, + config: self.config.into_hotshot_config(my_own_validator_config), + cdn_marshal_address: self.cdn_marshal_address, + combined_network_config: self.combined_network_config, + commit_sha: self.commit_sha, + builder: self.builder, + random_builder: self.random_builder, + }) + } +} + #[cfg(test)] pub(crate) mod testing { use super::{super::Options, *}; diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 1340f9abb2..cdd22165af 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -20,13 +20,12 @@ use hotshot::{ }; use hotshot_events_service::events_source::{EventConsumer, EventsStreamer}; use hotshot_example_types::auction_results_provider_types::TestAuctionResultsProvider; -use hotshot_orchestrator::client::OrchestratorClient; +use hotshot_orchestrator::{client::OrchestratorClient, config::NetworkConfig}; use hotshot_query_service::Leaf; use hotshot_types::{ consensus::ConsensusMetricsValue, data::ViewNumber, traits::{election::Membership, metrics::Metrics, network::ConnectedNetwork}, - HotShotConfig, }; use url::Url; use vbs::version::StaticVersionType; @@ -63,6 +62,8 @@ pub struct SequencerContext< detached: bool, node_state: NodeState, + + config: NetworkConfig, } impl, P: SequencerPersistence, Ver: StaticVersionType + 'static> @@ -71,7 +72,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp #[tracing::instrument(skip_all, fields(node_id = instance_state.node_id))] #[allow(clippy::too_many_arguments)] pub async fn init( - config: HotShotConfig, + network_config: NetworkConfig, instance_state: NodeState, persistence: P, network: Arc, @@ -80,6 +81,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp stake_table_capacity: u64, _: Ver, ) -> anyhow::Result { + let config = &network_config.config; let pub_key = config.my_own_validator_config.public_key; tracing::info!(%pub_key, "initializing consensus"); @@ -131,7 +133,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp config.my_own_validator_config.public_key, config.my_own_validator_config.private_key.clone(), instance_state.node_id, - config, + config.clone(), memberships, network, initializer, @@ -153,6 +155,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp state_signer, event_streamer, instance_state, + network_config, )) } @@ -163,6 +166,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp state_signer: StateSigner, event_streamer: Arc>>, node_state: NodeState, + config: NetworkConfig, ) -> Self { let events = handle.event_stream(); @@ -174,6 +178,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp wait_for_orchestrator: None, events_streamer: event_streamer.clone(), node_state, + config, }; ctx.spawn( "main event handler", @@ -285,6 +290,10 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp // Set `detached` so the drop handler doesn't call `shut_down`. 
self.detached = true; } + + pub fn config(&self) -> NetworkConfig { + self.config.clone() + } } impl, P: SequencerPersistence, Ver: StaticVersionType + 'static> Drop diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 7cfefdb5ae..03cf4f6529 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -328,7 +328,7 @@ pub async fn init_node( }; let mut ctx = SequencerContext::init( - config.config, + config, instance_state, persistence, network, @@ -659,7 +659,12 @@ pub mod testing { "starting node", ); SequencerContext::init( - config, + NetworkConfig { + config, + // For testing, we use a fake network, so the rest of the network config beyond + // the base consensus config does not matter. + ..Default::default() + }, node_state, persistence_opt.create().await.unwrap(), network, From 7075d516540dceb95a905612b13cb32c9979b9bc Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 15 Jul 2024 10:49:20 -0400 Subject: [PATCH 49/65] Allow nodes to fetch network config from peers --- builder/src/bin/permissioned-builder.rs | 1 + sequencer/src/api.rs | 58 ++++++++++++++++++++++++- sequencer/src/catchup.rs | 45 ++++++++++++++++++- sequencer/src/lib.rs | 18 ++++++-- sequencer/src/main.rs | 1 + sequencer/src/options.rs | 27 ++++++++++++ 6 files changed, 144 insertions(+), 6 deletions(-) diff --git a/builder/src/bin/permissioned-builder.rs b/builder/src/bin/permissioned-builder.rs index a83411ed38..b76d02b245 100644 --- a/builder/src/bin/permissioned-builder.rs +++ b/builder/src/bin/permissioned-builder.rs @@ -255,6 +255,7 @@ async fn main() -> anyhow::Result<()> { private_staking_key: private_staking_key.clone(), private_state_key, state_peers: opt.state_peers, + config_peers: None, catchup_backoff: Default::default(), }; diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 436ff4e528..7c9f9c724b 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1051,6 +1051,7 @@ mod test { metrics::NoMetrics, node_implementation::{ConsensusTime, NodeType}, }, + ValidatorConfig, }; use jf_merkle_tree::prelude::{MerkleProof, Sha3Node}; use portpicker::pick_unused_port; @@ -1063,7 +1064,8 @@ mod test { use vbs::version::Version; use self::{ - data_source::testing::TestableSequencerDataSource, sql::DataSource as SqlDataSource, + data_source::{testing::TestableSequencerDataSource, PublicHotShotConfig}, + sql::DataSource as SqlDataSource, }; use super::*; use crate::{ @@ -1664,4 +1666,58 @@ mod test { .unwrap(); assert_eq!(chain, new_chain); } + + #[async_std::test] + async fn test_fetch_config() { + setup_logging(); + setup_backtrace(); + + let port = pick_unused_port().expect("No ports free"); + let url: surf_disco::Url = format!("http://localhost:{port}").parse().unwrap(); + let client: Client = Client::new(url.clone()); + + let options = Options::with_port(port).config(Default::default()); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); + let network_config = TestConfigBuilder::default().l1_url(l1).build(); + let config = TestNetworkConfigBuilder::default() + .api_config(options) + .network_config(network_config) + .build(); + let network = TestNetwork::new(config).await; + client.connect(None).await; + + // Fetch a network config from the API server. The first peer URL is bogus, to test the + // failure/retry case. + let peers = StatePeers::::from_urls( + vec!["https://notarealnode.network".parse().unwrap(), url], + Default::default(), + ); + + // Fetch the config from node 1, a different node than the one running the service. 
+ let validator = ValidatorConfig::generated_from_seed_indexed([0; 32], 1, 1, false); + let mut config = peers.fetch_config(validator.clone()).await; + + // Check the node-specific information in the recovered config is correct. + assert_eq!( + config.config.my_own_validator_config.public_key, + validator.public_key + ); + assert_eq!( + config.config.my_own_validator_config.private_key, + validator.private_key + ); + + // Check the public information is also correct (with respect to the node that actually + // served the config, for public keys). + config.config.my_own_validator_config = + ValidatorConfig::generated_from_seed_indexed([0; 32], 0, 1, true); + pretty_assertions::assert_eq!( + serde_json::to_value(PublicHotShotConfig::from(config.config)).unwrap(), + serde_json::to_value(PublicHotShotConfig::from( + network.cfg.hotshot_config().clone() + )) + .unwrap() + ); + } } diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index fbc2b2c7a3..9c08bafd37 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -8,7 +8,11 @@ use espresso_types::{ v0::traits::{PersistenceOptions, StateCatchup}, AccountQueryData, BackoffParams, BlockMerkleTree, ChainConfig, FeeAccount, FeeMerkleCommitment, }; -use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime as _}; +use futures::future::FutureExt; +use hotshot_orchestrator::config::NetworkConfig; +use hotshot_types::{ + data::ViewNumber, traits::node_implementation::ConsensusTime as _, ValidatorConfig, +}; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; use serde::de::DeserializeOwned; use surf_disco::Request; @@ -16,7 +20,13 @@ use tide_disco::error::ServerError; use url::Url; use vbs::version::StaticVersionType; -use crate::api::{data_source::CatchupDataSource, BlocksFrontier}; +use crate::{ + api::{ + data_source::{CatchupDataSource, PublicNetworkConfig}, + BlocksFrontier, + }, + PubKey, +}; // This newtype is probably not worth having. It's only used to be able to log // URLs before doing requests. @@ -71,6 +81,37 @@ impl StatePeers { backoff, } } + + pub async fn fetch_config( + &self, + my_own_validator_config: ValidatorConfig, + ) -> NetworkConfig { + self.backoff() + .retry(self, move |provider| { + let my_own_validator_config = my_own_validator_config.clone(); + async move { + for client in &provider.clients { + tracing::info!("fetching config from {}", client.url); + match client + .get::("config/hotshot") + .send() + .await + { + Ok(res) => { + return res.into_network_config(my_own_validator_config) + .context(format!("fetched config from {}, but failed to convert to private config", client.url)); + } + Err(err) => { + tracing::warn!("error fetching config from peer: {err:#}"); + } + } + } + bail!("could not fetch config from any peer"); + } + .boxed() + }) + .await + } } #[async_trait] diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 03cf4f6529..e83261786c 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -102,6 +102,7 @@ pub struct NetworkParams { pub private_staking_key: BLSPrivKey, pub private_state_key: StateSignKey, pub state_peers: Vec, + pub config_peers: Option>, pub catchup_backoff: BackoffParams, /// The address to send to other Libp2p nodes to contact us @@ -168,12 +169,23 @@ pub async fn init_node( .with_context(|| "Failed to derive Libp2p peer ID")?; let mut persistence = persistence_opt.clone().create().await?; - let (mut config, wait_for_orchestrator) = match persistence.load_config().await? 
{ - Some(config) => { + let (mut config, wait_for_orchestrator) = match ( + persistence.load_config().await?, + network_params.config_peers, + ) { + (Some(config), _) => { tracing::info!("loaded network config from storage, rejoining existing network"); (config, false) } - None => { + // If we were told to fetch the config from an already-started peer, do so. + (None, Some(peers)) => { + tracing::info!(?peers, "loading network config from peers"); + let peers = StatePeers::::from_urls(peers, network_params.catchup_backoff); + let config = peers.fetch_config(my_config.clone()).await; + (config, false) + } + // Otherwise, this is a fresh network; load from the orchestrator. + (None, None) => { tracing::info!("loading network config from orchestrator"); tracing::error!( "waiting for other nodes to connect, DO NOT RESTART until fully connected" diff --git a/sequencer/src/main.rs b/sequencer/src/main.rs index a5a878176f..1142e5c587 100644 --- a/sequencer/src/main.rs +++ b/sequencer/src/main.rs @@ -83,6 +83,7 @@ where private_staking_key, private_state_key, state_peers: opt.state_peers, + config_peers: opt.config_peers, catchup_backoff: opt.catchup_backoff, }; diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 1eb88bf7bd..d8384bacb7 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -188,6 +188,16 @@ pub struct Options { #[derivative(Debug(format_with = "fmt_urls"))] pub state_peers: Vec, + /// Peer nodes use to fetch missing config + /// + /// Typically, the network-wide config is fetched from the orchestrator on startup and then + /// persisted and loaded from local storage each time the node restarts. However, if the + /// persisted config is missing when the node restarts (for example, the node is being migrated + /// to new persistent storage), it can instead be fetched directly from a peer. + #[clap(long, env = "ESPRESSO_SEQUENCER_CONFIG_PEERS", value_delimiter = ',')] + #[derivative(Debug(format_with = "fmt_opt_urls"))] + pub config_peers: Option>, + /// Exponential backoff for fetching missing state from peers. #[clap(flatten)] pub catchup_backoff: BackoffParams, @@ -230,6 +240,23 @@ fn fmt_urls(v: &[Url], fmt: &mut std::fmt::Formatter) -> Result<(), std::fmt::Er ) } +fn fmt_opt_urls( + v: &Option>, + fmt: &mut std::fmt::Formatter, +) -> Result<(), std::fmt::Error> { + match v { + Some(urls) => { + write!(fmt, "Some(")?; + fmt_urls(urls, fmt)?; + write!(fmt, ")")?; + } + None => { + write!(fmt, "None")?; + } + } + Ok(()) +} + #[derive(Clone, Debug, Snafu)] pub struct ParseDurationError { reason: String, From 3cc1a3ff933f322fe05c2388897398367cb3d79a Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 15 Jul 2024 10:59:47 -0400 Subject: [PATCH 50/65] Check node index in config fetching test --- sequencer/src/api.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 7c9f9c724b..94961d2e52 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1699,6 +1699,7 @@ mod test { let mut config = peers.fetch_config(validator.clone()).await; // Check the node-specific information in the recovered config is correct. 
+ assert_eq!(config.node_index, 1); assert_eq!( config.config.my_own_validator_config.public_key, validator.public_key From f11bac20b27ae1c100c868aa402176c12bd76594 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 15 Jul 2024 11:01:50 -0400 Subject: [PATCH 51/65] Persist network config after fetching from peer --- sequencer/src/lib.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index e83261786c..a837b7996e 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -182,6 +182,13 @@ pub async fn init_node( tracing::info!(?peers, "loading network config from peers"); let peers = StatePeers::::from_urls(peers, network_params.catchup_backoff); let config = peers.fetch_config(my_config.clone()).await; + + tracing::info!( + node_id = config.node_index, + stake_table = ?config.config.known_nodes_with_stake, + "loaded config", + ); + persistence.save_config(&config).await?; (config, false) } // Otherwise, this is a fresh network; load from the orchestrator. From 7594ef4fcc0ecaaa79dc4c81772b30d09faa64fb Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Mon, 15 Jul 2024 20:36:07 +0500 Subject: [PATCH 52/65] update upgrades.md --- doc/upgrades.md | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/doc/upgrades.md b/doc/upgrades.md index c01bef0d4f..b8eccc24a9 100644 --- a/doc/upgrades.md +++ b/doc/upgrades.md @@ -5,6 +5,17 @@ Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Up After enough votes have been collected on the `UpgradeProposal`, an `UpgradeCertificate` is formed. This is attached to the next `QuorumProposal`, and any node that receives an `UpgradeCertificate` in this way re-attaches it to its own `QuorumProposal` until the network has upgraded, or (in rare cases) we failed to reach consensus on the `UpgradeCertificate`. + + +An upgrade consists of two parts: + +- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates (e.g., new rules or modifications to existing rules within the protocol, such as blocks executing new logic if the version is greater than or equal to the specified NEXT_VERSION) and type changes (e.g., using a new type variant for a particular version, such as using V2 for ValidatedState if the version is 0.2). +- **Migration:** Migration involves updating existing data to align with the new version, such as updating the chain config + + + +Note: We currently support only chain config migration. + ## Enabling an Upgrade To enable an upgrade in Hotshot protocol, it is essential to define the base version, the upgrade version, and a upgrade hash: @@ -25,18 +36,30 @@ impl NodeType for SeqTypes { } ``` -These parameters are fetched from the genesis TOML file and set in Hotshot config: +Hotshot provides two modes for upgrades: view-based and time-based. + +View-based parameters allow nodes to vote and propose an upgrade based on the current node's view, while time-based parameters use Unix timestamps for the same purpose. + +To simplify configuration, these parameters are fetched from the genesis TOML file and set in the Hotshot config. The TOML file can include either view-based parameters or time-based parameters, but not both. +Furthermore, The start and stop voting parameters for both time-based and view-based upgrades are optional. 
Start parameter is set 0 so that voting begins as soon as node is started while the stop parameter is set to a maximum value so that the nodes keep voting until enough votes are collected.
 
-- **start_voting_view:** view at which voting for the upgrade proposal starts. In our implementation, this is set to 1 so that voting is enabled as soon as the node is started.
+View based:
+- **start_voting_view:** view at which voting for the upgrade proposal starts.
 - **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
 - **start_proposing_view:** the earliest view in which the node can propose an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node will send out an `UpgradeProposal`.
 - **stop_proposing_view:** view after which the node stops proposing an upgrade. If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again.
 
-The window between `start_proposing_view` and `stop_proposing_view` should provide sufficient time for nodes to continue proposing the upgrade until successful.
+Time based:
+- **start_voting_time:** UNIX timestamp at which voting for the upgrade proposal starts.
+- **stop_voting_time:** UNIX timestamp at which voting for the upgrade proposal stops.
+- **start_proposing_time:** the earliest UNIX timestamp in which the node can propose an upgrade.
+- **stop_proposing_time:** UNIX timestamp after which the node stops proposing an upgrade.
+
+
+The window between `start_proposing_view/time` and `stop_proposing_view/time` should provide sufficient time for nodes to continue proposing the upgrade until successful.
 
 Ensure that the `ESPRESSO_SEQUENCER_GENESIS_FILE` environment variable is defined to point to the path of the genesis TOML file. For an example with upgrades enabled, refer to [`data/genesis/demo.toml`](../data/genesis/demo.toml).
 
-Note: We currently support only chain config upgrade.
 
 ### Example TOML Configuration
 ```toml

From 97546dc58e06cfbed456adabf498b919363b5df7 Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 16 Jul 2024 10:20:21 +0300
Subject: [PATCH 53/65] Reword the description of upgrade for clarity.

I did some rewording. I hope it is clearer this way, but might be worth
getting a second opinion.
---
 doc/upgrades.md | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index b8eccc24a9..2dc55e15a7 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -1,20 +1,13 @@
-
 # Upgrades
 
 Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Upgrade proposal is broadcast separately from the `QuorumProposal`, typically several views in advance of its attachment. The goal is to ensure ample time for nodes to receive and prepare for the upgrade process.
 
 After enough votes have been collected on the `UpgradeProposal`, an `UpgradeCertificate` is formed. This is attached to the next `QuorumProposal`, and any node that receives an `UpgradeCertificate` in this way re-attaches it to its own `QuorumProposal` until the network has upgraded, or (in rare cases) we failed to reach consensus on the `UpgradeCertificate`.
- - An upgrade consists of two parts: -- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates (e.g., new rules or modifications to existing rules within the protocol, such as blocks executing new logic if the version is greater than or equal to the specified NEXT_VERSION) and type changes (e.g., using a new type variant for a particular version, such as using V2 for ValidatedState if the version is 0.2). -- **Migration:** Migration involves updating existing data to align with the new version, such as updating the chain config - - - -Note: We currently support only chain config migration. +- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates and type changes. Logic updates topically involve adding or modifying the criteria or consequences of block execution. This new behavior will be enabled at runtime if sequencer version is greater than or equal to the version behind which they are gated. In addition, an upgrade may change the shape of a type. A field of `BlockHeader` might become a `u64` where it was before a `u8`. A field may be added to `ChainConfig`. In such cases a new version of these types is added and a version of the sequencer designated to incorporate them. +- **Migration:** Migration involves updating existing data to align with the new version, such as updating chain-config values. Since these values are immutable in normal operation, an upgrade is required to modify them. Note that the only currently supported upgrade of this kind is the migration of chain-config. ## Enabling an Upgrade @@ -27,12 +20,12 @@ To enable an upgrade in Hotshot protocol, it is essential to define the base ver These are defined in [NodeType implementation](../types/src/v0/mod.rs) for the Types (`SeqTypes` in our case). ```rust impl NodeType for SeqTypes { - type Base = StaticVersion<0, 1>; - type Upgrade = StaticVersion<0, 2>; - const UPGRADE_HASH: [u8; 32] = [ - 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, - ], - .. + type Base = StaticVersion<0, 1>; + type Upgrade = StaticVersion<0, 2>; + const UPGRADE_HASH: [u8; 32] = [ + 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, + ], + .. } ``` @@ -92,4 +85,4 @@ A successful Hotshot upgrade results in a new version, which allows us to update In scenarios where nodes join the network or restart, missing the upgrade window may result in their ValidatedState having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the full chain config for this chain config commitment. -Note: For the fee upgrade to work, the builder must have sufficient funds to cover the fees. The Espresso bridge can be used to fund the builder. \ No newline at end of file +Note: For the fee upgrade to work, the builder must have sufficient funds to cover the fees. The Espresso bridge can be used to fund the builder. 
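The genesis TOML described in this document holds either view-based or time-based parameters in the same `[[upgrade]]` table. One plausible way to model that in serde, echoing the `#[serde(untagged)]` attribute on the `UpgradeMode` enum added earlier in this series, is sketched below: serde tries each variant in order and accepts the first whose required fields are present. The types here are trimmed stand-ins (only the proposing window is modeled), not the real `espresso-types` definitions; the sketch assumes the `serde` (with `derive`) and `toml` crates, and the real genesis parsing layers on extra validation such as the start/stop ordering checks added to `sequencer/src/genesis.rs` above.

```rust
use serde::Deserialize;

// Trimmed stand-ins for the upgrade mode types.
#[derive(Debug, Deserialize)]
struct ViewBasedUpgrade {
    start_proposing_view: u64,
    stop_proposing_view: u64,
}

#[derive(Debug, Deserialize)]
struct TimeBasedUpgrade {
    start_proposing_time: u64,
    stop_proposing_time: u64,
}

// With `untagged`, the TOML carries no explicit tag; the variant is inferred
// from which field names are present in the table.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum UpgradeMode {
    Time(TimeBasedUpgrade),
    View(ViewBasedUpgrade),
}

fn main() {
    // View-based fields are absent from the Time variant, so serde falls
    // through to View here...
    let view: UpgradeMode =
        toml::from_str("start_proposing_view = 1\nstop_proposing_view = 10").unwrap();
    // ...and time-based fields match the Time variant directly.
    let time: UpgradeMode =
        toml::from_str("start_proposing_time = 1720000000\nstop_proposing_time = 1730000000")
            .unwrap();
    println!("{view:?}");
    println!("{time:?}");
}
```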
From 66f9f35e8b1ce3546fe2f6700270c05d8efd88ab Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Tue, 16 Jul 2024 13:55:47 +0500
Subject: [PATCH 54/65] fix typo

---
 doc/upgrades.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index 2dc55e15a7..b8acd9f5f1 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -6,8 +6,8 @@
 
 An upgrade consists of two parts:
 
-- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates and type changes. Logic updates topically involve adding or modifying the criteria or consequences of block execution. This new behavior will be enabled at runtime if sequencer version is greater than or equal to the version behind which they are gated. In addition, an upgrade may change the shape of a type. A field of `BlockHeader` might become a `u64` where it was before a `u8`. A field may be added to `ChainConfig`. In such cases a new version of these types is added and a version of the sequencer designated to incorporate them.
-- **Migration:** Migration involves updating existing data to align with the new version, such as updating chain-config values. Since these values are immutable in normal operation, an upgrade is required to modify them. Note that the only currently supported upgrade of this kind is the migration of chain-config.
+- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates and type changes. Logic updates typically involve adding or modifying the criteria or consequences of block execution. This new behavior will be enabled at runtime if sequencer version is greater than or equal to the version behind which they are gated. In addition, an upgrade may change the shape of a type. A field of `BlockHeader` might become a `u64` where it was before a `u8`. A field may be added to `ChainConfig`. In such cases a new version of these types is added and a version of the sequencer designated to incorporate them.
+- **Migration:** Migration involves updating existing data to align with the new version, such as updating `ChainConfig` values. Since these values are immutable in normal operation, an upgrade is required to modify them. Note that the only currently supported upgrade of this kind is the migration of `ChainConfig`.

From 2659decf6aa51e5b9c73c24300cd8697a358474f Mon Sep 17 00:00:00 2001
From: Abdul Basit
Date: Tue, 16 Jul 2024 14:24:59 +0500
Subject: [PATCH 55/65] lint

---
 doc/upgrades.md | 91 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 65 insertions(+), 26 deletions(-)

diff --git a/doc/upgrades.md b/doc/upgrades.md
index b8acd9f5f1..75a65dfba4 100644
--- a/doc/upgrades.md
+++ b/doc/upgrades.md
@@ -1,23 +1,40 @@
 # Upgrades
 
-Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Upgrade proposal is broadcast separately from the `QuorumProposal`, typically several views in advance of its attachment. The goal is to ensure ample time for nodes to receive and prepare for the upgrade process.
+Hotshot protocol supports upgrades through an Upgrade proposal mechanism. The Upgrade proposal is broadcast separately
+from the `QuorumProposal`, typically several views in advance of its attachment. The goal is to ensure ample time for
+nodes to receive and prepare for the upgrade process.
-After enough votes have been collected on the `UpgradeProposal`, an `UpgradeCertificate` is formed. This is attached to the next `QuorumProposal`, and any node that receives an `UpgradeCertificate` in this way re-attaches it to its own `QuorumProposal` until the network has upgraded, or (in rare cases) we failed to reach consensus on the `UpgradeCertificate`.
+After enough votes have been collected on the `UpgradeProposal`, an `UpgradeCertificate` is formed. This is attached to
+the next `QuorumProposal`, and any node that receives an `UpgradeCertificate` in this way re-attaches it to its own
+`QuorumProposal` until the network has upgraded, or (in rare cases) we failed to reach consensus on the
+`UpgradeCertificate`.
 
 An upgrade consists of two parts:
 
-- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates and type changes. Logic updates typically involve adding or modifying the criteria or consequences of block execution. This new behavior will be enabled at runtime if sequencer version is greater than or equal to the version behind which they are gated. In addition, an upgrade may change the shape of a type. A field of `BlockHeader` might become a `u64` where it was before a `u8`. A field may be added to `ChainConfig`. In such cases a new version of these types is added and a version of the sequencer designated to incorporate them.
-- **Migration:** Migration involves updating existing data to align with the new version, such as updating `ChainConfig` values. Since these values are immutable in normal operation, an upgrade is required to modify them. Note that the only currently supported upgrade of this kind is the migration of `ChainConfig`.
+- **Version Bump:** The version bump initiates a change in the protocol, which can involve logic updates and type
+  changes. Logic updates typically involve adding or modifying the criteria or consequences of block execution. This new
+  behavior will be enabled at runtime if sequencer version is greater than or equal to the version behind which they are
+  gated. In addition, an upgrade may change the shape of a type. A field of `BlockHeader` might become a `u64` where it
+  was before a `u8`. A field may be added to `ChainConfig`. In such cases a new version of these types is added and a
+  version of the sequencer designated to incorporate them.
+- **Migration:** Migration involves updating existing data to align with the new version, such as updating `ChainConfig`
+  values. Since these values are immutable in normal operation, an upgrade is required to modify them. Note that the
+  only currently supported upgrade of this kind is the migration of `ChainConfig`.
 
 ## Enabling an Upgrade
 
-To enable an upgrade in Hotshot protocol, it is essential to define the base version, the upgrade version, and a upgrade hash:
+To enable an upgrade in Hotshot protocol, it is essential to define the base version, the upgrade version, and an
+upgrade hash:
 
 - **Base Version:** Represents the current version of the protocol (`0.1` in this example).
-- **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in this example).
-- **Upgrade Hash:** Acts as a unique identifier for the specific upgrade nodes are voting on. It distinguishes between different proposals of the same version upgrade, ensuring nodes vote and execute the correct one. It consists of a sequence of 32 bytes.
+- **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in
+  this example).
+- **Upgrade Hash:** Acts as a unique identifier for the specific upgrade nodes are voting on. It distinguishes between
+  different proposals of the same version upgrade, ensuring nodes vote and execute the correct one. It consists of a
+  sequence of 32 bytes.
+- **Upgrade Version:** Specifies the version to which the protocol will upgrade once the process is successful (`0.2` in
+ this example).
+- **Upgrade Hash:** Acts as a unique identifier for the specific upgrade nodes are voting on. It distinguishes between
+ different proposals of the same version upgrade, ensuring nodes vote and execute the correct one. It consists of a
+ sequence of 32 bytes.

 These are defined in [NodeType implementation](../types/src/v0/mod.rs) for the Types (`SeqTypes` in our case).
+
 ```rust
 impl NodeType for SeqTypes {
 type Base = StaticVersion<0, 1>;
@@ -31,27 +48,37 @@ impl NodeType for SeqTypes {

 Hotshot provides two modes for upgrades: view-based and time-based.

-View-based parameters allow nodes to vote and propose an upgrade based on the current node's view, while time-based
+parameters use Unix timestamps for the same purpose.

-To simplify configuration, these parameters are fetched from the genesis TOML file and set in the Hotshot config. The TOML file can include either view-based parameters or time-based parameters, but not both.
-Furthermore, The start and stop voting parameters for both time-based and view-based upgrades are optional. Start parameter is set 0 so that voting begins as soon as node is started while the stop parameter is set to a maximum value so that the nodes keep voting until enough votes are collected.
+To simplify configuration, these parameters are fetched from the genesis TOML file and set in the Hotshot config. The
+TOML file can include either view-based parameters or time-based parameters, but not both. Furthermore, the start and
+stop voting parameters for both time-based and view-based upgrades are optional. The start parameter is set to 0 so that
+voting begins as soon as the node is started, while the stop parameter is set to a maximum value so that the nodes keep
+voting until enough votes are collected.

 View based:
-- **start_voting_view:** view at which voting for the upgrade proposal starts.
-- **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
-- **start_proposing_view:** the earliest view in which the node can propose an upgrade. This should be set to when an upgrade is intended. If the current view > `start_proposing_view`, the node will send out an `UpgradeProposal`.
-- **stop_proposing_view:** view after which the node stops proposing an upgrade. If the upgrade proposal fails and the current view > stop_proposing_view then the upgrade is never proposed again.
+
+- **start_voting_view:** view at which voting for the upgrade proposal starts.
+- **stop_voting_view:** view at which voting for the upgrade proposal stops. To disable an upgrade, set this parameter
+ to 0 or ensure `stop_voting_view` is less than `start_voting_view`.
+- **start_proposing_view:** the earliest view in which the node can propose an upgrade. This should be set to when an
+ upgrade is intended. If the current view > `start_proposing_view`, the node will send out an `UpgradeProposal`.
+- **stop_proposing_view:** view after which the node stops proposing an upgrade. If the upgrade proposal fails and the
+ current view > `stop_proposing_view`, then the upgrade is never proposed again.
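For illustration only, here is a hedged sketch of how an upgrade using the view-based mode might be declared in a
genesis TOML. The `version`, `view`, and `propose_window` keys follow the Example TOML Configuration section described
later in this document; the concrete values are hypothetical:

```toml
[[upgrade]]
version = "0.2"     # the new protocol version once the upgrade succeeds
view = 5            # start_proposing_view
propose_window = 10 # i.e. stop_proposing_view = view + propose_window
```

The voting bounds are omitted here, relying on the optional defaults described above (vote immediately, keep voting
until enough votes are collected).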
 Time based:
-- **start_voting_time:** UNIX timestamp at which voting for the upgrade proposal starts.
-- **stop_voting_time:** UNIX timestamp at which voting for the upgrade proposal stops.
+
+- **start_voting_time:** UNIX timestamp at which voting for the upgrade proposal starts.
+- **stop_voting_time:** UNIX timestamp at which voting for the upgrade proposal stops.
 - **start_proposing_time:** the earliest UNIX timestamp at which the node can propose an upgrade.
 - **stop_proposing_time:** UNIX timestamp after which the node stops proposing an upgrade.

+The window between `start_proposing_view/time` and `stop_proposing_view/time` should provide sufficient time for nodes
+to continue proposing the upgrade until successful.

-The window between `start_proposing_view/time` and `stop_proposing_view/time` should provide sufficient time for nodes to continue proposing the upgrade until successful.
-
-Ensure that the `ESPRESSO_SEQUENCER_GENESIS_FILE` environment variable is defined to point to the path of the genesis TOML file. For an example with upgrades enabled, refer to [`data/genesis/demo.toml`](../data/genesis/demo.toml).
+Ensure that the `ESPRESSO_SEQUENCER_GENESIS_FILE` environment variable is defined to point to the path of the genesis
+TOML file. For an example with upgrades enabled, refer to [`data/genesis/demo.toml`](../data/genesis/demo.toml).

 ### Example TOML Configuration

@@ -68,21 +95,33 @@ max_block_size = '1mb'
 fee_recipient = '0x0000000000000000000000000000000000000000'
 fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468'
 ```
+
+In the TOML configuration example above, the `upgrade` section defines an array of tables, each specifying upgrade
+parameters:

 - **Version:** the new version after an upgrade is successful.
 - **View:** Represents the `start_proposing_view` value at which the upgrade is proposed.
 - **Propose Window:** Refers to the view window between `start_proposing_view` and `stop_proposing_view`.

-The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example, to enable protocol fees or modify other parameters.
-
+The `upgrade.chain_config` table contains the complete set of chain config parameters, which can be used, for example,
+to enable protocol fees or modify other parameters.

 ## Fee upgrade

-A successful Hotshot upgrade results in a new version, which allows us to update the `ChainConfig` and execute the
+upgrade if one exists. `ChainConfig` includes the fee parameters. The sequencer node has two states: `NodeState`
+and `ValidatedState`. `NodeState` is an immutable state that contains `ResolvableChainConfig` (an enum of `ChainConfig`'s
+commitment and the full `ChainConfig`), whereas `ValidatedState` is a mutable state.
To make updates to the chain config +post-upgrade possible, `ResolvableChainConfig` is also added to `ValidatedState`. -`NodeState` also includes two additional fields: `upgrades` and `current_version`. Functions like `Header::new()` and `ValidatedState::apply_header()` include a version parameter, which is used to apply upgrades by checking if this version is greater than `current_version` in NodeState and fetching the upgrade, if available, from the upgrades BTreeMap in NodeState. +`NodeState` also includes two additional fields: `upgrades` and `current_version`. Functions like `Header::new()` and +`ValidatedState::apply_header()` include a version parameter, which is used to apply upgrades by checking if this +version is greater than `current_version` in NodeState and fetching the upgrade, if available, from the upgrades +BTreeMap in NodeState. -In scenarios where nodes join the network or restart, missing the upgrade window may result in their ValidatedState having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the full chain config for this chain config commitment. +In scenarios where nodes join the network or restart, missing the upgrade window may result in their ValidatedState +having only a chain config commitment. In such cases, nodes need to catch up from their peers to get the full chain +config for this chain config commitment. -Note: For the fee upgrade to work, the builder must have sufficient funds to cover the fees. The Espresso bridge can be used to fund the builder. +Note: For the fee upgrade to work, the builder must have sufficient funds to cover the fees. The Espresso bridge can be +used to fund the builder. From 23b90ba8eaa34347583a35c9b0d46f3baaaea4f6 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Tue, 16 Jul 2024 23:14:34 +0800 Subject: [PATCH 56/65] Add the espresso-dev-node server (#1709) * Add a server in espresso-dev-node for easy debugging * Optimization after reviews --- hotshot-state-prover/src/service.rs | 9 +- sequencer/api/espresso_dev_node.toml | 46 +++++ sequencer/src/bin/espresso-dev-node.rs | 237 ++++++++++++++++++++----- 3 files changed, 250 insertions(+), 42 deletions(-) create mode 100644 sequencer/api/espresso_dev_node.toml diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 41342255a5..a38428f002 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -409,7 +409,14 @@ pub async fn run_prover_service( .await .with_context(|| "Failed to initialize stake table")?, ); + run_prover_service_with_stake_table(config, bind_version, st).await +} +pub async fn run_prover_service_with_stake_table( + config: StateProverConfig, + bind_version: Ver, + st: Arc>, +) -> Result<()> { tracing::info!("Light client address: {:?}", config.light_client_address); let relay_server_client = Arc::new(Client::::new(config.relay_server.clone())); @@ -422,7 +429,7 @@ pub async fn run_prover_service( } let proving_key = - spawn_blocking(move || Arc::new(load_proving_key(stake_table_capacity))).await; + spawn_blocking(move || Arc::new(load_proving_key(config.stake_table_capacity))).await; let update_interval = config.update_interval; let retry_interval = config.retry_interval; diff --git a/sequencer/api/espresso_dev_node.toml b/sequencer/api/espresso_dev_node.toml new file mode 100644 index 0000000000..75ce8143f8 --- /dev/null +++ b/sequencer/api/espresso_dev_node.toml @@ -0,0 +1,46 @@ +[meta] +NAME = "espresso_dev_node_server" +DESCRIPTION = "A server for 
debugging and developing with the espresso dev node" +FORMAT_VERSION = "0.1.0" + +[route.devinfo] +PATH = ["/dev-info"] +DOC = """ +Get the debug info + +Returns +``` +{ + "builder_url": string, + "prover_port": integer, + "l1_url": string, + "light_client_address:" address, +} +``` +""" + +[route.sethotshotdown] +PATH = ["set-hotshot-down"] +METHOD = "POST" +DOC = """ +Set the hotshot down since the given L1 height. + +Body: +``` +{ + "height": integer, +} +``` +By doing this, the L1 height in the light contract will be frozen and rollups will detect +the HotShot failure. This is intended to be used when testing the rollups' functionalities. +""" + +[route.sethotshotup] +PATH = ["set-hotshot-up"] +METHOD = "POST" +DOC = """ +Set the hotshot up in the light client contract. + +This is intended to be used when `freeze` has been called previously. By unfreezing the L1 height, +rollups will detect the reactivity of HotShot. +""" diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index f2e5cb3619..2df30c1619 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -1,16 +1,19 @@ -use std::{sync::Arc, time::Duration}; +use std::{io, sync::Arc, time::Duration}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_std::task::{sleep, spawn, spawn_blocking}; +use async_std::task::spawn; use clap::Parser; -use es_version::{SequencerVersion, SEQUENCER_VERSION}; +use contract_bindings::light_client_mock::LightClientMock; +use es_version::SEQUENCER_VERSION; use ethers::{ + middleware::{MiddlewareBuilder, SignerMiddleware}, providers::{Http, Middleware, Provider}, signers::{coins_bip39::English, MnemonicBuilder, Signer}, + types::{Address, U256}, }; use futures::FutureExt; use hotshot_state_prover::service::{ - load_proving_key, one_honest_threshold, sync_state, StateProverConfig, + one_honest_threshold, run_prover_service_with_stake_table, StateProverConfig, }; use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableScheme}; use portpicker::pick_unused_port; @@ -27,9 +30,10 @@ use sequencer_utils::{ deployer::{deploy, Contract, Contracts}, AnvilOptions, }; -use surf_disco::Client; -use tide_disco::error::ServerError; +use serde::{Deserialize, Serialize}; +use tide_disco::{error::ServerError, Api, Error as _, StatusCode}; use url::Url; +use vbs::version::StaticVersionType; #[derive(Clone, Debug, Parser)] struct Args { @@ -69,6 +73,16 @@ struct Args { #[clap(short, long, env = "ESPRESSO_BUILDER_PORT")] builder_port: Option, + /// Port for connecting to the prover. + #[clap(short, long, env = "ESPRESSO_PROVER_PORT")] + prover_port: Option, + + /// Port for the dev node. + /// + /// This is used to provide tools and information to facilitate developers debugging. + #[clap(short, long, env = "ESPRESSO_DEV_NODE_PORT", default_value = "20000")] + dev_node_port: u16, + #[clap(flatten)] sql: persistence::sql::Options, } @@ -148,52 +162,136 @@ async fn main() -> anyhow::Result<()> { SEQUENCER_VERSION, )); - // Run the prover service. These code are basically from `hotshot-state-prover`. The difference - // is that here we don't need to fetch the `stake table` from other entities. - // TODO: Remove the redundant code. 
- let proving_key =
- spawn_blocking(move || Arc::new(load_proving_key(STAKE_TABLE_CAPACITY_FOR_TEST as usize)))
- .await;
- let relay_server_client =
- Client::<ServerError, SequencerVersion>::new(relay_server_url.clone());
-
- let provider = Provider::<Http>::try_from(url.to_string()).unwrap();
+ let provider = Provider::<Http>::try_from(url.as_str()).unwrap();
 let chain_id = provider.get_chainid().await.unwrap().as_u64();
+ let wallet = MnemonicBuilder::<English>::default()
+ .phrase(cli_params.mnemonic.as_str())
+ .index(cli_params.account_index)
+ .expect("error building wallet")
+ .build()
+ .expect("error opening wallet")
+ .with_chain_id(chain_id);
+
 let update_interval = Duration::from_secs(20);
 let retry_interval = Duration::from_secs(2);
+ let prover_port = cli_params
+ .prover_port
+ .unwrap_or_else(|| pick_unused_port().unwrap());
+ let light_client_address = contracts
+ .get_contract_address(Contract::LightClientProxy)
+ .unwrap();
 let config = StateProverConfig {
 relay_server: relay_server_url,
 update_interval,
 retry_interval,
- l1_provider: url,
- light_client_address: contracts
- .get_contract_address(Contract::LightClientProxy)
- .unwrap(),
- eth_signing_key: MnemonicBuilder::<English>::default()
- .phrase(cli_params.mnemonic.as_str())
- .index(cli_params.account_index)
- .expect("error building wallet")
- .build()
- .expect("error opening wallet")
- .with_chain_id(chain_id)
- .signer()
- .clone(),
+ l1_provider: url.clone(),
+ light_client_address,
+ eth_signing_key: wallet.signer().clone(),
 sequencer_url: "http://localhost".parse().unwrap(), // This should not be used in dev-node
- port: None,
+ port: Some(prover_port),
 stake_table_capacity: STAKE_TABLE_CAPACITY_FOR_TEST as usize,
 };
- loop {
- if let Err(err) = sync_state(&st, proving_key.clone(), &relay_server_client, &config).await
- {
- tracing::error!("Cannot sync the light client state, will retry: {}", err);
- sleep(retry_interval).await;
- } else {
- tracing::info!("Sleeping for {:?}", update_interval);
- sleep(update_interval).await;
+ spawn(run_prover_service_with_stake_table(
+ config,
+ SEQUENCER_VERSION,
+ Arc::new(st),
+ ));
+
+ let dev_info = DevInfo {
+ builder_url: network.cfg.hotshot_config().builder_urls[0].clone(),
+ prover_port,
+ l1_url: url,
+ light_client_address,
+ };
+
+ let mock_contract =
+ LightClientMock::new(light_client_address, Arc::new(provider.with_signer(wallet)));
+
+ run_dev_node_server(
+ cli_params.dev_node_port,
+ mock_contract,
+ dev_info,
+ SEQUENCER_VERSION,
+ )
+ .await?;
+
+ Ok(())
+}
+
+async fn run_dev_node_server<S: Signer + Clone + 'static, Ver: StaticVersionType + 'static>(
+ port: u16,
+ mock_contract: LightClientMock<SignerMiddleware<Provider<Http>, S>>,
+ dev_info: DevInfo,
+ bind_version: Ver,
+) -> anyhow::Result<()> {
+ let mut app = tide_disco::App::<(), ServerError>::with_state(());
+ let toml =
+ toml::from_str::<toml::Value>(include_str!("../../api/espresso_dev_node.toml"))
+ .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
+
+ let mut api = Api::<(), ServerError, Ver>::new(toml)
+ .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
+
+ api.get("devinfo", move |_, _| {
+ let info = dev_info.clone();
+ async move { Ok(info.clone()) }.boxed()
+ })
+ .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
+
+ let contract = mock_contract.clone();
+ api.post("sethotshotdown", move |req, _| {
+ let contract = contract.clone();
+ async move {
+ let height = req
+ .body_auto::<SetHotshotUpBody>(Ver::instance())
+ .map_err(ServerError::from_request_error)?
+ .height; + contract + .set_hot_shot_down_since(U256::from(height)) + .send() + .await + .map_err(|err| { + ServerError::catch_all(StatusCode::INTERNAL_SERVER_ERROR, err.to_string()) + })?; + Ok(()) } - } + .boxed() + }) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + api.post("sethotshotup", move |_, _| { + let contract = mock_contract.clone(); + async move { + contract.set_hot_shot_up().send().await.map_err(|err| { + ServerError::catch_all(StatusCode::INTERNAL_SERVER_ERROR, err.to_string()) + })?; + Ok(()) + } + .boxed() + }) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + app.register_module("api", api) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + app.serve(format!("0.0.0.0:{port}"), bind_version).await?; + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct DevInfo { + pub builder_url: Url, + pub prover_port: u16, + pub l1_url: Url, + pub light_client_address: Address, +} + +#[derive(Debug, Serialize, Deserialize)] +struct SetHotshotUpBody { + pub height: u64, } #[cfg(test)] @@ -207,7 +305,10 @@ mod tests { use es_version::SequencerVersion; use escargot::CargoBuild; use espresso_types::{BlockMerkleTree, Header, SeqTypes, Transaction}; - use ethers::types::{Address, U256}; + use ethers::{ + providers::Middleware, + types::{Address, U256}, + }; use futures::TryStreamExt; use hotshot_query_service::{ availability::{BlockQueryData, TransactionQueryData, VidCommonQueryData}, @@ -220,6 +321,8 @@ mod tests { use surf_disco::Client; use tide_disco::error::ServerError; + use crate::{DevInfo, SetHotshotUpBody}; + const TEST_MNEMONIC: &str = "test test test test test test test test test test test junk"; pub struct BackgroundProcess(Child); @@ -244,6 +347,8 @@ mod tests { let api_port = pick_unused_port().unwrap(); + let dev_node_port = pick_unused_port().unwrap(); + let instance = AnvilOptions::default().spawn().await; let l1_url = instance.url(); @@ -263,6 +368,7 @@ mod tests { .env("ESPRESSO_SEQUENCER_POSTGRES_HOST", "localhost") .env("ESPRESSO_SEQUENCER_ETH_MNEMONIC", TEST_MNEMONIC) .env("ESPRESSO_DEPLOYER_ACCOUNT_INDEX", "0") + .env("ESPRESSO_DEV_NODE_PORT", dev_node_port.to_string()) .env( "ESPRESSO_SEQUENCER_POSTGRES_PORT", postgres_port.to_string(), @@ -339,7 +445,7 @@ mod tests { let signer = init_signer(&l1_url, TEST_MNEMONIC, 0).await.unwrap(); let light_client = LightClient::new( light_client_address.parse::
().unwrap(), - Arc::new(signer), + Arc::new(signer.clone()), ); while light_client @@ -395,6 +501,55 @@ mod tests { } } + let dev_node_client: Client = + Client::new(format!("http://localhost:{dev_node_port}").parse().unwrap()); + dev_node_client.connect(None).await; + + // Check the dev node api + { + tracing::info!("checking the dev node api"); + dev_node_client + .get::("api/dev-info") + .send() + .await + .unwrap(); + + let height = signer.get_block_number().await.unwrap().as_u64(); + dev_node_client + .post::<()>("api/set-hotshot-down") + .body_json(&SetHotshotUpBody { height: height - 1 }) + .unwrap() + .send() + .await + .unwrap(); + + while !light_client + .lag_over_escape_hatch_threshold(U256::from(height), U256::from(0)) + .call() + .await + .unwrap_or(false) + { + tracing::info!("waiting for setting hotshot down"); + sleep(Duration::from_secs(3)).await; + } + + dev_node_client + .post::<()>("api/set-hotshot-up") + .send() + .await + .unwrap(); + + while light_client + .lag_over_escape_hatch_threshold(U256::from(height), U256::from(0)) + .call() + .await + .unwrap_or(true) + { + tracing::info!("waiting for setting hotshot up"); + sleep(Duration::from_secs(3)).await; + } + } + drop(db); } } From b3fbb4f4a6b813241c6f364dd8aaa733c8cb0d67 Mon Sep 17 00:00:00 2001 From: Rob Date: Tue, 16 Jul 2024 14:26:44 -0400 Subject: [PATCH 57/65] update the CDN --- Cargo.lock | 90 ++++++++++++++++++++++++++++++++++++++++++++++++------ Cargo.toml | 4 +-- 2 files changed, 83 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9822c77996..096fe82c1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1516,7 +1516,7 @@ version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2#09389360284c51dd44a3dae1f1c3b395125abe82" dependencies = [ "async-std", - "cdn-proto", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "clap", "console-subscriber 0.3.0", "dashmap 5.5.3", @@ -1534,13 +1534,37 @@ dependencies = [ "tracing-subscriber 0.3.18", ] +[[package]] +name = "cdn-broker" +version = "0.4.0" +source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3#26a7b21372c4b38dca2f4aecb4267b271d1ac883" +dependencies = [ + "async-std", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3)", + "clap", + "console-subscriber 0.3.0", + "dashmap 6.0.1", + "derivative", + "jf-signature", + "lazy_static", + "local-ip-address", + "parking_lot", + "portpicker", + "prometheus", + "rand 0.8.5", + "rkyv", + "tokio", + "tracing", + "tracing-subscriber 0.3.18", +] + [[package]] name = "cdn-client" version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2#09389360284c51dd44a3dae1f1c3b395125abe82" dependencies = [ "async-std", - "cdn-proto", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "clap", "jf-signature", "rand 0.8.5", @@ -1555,7 +1579,21 @@ version = "0.4.0" source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2#09389360284c51dd44a3dae1f1c3b395125abe82" dependencies = [ "async-std", - "cdn-proto", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", + "clap", + "jf-signature", + "tokio", + "tracing", + "tracing-subscriber 0.3.18", +] + +[[package]] +name = "cdn-marshal" +version = "0.4.0" +source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3#26a7b21372c4b38dca2f4aecb4267b271d1ac883" +dependencies = [ + "async-std", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3)", "clap", 
"jf-signature", "tokio", @@ -1597,6 +1635,40 @@ dependencies = [ "warp", ] +[[package]] +name = "cdn-proto" +version = "0.4.0" +source = "git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3#26a7b21372c4b38dca2f4aecb4267b271d1ac883" +dependencies = [ + "anyhow", + "ark-serialize", + "async-trait", + "capnp", + "capnpc", + "derivative", + "jf-signature", + "kanal", + "lazy_static", + "mnemonic", + "num_enum", + "pem 3.0.4", + "prometheus", + "quinn", + "rand 0.8.5", + "rcgen 0.13.1", + "redis", + "rkyv", + "rustls 0.23.10", + "rustls-pki-types", + "sqlx", + "thiserror", + "tokio", + "tokio-rustls 0.26.0", + "tracing", + "url", + "warp", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -4091,9 +4163,9 @@ dependencies = [ "bimap", "bincode", "blake3", - "cdn-broker", + "cdn-broker 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "cdn-client", - "cdn-marshal", + "cdn-marshal 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "chrono", "committable", "custom_debug 0.5.1", @@ -4449,7 +4521,7 @@ dependencies = [ "async-trait", "bincode", "bitvec", - "cdn-proto", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "chrono", "committable", "either", @@ -4535,7 +4607,7 @@ dependencies = [ "bincode", "bitvec", "blake3", - "cdn-proto", + "cdn-proto 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.2)", "committable", "custom_debug 0.5.1", "derivative", @@ -8627,8 +8699,8 @@ dependencies = [ "bincode", "blake3", "bytesize", - "cdn-broker", - "cdn-marshal", + "cdn-broker 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3)", + "cdn-marshal 0.4.0 (git+https://github.com/EspressoSystems/Push-CDN?tag=0.4.3)", "clap", "cld", "committable", diff --git a/Cargo.toml b/Cargo.toml index bc28a7ec67..2faef87828 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,11 +68,11 @@ hotshot-example-types = { git = "https://github.com/EspressoSystems/hotshot", ta cdn-broker = { git = "https://github.com/EspressoSystems/Push-CDN", features = [ "runtime-async-std", "global-permits", -], tag = "0.4.2", package = "cdn-broker" } +], tag = "0.4.3", package = "cdn-broker" } cdn-marshal = { git = "https://github.com/EspressoSystems/Push-CDN", features = [ "runtime-async-std", "global-permits", -], tag = "0.4.2", package = "cdn-marshal" } +], tag = "0.4.3", package = "cdn-marshal" } jf-plonk = { git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5", features = [ "test-apis", From a350aa22966de00195b32967ea4b3ae67074118c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 17 Jul 2024 10:49:18 -0700 Subject: [PATCH 58/65] more results --- scripts/benchmarks_results/upload_results.csv | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/scripts/benchmarks_results/upload_results.csv b/scripts/benchmarks_results/upload_results.csv index 0c43181fe2..2339f1a331 100644 --- a/scripts/benchmarks_results/upload_results.csv +++ b/scripts/benchmarks_results/upload_results.csv @@ -5,4 +5,18 @@ total_nodes,da_committee_size,block_range,transaction_size_range,transaction_per total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec 5,5,50~150,1~1000,1~20,public,1,0,2,260,23,566,50 
total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec -5,5,50~150,1~1000,1~20,private,2,0,2,1277,118,541,50 \ No newline at end of file +5,5,50~150,1~1000,1~20,private,2,0,2,1277,118,541,50 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~10,public,1,0,1,242,15,695,43 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~10,private,1,0,2,852,63,581,43 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~20,private,2,0,2,990,78,571,45 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~20,public,1,0,2,278,21,597,45 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~40,private,5,0,9,1758,255,565,82 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~40,public,4,0,9,215,33,536,82 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~50,private,11,0,11,54,18,616,204 \ No newline at end of file From 47e7c17f601d91a01807c8285745865a06ba0b4b Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Wed, 17 Jul 2024 14:01:58 -0400 Subject: [PATCH 59/65] Update prover service to new config format --- hotshot-state-prover/src/service.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index a38428f002..7b181c0d4a 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -107,8 +107,13 @@ pub fn init_stake_table( } #[derive(Debug, Deserialize)] -pub struct PublicHotShotConfig { - pub known_nodes_with_stake: Vec>, +struct PublicHotShotConfig { + known_nodes_with_stake: 
Vec>, +} + +#[derive(Debug, Deserialize)] +struct PublicNetworkConfig { + config: PublicHotShotConfig, } /// Initialize the stake table from a sequencer node that @@ -129,8 +134,8 @@ async fn init_stake_table_from_sequencer( // Request the configuration until it is successful let network_config: PublicHotShotConfig = loop { match reqwest::get(config_url.clone()).await { - Ok(resp) => match resp.json::().await { - Ok(config) => break config, + Ok(resp) => match resp.json::().await { + Ok(config) => break config.config, Err(e) => { tracing::error!("Failed to parse the network config: {e}"); sleep(Duration::from_secs(5)).await; From b3cf1b539c652b42519c1116dd928f8035fa868c Mon Sep 17 00:00:00 2001 From: Sishan Long Date: Wed, 17 Jul 2024 11:08:02 -0700 Subject: [PATCH 60/65] more results --- scripts/benchmarks_results/upload_results.csv | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/benchmarks_results/upload_results.csv b/scripts/benchmarks_results/upload_results.csv index 2339f1a331..b992f609a8 100644 --- a/scripts/benchmarks_results/upload_results.csv +++ b/scripts/benchmarks_results/upload_results.csv @@ -19,4 +19,8 @@ total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transa total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec 5,5,50~150,1~1000,1~40,public,4,0,9,215,33,536,82 total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec -5,5,50~150,1~1000,1~50,private,11,0,11,54,18,616,204 \ No newline at end of file +5,5,50~150,1~1000,1~50,private,11,0,11,54,18,616,204 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~1,private,1,0,1,227,14,665,41 +total_nodes,da_committee_size,block_range,transaction_size_range_in_bytes,transaction_per_batch_range,pub_or_priv_pool,avg_latency_in_sec,minimum_latency_in_sec,maximum_latency_in_sec,avg_throughput_bytes_per_sec,total_transactions,avg_transaction_size_in_bytes,total_time_elapsed_in_sec +5,5,50~150,1~1000,1~1,public,1,0,1,293,20,601,41 \ No newline at end of file From 57178f81993fdf11bdc5183cc7ea230d1e94c755 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Thu, 18 Jul 2024 03:30:49 +0500 Subject: [PATCH 61/65] change block merkle commitment type to Commitment
--- sequencer/src/bin/nasty-client.rs | 2 +- types/src/v0/impls/header.rs | 16 ++++++---------- types/src/v0/impls/state.rs | 14 ++++++++------ types/src/v0/v0_1/state.rs | 5 +++-- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index 489ffb23b4..1e7ab86425 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -883,7 +883,7 @@ impl ResourceManager
{
 .context("malformed merkle proof")?
 .or_else(|_| bail!("invalid merkle proof"))?;
 ensure!(
- proof.elem() == Some(index_header.commit().as_ref()),
+ proof.elem() == Some(&index_header.commit()),
 "merkle proof is for wrong element: {:?} != {:?}",
 proof.elem(),
 index_header.commit()
diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs
index ca5ae1f733..6ccbf4dee0 100644
--- a/types/src/v0/impls/header.rs
+++ b/types/src/v0/impls/header.rs
@@ -439,7 +439,7 @@ impl Header {

 state
 .block_merkle_tree
- .push(parent_header.commit().as_ref())
+ .push(parent_header.commit())
 .context("missing blocks frontier")?;
 let block_merkle_tree_root = state.block_merkle_tree.commitment();
@@ -948,7 +948,7 @@ mod test_headers {
 *parent_leaf.block_header_mut() = parent.clone();

 let block_merkle_tree =
- BlockMerkleTree::from_elems(Some(32), Vec::<[u8; 32]>::new()).unwrap();
+ BlockMerkleTree::from_elems(Some(32), Vec::<Commitment<Header>>::new()).unwrap();

 let fee_info = FeeInfo::genesis();
 let fee_merkle_tree = FeeMerkleTree::from_kv_set(
@@ -1009,7 +1009,7 @@ mod test_headers {

 assert_eq!(
 block_merkle_tree,
- BlockMerkleTree::from_elems(Some(32), Vec::<[u8; 32]>::new()).unwrap()
+ BlockMerkleTree::from_elems(Some(32), Vec::<Commitment<Header>>::new()).unwrap()
 );
 }
 }
@@ -1206,9 +1206,7 @@ mod test_headers {
 *parent_leaf.block_header_mut() = parent_header.clone();

 // Populate the tree with an initial `push`.
- block_merkle_tree
- .push(genesis.header.commit().as_ref())
- .unwrap();
+ block_merkle_tree.push(genesis.header.commit()).unwrap();
 let block_merkle_tree_root = block_merkle_tree.commitment();
 validated_state.block_merkle_tree = block_merkle_tree.clone();
 *parent_header.block_merkle_tree_root_mut() = block_merkle_tree_root;
@@ -1306,9 +1304,7 @@ mod test_headers {
 let fee_merkle_tree = parent_state.fee_merkle_tree.clone();

 // Populate the tree with an initial `push`.
- block_merkle_tree
- .push(genesis.header.commit().as_ref())
- .unwrap();
+ block_merkle_tree.push(genesis.header.commit()).unwrap();
 let block_merkle_tree_root = block_merkle_tree.commitment();
 let fee_merkle_tree_root = fee_merkle_tree.commitment();
 parent_state.block_merkle_tree = block_merkle_tree.clone();
@@ -1367,7 +1363,7 @@ }

 let mut block_merkle_tree = proposal_state.block_merkle_tree.clone();
- block_merkle_tree.push(proposal.commit().as_ref()).unwrap();
+ block_merkle_tree.push(proposal.commit()).unwrap();

 let proposal_state = proposal_state
 .apply_header(
diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs
index 681fd92307..9aa615dae6 100644
--- a/types/src/v0/impls/state.rs
+++ b/types/src/v0/impls/state.rs
@@ -1,7 +1,7 @@
 use std::ops::Add;

 use anyhow::bail;
-use committable::Committable;
+use committable::{Commitment, Committable};
 use ethers::types::Address;
 use hotshot_query_service::merklized_state::MerklizedState;
 use hotshot_types::{
@@ -55,9 +55,11 @@ impl StateDelta for Delta {}

 impl Default for ValidatedState {
 fn default() -> Self {
- let block_merkle_tree =
- BlockMerkleTree::from_elems(Some(BLOCK_MERKLE_TREE_HEIGHT), Vec::<[u8; 32]>::new())
- .unwrap();
+ let block_merkle_tree = BlockMerkleTree::from_elems(
+ Some(BLOCK_MERKLE_TREE_HEIGHT),
+ Vec::<Commitment<Header>>::new(),
+ )
+ .unwrap();

 // Words of wisdom from @mrain: "capacity = arity^height"
 // "For index space 2^160, arity 256 (2^8),
@@ -501,7 +503,7 @@ fn apply_proposal(
 // pushing a block into merkle tree shouldn't fail
 validated_state
 .block_merkle_tree
- .push(parent_leaf.block_header().commit().as_ref())
+ .push(parent_leaf.block_header().commit())
 .unwrap();

 for FeeInfo { account, amount } in l1_deposits.iter() {
@@ -632,7 +634,7 @@ impl hotshot_types::traits::states::TestableState for ValidatedState {

 impl MerklizedState for BlockMerkleTree {
 type Key = Self::Index;
- type Entry = [u8; 32];
+ type Entry = Commitment<Header>
;
 type T = Sha3Node;
 type Commit = Self::Commitment;
 type Digest = Sha3Digest;
diff --git a/types/src/v0/v0_1/state.rs b/types/src/v0/v0_1/state.rs
index 9e2df8089c..c9906f7be7 100644
--- a/types/src/v0/v0_1/state.rs
+++ b/types/src/v0/v0_1/state.rs
@@ -1,7 +1,8 @@
-use crate::ResolvableChainConfig;
+use crate::{Header, ResolvableChainConfig};

 use super::{FeeAccount, FeeAmount};

+use committable::Commitment;
 use jf_merkle_tree::{
 prelude::{LightWeightSHA3MerkleTree, Sha3Digest, Sha3Node},
 universal_merkle_tree::UniversalMerkleTree,
@@ -12,7 +13,7 @@ use serde::{Deserialize, Serialize};
 // The block merkle tree accumulates header commitments. However, since the underlying
 // representation of the commitment type remains the same even while the header itself changes,
 // using the underlying type `[u8; 32]` allows us to use the same state type across minor versions.
-pub type BlockMerkleTree = LightWeightSHA3MerkleTree<[u8; 32]>;
+pub type BlockMerkleTree = LightWeightSHA3MerkleTree<Commitment<Header>>;
 pub type BlockMerkleCommitment = <BlockMerkleTree as MerkleTreeScheme>::Commitment;
 pub type FeeMerkleTree = UniversalMerkleTree<FeeAmount, Sha3Digest, FeeAccount, 256, Sha3Node>;

From ea2566770865f0247281c72808f1ccc79640560b Mon Sep 17 00:00:00 2001
From: tbro <48967308+tbro@users.noreply.github.com>
Date: Thu, 18 Jul 2024 23:17:26 +0300
Subject: [PATCH 62/65] Add Auction Types (#1726)

* Add Auction Types

Adds `BidTx` and friends to `v0_3`. The intention is to provide the
necessary elements for first round of Marketplace integration. Since
this is all hidden behind a version gate, our priority should be to
satisfy integration necessities.

---------

Co-authored-by: tbro 
---
 types/src/eth_signature_key.rs | 3 +
 types/src/v0/impls/auction.rs | 271 +++++++++++++++++++++++++++++++++
 types/src/v0/impls/mod.rs | 1 +
 types/src/v0/v0_3/auction.rs | 52 +++++++
 types/src/v0/v0_3/mod.rs | 2 +
 5 files changed, 329 insertions(+)
 create mode 100644 types/src/v0/impls/auction.rs
 create mode 100644 types/src/v0/v0_3/auction.rs

diff --git a/types/src/eth_signature_key.rs b/types/src/eth_signature_key.rs
index 6843fd0b89..d50068e488 100644
--- a/types/src/eth_signature_key.rs
+++ b/types/src/eth_signature_key.rs
@@ -48,6 +48,9 @@ impl EthKeyPair {
 let signing_key: &SigningKey = derived_priv_key.as_ref();
 Ok(signing_key.clone().into())
 }
+ pub fn random() -> EthKeyPair {
+ SigningKey::random(&mut rand::thread_rng()).into()
+ }

 pub fn fee_account(&self) -> FeeAccount {
 self.fee_account
diff --git a/types/src/v0/impls/auction.rs b/types/src/v0/impls/auction.rs
new file mode 100644
index 0000000000..ac4e03a35b
--- /dev/null
+++ b/types/src/v0/impls/auction.rs
@@ -0,0 +1,271 @@
+use crate::{
+ eth_signature_key::{EthKeyPair, SigningError},
+ v0_1::ValidatedState,
+ v0_3::{BidTx, BidTxBody, FullNetworkTx},
+ FeeAccount, FeeAmount, FeeError, FeeInfo, NamespaceId,
+};
+use committable::{Commitment, Committable};
+use ethers::types::Signature;
+use hotshot_types::{
+ data::ViewNumber,
+ traits::{
+ auction_results_provider::HasUrl, node_implementation::ConsensusTime,
+ signature_key::BuilderSignatureKey,
+ },
+};
+use std::str::FromStr;
+use thiserror::Error;
+use url::Url;
+
+impl FullNetworkTx {
+ /// Proxy for `execute` method of each transaction variant.
+ pub fn execute(&self, state: &mut ValidatedState) -> Result<(), ExecutionError> { + match self { + Self::Bid(bid) => bid.execute(state), + } + } +} + +impl Committable for BidTxBody { + fn tag() -> String { + "BID_TX".to_string() + } + + fn commit(&self) -> Commitment { + let comm = committable::RawCommitmentBuilder::new(&Self::tag()) + .fixed_size_field("account", &self.account.to_fixed_bytes()) + .fixed_size_field("gas_price", &self.gas_price.to_fixed_bytes()) + .fixed_size_field("bid_amount", &self.bid_amount.to_fixed_bytes()) + .var_size_field("url", self.url.as_str().as_ref()) + .u64_field("view", self.view.u64()) + .var_size_field("namespaces", &bincode::serialize(&self.namespaces).unwrap()); + comm.finalize() + } +} + +impl BidTxBody { + /// Construct a new `BidTxBody`. + pub fn new( + account: FeeAccount, + bid: FeeAmount, + view: ViewNumber, + namespaces: Vec, + url: Url, + ) -> Self { + Self { + account, + bid_amount: bid, + view, + namespaces, + url, + // TODO gas_price will come from config probably, but we + // can use any value for first round of integration + ..Self::default() + } + } + + /// Sign `BidTxBody` and return the signature. + fn sign(&self, key: &EthKeyPair) -> Result { + FeeAccount::sign_builder_message(key, self.commit().as_ref()) + } + /// Sign Body and return a `BidTx`. This is the expected way to obtain a `BidTx`. + /// ``` + /// # use espresso_types::FeeAccount; + /// # use espresso_types::v0_3::BidTxBody; + /// + /// let key = FeeAccount::test_key_pair(); + /// BidTxBody::default().signed(&key).unwrap(); + /// ``` + pub fn signed(self, key: &EthKeyPair) -> Result { + let signature = self.sign(key)?; + let bid = BidTx { + body: self, + signature, + }; + Ok(bid) + } + + /// Get account submitting the bid + pub fn account(&self) -> FeeAccount { + self.account + } + /// Get amount of bid + pub fn amount(&self) -> FeeAmount { + self.bid_amount + } + /// Instantiate a `BidTxBody` containing the values of `self` + /// with a new `url` field. + pub fn with_url(self, url: Url) -> Self { + Self { url, ..self } + } +} + +impl Default for BidTxBody { + fn default() -> Self { + let key = FeeAccount::test_key_pair(); + let nsid = NamespaceId::from(999u64); + Self { + url: Url::from_str("https://sequencer:3939").unwrap(), + account: key.fee_account(), + gas_price: FeeAmount::default(), + bid_amount: FeeAmount::default(), + view: ViewNumber::genesis(), + namespaces: vec![nsid], + } + } +} +impl Default for BidTx { + fn default() -> Self { + BidTxBody::default() + .signed(&FeeAccount::test_key_pair()) + .unwrap() + } +} + +#[derive(Error, Debug, Eq, PartialEq)] +/// Failure cases of transaction execution +pub enum ExecutionError { + #[error("Invalid Signature")] + /// Transaction Signature could not be verified. + InvalidSignature, + #[error("Invalid Phase")] + /// Transaction submitted during incorrect Marketplace Phase + InvalidPhase, + #[error("FeeError: {0}")] + /// Insufficient funds or MerkleTree error. + FeeError(FeeError), + #[error("Could not resolve `ChainConfig`")] + /// Could not resolve `ChainConfig`. + UnresolvableChainConfig, +} + +impl From for ExecutionError { + fn from(e: FeeError) -> Self { + Self::FeeError(e) + } +} + +impl BidTx { + /// Execute `BidTx`. + /// * verify signature + /// * charge bid amount + /// * charge gas + pub fn execute(&self, state: &mut ValidatedState) -> Result<(), ExecutionError> { + self.verify()?; + + // In JIT sequencer only receives winning bids. In AOT all + // bids are charged as received (losing bids are refunded). 
In + // any case we can charge the bids and gas during execution. + self.charge(state)?; + + Ok(()) + } + /// Charge Bid. Only winning bids are charged in JIT. + fn charge(&self, state: &mut ValidatedState) -> Result<(), ExecutionError> { + // As the code is currently organized, I think chain_config + // will always be resolved here. But let's guard against the + // error in case code is shifted around in the future. + let Some(chain_config) = state.chain_config.resolve() else { + return Err(ExecutionError::UnresolvableChainConfig); + }; + + // TODO change to `bid_recipient` when this logic is finally enabled + let recipient = chain_config.fee_recipient; + // Charge the bid amount + state + .charge_fee(FeeInfo::new(self.account(), self.amount()), recipient) + .map_err(ExecutionError::from)?; + + // Charge the the gas amount + state + .charge_fee(FeeInfo::new(self.account(), self.gas_price()), recipient) + .map_err(ExecutionError::from)?; + + Ok(()) + } + /// Cryptographic signature verification + fn verify(&self) -> Result<(), ExecutionError> { + self.body + .account + .validate_builder_signature(&self.signature, self.body.commit().as_ref()) + .then_some(()) + .ok_or(ExecutionError::InvalidSignature) + } + /// Return the body of the transaction + pub fn body(self) -> BidTxBody { + self.body + } + /// Instantiate a `BidTx` containing the values of `self` + /// with a new `url` field on `body`. + pub fn with_url(self, url: Url) -> Self { + let body = self.body.with_url(url); + Self { body, ..self } + } + /// get gas price + pub fn gas_price(&self) -> FeeAmount { + self.body.gas_price + } + /// get bid amount + pub fn amount(&self) -> FeeAmount { + self.body.bid_amount + } + /// get bid account + pub fn account(&self) -> FeeAccount { + self.body.account + } +} + +impl HasUrl for BidTx { + /// Get the `url` field from the body. + fn url(&self) -> Url { + self.body.url() + } +} + +impl HasUrl for BidTxBody { + /// Get the cloned `url` field. 
+ fn url(&self) -> Url { + self.url.clone() + } +} + +mod test { + use super::*; + + impl BidTx { + pub fn mock(key: EthKeyPair) -> Self { + BidTxBody::default().signed(&key).unwrap() + } + } + + #[test] + fn test_mock_bid_tx_sign_and_verify() { + let key = FeeAccount::test_key_pair(); + let bidtx = BidTx::mock(key); + bidtx.verify().unwrap(); + } + + #[test] + fn test_mock_bid_tx_charge() { + let mut state = ValidatedState::default(); + let key = FeeAccount::test_key_pair(); + let bidtx = BidTx::mock(key); + bidtx.charge(&mut state).unwrap(); + } + + #[test] + fn test_bid_tx_construct() { + let key_pair = EthKeyPair::random(); + BidTxBody::new( + key_pair.fee_account(), + FeeAmount::from(1), + ViewNumber::genesis(), + vec![NamespaceId::from(999u64)], + Url::from_str("https://sequencer:3131").unwrap(), + ) + .signed(&key_pair) + .unwrap() + .verify() + .unwrap(); + } +} diff --git a/types/src/v0/impls/mod.rs b/types/src/v0/impls/mod.rs index 3940fb0097..91aad85957 100644 --- a/types/src/v0/impls/mod.rs +++ b/types/src/v0/impls/mod.rs @@ -1,5 +1,6 @@ pub use super::*; +mod auction; mod block; mod chain_config; mod fee_info; diff --git a/types/src/v0/v0_3/auction.rs b/types/src/v0/v0_3/auction.rs new file mode 100644 index 0000000000..a44e368006 --- /dev/null +++ b/types/src/v0/v0_3/auction.rs @@ -0,0 +1,52 @@ +use crate::{FeeAccount, FeeAmount, NamespaceId}; +use ethers::types::Signature; +use hotshot_types::data::ViewNumber; +use serde::{Deserialize, Serialize}; +use url::Url; + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] +/// Wrapper enum for Full Network Transactions. Each transaction type +/// will be a variant of this enum. +pub enum FullNetworkTx { + Bid(BidTx), +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] +/// A transaction to bid for the sequencing rights of a namespace. It +/// is the `signed` form of `BidTxBody`. Expected usage is *build* +/// it by calling `signed` on `BidTxBody`. +pub struct BidTx { + pub(crate) body: BidTxBody, + pub(crate) signature: Signature, +} + +/// A transaction body holding data required for bid submission. +#[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] +pub struct BidTxBody { + /// Account responsible for the signature + pub(crate) account: FeeAccount, + /// Fee to be sequenced in the network. Different than the bid_amount fee + // FULL_NETWORK_GAS * MINIMUM_GAS_PRICE + pub(crate) gas_price: FeeAmount, + /// The bid amount designated in Wei. 
This is different than + /// the sequencing fee (gas price) for this transaction + pub(crate) bid_amount: FeeAmount, + /// The URL the HotShot leader will use to request a bundle + /// from this sequencer if they win the auction + pub(crate) url: Url, + /// The slot this bid is for + pub(crate) view: ViewNumber, + /// The set of namespace ids the sequencer is bidding for + pub(crate) namespaces: Vec, +} + +/// The results of an Auction +#[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize, Hash)] +pub struct AuctionResults { + /// view number the results are for + pub(crate) view_number: ViewNumber, + /// A list of the bid txs that won + pub(crate) winning_bids: Vec, + /// A list of reserve sequencers being used + pub(crate) reserve_bids: Vec<(NamespaceId, Url)>, +} diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs index a3f38283c8..f8a6ef1b6a 100644 --- a/types/src/v0/v0_3/mod.rs +++ b/types/src/v0/v0_3/mod.rs @@ -17,6 +17,8 @@ pub use super::v0_1::{ pub const VERSION: Version = Version { major: 0, minor: 3 }; +mod auction; mod header; +pub use auction::{AuctionResults, BidTx, BidTxBody, FullNetworkTx}; pub use header::Header; From b122924bef5d48ea5ff2d55ff00eccebc4ac6e14 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Thu, 18 Jul 2024 17:13:58 -0400 Subject: [PATCH 63/65] Use correct header version when computing merklized state update --- sequencer/src/api/options.rs | 2 +- sequencer/src/state.rs | 30 ++++++++++++------------------ types/src/v0/impls/state.rs | 4 ++++ 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index 68039cd8e6..c3bd7da20d 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -374,7 +374,7 @@ impl Options { let get_node_state = async move { state.node_state().await.clone() }; tasks.spawn( "merklized state storage update loop", - update_state_storage_loop(ds, get_node_state, Ver::version()), + update_state_storage_loop(ds, get_node_state), ); } diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index a244fa5eda..c2f37045cd 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -15,7 +15,6 @@ use hotshot_query_service::{ types::HeightIndexed, }; use jf_merkle_tree::{LookupResult, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme}; -use vbs::version::Version; use crate::{ api::data_source::CatchupDataSource, catchup::SqlStateCatchup, @@ -27,7 +26,6 @@ async fn compute_state_update( instance: &NodeState, parent_leaf: &LeafQueryData, proposed_leaf: &LeafQueryData, - version: Version, ) -> anyhow::Result<(ValidatedState, Delta)> { let proposed_leaf = proposed_leaf.leaf(); let parent_leaf = parent_leaf.leaf(); @@ -35,6 +33,12 @@ async fn compute_state_update( // Check internal consistency. let parent_header = parent_leaf.block_header(); + ensure!( + state.chain_config.commit() == parent_header.chain_config().commit(), + "internal error! in-memory chain config {:?} does not match parent header {:?}", + state.chain_config, + parent_header.chain_config(), + ); ensure!( state.block_merkle_tree.commitment() == parent_header.block_merkle_tree_root(), "internal error! 
in-memory block tree {:?} does not match parent header {:?}", @@ -49,7 +53,7 @@ async fn compute_state_update( ); state - .apply_header(instance, parent_leaf, header, version) + .apply_header(instance, parent_leaf, header, header.version()) .await } @@ -132,14 +136,12 @@ async fn update_state_storage( instance: &NodeState, parent_leaf: &LeafQueryData, proposed_leaf: &LeafQueryData, - version: Version, ) -> anyhow::Result { let parent_chain_config = parent_state.chain_config; - let (state, delta) = - compute_state_update(parent_state, instance, parent_leaf, proposed_leaf, version) - .await - .context("computing state update")?; + let (state, delta) = compute_state_update(parent_state, instance, parent_leaf, proposed_leaf) + .await + .context("computing state update")?; let mut storage = storage.write().await; if let Err(err) = store_state_update(&mut *storage, proposed_leaf.height(), &state, delta).await @@ -199,7 +201,6 @@ async fn store_genesis_state( pub(crate) async fn update_state_storage_loop( storage: Arc>, instance: impl Future, - version: Version, ) -> anyhow::Result<()> { let mut instance = instance.await; instance.peers = Arc::new(SqlStateCatchup::new(storage.clone(), Default::default())); @@ -240,15 +241,8 @@ pub(crate) async fn update_state_storage_loop( while let Some(leaf) = leaves.next().await { loop { - match update_state_storage( - &parent_state, - &storage, - &instance, - &parent_leaf, - &leaf, - version, - ) - .await + match update_state_storage(&parent_state, &storage, &instance, &parent_leaf, &leaf) + .await { Ok(state) => { parent_leaf = leaf; diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 9aa615dae6..34b2e64278 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -141,6 +141,10 @@ impl ValidatedState { /// Charge a fee to an account, transferring the funds to the fee recipient account. pub fn charge_fee(&mut self, fee_info: FeeInfo, recipient: FeeAccount) -> Result<(), FeeError> { + if fee_info.amount == 0.into() { + return Ok(()); + } + let fee_state = self.fee_merkle_tree.clone(); // Deduct the fee from the paying account. 
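Two behavioral points in this patch are easy to miss: the state-update path now derives the version from the header
being applied (`header.version()`) instead of a single statically bound version, and `charge_fee` short-circuits on
zero amounts. Below is a minimal, self-contained Rust sketch of the first idea; every type here is a simplified
stand-in for illustration, not the sequencer's real API:

```rust
// Sketch with stand-in types: pick the update rules from the version
// recorded in each header, not from one static compile-time version.
#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Version(u16, u16);

struct Header {
    version: Version, // each header records the version it was built under
    fee: u64,
}

struct State {
    balance: u64,
}

impl State {
    fn apply_header(&mut self, header: &Header) {
        // Rules are chosen per header, so replaying history applies each
        // block under the version that produced it.
        if header.version >= Version(0, 2) && header.fee == 0 {
            // Hypothetical post-upgrade rule mirroring the patch: skip
            // zero-amount charges instead of doing a no-op fee update.
            return;
        }
        self.balance = self.balance.saturating_sub(header.fee);
    }
}

fn main() {
    let mut state = State { balance: 10 };
    state.apply_header(&Header { version: Version(0, 2), fee: 0 });
    assert_eq!(state.balance, 10);
}
```

Pinning version selection to each header means that replaying old blocks after an upgrade applies them under the rules
they were produced under, which a single passed-in version could not guarantee.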
From bcbafc28dbbb30a7b5f07cd01be1cb43a7854e53 Mon Sep 17 00:00:00 2001 From: Lukasz Rzasik Date: Mon, 22 Jul 2024 14:03:30 +0200 Subject: [PATCH 64/65] Implement `QueryablePayload::transaction` for `Payload` --- types/src/v0/impls/block/full_payload/payload.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index 4785a0b36b..bbb75399a4 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -219,6 +219,14 @@ impl QueryablePayload for Payload { TxProof::new(index, self, &common) } + + fn transaction( + &self, + _meta: &Self::Metadata, + index: &Self::TransactionIndex, + ) -> Option { + self.transaction(index) + } } impl std::fmt::Display for Payload { From 13d8c3967814378a13a7c4fd492bd9ecf9ede9a3 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Mon, 22 Jul 2024 23:00:08 +0500 Subject: [PATCH 65/65] use max base fee in builder --- builder/src/bin/permissionless-builder.rs | 12 +++++++----- builder/src/lib.rs | 2 ++ builder/src/non_permissioned.rs | 7 +++---- builder/src/permissioned.rs | 9 +++++---- data/genesis/demo.toml | 4 ++-- sequencer/src/genesis.rs | 22 +++++++++++++++++++++- 6 files changed, 40 insertions(+), 16 deletions(-) diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index e07c418a4d..0c250eae2f 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -4,13 +4,14 @@ use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; use builder::non_permissioned::{build_instance_state, BuilderConfig}; use clap::Parser; use cld::ClDuration; -use es_version::SEQUENCER_VERSION; -use espresso_types::eth_signature_key::EthKeyPair; +use espresso_types::{eth_signature_key::EthKeyPair, SeqTypes}; use hotshot::traits::ValidatedState; +use hotshot_builder_core::testing::basic_test::NodeType; use hotshot_types::{data::ViewNumber, traits::node_implementation::ConsensusTime}; use sequencer::{Genesis, L1Params}; use snafu::Snafu; use url::Url; +use vbs::version::StaticVersionType; #[derive(Parser, Clone, Debug)] struct NonPermissionedBuilderOptions { @@ -106,8 +107,6 @@ async fn main() -> anyhow::Result<()> { let opt = NonPermissionedBuilderOptions::parse(); let genesis = Genesis::from_file(&opt.genesis_file)?; - let sequencer_version = SEQUENCER_VERSION; - let l1_params = L1Params { url: opt.l1_provider_url, events_max_block_range: 10000, @@ -122,10 +121,12 @@ async fn main() -> anyhow::Result<()> { genesis.chain_config, l1_params, opt.state_peers, - sequencer_version, + ::Base::instance(), ) .unwrap(); + let base_fee = genesis.max_base_fee(); + let validated_state = ValidatedState::genesis(&instance_state).0; let api_response_timeout_duration = opt.max_api_timeout_duration; @@ -148,6 +149,7 @@ async fn main() -> anyhow::Result<()> { api_response_timeout_duration, buffer_view_num_count, txn_timeout_duration, + base_fee, ) .await; diff --git a/builder/src/lib.rs b/builder/src/lib.rs index e3a62a8c2a..3ab3a17aa1 100644 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -574,6 +574,7 @@ pub mod testing { Duration::from_millis(2000), 15, Duration::from_millis(500), + ChainConfig::default().base_fee, ) .await .unwrap(); @@ -640,6 +641,7 @@ pub mod testing { Duration::from_millis(2000), 15, Duration::from_millis(500), + ChainConfig::default().base_fee, ) .await .unwrap(); diff --git a/builder/src/non_permissioned.rs 
b/builder/src/non_permissioned.rs index fbcc508ee9..8b02e3849f 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -10,7 +10,7 @@ use async_compatibility_layer::{ }; use async_std::sync::{Arc, RwLock}; use espresso_types::{ - eth_signature_key::EthKeyPair, ChainConfig, L1Client, NodeState, Payload, SeqTypes, + eth_signature_key::EthKeyPair, ChainConfig, FeeAmount, L1Client, NodeState, Payload, SeqTypes, ValidatedState, }; use ethers::{ @@ -94,6 +94,7 @@ impl BuilderConfig { max_api_timeout_duration: Duration, buffered_view_num_count: usize, maximize_txns_count_timeout_duration: Duration, + base_fee: FeeAmount, ) -> anyhow::Result { tracing::info!( address = %builder_key_pair.fee_account(), @@ -168,9 +169,7 @@ impl BuilderConfig { global_state_clone, node_count, maximize_txns_count_timeout_duration, - instance_state - .chain_config - .base_fee + base_fee .as_u64() .context("the base fee exceeds the maximum amount that a builder can pay (defined by u64::MAX)")?, Arc::new(instance_state), diff --git a/builder/src/permissioned.rs b/builder/src/permissioned.rs index b40c7ea667..4eef4c8ddb 100644 --- a/builder/src/permissioned.rs +++ b/builder/src/permissioned.rs @@ -25,7 +25,7 @@ use async_std::{ use espresso_types::{ eth_signature_key::EthKeyPair, v0::traits::{PersistenceOptions, SequencerPersistence, StateCatchup}, - L1Client, NodeState, Payload, PubKey, SeqTypes, ValidatedState, + FeeAmount, L1Client, NodeState, Payload, PubKey, SeqTypes, ValidatedState, }; use ethers::{ core::k256::ecdsa::SigningKey, @@ -247,6 +247,7 @@ pub async fn init_node, P: SequencerPersistence, Ver: StaticVersionTyp max_api_timeout_duration: Duration, buffered_view_num_count: usize, maximize_txns_count_timeout_duration: Duration, + base_fee: FeeAmount, ) -> anyhow::Result { // tx channel let (mut tx_sender, tx_receiver) = @@ -476,9 +479,7 @@ impl, P: SequencerPersistence, Ver: StaticVersionTyp global_state_clone, NonZeroUsize::new(1).unwrap(), maximize_txns_count_timeout_duration, - instance_state - .chain_config - .base_fee + base_fee .as_u64() .context("the base fee exceeds the maximum amount that a builder can pay (defined by u64::MAX)")?, Arc::new(instance_state), diff --git a/data/genesis/demo.toml b/data/genesis/demo.toml index ec85c2bf65..3493200e6f 100644 --- a/data/genesis/demo.toml +++ b/data/genesis/demo.toml @@ -3,7 +3,7 @@ capacity = 10 [chain_config] chain_id = 999999999 -base_fee = '1 wei' +base_fee = '0 wei' max_block_size = '1mb' fee_recipient = '0x0000000000000000000000000000000000000000' fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' @@ -18,7 +18,7 @@ stop_proposing_view = 15 [upgrade.chain_config] chain_id = 999999999 -base_fee = '2 wei' +base_fee = '1 wei' max_block_size = '1mb' fee_recipient = '0x0000000000000000000000000000000000000000' fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 59941ae595..703f752157 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -4,7 +4,9 @@ use std::{ }; use anyhow::Context; -use espresso_types::{ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade}; +use espresso_types::{ + ChainConfig, FeeAccount, FeeAmount, GenesisHeader, L1BlockInfo, Upgrade, UpgradeType, +}; use serde::{Deserialize, Serialize}; use vbs::version::Version; @@ -47,6 +49,24 @@ pub struct Genesis { pub upgrades: BTreeMap, } +impl Genesis { + pub fn max_base_fee(&self) -> FeeAmount { + let mut base_fee = self.chain_config.base_fee; + 
+ let upgrades: Vec<&Upgrade> = self.upgrades.values().collect(); + + for upgrade in upgrades { + match upgrade.upgrade_type { + UpgradeType::ChainConfig { chain_config } => { + base_fee = std::cmp::max(chain_config.base_fee, base_fee); + } + } + } + + base_fee + } +} + mod upgrade_serialization { use std::{collections::BTreeMap, fmt};
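    // A quick illustration of `max_base_fee` above (a sketch, not code from this
    // patch): the fold keeps the maximum of the genesis `base_fee` and the
    // `base_fee` of every `UpgradeType::ChainConfig` upgrade, so the builder in
    // patch 65 reserves for the highest fee it can ever be charged. With the
    // demo.toml values changed above ('0 wei' at genesis, '1 wei' after the
    // upgrade), and `path` standing in for any genesis file location:
    //
    //     let genesis = Genesis::from_file(&path)?; // chain_config.base_fee = 0 wei
    //     let fee = genesis.max_base_fee();         // 1 wei, from the upgrade
    //
    // Because `UpgradeType` has only the `ChainConfig` variant here, the match in
    // `max_base_fee` is exhaustive; adding a variant later turns the fold into a
    // compile error until the new case is handled.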