Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Prover] Sequencer node stake table initialization #1600

Merged
merged 2 commits into from
Jun 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 4 additions & 3 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ services:
environment:
- ESPRESSO_SEQUENCER_ORCHESTRATOR_URL
- ESPRESSO_SEQUENCER_L1_PROVIDER
- ESPRESSO_SEQUENCER_URL
- ESPRESSO_SEQUENCER_STAKE_TABLE_CAPACITY
- ESPRESSO_DEPLOYER_ACCOUNT_INDEX
- RUST_LOG
Expand All @@ -42,7 +43,7 @@ services:
depends_on:
demo-l1-network:
condition: service_healthy
orchestrator:
sequencer0:
condition: service_healthy
# Make sure this doesn't start until the other contracts have been deployed, since we use the same mnemonic.
deploy-sequencer-contracts:
Expand Down Expand Up @@ -185,7 +186,7 @@ services:
environment:
- ESPRESSO_PROVER_SERVICE_PORT
- ESPRESSO_STATE_RELAY_SERVER_URL
- ESPRESSO_SEQUENCER_ORCHESTRATOR_URL
- ESPRESSO_SEQUENCER_URL
- ESPRESSO_STATE_PROVER_UPDATE_INTERVAL
- ESPRESSO_SEQUENCER_L1_PROVIDER
- ESPRESSO_SEQUENCER_ETH_MNEMONIC
Expand All @@ -199,7 +200,7 @@ services:
- ASYNC_STD_THREAD_COUNT
- RAYON_NUM_THREADS
depends_on:
orchestrator:
sequencer0:
condition: service_healthy
state-relay-server:
condition: service_healthy
Expand Down
3 changes: 2 additions & 1 deletion hotshot-state-prover/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ authors = { workspace = true }
edition = { workspace = true }

[dependencies]
anyhow = "1.0"
anyhow = { workspace = true }
ark-bn254 = { workspace = true }
ark-ec = { workspace = true }
ark-ed-on-bn254 = { workspace = true }
Expand Down Expand Up @@ -39,6 +39,7 @@ jf-rescue = { workspace = true, features = ["gadgets"] }
jf-signature = { workspace = true, features = ["schnorr", "bls", "gadgets"] }
jf-utils = { workspace = true }
rand_chacha = { workspace = true }
reqwest = { workspace = true }
sequencer-utils = { path = "../utils" }
serde = { workspace = true }
snafu = { workspace = true }
Expand Down
20 changes: 12 additions & 8 deletions hotshot-state-prover/src/bin/state-prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,14 +57,14 @@ struct Args {
)]
eth_account_index: u32,

/// URL of the HotShot orchestrator.
/// URL of a sequencer node that is currently providing the HotShot config.
/// This is used to initialize the stake table.
#[clap(
short,
long,
env = "ESPRESSO_SEQUENCER_ORCHESTRATOR_URL",
default_value = "http://localhost:8080"
env = "ESPRESSO_SEQUENCER_URL",
default_value = "http://localhost:24000"
)]
pub orchestrator_url: Url,
pub sequencer_url: Url,

/// If daemon and provided, the service will run a basic HTTP server on the given port.
///
Expand Down Expand Up @@ -115,16 +115,20 @@ async fn main() {
.with_chain_id(chain_id)
.signer()
.clone(),
orchestrator_url: args.orchestrator_url,
sequencer_url: args.sequencer_url,
port: args.port,
stake_table_capacity: args.stake_table_capacity,
};

if args.daemon {
// Launching the prover service daemon
run_prover_service(config, SEQUENCER_VERSION).await;
if let Err(err) = run_prover_service(config, SEQUENCER_VERSION).await {
tracing::error!("Error running prover service: {:?}", err);
};
} else {
// Run light client state update once
run_prover_once(config, SEQUENCER_VERSION).await;
if let Err(err) = run_prover_once(config, SEQUENCER_VERSION).await {
tracing::error!("Error running prover once: {:?}", err);
};
}
}
133 changes: 73 additions & 60 deletions hotshot-state-prover/src/service.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
//! A light client prover service

use crate::snark::{generate_state_update_proof, Proof, ProvingKey};
use anyhow::anyhow;
use anyhow::{anyhow, Context, Result};
use async_std::{
io,
sync::Arc,
Expand All @@ -20,10 +20,8 @@ use ethers::{
use futures::FutureExt;
use hotshot_contract_adapter::jellyfish::{u256_to_field, ParsedPlonkProof};
use hotshot_contract_adapter::light_client::ParsedLightClientState;
use hotshot_orchestrator::OrchestratorVersion;
use hotshot_stake_table::vec_based::config::FieldType;
use hotshot_stake_table::vec_based::StakeTable;
use hotshot_types::signature_key::BLSPubKey;
use hotshot_types::traits::stake_table::{SnapshotVersion, StakeTableError, StakeTableScheme as _};
use hotshot_types::{
light_client::{
Expand All @@ -32,11 +30,13 @@ use hotshot_types::{
},
traits::signature_key::StakeTableEntryType,
};
use hotshot_types::{signature_key::BLSPubKey, PeerConfig};

use jf_pcs::prelude::UnivariateUniversalParams;
use jf_plonk::errors::PlonkError;
use jf_relation::Circuit as _;
use jf_signature::constants::CS_ID_SCHNORR;
use serde::Deserialize;
use std::{
iter,
time::{Duration, Instant},
Expand All @@ -52,8 +52,6 @@ type F = ark_ed_on_bn254::Fq;
/// A wallet with local signer and connected to network via http
pub type L1Wallet = SignerMiddleware<Provider<Http>, LocalWallet>;

type NetworkConfig = hotshot_orchestrator::config::NetworkConfig<BLSPubKey>;

/// Configuration/Parameters used for hotshot state prover
#[derive(Debug, Clone)]
pub struct StateProverConfig {
Expand All @@ -69,8 +67,9 @@ pub struct StateProverConfig {
pub light_client_address: Address,
/// Transaction signing key for Ethereum
pub eth_signing_key: SigningKey,
/// Address of the HotShot orchestrator, used for stake table initialization.
pub orchestrator_url: Url,
/// URL of a node that is currently providing the HotShot config.
/// This is used to initialize the stake table.
pub sequencer_url: Url,
/// If daemon and provided, the service will run a basic HTTP server on the given port.
///
/// The server provides healthcheck and version endpoints.
Expand Down Expand Up @@ -103,62 +102,70 @@ pub fn init_stake_table(
Ok(st)
}

async fn init_stake_table_from_orchestrator(
orchestrator_url: &Url,
#[derive(Debug, Deserialize)]
pub struct PublicHotShotConfig {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: why not just import this from sequencer?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I tried that but cargo told me there was a cyclic dependency problem somewhere. I also considered moving it to utils but it didn't seem in the util spirit.

I can keep pushing if we want though, maybe it wouldn't be too bad

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Deploying the light client contract will invoke light_client_genesis fn from hotshot-state-prover repo. Thus a cyclic dep.

pub known_nodes_with_stake: Vec<PeerConfig<BLSPubKey>>,
}

/// Initialize the stake table from a sequencer node that
/// is currently providing the HotShot config.
///
/// Retries indefinitely until the config is fetched; errors only if the
/// request URL cannot be constructed from the sequencer URL.
async fn init_stake_table_from_sequencer(
sequencer_url: &Url,
stake_table_capacity: usize,
) -> StakeTable<BLSPubKey, StateVerKey, CircuitField> {
tracing::info!("Initializing stake table from HotShot orchestrator {orchestrator_url}");
let client = Client::<ServerError, OrchestratorVersion>::new(orchestrator_url.clone());
loop {
match client.get::<bool>("api/peer_pub_ready").send().await {
Ok(true) => {
match client
.post::<NetworkConfig>("api/post_config_after_peer_collected")
.send()
.await
{
Ok(config) => {
let mut st = StakeTable::<BLSPubKey, StateVerKey, CircuitField>::new(
stake_table_capacity,
);
tracing::debug!("{}", config.config.known_nodes_with_stake.len());
config
.config
.known_nodes_with_stake
.into_iter()
.for_each(|config| {
st.register(
*config.stake_table_entry.key(),
config.stake_table_entry.stake(),
config.state_ver_key,
)
.expect("Key registration shouldn't fail.");
});
st.advance();
st.advance();
return st;
}
Err(e) => {
tracing::warn!("Orchestrator error: {e}, retrying.");
}
) -> Result<StakeTable<BLSPubKey, StateVerKey, CircuitField>> {
tracing::info!("Initializing stake table from node at {sequencer_url}");

// Construct the URL to fetch the network config
let config_url = sequencer_url
.join("/v0/config/hotshot")
.with_context(|| "Invalid URL")?;

// Request the configuration until it is successful
let network_config: PublicHotShotConfig = loop {
match reqwest::get(config_url.clone()).await {
Ok(resp) => match resp.json::<PublicHotShotConfig>().await {
Ok(config) => break config,
Err(e) => {
tracing::error!("Failed to parse the network config: {e}");
sleep(Duration::from_secs(5)).await;
}
}
Ok(false) => {
tracing::info!("Peers' keys are not ready, retrying.");
}
},
Err(e) => {
tracing::warn!("Orchestrator error {e}, retrying.");
tracing::error!("Failed to fetch the network config: {e}");
sleep(Duration::from_secs(5)).await;
}
}
sleep(Duration::from_secs(2)).await;
};

// Create empty stake table
let mut st = StakeTable::<BLSPubKey, StateVerKey, CircuitField>::new(stake_table_capacity);

// Populate the stake table
for node in network_config.known_nodes_with_stake.into_iter() {
st.register(
*node.stake_table_entry.key(),
node.stake_table_entry.stake(),
node.state_ver_key,
)
.expect("Key registration shouldn't fail.");
}

// Advance the stake table
st.advance();
st.advance();

Ok(st)
}

pub async fn light_client_genesis(
orchestrator_url: &Url,
sequencer_url: &Url,
stake_table_capacity: usize,
) -> anyhow::Result<ParsedLightClientState> {
let st = init_stake_table_from_orchestrator(orchestrator_url, stake_table_capacity).await;
let st = init_stake_table_from_sequencer(sequencer_url, stake_table_capacity)
.await
.with_context(|| "Failed to initialize stake table")?;
light_client_genesis_from_stake_table(st)
}

Expand Down Expand Up @@ -389,12 +396,13 @@ fn start_http_server<Ver: StaticVersionType + 'static>(
pub async fn run_prover_service<Ver: StaticVersionType + 'static>(
config: StateProverConfig,
bind_version: Ver,
) {
) -> Result<()> {
tracing::info!("Stake table capacity: {}", config.stake_table_capacity);
// TODO(#1022): maintain the following stake table
let st = Arc::new(
init_stake_table_from_orchestrator(&config.orchestrator_url, config.stake_table_capacity)
.await,
init_stake_table_from_sequencer(&config.sequencer_url, config.stake_table_capacity)
.await
.with_context(|| "Failed to initialize stake table")?,
);

tracing::info!("Light client address: {:?}", config.light_client_address);
Expand Down Expand Up @@ -427,10 +435,13 @@ pub async fn run_prover_service<Ver: StaticVersionType + 'static>(
}

/// Run light client state prover once
pub async fn run_prover_once<Ver: StaticVersionType>(config: StateProverConfig, _: Ver) {
let st =
init_stake_table_from_orchestrator(&config.orchestrator_url, config.stake_table_capacity)
.await;
pub async fn run_prover_once<Ver: StaticVersionType>(
config: StateProverConfig,
_: Ver,
) -> Result<()> {
let st = init_stake_table_from_sequencer(&config.sequencer_url, config.stake_table_capacity)
.await
.with_context(|| "Failed to initialize stake table")?;
let stake_table_capacity = config.stake_table_capacity;
let proving_key =
spawn_blocking(move || Arc::new(load_proving_key(stake_table_capacity))).await;
Expand All @@ -439,6 +450,8 @@ pub async fn run_prover_once<Ver: StaticVersionType>(config: StateProverConfig,
sync_state(&st, proving_key, &relay_server_client, &config)
.await
.expect("Error syncing the light client state.");

Ok(())
}

#[derive(Debug, Display)]
Expand Down Expand Up @@ -642,7 +655,7 @@ mod test {
l1_provider: Url::parse("http://localhost").unwrap(),
light_client_address: Address::default(),
eth_signing_key: SigningKey::random(&mut test_rng()),
orchestrator_url: Url::parse("http://localhost").unwrap(),
sequencer_url: Url::parse("http://localhost").unwrap(),
port: None,
stake_table_capacity: 10,
}
Expand Down
4 changes: 2 additions & 2 deletions process-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ processes:
depends_on:
demo-l1-network:
condition: process_healthy
orchestrator:
sequencer0:
condition: process_healthy
# Make sure this doesn't start until the other contracts have been deployed, since we use the same mnemonic.
deploy-sequencer-contracts:
Expand Down Expand Up @@ -92,7 +92,7 @@ processes:
- MNEMONIC=$ESPRESSO_SEQUENCER_ETH_MNEMONIC
- RAYON_NUM_THREADS=$PROVER_RAYON_NUM_THREADS
depends_on:
orchestrator:
sequencer0:
condition: process_healthy
state-relay-server:
condition: process_healthy
Expand Down
15 changes: 7 additions & 8 deletions sequencer/src/bin/deploy.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,15 +34,14 @@ struct Options {
)]
rpc_url: Url,

/// URL of the HotShot orchestrator.
///
/// This is used to get the stake table for initializing the light client contract.
/// URL of a sequencer node that is currently providing the HotShot config.
/// This is used to initialize the stake table.
#[clap(
long,
env = "ESPRESSO_SEQUENCER_ORCHESTRATOR_URL",
default_value = "http://localhost:40001"
env = "ESPRESSO_SEQUENCER_URL",
default_value = "http://localhost:24000"
)]
orchestrator_url: Url,
pub sequencer_url: Url,

/// Mnemonic for an L1 wallet.
///
Expand Down Expand Up @@ -94,9 +93,9 @@ async fn main() -> anyhow::Result<()> {
let opt = Options::parse();
let contracts = Contracts::from(opt.contracts);

let orchestrator_url = opt.orchestrator_url.clone();
let sequencer_url = opt.sequencer_url.clone();

let genesis = light_client_genesis(&orchestrator_url, opt.stake_table_capacity).boxed();
let genesis = light_client_genesis(&sequencer_url, opt.stake_table_capacity).boxed();

let contracts = deploy(
opt.rpc_url,
Expand Down
Loading