From ee086eabbf791f8be5e1a028502d4d8f54c1eec9 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Thu, 30 May 2024 23:17:34 +0800 Subject: [PATCH] Add dev-node binary (#1353) Add `espresso-dev-node` binary and friends. Currently docker image includes dev node, postgres and anvil. `builder_port` will be added in follow up (https://github.com/EspressoSystems/espresso-sequencer/issues/1522). --------- Co-authored-by: sveitser Co-authored-by: Abdul Basit Co-authored-by: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Co-authored-by: tbro --- .github/workflows/build.yml | 27 ++ .github/workflows/test.yml | 2 +- .gitignore | 3 - .../{settings.json.example => settings.json} | 0 Cargo.lock | 14 + Cargo.toml | 1 + docker-compose.yaml | 1 + docker/espresso-dev-node.Dockerfile | 32 ++ hotshot-state-prover/Cargo.toml | 2 +- hotshot-state-prover/src/service.rs | 8 +- scripts/build-docker-images | 11 +- scripts/build-docker-images-native | 11 +- scripts/launch-dev-node-with-postgres | 25 ++ sequencer/Cargo.toml | 6 + sequencer/src/api.rs | 69 +++- sequencer/src/bin/deploy.rs | 123 +------ sequencer/src/bin/espresso-dev-node.rs | 326 ++++++++++++++++++ sequencer/src/lib.rs | 55 ++- utils/Cargo.toml | 2 +- utils/src/deployer.rs | 122 ++++++- 20 files changed, 696 insertions(+), 144 deletions(-) rename .vscode/{settings.json.example => settings.json} (100%) create mode 100644 docker/espresso-dev-node.Dockerfile create mode 100644 scripts/launch-dev-node-with-postgres create mode 100644 sequencer/src/bin/espresso-dev-node.rs diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b7e8ce0eed..26498ccf2b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -52,6 +52,11 @@ jobs: run: | cargo build --locked --release --workspace + - name: Build Espresso Dev Node + # Espresso Dev Node currently requires testing feature, so it is built separately. 
+ run: | + cargo build --locked --release --features testing --bin espresso-dev-node + - name: Upload artifacts uses: actions/upload-artifact@v3 with: @@ -72,6 +77,7 @@ jobs: target/release/keygen target/release/permissionless-builder target/release/nasty-client + target/release/espresso-dev-node target/release/pub-key target/release/espresso-bridge @@ -97,6 +103,11 @@ jobs: run: | cargo build --locked --release --workspace + - name: Build Espresso Dev Node + # Espresso Dev Node currently requires testing feature, so it is built separately. + run: | + cargo build --locked --release --features testing --bin espresso-dev-node + - name: Upload artifacts uses: actions/upload-artifact@v3 with: @@ -117,6 +128,7 @@ jobs: target/release/keygen target/release/permissionless-builder target/release/nasty-client + target/release/espresso-dev-node target/release/pub-key target/release/espresso-bridge @@ -136,6 +148,7 @@ jobs: deploy-tag: ${{ steps.deploy.outputs.tags }} builder-tag: ${{ steps.builder.outputs.tags }} nasty-client-tag: ${{ steps.nasty-client.outputs.tags }} + espresso-dev-node-tag: ${{ steps.espresso-dev-node.outputs.tags }} bridge-tag: ${{ steps.bridge.outputs.tags }} steps: - name: Checkout Repository @@ -238,6 +251,11 @@ jobs: with: images: ghcr.io/espressosystems/espresso-sequencer/nasty-client + - name: Generate espresso-dev-node metadata + uses: docker/metadata-action@v5 + id: espresso-dev-node + with: + images: ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node - name: Generate bridge metadata uses: docker/metadata-action@v5 id: bridge @@ -364,6 +382,15 @@ jobs: tags: ${{ steps.nasty-client.outputs.tags }} labels: ${{ steps.nasty-client.outputs.labels }} + - name: Build and push espresso-dev-node docker + uses: docker/build-push-action@v5 + with: + context: ./ + file: ./docker/nasty-client.Dockerfile + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.espresso-dev-node.outputs.tags }} + 
labels: ${{ steps.espresso-dev-node.outputs.labels }} - name: Build and push bridge docker uses: docker/build-push-action@v5 with: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 37c4c45fd3..b50fdfeceb 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -55,4 +55,4 @@ jobs: cargo build --locked --bin diff-test --release cargo test --locked --release --workspace --all-features --no-run cargo test --locked --release --workspace --all-features --verbose -- --test-threads 1 --nocapture - timeout-minutes: 30 + timeout-minutes: 40 diff --git a/.gitignore b/.gitignore index fa3cb6f9fa..a31394fd33 100644 --- a/.gitignore +++ b/.gitignore @@ -15,9 +15,6 @@ target/ # Jetbrains editor .idea -# vscode stuff -.vscode/settings.json - node_modules/ **/*DS_Store diff --git a/.vscode/settings.json.example b/.vscode/settings.json similarity index 100% rename from .vscode/settings.json.example rename to .vscode/settings.json diff --git a/Cargo.lock b/Cargo.lock index 85a159861b..b39a566819 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2820,6 +2820,18 @@ dependencies = [ "vbs", ] +[[package]] +name = "escargot" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f474c6844cbd04e783d0f25757583db4f491770ca618bedf2fb01815fc79939" +dependencies = [ + "log", + "once_cell", + "serde", + "serde_json", +] + [[package]] name = "espresso-macros" version = "0.1.0" @@ -8487,11 +8499,13 @@ dependencies = [ "derive_more", "dotenvy", "es-version", + "escargot", "espresso-macros", "ethers", "ethers-contract-derive", "futures", "hotshot", + "hotshot-contract-adapter", "hotshot-events-service", "hotshot-orchestrator", "hotshot-query-service", diff --git a/Cargo.toml b/Cargo.toml index ea36e159d0..01371e2789 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ hotshot = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.55" } # Hotshot imports hotshot-builder-api = { git = 
"https://github.com/EspressoSystems/HotShot.git", tag = "0.5.55" } hotshot-builder-core = { git = "https://github.com/EspressoSystems/hotshot-builder-core", tag = "0.1.23" } +hotshot-contract-adapter = { version = "0.1.0", path = "contracts/rust/adapter" } hotshot-events-service = { git = "https://github.com/EspressoSystems/hotshot-events-service.git", tag = "0.1.23" } hotshot-orchestrator = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.55" } hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", tag = "0.1.26" } diff --git a/docker-compose.yaml b/docker-compose.yaml index 254b032c56..3e74ce03c0 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -268,6 +268,7 @@ services: - ESPRESSO_SEQUENCER_API_PEERS=http://sequencer2:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer2:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_POSTGRES_HOST=sequencer-db-1 + - ESPRESSO_SEQUENCER_POSTGRES_PORT=$ESPRESSO_SEQUENCER_DB_PORT - ESPRESSO_SEQUENCER_POSTGRES_USER=root - ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=password - ESPRESSO_SEQUENCER_POSTGRES_DATABASE=sequencer diff --git a/docker/espresso-dev-node.Dockerfile b/docker/espresso-dev-node.Dockerfile new file mode 100644 index 0000000000..616d7a3351 --- /dev/null +++ b/docker/espresso-dev-node.Dockerfile @@ -0,0 +1,32 @@ +FROM postgres + +ARG TARGETARCH + +RUN apt-get update \ + && apt-get install -y curl libcurl4 wait-for-it tini \ + && rm -rf /var/lib/apt/lists/* +ENTRYPOINT ["tini", "--"] + +# Download an SRS file to avoid download at runtime +ENV AZTEC_SRS_PATH=/kzg10-aztec20-srs-1048584.bin +RUN curl -LO https://github.com/EspressoSystems/ark-srs/releases/download/v0.2.0/$AZTEC_SRS_PATH + +COPY target/$TARGETARCH/release/espresso-dev-node /bin/espresso-dev-node +RUN chmod +x /bin/espresso-dev-node + +COPY target/$TARGETARCH/release/anvil /bin/anvil +RUN chmod +x /bin/anvil + +COPY launch-dev-node-with-postgres 
/bin/launch-dev-node-with-postgres +RUN chmod +x /bin/launch-dev-node-with-postgres + +# When running as a Docker service, we always want a healthcheck endpoint, so set a default for the +# port that the HTTP server will run on. This can be overridden in any given deployment environment. +ENV ESPRESSO_SEQUENCER_API_PORT=8770 +HEALTHCHECK --interval=1s --timeout=1s --retries=100 CMD curl --fail http://localhost:${ESPRESSO_SEQUENCER_API_PORT}/status/block-height || exit 1 + +EXPOSE 8770 +EXPOSE 8771 +EXPOSE 8772 + +CMD [ "/bin/launch-dev-node-with-postgres"] diff --git a/hotshot-state-prover/Cargo.toml b/hotshot-state-prover/Cargo.toml index 690b2e2893..bfc38c73e0 100644 --- a/hotshot-state-prover/Cargo.toml +++ b/hotshot-state-prover/Cargo.toml @@ -26,7 +26,7 @@ displaydoc = { version = "0.2.3", default-features = false } es-version = { workspace = true } ethers = { workspace = true } futures = { workspace = true } -hotshot-contract-adapter = { path = "../contracts/rust/adapter" } +hotshot-contract-adapter = { workspace = true } hotshot-orchestrator = { workspace = true } hotshot-stake-table = { workspace = true } hotshot-types = { workspace = true } diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 1d27fcbd56..afcb2a3a62 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -159,11 +159,17 @@ pub async fn light_client_genesis( stake_table_capacity: usize, ) -> anyhow::Result { let st = init_stake_table_from_orchestrator(orchestrator_url, stake_table_capacity).await; + light_client_genesis_from_stake_table(st) +} + +#[inline] +pub fn light_client_genesis_from_stake_table( + st: StakeTable, +) -> anyhow::Result { let (bls_comm, schnorr_comm, stake_comm) = st .commitment(SnapshotVersion::LastEpochStart) .expect("Commitment computation shouldn't fail."); let threshold = one_honest_threshold(st.total_stake(SnapshotVersion::LastEpochStart)?); - let pi = vec![ u256_to_field(threshold), 
F::from(0_u64), // Arbitrary value for view number diff --git a/scripts/build-docker-images b/scripts/build-docker-images index 14742079ea..61a55d90bb 100755 --- a/scripts/build-docker-images +++ b/scripts/build-docker-images @@ -7,7 +7,6 @@ nix develop .#armCrossShell --ignore-environment --command cargo build --release # The rest of the script doesn't run in a nix shell but we need to know where # the binaries are. CARGO_TARGET_DIR="./target/nix" -CONTRACTS_DIR="./contracts" # Copy binaries to a temporary directory. WORKDIR=$(mktemp -d -t espresso-docker-build-XXXXXXXX) @@ -36,11 +35,18 @@ for ARCH in "amd64" "arm64"; do ;; esac mkdir -p ${WORKDIR}/target/$ARCH/release - for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge"; do + for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node"; do cp -v "${CARGO_TARGET_DIR}/${TARGET}/release/$binary" ${WORKDIR}/target/$ARCH/release done + + # Download the latest foundry binary and extract anvil for the dev-node docker image. 
+ curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${ARCH}.tar.gz -o ${WORKDIR}/foundry.tar.gz + tar -xzvf ${WORKDIR}/foundry.tar.gz -C ${WORKDIR}/target/$ARCH/release anvil done +# Copy the dev-node launch script +cp -v scripts/launch-dev-node-with-postgres ${WORKDIR} + export DOCKER_BUILDKIT=1 docker build -t ghcr.io/espressosystems/espresso-sequencer/orchestrator:main -f docker/orchestrator.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/cdn-broker:main -f docker/cdn-broker.Dockerfile ${WORKDIR} @@ -54,4 +60,5 @@ docker build -t ghcr.io/espressosystems/espresso-sequencer/submit-transactions:m docker build -t ghcr.io/espressosystems/espresso-sequencer/deploy:main -f docker/deploy.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/builder:main -f docker/permissionless-builder.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/nasty-client:main -f docker/nasty-client.Dockerfile ${WORKDIR} +docker build -t ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:main -f docker/espresso-dev-node.Dockerfile ${WORKDIR} docker build -t ghcr.io/espressosystems/espresso-sequencer/bridge:main -f docker/bridge.Dockerfile ${WORKDIR} diff --git a/scripts/build-docker-images-native b/scripts/build-docker-images-native index 3749b12be5..63e4266a68 100755 --- a/scripts/build-docker-images-native +++ b/scripts/build-docker-images-native @@ -87,7 +87,7 @@ mkdir -p ${WORKDIR}/data cp -rv data/genesis ${WORKDIR}/data/ mkdir -p "${WORKDIR}/target/$ARCH/release" -for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" "submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge"; do +for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequencer" "commitment-task" 
"submit-transactions" "reset-storage" "state-relay-server" "state-prover" "deploy" "keygen" "permissionless-builder" "nasty-client" "pub-key" "espresso-bridge" "espresso-dev-node"; do cp -v "${CARGO_TARGET_DIR}/release/$binary" "${WORKDIR}/target/$ARCH/release" # Patch the interpreter for running without nix inside the ubuntu based docker image. if [ $KERNEL == "linux" ]; then @@ -95,6 +95,14 @@ for binary in "orchestrator" "cdn-broker" "cdn-marshal" "cdn-whitelist" "sequenc fi done +# Copy the dev-node launch script +cp -v scripts/launch-dev-node-with-postgres ${WORKDIR} + +# Download the latest foundry binary and extract anvil for the dev-node docker image. +curl -L https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_linux_${ARCH}.tar.gz -o ${WORKDIR}/foundry.tar.gz +tar -xzvf ${WORKDIR}/foundry.tar.gz -C ${WORKDIR}/target/$ARCH/release anvil + + export DOCKER_BUILDKIT=1 docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/orchestrator:main -f docker/orchestrator.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/cdn-broker:main -f docker/cdn-broker.Dockerfile ${WORKDIR} @@ -108,4 +116,5 @@ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/ docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/deploy:main -f docker/deploy.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/builder:main -f docker/permissionless-builder.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/nasty-client:main -f docker/nasty-client.Dockerfile ${WORKDIR} +docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:main -f docker/espresso-dev-node.Dockerfile ${WORKDIR} docker build --platform $PLATFORM -t ghcr.io/espressosystems/espresso-sequencer/bridge:main -f docker/bridge.Dockerfile 
${WORKDIR} diff --git a/scripts/launch-dev-node-with-postgres b/scripts/launch-dev-node-with-postgres new file mode 100644 index 0000000000..1aef1e556a --- /dev/null +++ b/scripts/launch-dev-node-with-postgres @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -euxo pipefail + +export ESPRESSO_SEQUENCER_POSTGRES_HOST=${ESPRESSO_SEQUENCER_POSTGRES_HOST:-localhost} +export ESPRESSO_SEQUENCER_POSTGRES_PORT=${ESPRESSO_SEQUENCER_POSTGRES_PORT:-5432} +export ESPRESSO_SEQUENCER_POSTGRES_USER=${ESPRESSO_SEQUENCER_POSTGRES_USER:-root} +export ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=${ESPRESSO_SEQUENCER_POSTGRES_PASSWORD:-password} + +export POSTGRES_USER=$ESPRESSO_SEQUENCER_POSTGRES_USER +export POSTGRES_PASSWORD=$ESPRESSO_SEQUENCER_POSTGRES_PASSWORD + +export RUST_LOG=${RUST_LOG:-info} + +# Start postgres in the background +docker-entrypoint.sh postgres & + +# Wait (twice) for postgres to be ready +# Postgres can be falsely "ready" once before running init scripts. +until pg_isready && sleep 1 && pg_isready; do + echo "Waiting for postgres..." 
+ sleep 1 +done + +# Start the dev node +espresso-dev-node diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index ff5154acc0..2236b92c32 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -9,7 +9,12 @@ edition = "2021" testing = ["hotshot-testing"] libp2p = [] +[[bin]] +name = "espresso-dev-node" +required-features = ["testing"] + [dev-dependencies] +escargot = "0.5.10" espresso-macros = { git = "https://github.com/EspressoSystems/espresso-macros.git", tag = "0.1.0" } hotshot-query-service = { workspace = true, features = ["testing"] } pretty_assertions = { workspace = true } @@ -56,6 +61,7 @@ ethers-contract-derive = "2.0.10" futures = { workspace = true } hotshot = { workspace = true } +hotshot-contract-adapter = { workspace = true } hotshot-events-service = { workspace = true } hotshot-orchestrator = { workspace = true } hotshot-query-service = { workspace = true } diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index ae74faeb6b..f4a858c3d9 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -20,6 +20,7 @@ use futures::{ use hotshot::types::{Event, SystemContextHandle}; use hotshot_events_service::events_source::{BuilderEvent, EventsSource, EventsStreamer}; use hotshot_query_service::data_source::ExtensibleDataSource; +use hotshot_state_prover::service::light_client_genesis_from_stake_table; use hotshot_types::{data::ViewNumber, light_client::StateSignatureRequestBody, HotShotConfig}; use jf_merkle_tree::MerkleTreeScheme; use serde::{Deserialize, Serialize}; @@ -275,8 +276,8 @@ impl; TestConfig::NUM_NODES], catchup: [impl StateCatchup + 'static; TestConfig::NUM_NODES], + l1: Url, ) -> Self { - let mut cfg = TestConfig::default(); + let mut cfg = TestConfig::default_with_l1(l1); let (builder_task, builder_url) = run_test_builder().await; @@ -391,16 +395,23 @@ mod test_helpers { pub async fn new( opt: Options, persistence: [impl PersistenceOptions; TestConfig::NUM_NODES], + l1: Url, ) -> Self { Self::with_state( opt, 
Default::default(), persistence, std::array::from_fn(|_| MockStateCatchup::default()), + l1, ) .await } + pub fn light_client_genesis(&self) -> ParsedLightClientState { + let st = self.cfg.stake_table(STAKE_TABLE_CAPACITY_FOR_TEST as usize); + light_client_genesis_from_stake_table(st).unwrap() + } + pub async fn stop_consensus(&mut self) { self.server.consensus_mut().shut_down().await; for ctx in &mut self.peers { @@ -425,8 +436,10 @@ mod test_helpers { let client: Client = Client::new(url); let options = opt(Options::from(options::Http { port }).status(Default::default())); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let _network = - TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; client.connect(None).await; // The status API is well tested in the query service repo. Here we are just smoke testing @@ -472,7 +485,10 @@ mod test_helpers { let client: Client = Client::new(url); let options = opt(Options::from(options::Http { port }).submit(Default::default())); - let network = TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); + let network = + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; let mut events = network.server.event_stream(); client.connect(None).await; @@ -501,7 +517,10 @@ mod test_helpers { let client: Client = Client::new(url); let options = opt(Options::from(options::Http { port })); - let network = TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); + let network = + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; let mut height: u64; // Wait for block >=2 appears @@ -537,8 +556,10 @@ mod test_helpers { let client: 
Client = Client::new(url); let options = opt(Options::from(options::Http { port }).catchup(Default::default())); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let mut network = - TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; client.connect(None).await; // Wait for a few blocks to be decided. @@ -627,6 +648,7 @@ mod api_tests { use data_source::testing::TestableSequencerDataSource; use endpoints::NamespaceProofQueryData; use es_version::SequencerVersion; + use ethers::utils::Anvil; use futures::stream::StreamExt; use hotshot_query_service::availability::LeafQueryData; use hotshot_types::vid::vid_scheme; @@ -667,9 +689,12 @@ mod api_tests { // Start query service. let port = pick_unused_port().expect("No ports free"); let storage = D::create_storage().await; + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let network = TestNetwork::new( D::options(&storage, options::Http { port }.into()).submit(Default::default()), [no_storage::Options; TestConfig::NUM_NODES], + l1, ) .await; let mut events = network.server.event_stream(); @@ -767,8 +792,10 @@ mod api_tests { }) .hotshot_events(hotshot_events); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let _network = - TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; let mut subscribed_events = client .socket("hotshot-events/events") @@ -813,10 +840,9 @@ mod test { use async_std::task::sleep; use committable::Commitment; use es_version::{SequencerVersion, SEQUENCER_VERSION}; - use futures::{ - future::{self, join_all}, - stream::{StreamExt, TryStreamExt}, - }; + use ethers::utils::Anvil; + use futures::future::{self, join_all}; + use futures::stream::{StreamExt, TryStreamExt}; use 
hotshot::types::EventType; use hotshot_query_service::{ availability::{BlockQueryData, LeafQueryData}, @@ -845,8 +871,10 @@ mod test { let url = format!("http://localhost:{port}").parse().unwrap(); let client: Client = Client::new(url); let options = Options::from(options::Http { port }); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let _network = - TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; client.connect(None).await; let health = client.get::("healthcheck").send().await.unwrap(); @@ -888,8 +916,10 @@ mod test { .status(Default::default()), ); + let anvil: ethers::utils::AnvilInstance = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let mut network = - TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES]).await; + TestNetwork::new(options, [no_storage::Options; TestConfig::NUM_NODES], l1).await; let url = format!("http://localhost:{port}").parse().unwrap(); let client: Client = Client::new(url); @@ -948,6 +978,8 @@ mod test { // Start a sequencer network, using the query service for catchup. 
let port = pick_unused_port().expect("No ports free"); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let mut network = TestNetwork::with_state( Options::from(options::Http { port }).catchup(Default::default()), Default::default(), @@ -957,6 +989,7 @@ mod test { .parse() .unwrap()]) }), + l1, ) .await; @@ -1047,6 +1080,8 @@ mod test { .try_into() .unwrap(); let port = pick_unused_port().unwrap(); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let mut network = TestNetwork::with_state( SqlDataSource::options(&storage[0], options::Http { port }.into()) .state(Default::default()) @@ -1054,6 +1089,7 @@ mod test { Default::default(), persistence, std::array::from_fn(|_| MockStateCatchup::default()), + l1, ) .await; @@ -1106,6 +1142,8 @@ mod test { // Start up again, resuming from the last decided leaf. let port = pick_unused_port().expect("No ports free"); + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); let persistence = storage .iter() .map(::persistence_options) @@ -1125,6 +1163,7 @@ mod test { .parse() .unwrap()]) }), + l1, ) .await; let client: Client = diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 2935c0f244..930c10db76 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -1,30 +1,12 @@ -use anyhow::{ensure, Context}; use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; -use async_std::sync::Arc; -use clap::{Parser, ValueEnum}; -use contract_bindings::{ - erc1967_proxy::ERC1967Proxy, fee_contract::FeeContract, hot_shot::HotShot, - light_client::LightClient, -}; -use ethers::prelude::{coins_bip39::English, *}; -use futures::future::FutureExt; +use clap::Parser; +use futures::FutureExt; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; use hotshot_state_prover::service::light_client_genesis; -use sequencer_utils::deployer::{ - deploy_light_client_contract, 
deploy_mock_light_client_contract, Contract, Contracts, - DeployedContracts, -}; +use sequencer_utils::deployer::{deploy, ContractGroup, Contracts, DeployedContracts}; use std::{fs::File, io::stdout, path::PathBuf}; use url::Url; -#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] -enum ContractGroup { - #[clap(name = "hotshot")] - HotShot, - FeeContract, - LightClient, -} - /// Deploy contracts needed to run the sequencer. /// /// This script deploys contracts needed to run the sequencer to an L1. It outputs a .env file @@ -104,95 +86,28 @@ struct Options { pub stake_table_capacity: usize, } -fn should_deploy(group: ContractGroup, only: &Option>) -> bool { - match only { - Some(groups) => groups.contains(&group), - None => true, - } -} - #[async_std::main] async fn main() -> anyhow::Result<()> { setup_logging(); setup_backtrace(); let opt = Options::parse(); - let mut contracts = Contracts::from(opt.contracts); - - let provider = Provider::::try_from(opt.rpc_url.to_string())?; - let chain_id = provider.get_chainid().await?.as_u64(); - let wallet = MnemonicBuilder::::default() - .phrase(opt.mnemonic.as_str()) - .index(opt.account_index)? - .build()? - .with_chain_id(chain_id); - let owner = wallet.address(); - let l1 = Arc::new(SignerMiddleware::new(provider, wallet)); - - // As a sanity check, check that the deployer address has some balance of ETH it can use to pay - // gas. - let balance = l1.get_balance(owner, None).await?; - ensure!( - balance > 0.into(), - "deployer account {owner:#x} is not funded!" - ); - tracing::info!(%balance, "deploying from address {owner:#x}"); - - // `HotShot.sol` - if should_deploy(ContractGroup::HotShot, &opt.only) { - contracts - .deploy_tx(Contract::HotShot, HotShot::deploy(l1.clone(), ())?) 
- .await?; - } - - // `LightClient.sol` - if should_deploy(ContractGroup::LightClient, &opt.only) { - // Deploy the upgradable light client contract first, then initialize it through a proxy contract - let lc_address = if opt.use_mock_contract { - contracts - .deploy_fn(Contract::LightClient, |contracts| { - deploy_mock_light_client_contract(l1.clone(), contracts, None).boxed() - }) - .await? - } else { - contracts - .deploy_fn(Contract::LightClient, |contracts| { - deploy_light_client_contract(l1.clone(), contracts).boxed() - }) - .await? - }; - let light_client = LightClient::new(lc_address, l1.clone()); - - let genesis = light_client_genesis(&opt.orchestrator_url, opt.stake_table_capacity).await?; - let data = light_client - .initialize(genesis.into(), u32::MAX, owner) - .calldata() - .context("calldata for initialize transaction not available")?; - contracts - .deploy_tx( - Contract::LightClientProxy, - ERC1967Proxy::deploy(l1.clone(), (lc_address, data))?, - ) - .await?; - } - - // `FeeContract.sol` - if should_deploy(ContractGroup::FeeContract, &opt.only) { - let fee_contract_address = contracts - .deploy_tx(Contract::FeeContract, FeeContract::deploy(l1.clone(), ())?) 
- .await?; - let fee_contract = FeeContract::new(fee_contract_address, l1.clone()); - let data = fee_contract - .initialize(owner) - .calldata() - .context("calldata for initialize transaction not available")?; - contracts - .deploy_tx( - Contract::FeeContractProxy, - ERC1967Proxy::deploy(l1.clone(), (fee_contract_address, data))?, - ) - .await?; - } + let contracts = Contracts::from(opt.contracts); + + let orchestrator_url = opt.orchestrator_url.clone(); + + let genesis = light_client_genesis(&orchestrator_url, opt.stake_table_capacity).boxed(); + + let contracts = deploy( + opt.rpc_url, + opt.mnemonic, + opt.account_index, + opt.use_mock_contract, + opt.only, + genesis, + contracts, + ) + .await?; if let Some(out) = &opt.out { let file = File::options() diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs new file mode 100644 index 0000000000..9f82b061d7 --- /dev/null +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -0,0 +1,326 @@ +use std::{io, time::Duration}; + +use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; +use async_std::task::spawn; +use clap::Parser; +use es_version::SEQUENCER_VERSION; +use ethers::types::Address; +use futures::FutureExt; +use sequencer::{ + api::options, + api::test_helpers::TestNetwork, + hotshot_commitment::{run_hotshot_commitment_task, CommitmentTaskOptions}, + persistence, + testing::TestConfig, +}; +use sequencer_utils::{ + deployer::{deploy, Contract, Contracts}, + AnvilOptions, +}; +use tide_disco::{error::ServerError, Api}; +use url::Url; +use vbs::version::StaticVersionType; + +#[derive(Clone, Debug, Parser)] +struct Args { + /// A JSON-RPC endpoint for the L1 to deploy to. If this is not provided, an Avil node will be + /// launched automatically. + #[clap(short, long, env = "ESPRESSO_SEQUENCER_L1_PROVIDER")] + rpc_url: Option, + /// Mnemonic for an L1 wallet. 
+ /// + /// This wallet is used to deploy the contracts, so the account indicated by ACCOUNT_INDEX must + /// be funded with with ETH. + #[clap( + long, + name = "MNEMONIC", + env = "ESPRESSO_SEQUENCER_ETH_MNEMONIC", + default_value = "test test test test test test test test test test test junk" + )] + mnemonic: String, + /// Account index in the L1 wallet generated by MNEMONIC to use when deploying the contracts. + #[clap( + long, + name = "ACCOUNT_INDEX", + env = "ESPRESSO_DEPLOYER_ACCOUNT_INDEX", + default_value = "0" + )] + account_index: u32, + /// Port that the HTTP API will use. + #[clap(long, env = "ESPRESSO_SEQUENCER_API_PORT")] + sequencer_api_port: u16, + /// If provided, the service will run a basic HTTP server on the given port. + /// + /// The server provides healthcheck and version endpoints. + #[clap(short, long, env = "ESPRESSO_COMMITMENT_TASK_PORT")] + commitment_task_port: u16, + + #[clap(flatten)] + sql: persistence::sql::Options, +} + +#[async_std::main] +async fn main() -> anyhow::Result<()> { + setup_logging(); + setup_backtrace(); + + let opt = Args::parse(); + let options = options::Options::from(options::Http { + port: opt.sequencer_api_port, + }) + .status(Default::default()) + .state(Default::default()) + .submit(Default::default()) + .query_sql(Default::default(), opt.sql); + + let (url, _anvil) = if let Some(url) = opt.rpc_url { + (url, None) + } else { + tracing::warn!("L1 url is not provided. 
running an anvil node"); + let instance = AnvilOptions::default().spawn().await; + let url = instance.url(); + tracing::info!("l1 url: {}", url); + (url, Some(instance)) + }; + + let network = TestNetwork::new( + options, + [persistence::no_storage::Options; TestConfig::NUM_NODES], + url.clone(), + ) + .await; + + let config = network.cfg.hotshot_config(); + tracing::info!("Hotshot config {config:?}"); + + let contracts = Contracts::new(); + + tracing::info!("deploying the contracts"); + + let light_client_genesis = network.light_client_genesis(); + + let contracts = deploy( + url.clone(), + opt.mnemonic.clone(), + opt.account_index, + true, + None, + async { Ok(light_client_genesis) }.boxed(), + contracts, + ) + .await?; + + let hotshot_address = contracts + .get_contract_address(Contract::HotShot) + .expect("Cannot get the hotshot contract address"); + tracing::info!("hotshot address: {}", hotshot_address); + + tracing::info!("starting the commitment server"); + start_commitment_server(opt.commitment_task_port, hotshot_address, SEQUENCER_VERSION).unwrap(); + + let sequencer_url = + Url::parse(format!("http://localhost:{}", opt.sequencer_api_port).as_str()).unwrap(); + let commitment_task_options = CommitmentTaskOptions { + l1_provider: url, + l1_chain_id: None, + hotshot_address, + sequencer_mnemonic: opt.mnemonic, + sequencer_account_index: opt.account_index, + query_service_url: Some(sequencer_url), + request_timeout: Duration::from_secs(5), + delay: None, + }; + + tracing::info!("starting hotshot commitment task"); + run_hotshot_commitment_task::(&commitment_task_options).await; + + Ok(()) +} + +// Copied from `commitment_task::start_http_server`. 
+// TODO: Remove these redundant code +fn start_commitment_server( + port: u16, + hotshot_address: Address, + bind_version: Ver, +) -> io::Result<()> { + let mut app = tide_disco::App::<(), ServerError>::with_state(()); + let toml = toml::from_str::(include_str!("../../api/commitment_task.toml")) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + let mut api = Api::<(), ServerError, Ver>::new(toml) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + api.get("gethotshotcontract", move |_, _| { + async move { Ok(hotshot_address) }.boxed() + }) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + app.register_module("api", api) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + + spawn(app.serve(format!("localhost:{port}"), bind_version)); + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::{process::Child, thread::sleep, time::Duration}; + + use async_compatibility_layer::logging::{setup_backtrace, setup_logging}; + use async_std::stream::StreamExt; + use committable::{Commitment, Committable}; + use es_version::SequencerVersion; + use escargot::CargoBuild; + use futures::TryStreamExt; + use hotshot_query_service::{ + availability::{BlockQueryData, TransactionQueryData}, + data_source::sql::testing::TmpDb, + }; + use jf_merkle_tree::MerkleTreeScheme; + use portpicker::pick_unused_port; + use sequencer::{ + api::endpoints::NamespaceProofQueryData, state::BlockMerkleTree, Header, SeqTypes, + Transaction, + }; + use surf_disco::Client; + use tide_disco::error::ServerError; + + pub struct BackgroundProcess(Child); + + impl Drop for BackgroundProcess { + fn drop(&mut self) { + self.0.kill().unwrap(); + } + } + + // If this test failed and you are doing changes on the following stuff, please + // sync your changes to [`espresso-sequencer-go`](https://github.com/EspressoSystems/espresso-sequencer-go) + // and open a PR. 
+ // - APIs update + // - Types (like `Header`) update + #[async_std::test] + async fn dev_node_test() { + setup_logging(); + setup_backtrace(); + + let commitment_task_port = pick_unused_port().unwrap(); + + let api_port = pick_unused_port().unwrap(); + + let db = TmpDb::init().await; + let postgres_port = db.port(); + + let process = CargoBuild::new() + .bin("espresso-dev-node") + .features("testing") + .current_target() + .run() + .unwrap() + .command() + .env( + "ESPRESSO_COMMITMENT_TASK_PORT", + commitment_task_port.to_string(), + ) + .env("ESPRESSO_SEQUENCER_API_PORT", api_port.to_string()) + .env("ESPRESSO_SEQUENCER_POSTGRES_HOST", "localhost") + .env( + "ESPRESSO_SEQUENCER_POSTGRES_PORT", + postgres_port.to_string(), + ) + .env("ESPRESSO_SEQUENCER_POSTGRES_USER", "postgres") + .env("ESPRESSO_SEQUENCER_POSTGRES_PASSWORD", "password") + .spawn() + .unwrap(); + + let _process = BackgroundProcess(process); + + let api_client: Client = + Client::new(format!("http://localhost:{api_port}").parse().unwrap()); + api_client.connect(None).await; + + tracing::info!("waiting for blocks"); + let _ = api_client + .socket("availability/stream/blocks/0") + .subscribe::>() + .await + .unwrap() + .take(5) + .try_collect::>() + .await + .unwrap(); + + let commitment_api_client: Client = Client::new( + format!("http://localhost:{commitment_task_port}/api") + .parse() + .unwrap(), + ); + commitment_api_client.connect(None).await; + + let hotshot_contract = commitment_api_client + .get::("hotshot_contract") + .send() + .await + .unwrap(); + + assert!(!hotshot_contract.is_empty()); + + let tx = Transaction::new(100.into(), vec![1, 2, 3]); + + let hash: Commitment = api_client + .post("submit/submit") + .body_json(&tx) + .unwrap() + .send() + .await + .unwrap(); + + let tx_hash = tx.commit(); + assert_eq!(hash, tx_hash); + + while api_client + .get::>(&format!( + "availability/transaction/hash/{}", + tx_hash + )) + .send() + .await + .is_err() + { + sleep(Duration::from_secs(3)) + 
} + + // These endpoints are currently used in `espresso-sequencer-go`. These checks + // serve as reminders of syncing the API updates to go client repo when they change. + { + api_client + .get::<u64>("status/block-height") + .send() + .await + .unwrap(); + + api_client + .get::<Header>
("availability/header/3") + .send() + .await + .unwrap(); + + api_client + .get::("availability/block/2/namespace/0") + .send() + .await + .unwrap(); + + while api_client + .get::<::MembershipProof>("block-state/3/2") + .send() + .await + .is_err() + { + sleep(Duration::from_secs(3)) + } + } + + drop(db); + } +} diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index a2b2343322..fbfbe50c82 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -62,6 +62,7 @@ use hotshot_types::{ metrics::Metrics, network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType}, + signature_key::{BuilderSignatureKey, StakeTableEntryType}, states::InstanceState, storage::Storage, }, @@ -501,7 +502,6 @@ pub mod testing { persistence::no_storage::{self, NoStorage}, }; use committable::Committable; - use ethers::utils::{Anvil, AnvilInstance}; use futures::{ future::join_all, stream::{Stream, StreamExt}, @@ -511,16 +511,15 @@ pub mod testing { BlockPayload, }; use hotshot::types::{EventType::Decide, Message}; + use hotshot_stake_table::vec_based::StakeTable; use hotshot_testing::block_builder::{ BuilderTask, SimpleBuilderImplementation, TestBuilderImplementation, }; use hotshot_types::{ event::LeafInfo, - light_client::StateKeyPair, - traits::{ - block_contents::BlockHeader, metrics::NoMetrics, signature_key::BuilderSignatureKey, - }, - ExecutionType, HotShotConfig, PeerConfig, ValidatorConfig, + light_client::{CircuitField, StateKeyPair, StateVerKey}, + traits::{block_contents::BlockHeader, metrics::NoMetrics, stake_table::StakeTableScheme}, + ExecutionType, HotShotConfig, PeerConfig, }; use portpicker::pick_unused_port; use std::time::Duration; @@ -541,7 +540,7 @@ pub mod testing { priv_keys: Vec, state_key_pairs: Vec, master_map: Arc, PubKey>>, - anvil: Arc, + url: Url, } impl Default for TestConfig { @@ -602,7 +601,7 @@ pub mod testing { priv_keys, state_key_pairs, master_map, - anvil: Arc::new(Anvil::new().spawn()), + url: 
"http://localhost:8545".parse().unwrap(), } } } @@ -622,6 +621,13 @@ pub mod testing { self.config.builder_url = builder_url; } + pub fn default_with_l1(l1: Url) -> Self { + TestConfig { + url: l1, + ..Default::default() + } + } + pub async fn init_nodes( &self, bind_version: Ver, @@ -641,6 +647,28 @@ pub mod testing { .await } + pub fn stake_table( + &self, + stake_table_capacity: usize, + ) -> StakeTable { + let mut st = + StakeTable::::new(stake_table_capacity); + self.config + .known_nodes_with_stake + .iter() + .for_each(|config| { + st.register( + *config.stake_table_entry.key(), + config.stake_table_entry.stake(), + config.state_ver_key.clone(), + ) + .unwrap() + }); + st.advance(); + st.advance(); + st + } + #[allow(clippy::too_many_arguments)] pub async fn init_node( &self, @@ -681,7 +709,7 @@ pub mod testing { let node_state = NodeState::new( i as u64, ChainConfig::default(), - L1Client::new(self.anvil.endpoint().parse().unwrap(), 10000), + L1Client::new(self.url.clone(), 1000), catchup::local_and_remote(persistence_opt.clone(), catchup).await, ) .with_genesis(state); @@ -763,6 +791,7 @@ mod test { vid_commitment, BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES, }, }; + use sequencer_utils::AnvilOptions; use testing::{wait_for_decide_on_handle, TestConfig}; #[async_std::test] @@ -771,7 +800,9 @@ mod test { setup_backtrace(); let ver = SequencerVersion::instance(); // Assign `config` so it isn't dropped early. - let mut config = TestConfig::default(); + let anvil = AnvilOptions::default().spawn().await; + let url = anvil.url(); + let mut config = TestConfig::default_with_l1(url); let (builder_task, builder_url) = run_test_builder().await; @@ -811,7 +842,9 @@ mod test { let success_height = 30; let ver = SequencerVersion::instance(); // Assign `config` so it isn't dropped early. 
- let mut config = TestConfig::default(); + let anvil = AnvilOptions::default().spawn().await; + let url = anvil.url(); + let mut config = TestConfig::default_with_l1(url); let (builder_task, builder_url) = run_test_builder().await; diff --git a/utils/Cargo.toml b/utils/Cargo.toml index bee6870764..bfd401dc0c 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -15,7 +15,7 @@ contract-bindings = { path = "../contract-bindings" } derive_more = { workspace = true } ethers = { workspace = true } futures = { workspace = true } -hotshot-contract-adapter ={ path = "../contracts/rust/adapter" } +hotshot-contract-adapter = { workspace = true } portpicker = { workspace = true } serde = { workspace = true } serde_json = "^1.0.113" diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index de576f929f..57be73dc65 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -1,17 +1,23 @@ use anyhow::{ensure, Context}; use async_std::sync::Arc; -use clap::{builder::OsStr, Parser}; +use clap::{builder::OsStr, Parser, ValueEnum}; use contract_bindings::{ - light_client::LIGHTCLIENT_ABI, light_client_mock::LIGHTCLIENTMOCK_ABI, + erc1967_proxy::ERC1967Proxy, + fee_contract::FeeContract, + hot_shot::HotShot, + light_client::{LightClient, LIGHTCLIENT_ABI}, + light_client_mock::LIGHTCLIENTMOCK_ABI, light_client_state_update_vk::LightClientStateUpdateVK, - light_client_state_update_vk_mock::LightClientStateUpdateVKMock, plonk_verifier::PlonkVerifier, + light_client_state_update_vk_mock::LightClientStateUpdateVKMock, + plonk_verifier::PlonkVerifier, shared_types::LightClientState, }; use derive_more::Display; -use ethers::{prelude::*, solc::artifacts::BytecodeObject}; +use ethers::{prelude::*, signers::coins_bip39::English, solc::artifacts::BytecodeObject}; use futures::future::{BoxFuture, FutureExt}; use hotshot_contract_adapter::light_client::ParsedLightClientState; use std::{collections::HashMap, io::Write, ops::Deref}; +use url::Url; /// Set of predeployed contracts. 
#[derive(Clone, Debug, Parser)] @@ -103,6 +109,13 @@ impl From<DeployedContracts> for Contracts { } impl Contracts { + pub fn new() -> Self { + Contracts(HashMap::new()) + } + + pub fn get_contract_address(&self, contract: Contract) -> Option<Address>
{ + self.0.get(&contract).copied() + } /// Deploy a contract by calling a function. /// /// The `deploy` function will be called only if contract `name` is not already deployed; @@ -288,3 +301,104 @@ pub async fn deploy_mock_light_client_contract( .await?; Ok(contract.address()) } + +pub async fn deploy( + l1url: Url, + mnemonic: String, + account_index: u32, + use_mock_contract: bool, + only: Option>, + genesis: BoxFuture<'_, anyhow::Result>, + mut contracts: Contracts, +) -> anyhow::Result { + let provider = Provider::::try_from(l1url.to_string())?; + let chain_id = provider.get_chainid().await?.as_u64(); + let wallet = MnemonicBuilder::::default() + .phrase(mnemonic.as_str()) + .index(account_index)? + .build()? + .with_chain_id(chain_id); + let owner = wallet.address(); + let l1 = Arc::new(SignerMiddleware::new(provider, wallet)); + + // As a sanity check, check that the deployer address has some balance of ETH it can use to pay + // gas. + let balance = l1.get_balance(owner, None).await?; + ensure!( + balance > 0.into(), + "deployer account {owner:#x} is not funded!" + ); + tracing::info!(%balance, "deploying from address {owner:#x}"); + + // `HotShot.sol` + if should_deploy(ContractGroup::HotShot, &only) { + contracts + .deploy_tx(Contract::HotShot, HotShot::deploy(l1.clone(), ())?) + .await?; + } + + // `LightClient.sol` + if should_deploy(ContractGroup::LightClient, &only) { + // Deploy the upgradable light client contract first, then initialize it through a proxy contract + let lc_address = if use_mock_contract { + contracts + .deploy_fn(Contract::LightClient, |contracts| { + deploy_mock_light_client_contract(l1.clone(), contracts, None).boxed() + }) + .await? + } else { + contracts + .deploy_fn(Contract::LightClient, |contracts| { + deploy_light_client_contract(l1.clone(), contracts).boxed() + }) + .await? 
+ }; + let light_client = LightClient::new(lc_address, l1.clone()); + + let data = light_client + .initialize(genesis.await?.into(), u32::MAX, owner) + .calldata() + .context("calldata for initialize transaction not available")?; + contracts + .deploy_tx( + Contract::LightClientProxy, + ERC1967Proxy::deploy(l1.clone(), (lc_address, data))?, + ) + .await?; + } + + // `FeeContract.sol` + if should_deploy(ContractGroup::FeeContract, &only) { + let fee_contract_address = contracts + .deploy_tx(Contract::FeeContract, FeeContract::deploy(l1.clone(), ())?) + .await?; + let fee_contract = FeeContract::new(fee_contract_address, l1.clone()); + let data = fee_contract + .initialize(owner) + .calldata() + .context("calldata for initialize transaction not available")?; + contracts + .deploy_tx( + Contract::FeeContractProxy, + ERC1967Proxy::deploy(l1.clone(), (fee_contract_address, data))?, + ) + .await?; + } + + Ok(contracts) +} + +fn should_deploy(group: ContractGroup, only: &Option>) -> bool { + match only { + Some(groups) => groups.contains(&group), + None => true, + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] +pub enum ContractGroup { + #[clap(name = "hotshot")] + HotShot, + FeeContract, + LightClient, +}