Merge pull request #2809 from subspace/snap-sync
Initial implementation of snap sync
nazar-pc authored May 30, 2024
2 parents c92507e + d921fd3 commit e99f77b
Showing 12 changed files with 905 additions and 148 deletions.
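At a high level, the PR replaces the boolean `sync_from_dsn` node option with a `ChainSyncMode` value threaded from a new `--sync` CLI flag (defaulting to full sync) through `SubspaceConfiguration`. Only two variants are visible in this diff, so the following sketch of the enum is an assumption about its shape, not the actual definition in `subspace_service::config`:

// Assumed shape of the new sync-mode enum; `Full` (the CLI default) and
// `Snap` (checked in consensus.rs below) are the only variants this diff shows.
pub enum ChainSyncMode {
    /// Download and verify every block from genesis (previous behaviour).
    Full,
    /// Download the state of a recent block first and sync from there,
    /// letting archiving of older history happen later.
    Snap,
}

A node would then opt in with something like `--sync snap`, while omitting the flag keeps full sync.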
1 change: 1 addition & 0 deletions Cargo.lock


22 changes: 13 additions & 9 deletions crates/sc-consensus-subspace/src/archiver.rs
@@ -865,6 +865,10 @@ where
// Special sync mode where verified blocks were inserted into the blockchain
// directly; archiving of these blocks will naturally happen later
continue;
} else if best_archived_block_number.is_zero() {
// We may have imported some blocks using a special sync mode right after genesis,
// in which case the archiver will be stuck at the genesis block
continue;
} else {
let error = format!(
"There was a gap in blockchain history and the last contiguous series of \
@@ -1007,15 +1011,15 @@ where
.filter(|block_number| *block_number > client.info().finalized_number);

if let Some(block_number_to_finalize) = maybe_block_number_to_finalize {
let block_hash_to_finalize = client
.hash(block_number_to_finalize)?
.expect("Block about to be finalized must always exist");
finalize_block(
client.as_ref(),
telemetry.clone(),
block_hash_to_finalize,
block_number_to_finalize,
);
// Block is not guaranteed to be present this deep if we have only synced recent blocks
if let Some(block_hash_to_finalize) = client.hash(block_number_to_finalize)? {
finalize_block(
client.as_ref(),
telemetry.clone(),
block_hash_to_finalize,
block_number_to_finalize,
);
}
}
}
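Both archiver changes in this file follow one rule: under snap sync, locally stored history may begin well after genesis, so missing blocks deep in history are expected rather than a sign of corruption. The gap check from the first hunk, condensed (with simplified stand-in bindings for the surrounding archiver state):

// Condensed sketch of the gap handling above; `imported_via_special_sync` and
// `best_archived_block_number` are simplified stand-ins for archiver state.
if imported_via_special_sync {
    // Verified blocks were inserted directly; archiving happens later.
} else if best_archived_block_number.is_zero() {
    // Blocks may have been snap-synced right after genesis, leaving the
    // archiver parked at genesis; this is not a real history gap.
} else {
    // Any other discontinuity is still treated as a hard error.
}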

124 changes: 56 additions & 68 deletions crates/sc-consensus-subspace/src/block_import.rs
@@ -37,6 +37,7 @@ use sc_client_api::BlockBackend;
use sc_consensus::block_import::{
BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult,
};
use sc_consensus::StateAction;
use sc_proof_of_time::verifier::PotVerifier;
use sp_api::{ApiError, ApiExt, ProvideRuntimeApi};
use sp_block_builder::BlockBuilder as BlockBuilderApi;
@@ -170,9 +171,6 @@ pub enum Error<Header: HeaderT> {
/// Farmer in block list
#[error("Farmer {0} is in block list")]
FarmerInBlockList(FarmerPublicKey),
/// No block weight for parent header
#[error("No block weight for parent header {0}")]
NoBlockWeight(Header::Hash),
/// Segment commitment not found
#[error("Segment commitment for segment index {0} not found")]
SegmentCommitmentNotFound(SegmentIndex),
@@ -337,7 +335,6 @@
}
}

#[allow(clippy::too_many_arguments)]
async fn block_import_verification(
&self,
block_hash: Block::Hash,
@@ -350,7 +347,6 @@
FarmerSignature,
>,
justifications: &Option<Justifications>,
skip_runtime_access: bool,
) -> Result<(), Error<Block::Header>> {
let block_number = *header.number();
let parent_hash = *header.parent_hash();
@@ -367,14 +363,7 @@
if self
.client
.runtime_api()
.is_in_block_list(parent_hash, &pre_digest.solution().public_key)
.or_else(|error| {
if skip_runtime_access {
Ok(false)
} else {
Err(Error::RuntimeApi(error))
}
})?
.is_in_block_list(parent_hash, &pre_digest.solution().public_key)?
{
warn!(
public_key = %pre_digest.solution().public_key,
@@ -492,9 +481,6 @@
pre_digest.solution().sector_index,
);

// TODO: Below `skip_runtime_access` has no impact on this, but ideally it
// should (though we don't support fast sync yet, so doesn't matter in
// practice)
let max_pieces_in_sector = self
.client
.runtime_api()
@@ -542,9 +528,6 @@
recent_segments: chain_constants.recent_segments(),
recent_history_fraction: chain_constants.recent_history_fraction(),
min_sector_lifetime: chain_constants.min_sector_lifetime(),
// TODO: Below `skip_runtime_access` has no impact on this, but ideally it
// should (though we don't support fast sync yet, so doesn't matter in
// practice)
current_history_size: self.client.runtime_api().history_size(parent_hash)?,
sector_expiration_check_segment_commitment,
}),
@@ -553,37 +536,35 @@
)
.map_err(|error| VerificationError::VerificationError(pre_digest.slot(), error))?;

if !skip_runtime_access {
// If the body is passed through, we need to use the runtime to check that the
// internally-set timestamp in the inherents actually matches the slot set in the seal
// and segment headers in the inherents are set correctly.
if let Some(extrinsics) = extrinsics {
let create_inherent_data_providers = self
.create_inherent_data_providers
.create_inherent_data_providers(parent_hash, ())
.await
.map_err(|error| Error::Client(sp_blockchain::Error::from(error)))?;

let inherent_data = create_inherent_data_providers
.create_inherent_data()
.await
.map_err(Error::CreateInherents)?;

let inherent_res = self.client.runtime_api().check_inherents(
parent_hash,
Block::new(header, extrinsics),
inherent_data,
)?;

if !inherent_res.ok() {
for (i, e) in inherent_res.into_errors() {
match create_inherent_data_providers
.try_handle_error(&i, &e)
.await
{
Some(res) => res.map_err(Error::CheckInherents)?,
None => return Err(Error::CheckInherentsUnhandled(i)),
}
// If the body is passed through, we need to use the runtime to check that the
// internally-set timestamp in the inherents actually matches the slot set in the seal
// and segment headers in the inherents are set correctly.
if let Some(extrinsics) = extrinsics {
let create_inherent_data_providers = self
.create_inherent_data_providers
.create_inherent_data_providers(parent_hash, ())
.await
.map_err(|error| Error::Client(sp_blockchain::Error::from(error)))?;

let inherent_data = create_inherent_data_providers
.create_inherent_data()
.await
.map_err(Error::CreateInherents)?;

let inherent_res = self.client.runtime_api().check_inherents(
parent_hash,
Block::new(header, extrinsics),
inherent_data,
)?;

if !inherent_res.ok() {
for (i, e) in inherent_res.into_errors() {
match create_inherent_data_providers
.try_handle_error(&i, &e)
.await
{
Some(res) => res.map_err(Error::CheckInherents)?,
None => return Err(Error::CheckInherentsUnhandled(i)),
}
}
}
@@ -634,27 +615,33 @@
}

let subspace_digest_items = extract_subspace_digest_items(&block.header)?;
let skip_execution_checks = block.state_action.skip_execution_checks();

let root_plot_public_key = self
.client
.runtime_api()
.root_plot_public_key(*block.header.parent_hash())?;

self.block_import_verification(
block_hash,
block.header.clone(),
block.body.clone(),
&root_plot_public_key,
&subspace_digest_items,
&block.justifications,
skip_execution_checks,
)
.await?;
// Only do import verification if we do not have the state already. If state needs to be
// applied, verification and execution already happened earlier and don't need to be done
// again here (often they can't be, because the parent block would be missing in special
// sync modes).
if !matches!(block.state_action, StateAction::ApplyChanges(_)) {
let root_plot_public_key = self
.client
.runtime_api()
.root_plot_public_key(*block.header.parent_hash())?;

self.block_import_verification(
block_hash,
block.header.clone(),
block.body.clone(),
&root_plot_public_key,
&subspace_digest_items,
&block.justifications,
)
.await?;
}

let parent_weight = if block_number.is_one() {
0
} else {
// Parent block weight might be missing in special sync modes where a block is imported
// directly into the middle of the blockchain history
aux_schema::load_block_weight(self.client.as_ref(), block.header.parent_hash())?
.unwrap_or_default()
};
@@ -695,8 +682,9 @@
// need to cover again here
parent_weight
} else {
aux_schema::load_block_weight(&*self.client, info.best_hash)?
.ok_or_else(|| Error::NoBlockWeight(info.best_hash))?
// Best block weight might be missing in special sync modes where a block is imported
// into the middle of the blockchain history right after genesis
aux_schema::load_block_weight(&*self.client, info.best_hash)?.unwrap_or_default()
};

ForkChoiceStrategy::Custom(total_weight > last_best_weight)
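The fork-choice rule still compares cumulative block weights, but on a snap-synced node either weight may be absent from the aux store, so missing entries now default to zero instead of failing the import (hence the removal of the `NoBlockWeight` error variant earlier in this file). A minimal sketch of the comparison, with a plain `Option` argument standing in for `aux_schema::load_block_weight` and `u128` weights assumed for illustration:

// Minimal sketch of the weight-based fork-choice decision after this change.
fn is_new_best(total_weight: u128, best_weight: Option<u128>) -> bool {
    // `None` occurs when the best block was imported mid-history by snap
    // sync; defaulting to 0 lets the freshly imported chain win.
    total_weight > best_weight.unwrap_or_default()
}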
@@ -207,7 +207,7 @@ fn main() -> Result<(), Error> {
force_new_slot_notifications: true,
subspace_networking: SubspaceNetworking::Create { config: dsn_config },
dsn_piece_getter: None,
sync_from_dsn: true,
sync: Default::default(),
is_timekeeper: false,
timekeeper_cpu_cores: Default::default(),
};
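`sync: Default::default()` leans on the enum's `Default` implementation; given the `--sync` flag's `default_value_t = ChainSyncMode::Full` in consensus.rs, it presumably resolves to full sync. The assumed impl:

// Assumed Default impl, consistent with the CLI default in consensus.rs.
impl Default for ChainSyncMode {
    fn default() -> Self {
        Self::Full
    }
}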
42 changes: 28 additions & 14 deletions crates/subspace-node/src/commands/run/consensus.rs
@@ -8,7 +8,7 @@ use sc_cli::{
TransactionPoolParams, RPC_DEFAULT_PORT,
};
use sc_informant::OutputFormat;
use sc_network::config::{MultiaddrWithPeerId, NonReservedPeerMode, SetConfig};
use sc_network::config::{MultiaddrWithPeerId, NonReservedPeerMode, SetConfig, SyncMode};
use sc_service::{BlocksPruning, Configuration, PruningMode};
use sc_storage_monitor::StorageMonitorParams;
use sc_telemetry::TelemetryEndpoints;
@@ -20,7 +20,7 @@ use std::str::FromStr;
use subspace_networking::libp2p::multiaddr::Protocol;
use subspace_networking::libp2p::Multiaddr;
use subspace_service::config::{
SubspaceConfiguration, SubspaceNetworking, SubstrateConfiguration,
ChainSyncMode, SubspaceConfiguration, SubspaceNetworking, SubstrateConfiguration,
SubstrateNetworkConfiguration, SubstrateRpcConfiguration,
};
use subspace_service::dsn::DsnConfig;
@@ -162,6 +162,8 @@ enum StatePruningMode {
Archive,
/// Keep only the data of finalized blocks.
ArchiveCanonical,
/// Keep the data of only the last given number of finalized blocks.
Number(u32),
}

impl FromStr for StatePruningMode {
@@ -171,17 +173,21 @@
match input {
"archive" => Ok(Self::Archive),
"archive-canonical" => Ok(Self::ArchiveCanonical),
_ => Err("Invalid state pruning mode specified".to_string()),
n => n
.parse()
.map_err(|_| "Invalid state pruning mode specified".to_string())
.map(Self::Number),
}
}
}

impl fmt::Display for StatePruningMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Self::Archive => "archive",
Self::ArchiveCanonical => "archive-canonical",
})
match self {
Self::Archive => f.write_str("archive"),
Self::ArchiveCanonical => f.write_str("archive-canonical"),
Self::Number(n) => f.write_str(n.to_string().as_str()),
}
}
}
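The parser now accepts a bare block count alongside the two named modes, and `Display` round-trips it. A hypothetical usage check, assuming the parse targets behave as the diff suggests:

// Hypothetical round-trip of the extended StatePruningMode parser.
fn demo_state_pruning_parsing() {
    let mode: StatePruningMode = "1024".parse().expect("valid pruning mode");
    assert_eq!(mode.to_string(), "1024"); // Number(1024) displays as a bare count
    assert!("archive-canonical".parse::<StatePruningMode>().is_ok());
    assert!("not-a-mode".parse::<StatePruningMode>().is_err());
}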

@@ -256,6 +262,7 @@ impl PruningOptions {
match self.state_pruning {
StatePruningMode::Archive => PruningMode::ArchiveAll,
StatePruningMode::ArchiveCanonical => PruningMode::ArchiveCanonical,
StatePruningMode::Number(num) => PruningMode::blocks_pruning(num),
}
}

@@ -387,16 +394,16 @@ pub(super) struct ConsensusChainOptions {
#[clap(flatten)]
dsn_options: DsnOptions,

/// Enables DSN-sync on startup.
#[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
sync_from_dsn: bool,

/// Parameters used to create the storage monitor.
#[clap(flatten)]
storage_monitor: StorageMonitorParams,

#[clap(flatten)]
timekeeper_options: TimekeeperOptions,

/// Sync mode
#[arg(long, default_value_t = ChainSyncMode::Full)]
sync: ChainSyncMode,
}

pub(super) struct PrometheusConfiguration {
@@ -437,9 +444,9 @@ pub(super) fn create_consensus_chain_configuration(
mut force_authoring,
pot_external_entropy,
dsn_options,
sync_from_dsn,
storage_monitor,
mut timekeeper_options,
sync,
} = consensus_node_options;

let transaction_pool;
@@ -584,7 +591,14 @@
chain_spec: Box::new(chain_spec),
informant_output_format: OutputFormat { enable_color },
};
let consensus_chain_config = Configuration::from(consensus_chain_config);
let mut consensus_chain_config = Configuration::from(consensus_chain_config);
// TODO: revisit SyncMode change after https://github.com/paritytech/polkadot-sdk/issues/4407
if sync == ChainSyncMode::Snap {
consensus_chain_config.network.sync_mode = SyncMode::LightState {
skip_proofs: true,
storage_chain_mode: false,
};
}

let pot_external_entropy =
derive_pot_external_entropy(&consensus_chain_config, pot_external_entropy)?;
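Snap mode is mapped onto Substrate's existing light-state sync, which downloads the state of a recent block and skips storage-proof verification; the TODO above notes this mapping may be revisited once paritytech/polkadot-sdk#4407 is resolved. The same mapping as a hypothetical standalone helper (the PR inlines it instead; assumes `sc_network::config::SyncMode` and the `ChainSyncMode` enum are in scope):

// Hypothetical helper expressing the snap → Substrate sync-mode mapping.
fn substrate_sync_mode(sync: ChainSyncMode) -> SyncMode {
    match sync {
        // Snap sync reuses Substrate's light-state sync: fetch the state at
        // a recent block without re-checking storage proofs.
        ChainSyncMode::Snap => SyncMode::LightState {
            skip_proofs: true,
            storage_chain_mode: false,
        },
        // Full sync keeps the default block-by-block download.
        ChainSyncMode::Full => SyncMode::Full,
    }
}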
Expand Down Expand Up @@ -650,7 +664,7 @@ pub(super) fn create_consensus_chain_configuration(
force_new_slot_notifications: domains_enabled,
subspace_networking: SubspaceNetworking::Create { config: dsn_config },
dsn_piece_getter: None,
sync_from_dsn,
sync,
is_timekeeper: timekeeper_options.timekeeper,
timekeeper_cpu_cores: timekeeper_options.timekeeper_cpu_cores,
},
1 change: 1 addition & 0 deletions crates/subspace-service/Cargo.toml
@@ -29,6 +29,7 @@ pallet-transaction-payment-rpc = { git = "https://github.com/subspace/polkadot-s
parity-scale-codec = "3.6.9"
parking_lot = "0.12.2"
prometheus-client = "0.22.2"
prost = "0.12"
sc-basic-authorship = { git = "https://github.com/subspace/polkadot-sdk", rev = "6da3c45e1d5b3c1f09b5e54152b8848149f9d5e6" }
sc-chain-spec = { git = "https://github.com/subspace/polkadot-sdk", rev = "6da3c45e1d5b3c1f09b5e54152b8848149f9d5e6" }
sc-client-api = { git = "https://github.com/subspace/polkadot-sdk", rev = "6da3c45e1d5b3c1f09b5e54152b8848149f9d5e6" }

