refactor(core): avoid unwrap, expect, panic in persistence.rs #259

Merged (3 commits, Dec 6, 2022)
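The pattern applied throughout this PR is mechanical: call sites that previously aborted via `unwrap`/`expect`/`panic!` now return a `Result` and propagate failures with `?`, using `anyhow` in the binary and a dedicated `thiserror` enum inside the library. A minimal, self-contained sketch of the before/after shape (illustrative only; `read_config` and `config.toml` are made up for the example, not Kindelia code):

```rust
use anyhow::Context;

// Before: any I/O failure aborts the whole process with a panic.
#[allow(dead_code)]
fn read_config_panicking() -> String {
  std::fs::read_to_string("config.toml").unwrap()
}

// After: the failure is returned to the caller, which can log it,
// retry, or exit with a clean message instead of a backtrace.
fn read_config() -> anyhow::Result<String> {
  std::fs::read_to_string("config.toml").context("could not read config.toml")
}

fn main() -> anyhow::Result<()> {
  println!("{}", read_config()?.len());
  Ok(())
}
```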
kindelia/src/main.rs (13 changes: 5 additions & 8 deletions)

```diff
@@ -672,10 +672,7 @@ fn init_socket() -> Option<UdpSocket> {
 }
 
 // Clean
-fn clean(
-  data_path: &Path,
-  command: NodeCleanCommand,
-) -> Result<(), std::io::Error> {
+fn clean(data_path: &Path, command: NodeCleanCommand) -> anyhow::Result<()> {
   fn user_confirm(data_path: &Path) -> Result<bool, std::io::Error> {
     // warning
     println!(
@@ -697,8 +694,8 @@ fn clean(
   fn exclude_n_files(
     data_path: &std::path::Path,
     n: usize,
-  ) -> Result<(), std::io::Error> {
-    let entries = get_ordered_blocks_path(data_path);
+  ) -> anyhow::Result<()> {
+    let entries = get_ordered_blocks_path(data_path)?;
     let mut count = 0;
     for entry in entries.iter().rev() {
       if entry.1.is_file() {
@@ -715,7 +712,7 @@ fn clean(
   fn clean_node(
     data_path: &Path,
     command: NodeCleanCommand,
-  ) -> Result<(), std::io::Error> {
+  ) -> anyhow::Result<()> {
     match command {
       NodeCleanCommand::All => std::fs::remove_dir_all(data_path)?,
       NodeCleanCommand::Blocks { command } => {
@@ -832,7 +829,7 @@ pub fn start_node<C: ProtoComm + 'static>(
   threads.extend(miner_thrds.into_iter());
 
   // File writter
-  let file_writter = SimpleFileStorage::new(node_config.data_path.clone());
+  let file_writter = SimpleFileStorage::new(node_config.data_path.clone())?;
 
   // Node state object
   let (node_query_sender, node) = Node::new(
```
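Why the function bodies above barely change: `anyhow::Error` implements `From<E>` for any `E: std::error::Error + Send + Sync + 'static`, so every existing `?` on a `std::io::Error` keeps compiling once the signature becomes `anyhow::Result<()>`. A small standalone sketch of that mechanism (assumed illustration, not PR code):

```rust
// Each `?` converts the std::io::Error into anyhow::Error automatically.
fn clean_dir(path: &std::path::Path) -> anyhow::Result<()> {
  std::fs::remove_dir_all(path)?;
  Ok(())
}

fn main() {
  if let Err(e) = clean_dir(std::path::Path::new("/tmp/does-not-exist")) {
    // `{:#}` renders the whole error chain on one line.
    eprintln!("clean failed: {:#}", e);
  }
}
```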
kindelia_core/benches/bench.rs (4 changes: 2 additions & 2 deletions)

```diff
@@ -140,13 +140,13 @@ fn block_loading(c: &mut Criterion) {
   // creates a temporary directory
   let dir = temp_dir();
   // creates the storage with temp dir
-  let storage = persistence::SimpleFileStorage::new(dir.clone());
+  let storage = persistence::SimpleFileStorage::new(dir.clone()).unwrap();
 
   // writes `n` max blocks in disk
   let n = 1000;
   let block = max_block();
   for i in 0..n {
-    storage.write_block(i, block.clone().hashed());
+    storage.write_block(i, block.clone().hashed()).unwrap();
   }
 
   // empty `ProtoComm` to pass the created node
```
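The bench intentionally goes the other way: now that `SimpleFileStorage::new` and `write_block` return `Result`, the benchmark consumes them with `.unwrap()`. That is the usual trade-off in test and bench code, where there is no caller to propagate to and a loud panic on setup failure is the desired behavior. A generic sketch of that convention (plain `std` stands in for the Kindelia storage types; names are made up):

```rust
// In bench/test setup, panicking on a Result is fine; `expect` just
// gives a more useful message than a bare `unwrap` when it fires.
fn main() {
  let dir = std::env::temp_dir().join("bench-blocks");
  std::fs::create_dir_all(&dir).expect("failed to create bench temp dir");
  for i in 0..1000u64 {
    let path = dir.join(format!("block-{:08}.bin", i));
    std::fs::write(&path, i.to_le_bytes()).expect("failed to write bench block");
  }
}
```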
kindelia_core/src/node.rs (40 changes: 22 additions & 18 deletions)

```diff
@@ -26,7 +26,7 @@ use crate::bits::ProtoSerialize;
 use crate::config::MineConfig;
 use crate::constants;
 use crate::net::{ProtoAddr, ProtoComm};
-use crate::persistence::BlockStorage;
+use crate::persistence::{BlockStorage, BlockStorageError};
 use crate::runtime::*;
 use crate::util::*;
```
```diff
@@ -759,12 +759,19 @@ pub fn miner_loop(
 // Node
 // ----
 
+/// Errors associated with Node.
+#[derive(Error, Debug)]
+pub enum NodeError {
+  #[error(transparent)]
+  BlockStorage(#[from] BlockStorageError),
+}
+
 impl<C: ProtoComm, S: BlockStorage> Node<C, S> {
   #[allow(clippy::too_many_arguments)]
   pub fn new(
     data_path: PathBuf,
     network_id: u32,
-    addr: C::Address,  // todo: review? https://github.com/Kindelia/Kindelia-Chain/pull/252#discussion_r1037732536
+    addr: C::Address, // todo: review? https://github.com/Kindelia/Kindelia-Chain/pull/252#discussion_r1037732536
     initial_peers: Vec<C::Address>,
     comm: C,
     miner_comm: Option<MinerCommunication>,
```
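`NodeError` above follows the standard `thiserror` layering: `#[error(transparent)]` forwards `Display` and `source()` to the inner error, and `#[from]` generates the `From<BlockStorageError> for NodeError` impl that `?` relies on. A self-contained sketch of the same shape (`BlockStorageError` is redefined here only so the example compiles on its own; the real one lives in `kindelia_core/src/persistence.rs`):

```rust
use thiserror::Error;

// Stand-in for the error type exported by persistence.rs.
#[derive(Error, Debug)]
#[error("block storage failure: {0}")]
pub struct BlockStorageError(String);

#[derive(Error, Debug)]
pub enum NodeError {
  #[error(transparent)]
  BlockStorage(#[from] BlockStorageError),
}

fn read_block() -> Result<(), BlockStorageError> {
  Err(BlockStorageError("disk unreadable".into()))
}

fn load() -> Result<(), NodeError> {
  read_block()?; // the generated From impl converts the error here
  Ok(())
}

fn main() {
  // Display is forwarded transparently to the inner error.
  println!("{}", load().unwrap_err());
}
```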
```diff
@@ -981,9 +988,12 @@ impl<C: ProtoComm, S: BlockStorage> Node<C, S> {
     // TODO: on separate thread
     for bhash_comp in must_compute.iter().rev() {
       let block_height = self.height[bhash_comp];
-      self
+      if let Err(e) = self
         .storage
-        .write_block(block_height, self.block[bhash_comp].clone());
+        .write_block(block_height, self.block[bhash_comp].clone())
+      {
+        eprintln!("WARN: Error writing block to disk.\n{}", e)
+      }
     }
     // 4. Reverts the runtime to a state older than that block
     // On the example above, we'd find `runtime.tick = 1`
```
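Two recovery idioms appear in this file: `write_block` failures during block processing are logged and skipped, since a lost on-disk copy is cheaper than stopping the node, while `load_blocks` below propagates a `NodeError` so the startup code decides what to do. A minimal sketch of the first idiom (hypothetical `persist` helper, not PR code):

```rust
fn persist(height: u64) -> Result<(), std::io::Error> {
  std::fs::write(format!("/tmp/block-{}", height), b"...")
}

fn main() {
  for height in 0..3u64 {
    // Log-and-continue: one bad write must not take the node down.
    if let Err(e) = persist(height) {
      eprintln!("WARN: Error writing block to disk.\n{}", e);
    }
  }
}
```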
```diff
@@ -1536,21 +1546,13 @@ impl<C: ProtoComm, S: BlockStorage> Node<C, S> {
     self.send_blocks_to(addrs, true, blocks, 3);
   }
 
-  pub fn load_blocks(&mut self) {
+  pub fn load_blocks(&mut self) -> Result<(), NodeError> {
     self.storage.disable();
     let storage = self.storage.clone();
-    storage.read_blocks(|(block, file_path)| match block {
-      Some(block) => {
-        self.add_block(&block.hashed());
-      }
-      None => {
-        eprintln!(
-          "WARN: Could not load block from file '{}'",
-          file_path.display()
-        );
-      }
-    });
+    storage
+      .read_blocks(|(block, _file_path)| self.add_block(&block.hashed()))?;
     self.storage.enable();
+    Ok(())
   }
 
   fn send_to_miner(&mut self, msg: MinerMessage) {
```
```diff
@@ -1666,8 +1668,10 @@ impl<C: ProtoComm, S: BlockStorage> Node<C, S> {
       eprintln!(" - {}", peer.address);
     }
 
-    eprintln!("Loading block from disk...");
-    self.load_blocks();
+    eprintln!("Loading blocks from disk...");
+    if let Err(e) = self.load_blocks() {
+      eprintln!("Error loading blocks from disk.\n{}", e);
+    };
 
     // A task that is executed continuously on the main loop
     struct Task<C: ProtoComm, S: BlockStorage> {
```