
Commit

Allow plot cache disabling and change default on Windows, improve logging
nazar-pc committed Mar 13, 2024
1 parent d584200 commit 328125b
Showing 2 changed files with 51 additions and 35 deletions.
34 changes: 30 additions & 4 deletions crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
@@ -60,6 +60,9 @@ const PIECE_GETTER_MAX_RETRIES: u16 = 7;
const GET_PIECE_INITIAL_INTERVAL: Duration = Duration::from_secs(5);
/// Defines max duration between get_piece calls.
const GET_PIECE_MAX_INTERVAL: Duration = Duration::from_secs(40);
/// NOTE: for large gaps between the plotted part and the end of the file, plot cache will result in
/// a very long period of writing zeroes on Windows, see https://stackoverflow.com/q/78058306/3806795
const MAX_SPACE_PLEDGED_FOR_PLOT_CACHE_ON_WINDOWS: u64 = 7 * 1024 * 1024 * 1024 * 1024;

fn should_farm_during_initial_plotting() -> bool {
let total_cpu_cores = all_cpu_cores()
@@ -244,6 +247,15 @@ pub(crate) struct FarmingArgs {
/// farming is successful and computer can be used comfortably for other things
#[arg(long, default_value_t = PlottingThreadPriority::Min)]
plotting_thread_priority: PlottingThreadPriority,
/// Enable plot cache.
///
/// Plot cache uses unplotted space as an additional cache, improving plotting speeds, especially
/// for small farmers.
///
/// On Windows it is enabled by default if total plotting space doesn't exceed 7TiB; on other OSs
/// it is enabled by default regardless of farm size.
#[arg(long)]
plot_cache: Option<bool>,
/// Disable farm locking, for example if file system doesn't support it
#[arg(long)]
disable_farm_locking: bool,
@@ -398,9 +410,19 @@ where
replotting_thread_pool_size,
replotting_cpu_cores,
plotting_thread_priority,
plot_cache,
disable_farm_locking,
} = farming_args;

let plot_cache = plot_cache.unwrap_or_else(|| {
!cfg!(windows)
|| disk_farms
.iter()
.map(|farm| farm.allocated_plotting_space)
.sum::<u64>()
<= MAX_SPACE_PLEDGED_FOR_PLOT_CACHE_ON_WINDOWS
});

// Override flags with `--dev`
dsn.allow_private_ips = dsn.allow_private_ips || dev;
dsn.disable_bootstrap_on_start = dsn.disable_bootstrap_on_start || dev;
@@ -747,10 +769,14 @@ where
.iter()
.map(|single_disk_farm| single_disk_farm.piece_cache())
.collect(),
single_disk_farms
.iter()
.map(|single_disk_farm| single_disk_farm.plot_cache())
.collect(),
if plot_cache {
single_disk_farms
.iter()
.map(|single_disk_farm| single_disk_farm.plot_cache())
.collect()
} else {
Vec::new()
},
)
.await;
drop(farmer_cache);
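The net effect of the farm.rs changes: an explicit --plot-cache value always wins, and otherwise the cache defaults to enabled everywhere except on Windows when the total pledged space exceeds 7 TiB. Below is a minimal sketch of that resolution logic, assuming only what is visible in this diff (the constant and each farm's allocated_plotting_space); the standalone helper plot_cache_enabled is illustrative and not part of the codebase.

/// 7 TiB threshold, copied from the diff above.
const MAX_SPACE_PLEDGED_FOR_PLOT_CACHE_ON_WINDOWS: u64 = 7 * 1024 * 1024 * 1024 * 1024;

/// Illustrative standalone version of the default computed in farm.rs.
fn plot_cache_enabled(cli_value: Option<bool>, total_pledged_bytes: u64) -> bool {
    cli_value.unwrap_or_else(|| {
        // Outside Windows the cache is on by default; on Windows it is only enabled
        // when the total pledged space stays at or below the 7 TiB threshold.
        !cfg!(windows) || total_pledged_bytes <= MAX_SPACE_PLEDGED_FOR_PLOT_CACHE_ON_WINDOWS
    })
}

For example, on Windows with two 4 TiB farms (8 TiB pledged) the default resolves to false; passing the flag explicitly (e.g. --plot-cache true, assuming clap treats the Option<bool> argument as a flag that takes an explicit true/false value) overrides the default either way.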
52 changes: 21 additions & 31 deletions crates/subspace-farmer/src/single_disk_farm/plot_cache.rs
@@ -19,13 +19,6 @@ use subspace_networking::utils::multihash::ToMultihash;
use thiserror::Error;
use tracing::{debug, info, warn};

/// Max plot space for which to use caching, for larger gaps between the plotted part and the end of
/// the file it will result in very long period of writing zeroes on Windows, see
/// https://stackoverflow.com/q/78058306/3806795
///
/// Currently set to 2TiB.
const MAX_WINDOWS_PLOT_SPACE_FOR_CACHE: u64 = 2 * 1024 * 1024 * 1024 * 1024;

/// Disk plot cache open error
#[derive(Debug, Error)]
pub enum DiskPlotCacheError {
@@ -74,7 +67,7 @@ impl DiskPlotCache {
target_sector_count: SectorIndex,
sector_size: usize,
) -> Self {
info!("Checking plot cache contents");
info!("Checking plot cache contents, this can take a while");
let sector_size = sector_size as u64;
let cached_pieces = {
let sectors_metadata = sectors_metadata.read_blocking();
@@ -87,39 +80,36 @@
let file_size = sector_size * u64::from(target_sector_count);
let plotted_size = sector_size * sectors_metadata.len() as u64;

// Avoid writing over large gaps on Windows that is very lengthy process
if !cfg!(windows) || (file_size - plotted_size) <= MAX_WINDOWS_PLOT_SPACE_FOR_CACHE {
// Step over all free potential offsets for pieces that could have been cached
let from_offset = (plotted_size / Self::element_size() as u64) as u32;
let to_offset = (file_size / Self::element_size() as u64) as u32;
// TODO: Parallelize or read in larger batches
for offset in (from_offset..to_offset).rev() {
match Self::read_piece_internal(file, offset, &mut element) {
Ok(maybe_piece_index) => match maybe_piece_index {
Some(piece_index) => {
map.insert(RecordKey::from(piece_index.to_multihash()), offset);
}
None => {
next_offset.replace(offset);
break;
}
},
Err(DiskPlotCacheError::ChecksumMismatch) => {
next_offset.replace(offset);
break;
// Step over all free potential offsets for pieces that could have been cached
let from_offset = (plotted_size / Self::element_size() as u64) as u32;
let to_offset = (file_size / Self::element_size() as u64) as u32;
// TODO: Parallelize or read in larger batches
for offset in (from_offset..to_offset).rev() {
match Self::read_piece_internal(file, offset, &mut element) {
Ok(maybe_piece_index) => match maybe_piece_index {
Some(piece_index) => {
map.insert(RecordKey::from(piece_index.to_multihash()), offset);
}
Err(error) => {
warn!(%error, %offset, "Failed to read plot cache element");
None => {
next_offset.replace(offset);
break;
}
},
Err(DiskPlotCacheError::ChecksumMismatch) => {
next_offset.replace(offset);
break;
}
Err(error) => {
warn!(%error, %offset, "Failed to read plot cache element");
break;
}
}
}

CachedPieces { map, next_offset }
};

debug!("Finished checking plot cache contents");
info!("Finished checking plot cache contents");

Self {
file: Arc::downgrade(file),
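For orientation, the rewritten scan in DiskPlotCache::new now always (rather than only on non-Windows systems or for small gaps) walks every element slot between the end of the plotted region and the end of the file, newest-first, and stops at the first empty or corrupt slot. A rough sketch of just the offset arithmetic, assuming the unplotted tail of the file is laid out as fixed-size cache elements; the helper name and its parameters are illustrative, not part of the codebase.

/// Illustrative only: the range of element offsets covered by the plot cache scan.
/// `plotted_size` and `file_size` are byte sizes; `element_size` is the serialized
/// size of one cached piece record.
fn cache_offset_range(file_size: u64, plotted_size: u64, element_size: u64) -> (u32, u32) {
    // Offsets below `plotted_size` are occupied by plotted sectors and are never cached over.
    let from_offset = (plotted_size / element_size) as u32;
    let to_offset = (file_size / element_size) as u32;
    (from_offset, to_offset)
}

The loop in the diff then iterates (from_offset..to_offset).rev(), inserting recognized pieces into the map keyed by RecordKey and recording the first empty or checksum-mismatched offset as next_offset, presumably the slot where the next cache write will land.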
