fix(docker): propagate SIGTERM to irohad (#4822)
mversic authored Jul 9, 2024
1 parent b415c73 commit 14a05dc
Showing 6 changed files with 41 additions and 31 deletions.
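The compose files previously launched irohad as a child of the `/bin/sh -c` wrapper, so the shell stayed as PID 1 of the container and the SIGTERM sent by `docker stop` never reached irohad; the container was then force-killed after the grace period instead of shutting down cleanly. Prefixing the final command with `exec` makes the shell replace itself with irohad, which then receives SIGTERM directly. A minimal sketch of the difference (the `sign-genesis` step is a hypothetical stand-in for the setup command in the compose files):

#!/bin/sh
# Without exec: sh remains PID 1; SIGTERM from `docker stop` is delivered
# to the shell, which does not forward it to its child, so irohad is
# eventually killed with SIGKILL after the stop grace period.
sign-genesis && irohad

# With exec: the shell process image is replaced by irohad, so irohad
# becomes PID 1 and handles SIGTERM itself.
sign-genesis && exec irohad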
2 changes: 1 addition & 1 deletion configs/swarm/docker-compose.local.yml
@@ -44,7 +44,7 @@ services:
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
irohad1:
depends_on:
2 changes: 1 addition & 1 deletion configs/swarm/docker-compose.single.yml
@@ -43,5 +43,5 @@ services:
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
2 changes: 1 addition & 1 deletion configs/swarm/docker-compose.yml
@@ -42,7 +42,7 @@ services:
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
irohad1:
image: hyperledger/iroha:dev
54 changes: 32 additions & 22 deletions core/src/kura.rs
@@ -113,7 +113,7 @@ impl Kura {
let block_index_count: usize = block_store
.read_index_count()?
.try_into()
.expect("We don't have 4 billion blocks.");
.expect("INTERNAL BUG: block index count exceeds usize::MAX");

let block_hashes = match self.mode {
InitMode::Fast => {
@@ -128,8 +128,7 @@
let block_count = block_hashes.len();
info!(mode=?self.mode, block_count, "Kura init complete");

// The none value is set in order to indicate that the blocks exist on disk but
// are not yet loaded.
// The none value is set in order to indicate that the blocks exist on disk but are not yet loaded.
*self.block_data.lock() = block_hashes.into_iter().map(|hash| (hash, None)).collect();
Ok(BlockCount(block_count))
}
@@ -141,7 +140,7 @@
let block_hashes_count = block_store
.read_hashes_count()?
.try_into()
.expect("We don't have 4 billion blocks.");
.expect("INTERNAL BUG: block hashes count exceeds usize::MAX");
if block_hashes_count == block_index_count {
block_store.read_block_hashes(0, block_hashes_count)
} else {
@@ -200,6 +199,7 @@ impl Kura {
let block_data_guard = kura.block_data.lock();
(block_data_guard.len(), block_data_guard.last().map(|d| d.0))
};

let mut should_exit = false;
loop {
// If kura receive shutdown then close block channel and write remaining blocks to the storage
@@ -234,10 +234,9 @@
let start_height = written_block_count;
let mut blocks_to_be_written = Vec::new();
while written_block_count < block_data_guard.len() {
let block_ref = block_data_guard[written_block_count]
.1
.as_ref()
.expect("The block to be written cannot be None, see store_block function.");
let block_ref = block_data_guard[written_block_count].1.as_ref().expect(
"INTERNAL BUG: The block to be written is None. Check store_block function.",
);
blocks_to_be_written.push(Arc::clone(block_ref));
written_block_count += 1;
}
@@ -250,11 +249,11 @@
.create(true)
.append(true)
.open(path)
.expect("Couldn't create file for plain text blocks.");
.expect("INTERNAL BUG: Couldn't create file for plain text blocks.");

for new_block in &blocks_to_be_written {
serde_json::to_writer_pretty(&mut plain_text_file, new_block.as_ref())
.expect("Failed to write to plain text file for blocks.");
.expect("INTERNAL BUG: Failed to write to plain text file for blocks.");
}
}

@@ -312,14 +311,18 @@
let block_store = self.block_store.lock();
let BlockIndex { start, length } = block_store
.read_block_index(block_index as u64)
.expect("Failed to read block index from disk.");
.expect("INTERNAL BUG: Failed to read block index from disk.");

let mut block_buf =
vec![0_u8; usize::try_from(length).expect("index_len didn't fit in 32-bits")];
let mut block_buf = vec![
0_u8;
usize::try_from(length)
.expect("INTERNAL BUG: index_len didn't fit in 32-bits")
];
block_store
.read_block_data(start, &mut block_buf)
.expect("Failed to read block data.");
let block = SignedBlock::decode_all_versioned(&block_buf).expect("Failed to decode block");
.expect("INTERNAL BUG: Failed to read block data.");
let block = SignedBlock::decode_all_versioned(&block_buf)
.expect("INTERNAL BUG: Failed to decode block");

let block_arc = Arc::new(block);
data_array_guard[block_index].1 = Some(Arc::clone(&block_arc));
@@ -355,7 +358,7 @@ pub struct BlockStore {
impl Drop for BlockStore {
fn drop(&mut self) {
let path = self.path_to_blockchain.join(LOCK_FILE_NAME);
let _ = fs::remove_file(path); // we don't care if this succeeds or not

if let Err(err) = fs::remove_file(path) {
error!(?err, "Failed to remove lock file");
}
}
}

@@ -382,8 +388,8 @@ impl BlockStore {
///
/// # Panics
/// * if you pass in `LockStatus::Unlocked` and it is unable to lock the block store.
pub fn new(store_path: impl AsRef<Path>, already_locked: LockStatus) -> Self {
if matches!(already_locked, LockStatus::Unlocked) {
pub fn new(store_path: impl AsRef<Path>, lock_status: LockStatus) -> Self {
if matches!(lock_status, LockStatus::Unlocked) {
let lock_path = store_path.as_ref().join(LOCK_FILE_NAME);
if let Err(e) = fs::File::options()
.read(true)
@@ -414,10 +420,11 @@ impl BlockStore {
}
_ => Err(Error::IO(e, lock_path)),
}
.expect("Kura must be able to lock the blockstore");
.expect("INTERNAL BUG: Kura must be able to lock the blockstore");
}
}
BlockStore {

Self {
path_to_blockchain: store_path.as_ref().to_path_buf(),
}
}
@@ -1005,15 +1012,18 @@ mod tests {
#[test]
fn lock_and_unlock() {
let dir = tempfile::tempdir().unwrap();

{
let _store = BlockStore::new(dir.path(), LockStatus::Unlocked);

assert!(
dir.path().join(LOCK_FILE_NAME).try_exists().expect("IO"),
dir.path().join(LOCK_FILE_NAME).exists(),
"Lockfile should have been created"
);
}

assert!(
!dir.path().join(LOCK_FILE_NAME).try_exists().expect("IO"),
!dir.path().join(LOCK_FILE_NAME).exists(),
"Lockfile should have been deleted"
);
}
10 changes: 5 additions & 5 deletions tools/swarm/src/lib.rs
@@ -212,7 +212,7 @@ mod tests {
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
"##]).assert_eq(&build_as_string(
nonzero_ext::nonzero!(1u16),
@@ -264,7 +264,7 @@ mod tests {
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
"##]).assert_eq(&build_as_string(
nonzero_ext::nonzero!(1u16),
@@ -316,7 +316,7 @@ mod tests {
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
irohad1:
depends_on:
@@ -426,7 +426,7 @@ mod tests {
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
"#]).assert_eq(&build_as_string(
nonzero_ext::nonzero!(1u16),
@@ -481,7 +481,7 @@ mod tests {
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
"
irohad1:
image: hyperledger/iroha:dev
2 changes: 1 addition & 1 deletion tools/swarm/src/schema.rs
@@ -291,7 +291,7 @@ const SIGN_AND_SUBMIT_GENESIS: &str = r#"/bin/sh -c "
--private-key $$GENESIS_PRIVATE_KEY \\
--out-file $$GENESIS \\
&& \\
irohad
exec irohad
""#;

/// Configuration of the `irohad` service that submits genesis.
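A sketch of how the change could be exercised, assuming Docker Compose v2 and the configs/swarm/docker-compose.local.yml file from this commit; the service names are those defined in that file:

# Bring the local swarm up, then stop it; `docker compose stop` sends
# SIGTERM and escalates to SIGKILL only after the grace period expires.
docker compose -f configs/swarm/docker-compose.local.yml up -d
docker compose -f configs/swarm/docker-compose.local.yml stop
# Containers that had to be killed report exit code 137 (SIGKILL); with
# `exec irohad` they should instead exit promptly through irohad's own
# shutdown path.
docker compose -f configs/swarm/docker-compose.local.yml ps -a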
