@@ -3,11 +3,13 @@ use {
         duplicate_shred::{self, DuplicateShred, Error},
         duplicate_shred_listener::DuplicateShredHandlerTrait,
     },
+    crossbeam_channel::Sender,
     log::error,
     solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache},
     solana_runtime::bank_forks::BankForks,
     solana_sdk::{
         clock::{Epoch, Slot},
+        feature_set,
         pubkey::Pubkey,
     },
     std::{
@@ -44,6 +46,8 @@ pub struct DuplicateShredHandler {
     cached_on_epoch: Epoch,
     cached_staked_nodes: Arc<HashMap<Pubkey, u64>>,
     cached_slots_in_epoch: u64,
+    // Used to notify duplicate consensus state machine
+    duplicate_slots_sender: Sender<Slot>,
 }
 
 impl DuplicateShredHandlerTrait for DuplicateShredHandler {
@@ -63,6 +67,7 @@ impl DuplicateShredHandler {
         blockstore: Arc<Blockstore>,
         leader_schedule_cache: Arc<LeaderScheduleCache>,
         bank_forks: Arc<RwLock<BankForks>>,
+        duplicate_slots_sender: Sender<Slot>,
     ) -> Self {
         Self {
             buffer: HashMap::<(Slot, Pubkey), BufferEntry>::default(),
@@ -74,6 +79,7 @@ impl DuplicateShredHandler {
             blockstore,
             leader_schedule_cache,
             bank_forks,
+            duplicate_slots_sender,
         }
     }
 
@@ -131,12 +137,30 @@ impl DuplicateShredHandler {
                     shred1.into_payload(),
                     shred2.into_payload(),
                 )?;
+                if self.should_notify_state_machine(slot) {
+                    // Notify duplicate consensus state machine
+                    self.duplicate_slots_sender
+                        .send(slot)
+                        .map_err(|_| Error::DuplicateSlotSenderFailure)?;
+                }
             }
             self.consumed.insert(slot, true);
         }
         Ok(())
     }
 
+    fn should_notify_state_machine(&self, slot: Slot) -> bool {
+        let root_bank = self.bank_forks.read().unwrap().root_bank();
+        let Some(activated_slot) = root_bank
+            .feature_set
+            .activated_slot(&feature_set::enable_gossip_duplicate_proof_ingestion::id())
+        else {
+            return false;
+        };
+        root_bank.epoch_schedule().get_epoch(slot)
+            > root_bank.epoch_schedule().get_epoch(activated_slot)
+    }
+
     fn should_consume_slot(&mut self, slot: Slot) -> bool {
         slot > self.last_root
             && slot < self.last_root.saturating_add(self.cached_slots_in_epoch)
@@ -211,12 +235,14 @@ mod tests {
             cluster_info::DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
             duplicate_shred::{from_shred, tests::new_rand_shred},
         },
+        crossbeam_channel::unbounded,
+        itertools::Itertools,
         solana_ledger::{
             genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo},
             get_tmp_ledger_path_auto_delete,
             shred::Shredder,
         },
-        solana_runtime::bank::Bank,
+        solana_runtime::{accounts_background_service::AbsRequestSender, bank::Bank},
         solana_sdk::{
             signature::{Keypair, Signer},
             timing::timestamp,
@@ -271,16 +297,34 @@ mod tests {
         let my_pubkey = my_keypair.pubkey();
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
-        let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id());
+        let slots_in_epoch = bank.get_epoch_info().slots_in_epoch;
+        let bank_forks_arc = BankForks::new_rw_arc(bank);
+        {
+            let mut bank_forks = bank_forks_arc.write().unwrap();
+            let bank0 = bank_forks.get(0).unwrap();
+            bank_forks.insert(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 9));
+            bank_forks.set_root(9, &AbsRequestSender::default(), None);
+        }
+        blockstore.set_roots([0, 9].iter()).unwrap();
         let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(
-            &bank_forks.read().unwrap().working_bank(),
+            &bank_forks_arc.read().unwrap().working_bank(),
         ));
-        let mut duplicate_shred_handler =
-            DuplicateShredHandler::new(blockstore.clone(), leader_schedule_cache, bank_forks);
+        let (sender, receiver) = unbounded();
+        // Feature activates in epoch 0; only slots in later epochs notify the state machine.
+        let start_slot: Slot = slots_in_epoch + 1;
+
+        let mut duplicate_shred_handler = DuplicateShredHandler::new(
+            blockstore.clone(),
+            leader_schedule_cache,
+            bank_forks_arc,
+            sender,
+        );
         let chunks = create_duplicate_proof(
             my_keypair.clone(),
             None,
-            1,
+            start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
         )
@@ -288,20 +332,24 @@ mod tests {
         let chunks1 = create_duplicate_proof(
             my_keypair.clone(),
             None,
-            2,
+            start_slot + 1,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
         )
         .unwrap();
-        assert!(!blockstore.has_duplicate_shreds_in_slot(1));
-        assert!(!blockstore.has_duplicate_shreds_in_slot(2));
+        assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot));
+        assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot + 1));
         // Test that two proofs are mixed together, but we can store the proofs fine.
         for (chunk1, chunk2) in chunks.zip(chunks1) {
             duplicate_shred_handler.handle(chunk1);
             duplicate_shred_handler.handle(chunk2);
         }
-        assert!(blockstore.has_duplicate_shreds_in_slot(1));
-        assert!(blockstore.has_duplicate_shreds_in_slot(2));
+        assert!(blockstore.has_duplicate_shreds_in_slot(start_slot));
+        assert!(blockstore.has_duplicate_shreds_in_slot(start_slot + 1));
+        assert_eq!(
+            receiver.try_iter().collect_vec(),
+            vec![start_slot, start_slot + 1]
+        );
 
         // Test all kinds of bad proofs.
         for error in [
@@ -312,7 +360,7 @@ mod tests {
             match create_duplicate_proof(
                 my_keypair.clone(),
                 None,
-                3,
+                start_slot + 2,
                 Some(error),
                 DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
             ) {
@@ -321,7 +369,8 @@ mod tests {
                     for chunk in chunks {
                         duplicate_shred_handler.handle(chunk);
                     }
-                    assert!(!blockstore.has_duplicate_shreds_in_slot(3));
+                    assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot + 2));
+                    assert!(receiver.is_empty());
                 }
             }
         }
@@ -337,13 +386,29 @@ mod tests {
         let my_pubkey = my_keypair.pubkey();
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
-        let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        bank.activate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id());
+        let slots_in_epoch = bank.get_epoch_info().slots_in_epoch;
+        let bank_forks_arc = BankForks::new_rw_arc(bank);
+        {
+            let mut bank_forks = bank_forks_arc.write().unwrap();
+            let bank0 = bank_forks.get(0).unwrap();
+            bank_forks.insert(Bank::new_from_parent(bank0.clone(), &Pubkey::default(), 9));
+            bank_forks.set_root(9, &AbsRequestSender::default(), None);
+        }
+        blockstore.set_roots([0, 9].iter()).unwrap();
         let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(
-            &bank_forks.read().unwrap().working_bank(),
+            &bank_forks_arc.read().unwrap().working_bank(),
         ));
-        let mut duplicate_shred_handler =
-            DuplicateShredHandler::new(blockstore.clone(), leader_schedule_cache, bank_forks);
-        let start_slot: Slot = 1;
+        let (sender, receiver) = unbounded();
+        let mut duplicate_shred_handler = DuplicateShredHandler::new(
+            blockstore.clone(),
+            leader_schedule_cache,
+            bank_forks_arc,
+            sender,
+        );
+        // Feature activates in epoch 0; only slots in later epochs notify the state machine.
+        let start_slot: Slot = slots_in_epoch + 1;
 
         // This proof will not be accepted because num_chunks is too large.
         let chunks = create_duplicate_proof(
@@ -358,6 +423,7 @@ mod tests {
             duplicate_shred_handler.handle(chunk);
         }
         assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot));
+        assert!(receiver.is_empty());
 
         // This proof will be rejected because the slot is too far away in the future.
         let future_slot =
@@ -374,6 +440,7 @@ mod tests {
             duplicate_shred_handler.handle(chunk);
         }
         assert!(!blockstore.has_duplicate_shreds_in_slot(future_slot));
+        assert!(receiver.is_empty());
 
         // Send in two proofs, the first proof showing up will be accepted, the following
         // proofs will be discarded.
@@ -388,10 +455,54 @@ mod tests {
         // handle chunk 0 of the first proof.
         duplicate_shred_handler.handle(chunks.next().unwrap());
         assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot));
+        assert!(receiver.is_empty());
         // Now send in the rest of the first proof, it will succeed.
         for chunk in chunks {
             duplicate_shred_handler.handle(chunk);
         }
         assert!(blockstore.has_duplicate_shreds_in_slot(start_slot));
+        assert_eq!(receiver.try_iter().collect_vec(), vec![start_slot]);
+    }
+
+    #[test]
+    fn test_feature_disabled() {
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
+        let my_keypair = Arc::new(Keypair::new());
+        let my_pubkey = my_keypair.pubkey();
+        let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
+        let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
+        let mut bank = Bank::new_for_tests(&genesis_config);
+        bank.deactivate_feature(&feature_set::enable_gossip_duplicate_proof_ingestion::id());
+        assert!(!bank
+            .feature_set
+            .is_active(&feature_set::enable_gossip_duplicate_proof_ingestion::id()));
+        let bank_forks_arc = BankForks::new_rw_arc(bank);
+        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(
+            &bank_forks_arc.read().unwrap().working_bank(),
+        ));
+        let (sender, receiver) = unbounded();
+
+        let mut duplicate_shred_handler = DuplicateShredHandler::new(
+            blockstore.clone(),
+            leader_schedule_cache,
+            bank_forks_arc,
+            sender,
+        );
+        let chunks = create_duplicate_proof(
+            my_keypair.clone(),
+            None,
+            1,
+            None,
+            DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+        )
+        .unwrap();
+        assert!(!blockstore.has_duplicate_shreds_in_slot(1));
+        for chunk in chunks {
+            duplicate_shred_handler.handle(chunk);
+        }
+        // Feature disabled: proof is stored in blockstore but the state machine is not notified.
+        assert!(blockstore.has_duplicate_shreds_in_slot(1));
+        assert!(receiver.try_iter().collect_vec().is_empty());
     }
 }