
v1.18: gossip: do not allow duplicate proofs for incorrect shred versions (backport of #1931) (#1940)

gossip: do not allow duplicate proofs for incorrect shred versions (#1931)

* gossip: do not allow duplicate proofs for incorrect shred versions

* pr feedback: refactor test function to take shred_version

(cherry picked from commit 69ea21e947975ea04e6ac31dbe529dcb48e960a4)

Co-authored-by: Ashwin Sekar <ashwin@anza.xyz>
mergify[bot] 1 year ago
parent
commit
292093ebef
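
In short, the patch threads the node's shred version from `TvuConfig` through `ClusterInfo` and `CrdsGossip` into `duplicate_shred`, where both proof creation (`from_shred`) and proof ingestion (`into_shreds`) now reject shreds whose version differs from the node's own. The guard added to `check_shreds` boils down to the following (a minimal sketch extracted from the diff below; `Shred::version` is the existing accessor and `Error::InvalidShredVersion` is the variant introduced in `gossip/src/duplicate_shred.rs`):

    // Reject a duplicate-shred proof unless both shreds carry the
    // shred version this node expects for its cluster.
    if shred1.version() != shred_version {
        return Err(Error::InvalidShredVersion(shred1.version()));
    }
    if shred2.version() != shred_version {
        return Err(Error::InvalidShredVersion(shred2.version()));
    }

Without this check, a proof built from shreds of a different cluster (or of a pre-restart shred version) could otherwise be accepted and re-gossiped as evidence of a duplicate slot.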

+ 1 - 0
core/src/tvu.rs

@@ -342,6 +342,7 @@ impl Tvu {
                 leader_schedule_cache.clone(),
                 bank_forks.clone(),
                 duplicate_slots_sender,
+                tvu_config.shred_version,
             ),
         );
 

+ 2 - 0
gossip/src/cluster_info.rs

@@ -1194,6 +1194,7 @@ impl ClusterInfo {
             other_payload,
             None::<fn(Slot) -> Option<Pubkey>>, // Leader schedule
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            self.my_shred_version(),
         )?;
         Ok(())
     }
@@ -3537,6 +3538,7 @@ mod tests {
             Some(leader_schedule),
             timestamp(),
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            version,
         )
         .unwrap()
         .collect();

+ 2 - 0
gossip/src/crds_gossip.rs

@@ -90,6 +90,7 @@ impl CrdsGossip {
         leader_schedule: Option<F>,
         // Maximum serialized size of each DuplicateShred chunk payload.
         max_payload_size: usize,
+        shred_version: u16,
     ) -> Result<(), duplicate_shred::Error>
     where
         F: FnOnce(Slot) -> Option<Pubkey>,
@@ -114,6 +115,7 @@ impl CrdsGossip {
             leader_schedule,
             timestamp(),
             max_payload_size,
+            shred_version,
         )?;
         // Find the index of oldest duplicate shred.
         let mut num_dup_shreds = 0;

+ 163 - 11
gossip/src/duplicate_shred.rs

@@ -68,6 +68,8 @@ pub enum Error {
     InvalidErasureMetaConflict,
     #[error("invalid last index conflict")]
     InvalidLastIndexConflict,
+    #[error("invalid shred version: {0}")]
+    InvalidShredVersion(u16),
     #[error("invalid signature")]
     InvalidSignature,
     #[error("invalid size limit")]
@@ -92,6 +94,7 @@ pub enum Error {
 
 /// Check that `shred1` and `shred2` indicate a valid duplicate proof
 ///     - Must be for the same slot
+///     - Must match the expected shred version
 ///     - Must both sigverify for the correct leader
 ///     - Must have a merkle root conflict, otherwise `shred1` and `shred2` must have the same `shred_type`
 ///     - If `shred1` and `shred2` share the same index they must be not equal
@@ -100,7 +103,12 @@ pub enum Error {
 ///       LAST_SHRED_IN_SLOT, however the other shred must have a higher index.
 ///     - If `shred1` and `shred2` do not share the same index and are coding shreds
 ///       verify that they have conflicting erasure metas
-fn check_shreds<F>(leader_schedule: Option<F>, shred1: &Shred, shred2: &Shred) -> Result<(), Error>
+fn check_shreds<F>(
+    leader_schedule: Option<F>,
+    shred1: &Shred,
+    shred2: &Shred,
+    shred_version: u16,
+) -> Result<(), Error>
 where
     F: FnOnce(Slot) -> Option<Pubkey>,
 {
@@ -108,6 +116,13 @@ where
         return Err(Error::SlotMismatch);
     }
 
+    if shred1.version() != shred_version {
+        return Err(Error::InvalidShredVersion(shred1.version()));
+    }
+    if shred2.version() != shred_version {
+        return Err(Error::InvalidShredVersion(shred2.version()));
+    }
+
     if let Some(leader_schedule) = leader_schedule {
         let slot_leader =
             leader_schedule(shred1.slot()).ok_or(Error::UnknownSlotLeader(shred1.slot()))?;
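
Note that the new guard sits before the leader sig-verify step, so a proof whose shreds are correctly signed but carry the wrong version is rejected early. A hedged illustration (the bindings are hypothetical; `check_shreds` is the module-private function above):

    // Hypothetical setup: the node expects shred version 0, but the proof's
    // shreds were produced by a Shredder configured with version 1.
    let err = check_shreds(
        Some(leader_schedule), // leader lookup, as in the tests
        &wrong_version_shred1,
        &wrong_version_shred2,
        0, // expected shred_version
    )
    .unwrap_err();
    assert_matches!(err, Error::InvalidShredVersion(1));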
@@ -167,6 +182,7 @@ pub(crate) fn from_shred<F>(
     leader_schedule: Option<F>,
     wallclock: u64,
     max_size: usize, // Maximum serialized size of each DuplicateShred.
+    shred_version: u16,
 ) -> Result<impl Iterator<Item = DuplicateShred>, Error>
 where
     F: FnOnce(Slot) -> Option<Pubkey>,
@@ -175,7 +191,7 @@ where
         return Err(Error::InvalidDuplicateShreds);
     }
     let other_shred = Shred::new_from_serialized_shred(other_payload)?;
-    check_shreds(leader_schedule, &shred, &other_shred)?;
+    check_shreds(leader_schedule, &shred, &other_shred, shred_version)?;
     let slot = shred.slot();
     let proof = DuplicateSlotProof {
         shred1: shred.into_payload(),
@@ -228,6 +244,7 @@ fn check_chunk(slot: Slot, num_chunks: u8) -> impl Fn(&DuplicateShred) -> Result
 pub(crate) fn into_shreds(
     slot_leader: &Pubkey,
     chunks: impl IntoIterator<Item = DuplicateShred>,
+    shred_version: u16,
 ) -> Result<(Shred, Shred), Error> {
     let mut chunks = chunks.into_iter();
     let DuplicateShred {
@@ -263,10 +280,16 @@ pub(crate) fn into_shreds(
     }
     let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
     let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;
+
     if shred1.slot() != slot || shred2.slot() != slot {
         Err(Error::SlotMismatch)
     } else {
-        check_shreds(Some(|_| Some(slot_leader).copied()), &shred1, &shred2)?;
+        check_shreds(
+            Some(|_| Some(slot_leader).copied()),
+            &shred1,
+            &shred2,
+            shred_version,
+        )?;
         Ok((shred1, shred2))
     }
 }
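
With both ends updated, a version mismatch fails the proof whether it is being created locally or reassembled from gossip chunks, as the new `test_shred_version` below exercises. A sketch of that round trip (setup assumed as in the test, with `shred1`/`shred2` carrying version 1 while the node expects 0):

    // Creation path: from_shred rejects the mismatched shreds outright.
    assert_matches!(
        from_shred(
            shred1.clone(),
            Pubkey::new_unique(), // self_pubkey
            shred2.payload().clone(),
            Some(leader_schedule),
            timestamp(), // wallclock
            DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
            0, // expected shred_version
        )
        .unwrap_err(),
        Error::InvalidShredVersion(1)
    );
    // Ingestion path: chunks built via the test-only from_shred_bypass_checks
    // helper still fail on reassembly.
    assert_matches!(
        into_shreds(&leader.pubkey(), chunks, /*shred_version:*/ 0).unwrap_err(),
        Error::InvalidShredVersion(1)
    );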
@@ -489,11 +512,12 @@ pub(crate) mod tests {
             Some(leader_schedule),
             rng.gen(), // wallclock
             512,       // max_size
+            version,
         )
         .unwrap()
         .collect();
         assert!(chunks.len() > 4);
-        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+        let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
         assert_eq!(shred1, shred3);
         assert_eq!(shred2, shred4);
     }
@@ -544,6 +568,7 @@ pub(crate) mod tests {
                     Some(leader_schedule),
                     rng.gen(), // wallclock
                     512,       // max_size
+                    version,
                 )
                 .err()
                 .unwrap(),
@@ -562,7 +587,9 @@ pub(crate) mod tests {
             assert!(chunks.len() > 4);
 
             assert_matches!(
-                into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
                 Error::InvalidDuplicateSlotProof
             );
         }
@@ -631,11 +658,12 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .unwrap()
             .collect();
             assert!(chunks.len() > 4);
-            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
             assert_eq!(shred1, &shred3);
             assert_eq!(shred2, &shred4);
         }
@@ -739,6 +767,7 @@ pub(crate) mod tests {
                     Some(leader_schedule),
                     rng.gen(), // wallclock
                     512,       // max_size
+                    version,
                 )
                 .err()
                 .unwrap(),
@@ -757,7 +786,9 @@ pub(crate) mod tests {
             assert!(chunks.len() > 4);
 
             assert_matches!(
-                into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
                 Error::InvalidLastIndexConflict
             );
         }
@@ -816,11 +847,12 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .unwrap()
             .collect();
             assert!(chunks.len() > 4);
-            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
             assert_eq!(shred1, shred3);
             assert_eq!(shred2, shred4);
         }
@@ -897,6 +929,7 @@ pub(crate) mod tests {
                     Some(leader_schedule),
                     rng.gen(), // wallclock
                     512,       // max_size
+                    version,
                 )
                 .err()
                 .unwrap(),
@@ -915,7 +948,9 @@ pub(crate) mod tests {
             assert!(chunks.len() > 4);
 
             assert_matches!(
-                into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
                 Error::InvalidErasureMetaConflict
             );
         }
@@ -988,11 +1023,12 @@ pub(crate) mod tests {
                 Some(leader_schedule),
                 rng.gen(), // wallclock
                 512,       // max_size
+                version,
             )
             .unwrap()
             .collect();
             assert!(chunks.len() > 4);
-            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks).unwrap();
+            let (shred3, shred4) = into_shreds(&leader.pubkey(), chunks, version).unwrap();
             assert_eq!(shred1, shred3);
             assert_eq!(shred2, shred4);
         }
@@ -1079,6 +1115,7 @@ pub(crate) mod tests {
                     Some(leader_schedule),
                     rng.gen(), // wallclock
                     512,       // max_size
+                    version,
                 )
                 .err()
                 .unwrap(),
@@ -1097,9 +1134,124 @@ pub(crate) mod tests {
             assert!(chunks.len() > 4);
 
             assert_matches!(
-                into_shreds(&leader.pubkey(), chunks).err().unwrap(),
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
                 Error::ShredTypeMismatch
             );
         }
     }
+
+    #[test]
+    fn test_shred_version() {
+        let mut rng = rand::thread_rng();
+        let leader = Arc::new(Keypair::new());
+        let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
+        let next_shred_index = rng.gen_range(0..31_000);
+        let leader_schedule = |s| {
+            if s == slot {
+                Some(leader.pubkey())
+            } else {
+                None
+            }
+        };
+
+        let (data_shreds, coding_shreds) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        // Wrong shred version 1
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 1).unwrap();
+        let (wrong_data_shreds_1, wrong_coding_shreds_1) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        // Wrong shred version 2
+        let shredder = Shredder::new(slot, parent_slot, reference_tick, version + 2).unwrap();
+        let (wrong_data_shreds_2, wrong_coding_shreds_2) = new_rand_shreds(
+            &mut rng,
+            next_shred_index,
+            next_shred_index,
+            10,
+            true,
+            &shredder,
+            &leader,
+            true,
+        );
+
+        let test_cases = vec![
+            // One correct shred version, one wrong
+            (coding_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
+            (coding_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
+            (data_shreds[0].clone(), wrong_coding_shreds_1[0].clone()),
+            (data_shreds[0].clone(), wrong_data_shreds_1[0].clone()),
+            // Both wrong shred version
+            (
+                wrong_coding_shreds_2[0].clone(),
+                wrong_coding_shreds_1[0].clone(),
+            ),
+            (
+                wrong_coding_shreds_2[0].clone(),
+                wrong_data_shreds_1[0].clone(),
+            ),
+            (
+                wrong_data_shreds_2[0].clone(),
+                wrong_coding_shreds_1[0].clone(),
+            ),
+            (
+                wrong_data_shreds_2[0].clone(),
+                wrong_data_shreds_1[0].clone(),
+            ),
+        ];
+
+        for (shred1, shred2) in test_cases.into_iter() {
+            assert_matches!(
+                from_shred(
+                    shred1.clone(),
+                    Pubkey::new_unique(), // self_pubkey
+                    shred2.payload().clone(),
+                    Some(leader_schedule),
+                    rng.gen(), // wallclock
+                    512,       // max_size
+                    version,
+                )
+                .err()
+                .unwrap(),
+                Error::InvalidShredVersion(_)
+            );
+
+            let chunks: Vec<_> = from_shred_bypass_checks(
+                shred1.clone(),
+                Pubkey::new_unique(), // self_pubkey
+                shred2.clone(),
+                rng.gen(), // wallclock
+                512,       // max_size
+            )
+            .unwrap()
+            .collect();
+            assert!(chunks.len() > 4);
+
+            assert_matches!(
+                into_shreds(&leader.pubkey(), chunks, version)
+                    .err()
+                    .unwrap(),
+                Error::InvalidShredVersion(_)
+            );
+        }
+    }
 }

+ 22 - 3
gossip/src/duplicate_shred_handler.rs

@@ -48,6 +48,7 @@ pub struct DuplicateShredHandler {
     cached_slots_in_epoch: u64,
     // Used to notify duplicate consensus state machine
     duplicate_slots_sender: Sender<Slot>,
+    shred_version: u16,
 }
 
 impl DuplicateShredHandlerTrait for DuplicateShredHandler {
@@ -68,6 +69,7 @@ impl DuplicateShredHandler {
         leader_schedule_cache: Arc<LeaderScheduleCache>,
         bank_forks: Arc<RwLock<BankForks>>,
         duplicate_slots_sender: Sender<Slot>,
+        shred_version: u16,
     ) -> Self {
         Self {
             buffer: HashMap::<(Slot, Pubkey), BufferEntry>::default(),
@@ -80,6 +82,7 @@ impl DuplicateShredHandler {
             leader_schedule_cache,
             bank_forks,
             duplicate_slots_sender,
+            shred_version,
         }
     }
 
@@ -130,7 +133,8 @@ impl DuplicateShredHandler {
                 .leader_schedule_cache
                 .slot_leader_at(slot, /*bank:*/ None)
                 .ok_or(Error::UnknownSlotLeader(slot))?;
-            let (shred1, shred2) = duplicate_shred::into_shreds(&pubkey, chunks)?;
+            let (shred1, shred2) =
+                duplicate_shred::into_shreds(&pubkey, chunks, self.shred_version)?;
             if !self.blockstore.has_duplicate_shreds_in_slot(slot) {
                 self.blockstore.store_duplicate_slot(
                     slot,
@@ -255,16 +259,17 @@ mod tests {
         slot: u64,
         expected_error: Option<Error>,
         chunk_size: usize,
+        shred_version: u16,
     ) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
         let my_keypair = match expected_error {
             Some(Error::InvalidSignature) => Arc::new(Keypair::new()),
             _ => keypair,
         };
         let mut rng = rand::thread_rng();
-        let shredder = Shredder::new(slot, slot - 1, 0, 0).unwrap();
+        let shredder = Shredder::new(slot, slot - 1, 0, shred_version).unwrap();
         let next_shred_index = 353;
         let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder, &my_keypair);
-        let shredder1 = Shredder::new(slot + 1, slot, 0, 0).unwrap();
+        let shredder1 = Shredder::new(slot + 1, slot, 0, shred_version).unwrap();
         let shred2 = match expected_error {
             Some(Error::SlotMismatch) => {
                 new_rand_shred(&mut rng, next_shred_index, &shredder1, &my_keypair)
@@ -283,6 +288,7 @@ mod tests {
             None::<fn(Slot) -> Option<Pubkey>>,
             timestamp(), // wallclock
             chunk_size,  // max_size
+            shred_version,
         )?;
         Ok(chunks)
     }
@@ -295,6 +301,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -320,6 +327,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
         let chunks = create_duplicate_proof(
             my_keypair.clone(),
@@ -327,6 +335,7 @@ mod tests {
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         let chunks1 = create_duplicate_proof(
@@ -335,6 +344,7 @@ mod tests {
             start_slot + 1,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         assert!(!blockstore.has_duplicate_shreds_in_slot(start_slot));
@@ -363,6 +373,7 @@ mod tests {
                 start_slot + 2,
                 Some(error),
                 DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+                shred_version,
             ) {
                 Err(_) => (),
                 Ok(chunks) => {
@@ -384,6 +395,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -406,6 +418,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
         // The feature will only be activated at Epoch 1.
         let start_slot: Slot = slots_in_epoch + 1;
@@ -417,6 +430,7 @@ mod tests {
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE / 2,
+            shred_version,
         )
         .unwrap();
         for chunk in chunks {
@@ -434,6 +448,7 @@ mod tests {
             future_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         for chunk in chunks {
@@ -450,6 +465,7 @@ mod tests {
             start_slot,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         // handle chunk 0 of the first proof.
@@ -470,6 +486,7 @@ mod tests {
         let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let my_keypair = Arc::new(Keypair::new());
         let my_pubkey = my_keypair.pubkey();
+        let shred_version = 0;
         let genesis_config_info = create_genesis_config_with_leader(10_000, &my_pubkey, 10_000);
         let GenesisConfigInfo { genesis_config, .. } = genesis_config_info;
         let mut bank = Bank::new_for_tests(&genesis_config);
@@ -488,6 +505,7 @@ mod tests {
             leader_schedule_cache,
             bank_forks_arc,
             sender,
+            shred_version,
         );
         let chunks = create_duplicate_proof(
             my_keypair.clone(),
@@ -495,6 +513,7 @@ mod tests {
             1,
             None,
             DUPLICATE_SHRED_MAX_PAYLOAD_SIZE,
+            shred_version,
         )
         .unwrap();
         assert!(!blockstore.has_duplicate_shreds_in_slot(1));