
core: Resolve Rust 1.88 clippy lints and format strings (#6933)

steviez 4 months ago
parent
commit
3d03f5ee57
34 changed files with 229 additions and 293 deletions
  1. core/benches/receive_and_buffer_utils.rs (+5 -3)
  2. core/benches/scheduler.rs (+4 -2)
  3. core/src/accounts_hash_verifier.rs (+9 -5)
  4. core/src/banking_simulation.rs (+21 -16)
  5. core/src/banking_trace.rs (+1 -4)
  6. core/src/cluster_info_vote_listener.rs (+1 -1)
  7. core/src/cluster_slots_service/cluster_slots.rs (+6 -4)
  8. core/src/completed_data_sets_service.rs (+1 -1)
  9. core/src/consensus.rs (+15 -21)
  10. core/src/consensus/fork_choice.rs (+2 -5)
  11. core/src/consensus/heaviest_subtree_fork_choice.rs (+8 -17)
  12. core/src/consensus/progress_map.rs (+2 -7)
  13. core/src/consensus/tower_storage.rs (+3 -3)
  14. core/src/consensus/tower_vote_state.rs (+5 -3)
  15. core/src/fetch_stage.rs (+1 -1)
  16. core/src/forwarding_stage.rs (+2 -2)
  17. core/src/optimistic_confirmation_verifier.rs (+3 -9)
  18. core/src/repair/ancestor_hashes_service.rs (+3 -3)
  19. core/src/repair/cluster_slot_state_verifier.rs (+14 -13)
  20. core/src/repair/duplicate_repair_status.rs (+9 -10)
  21. core/src/repair/repair_service.rs (+8 -9)
  22. core/src/repair/repair_weight.rs (+6 -12)
  23. core/src/repair/serve_repair.rs (+3 -2)
  24. core/src/replay_stage.rs (+47 -76)
  25. core/src/sample_performance_service.rs (+1 -1)
  26. core/src/sigverify_stage.rs (+3 -3)
  27. core/src/snapshot_packager_service/pending_snapshot_packages.rs (+4 -4)
  28. core/src/tpu.rs (+2 -2)
  29. core/src/validator.rs (+24 -33)
  30. core/src/voting_service.rs (+1 -1)
  31. core/src/warm_quic_cache_service.rs (+2 -2)
  32. core/src/window_service.rs (+1 -1)
  33. core/tests/scheduler_cost_adjustment.rs (+2 -1)
  34. core/tests/snapshots.rs (+10 -16)
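
Nearly every hunk below applies the same mechanical change: judging by the commit title, the Rust 1.88 toolchain's clippy flags uninlined format arguments (the `uninlined_format_args` lint), so positional arguments to `format!`-style and logging macros are captured inline in the format string, and the resulting long string literals are re-wrapped with `\` line continuations to satisfy rustfmt. A minimal sketch of the before/after pattern (the variable and message are illustrative, not taken from the diff):

    fn main() {
        let err = "connection reset";
        // Before: positional argument, flagged by clippy's uninlined_format_args
        println!("unexpected error when tracing: {:?}", err);
        // After: the identifier is captured inline in the format string; a trailing
        // backslash continues the literal on the next line without inserting a
        // newline or the next line's leading whitespace, which is how the long
        // strings in this commit are wrapped
        println!(
            "unexpected error when tracing: \
             {err:?}"
        );
    }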

+ 5 - 3
core/benches/receive_and_buffer_utils.rs

@@ -83,9 +83,11 @@ fn generate_transactions(
 ) -> BankingPacketBatch {
     assert!(num_instructions_per_tx <= MAX_INSTRUCTIONS_PER_TRANSACTION);
     if set_rand_cu_price {
-        assert!(num_instructions_per_tx > 0,
-            "`num_instructions_per_tx` must be at least 1 when `set_rand_cu_price` flag is set to count\
-             the set_compute_unit_price instruction.");
+        assert!(
+            num_instructions_per_tx > 0,
+            "`num_instructions_per_tx` must be at least 1 when `set_rand_cu_price` flag is set to \
+             count the set_compute_unit_price instruction."
+        );
     }
     let blockhash = FaultyBlockhash::new(bank.last_blockhash(), probability_invalid_blockhash);
 

+ 4 - 2
core/benches/scheduler.rs

@@ -133,8 +133,10 @@ fn bench_scheduler_impl<T: ReceiveAndBuffer + utils::ReceiveAndBufferCreator>(
         for (ix_count, ix_count_desc) in &ix_counts {
             for (tx_count, tx_count_desc) in &tx_counts {
                 for (conflict_type, conflict_type_desc) in &conflict_types {
-                    let bench_name =
-                    format!("{bench_name}/{scheduler_desc}/{ix_count_desc}/{tx_count_desc}/{conflict_type_desc}");
+                    let bench_name = format!(
+                        "{bench_name}/{scheduler_desc}/{ix_count_desc}/{tx_count_desc}/\
+                         {conflict_type_desc}"
+                    );
                     group.throughput(Throughput::Elements(*tx_count as u64));
                     group.bench_function(&bench_name, |bencher| {
                         bencher.iter_custom(|iters| {

+ 9 - 5
core/src/accounts_hash_verifier.rs

@@ -246,14 +246,14 @@ impl AccountsHashVerifier {
         match accounts_package.accounts_hash_algorithm {
             AccountsHashAlgorithm::Merkle => {
                 debug!(
-                    "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, \
-                     DO merkle-based accounts hash calculation",
+                    "calculate_and_verify_accounts_hash(): snapshots lt hash is disabled, DO \
+                     merkle-based accounts hash calculation",
                 );
             }
             AccountsHashAlgorithm::Lattice => {
                 debug!(
-                    "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, \
-                     SKIP merkle-based accounts hash calculation",
+                    "calculate_and_verify_accounts_hash(): snapshots lt hash is enabled, SKIP \
+                     merkle-based accounts hash calculation",
                 );
                 return Ok((MerkleOrLatticeAccountsHash::Lattice, None));
             }
@@ -284,6 +284,7 @@ impl AccountsHashVerifier {
                     let Some((base_accounts_hash, base_capitalization)) =
                         accounts_db.get_accounts_hash(base_slot)
                     else {
+                        #[rustfmt::skip]
                         panic!(
                             "incremental snapshot requires accounts hash and capitalization from \
                              the full snapshot it is based on\n\
@@ -447,7 +448,10 @@ impl AccountsHashVerifier {
             let MerkleOrLatticeAccountsHash::Merkle(AccountsHashKind::Full(accounts_hash)) =
                 merkle_or_lattice_accounts_hash
             else {
-                panic!("EAH requires a full accounts hash, but was given {merkle_or_lattice_accounts_hash:?}");
+                panic!(
+                    "EAH requires a full accounts hash, but was given \
+                     {merkle_or_lattice_accounts_hash:?}"
+                );
             };
             info!(
                 "saving epoch accounts hash, slot: {}, hash: {}",

+ 21 - 16
core/src/banking_simulation.rs

@@ -194,8 +194,8 @@ impl BankingTraceEvents {
             ) {
                 // Silence errors here as this can happen under normal operation...
                 warn!(
-                    "Reading {:?} failed {:?} due to file corruption or unclean validator shutdown",
-                    event_file_path, read_result,
+                    "Reading {event_file_path:?} failed {read_result:?} due to file corruption or \
+                     unclean validator shutdown",
                 );
             } else {
                 read_result?
@@ -343,10 +343,14 @@ struct SenderLoop {
 impl SenderLoop {
     fn log_starting(&self) {
         info!(
-            "simulating events: {} (out of {}), starting at slot {} (based on {} from traced event slot: {}) (warmup: -{:?})",
-            self.timed_batches_to_send.len(), self.total_batch_count, self.first_simulated_slot,
+            "simulating events: {} (out of {}), starting at slot {} (based on {} from traced \
+             event slot: {}) (warmup: -{:?})",
+            self.timed_batches_to_send.len(),
+            self.total_batch_count,
+            self.first_simulated_slot,
             SenderLoopLogger::format_as_timestamp(self.raw_base_event_time),
-            self.parent_slot, WARMUP_DURATION,
+            self.parent_slot,
+            WARMUP_DURATION,
         );
     }
 
@@ -595,10 +599,7 @@ impl<'a> SenderLoopLogger<'a> {
         batch_count: usize,
         tx_count: usize,
     ) {
-        debug!(
-            "sent {:?} {} batches ({} txes)",
-            label, batch_count, tx_count
-        );
+        debug!("sent {label:?} {batch_count} batches ({tx_count} txes)");
 
 
         use ChannelLabel::*;
         let (total_batch_count, total_tx_count) = match label {
@@ -626,9 +627,16 @@ impl<'a> SenderLoopLogger<'a> {
             let gossip_vote_tps =
                 (self.gossip_vote_tx_count - self.last_gossip_vote_tx_count) as f64 / duration;
             info!(
-                "senders(non-,tpu-,gossip-vote): tps: {:.0} (={:.0}+{:.0}+{:.0}) over {:?} not-recved: ({}+{}+{})",
-                tps, non_vote_tps, tpu_vote_tps, gossip_vote_tps, log_interval,
-                self.non_vote_sender.len(), self.tpu_vote_sender.len(), self.gossip_vote_sender.len(),
+                "senders(non-,tpu-,gossip-vote): tps: {:.0} (={:.0}+{:.0}+{:.0}) over {:?} \
+                 not-recved: ({}+{}+{})",
+                tps,
+                non_vote_tps,
+                tpu_vote_tps,
+                gossip_vote_tps,
+                log_interval,
+                self.non_vote_sender.len(),
+                self.tpu_vote_sender.len(),
+                self.gossip_vote_sender.len(),
             );
             self.last_log_duration = simulation_duration;
             self.last_tx_count = current_tx_count;
@@ -764,10 +772,7 @@ impl BankingSimulator {
         )))
         .unwrap();
         assert!(retracer.is_enabled());
-        info!(
-            "Enabled banking retracer (dir_byte_limit: {})",
-            BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT,
-        );
+        info!("Enabled banking retracer (dir_byte_limit: {BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT})",);
 
 
         // Create a partially-dummy ClusterInfo for the banking stage.
         let cluster_info_for_banking = Arc::new(DummyClusterInfo {

+ 1 - 4
core/src/banking_trace.rs

@@ -450,10 +450,7 @@ impl TracedSender {
                         TracedEvent::PacketBatch(self.label, BankingPacketBatch::clone(&batch)),
                     ))
                     .map_err(|err| {
-                        error!(
-                            "unexpected error when tracing a banking event...: {:?}",
-                            err
-                        );
+                        error!("unexpected error when tracing a banking event...: {err:?}");
                         SendError(BankingPacketBatch::clone(&batch))
                     })?;
             }

+ 1 - 1
core/src/cluster_info_vote_listener.rs

@@ -545,7 +545,7 @@ impl ClusterInfoVoteListener {
                         sender
                             .send(BankNotification::OptimisticallyConfirmed(slot))
                             .unwrap_or_else(|err| {
-                                warn!("bank_notification_sender failed: {:?}", err)
+                                warn!("bank_notification_sender failed: {err:?}")
                             });
                     }
                 }

+ 6 - 4
core/src/cluster_slots_service/cluster_slots.rs

@@ -247,7 +247,7 @@ impl ClusterSlots {
         let epoch_metadata = self.epoch_metadata.read().unwrap();
         //startup init, this is very slow but only ever happens once
         if cluster_slots.is_empty() {
-            info!("Init cluster_slots at range {:?}", slot_range);
+            info!("Init cluster_slots at range {slot_range:?}");
             for slot in slot_range.clone() {
                 // Epoch should be defined for all slots in the window
                 let epoch = self
@@ -293,7 +293,10 @@ impl ClusterSlots {
                 .get_epoch_for_slot(slot)
                 .expect("Epoch should be defined for all slots in the window");
             let Some(stake_info) = epoch_metadata.get(&epoch) else {
-                warn!("Epoch slots can not reuse slot entry for slot {slot} since stakes for epoch {epoch} are not available");
+                warn!(
+                    "Epoch slots can not reuse slot entry for slot {slot} since stakes for epoch \
+                     {epoch} are not available"
+                );
                 cluster_slots.push_back(RowContent {
                     slot,
                     supporters: Arc::new(SlotSupporters::new_blank()),
@@ -515,8 +518,7 @@ mod tests {
             assert_eq!(
                 rg.len(),
                 CLUSTER_SLOTS_TRIM_SIZE,
-                "ring should have exactly {} elements",
-                CLUSTER_SLOTS_TRIM_SIZE
+                "ring should have exactly {CLUSTER_SLOTS_TRIM_SIZE} elements"
             );
             assert_eq!(rg.front().unwrap().slot, 1, "first slot should be root + 1");
             assert_eq!(

+ 1 - 1
core/src/completed_data_sets_service.rs

@@ -74,7 +74,7 @@ impl CompletedDataSetsService {
                         rpc_subscriptions.notify_signatures_received((slot, transactions));
                     }
                 }
-                Err(e) => warn!("completed-data-set-service deserialize error: {:?}", e),
+                Err(e) => warn!("completed-data-set-service deserialize error: {e:?}"),
             }
             slot
         };

+ 15 - 21
core/src/consensus.rs

@@ -407,7 +407,7 @@ impl Tower {
             if voted_stake == 0 {
                 continue;
             }
-            trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake);
+            trace!("{vote_account_pubkey} {key} with stake {voted_stake}");
             let mut vote_state = TowerVoteState::from(account.vote_state_view());
             for vote in &vote_state.votes {
                 lockout_intervals
@@ -418,7 +418,7 @@ impl Tower {
 
 
             if key == *vote_account_pubkey {
                 my_latest_landed_vote = vote_state.nth_recent_lockout(0).map(|l| l.slot());
-                debug!("vote state {:?}", vote_state);
+                debug!("vote state {vote_state:?}");
                 debug!(
                     "observed slot {}",
                     vote_state
@@ -578,8 +578,8 @@ impl Tower {
         if let Some(last_voted_slot) = self.last_vote.last_voted_slot() {
             if heaviest_slot_on_same_fork <= last_voted_slot {
                 warn!(
-                    "Trying to refresh timestamp for vote on {last_voted_slot} \
-                     using smaller heaviest bank {heaviest_slot_on_same_fork}"
+                    "Trying to refresh timestamp for vote on {last_voted_slot} using smaller \
+                     heaviest bank {heaviest_slot_on_same_fork}"
                 );
                 return;
             }
@@ -961,7 +961,7 @@ impl Tower {
                      vote({last_voted_slot}), meaning some inconsistency between saved tower and \
                      ledger."
                 );
-                warn!("{}", message);
+                warn!("{message}");
                 datapoint_warn!("tower_warn", ("warn", message, String));
             }
             &empty_ancestors
@@ -1116,8 +1116,8 @@ impl Tower {
                             last_vote_ancestors,
                         )
                         .expect(
-                            "candidate_slot and switch_slot exist in descendants map, \
-                             so they must exist in ancestors map",
+                            "candidate_slot and switch_slot exist in descendants map, so they \
+                             must exist in ancestors map",
                         )
                 }
             {
@@ -1255,11 +1255,7 @@ impl Tower {
         );
         let new_check = Some((switch_slot, decision.clone()));
         if new_check != self.last_switch_threshold_check {
-            trace!(
-                "new switch threshold check: slot {}: {:?}",
-                switch_slot,
-                decision,
-            );
+            trace!("new switch threshold check: slot {switch_slot}: {decision:?}",);
             self.last_switch_threshold_check = new_check;
         }
         decision
@@ -1472,7 +1468,7 @@ impl Tower {
                     "For some reason, we're REPROCESSING slots which has already been voted and \
                      ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
                 );
-                error!("{}", message);
+                error!("{message}");
                 datapoint_error!("tower_error", ("error", message, String));
 
                 // Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
@@ -1571,7 +1567,7 @@ impl Tower {
         }
 
         // Check for errors if not anchored
-        info!("adjusted tower's anchored slot: {:?}", anchored_slot);
+        info!("adjusted tower's anchored slot: {anchored_slot:?}");
         if anchored_slot.is_none() {
             // this error really shouldn't happen unless ledger/tower is corrupted
             return Err(TowerError::FatallyInconsistent(
@@ -1737,9 +1733,8 @@ pub fn reconcile_blockstore_roots_with_external_source(
             .collect();
         if !new_roots.is_empty() {
             info!(
-                "Reconciling slots as root based on external root: {:?} (external: {:?}, \
-                 blockstore: {})",
-                new_roots, external_source, last_blockstore_root
+                "Reconciling slots as root based on external root: {new_roots:?} (external: \
+                 {external_source:?}, blockstore: {last_blockstore_root})"
             );
 
             // Unfortunately, we can't supply duplicate-confirmed hashes,
@@ -1761,10 +1756,9 @@ pub fn reconcile_blockstore_roots_with_external_source(
             // That's because we might have a chance of recovering properly with
             // newer snapshot.
             warn!(
-                "Couldn't find any ancestor slots from external source ({:?}) towards blockstore \
-                 root ({}); blockstore pruned or only tower moved into new ledger or just hard \
-                 fork?",
-                external_source, last_blockstore_root,
+                "Couldn't find any ancestor slots from external source ({external_source:?}) \
+                 towards blockstore root ({last_blockstore_root}); blockstore pruned or only \
+                 tower moved into new ledger or just hard fork?",
             );
         }
     }

+ 2 - 5
core/src/consensus/fork_choice.rs

@@ -144,11 +144,8 @@ fn recheck_fork_decision_failed_switch_threshold(
     // then there will be no blocks to include the votes for slot 4, and the network halts
     // because 90% of validators can't vote
     info!(
-        "Waiting to switch vote to {heaviest_bank_slot}, \
-        resetting to slot {:?} for now, \
-        switch proof stake: {switch_proof_stake}, \
-        threshold stake: {}, \
-        total stake: {total_stake}",
+        "Waiting to switch vote to {heaviest_bank_slot}, resetting to slot {:?} for now, switch \
+         proof stake: {switch_proof_stake}, threshold stake: {}, total stake: {total_stake}",
         reset_bank.as_ref().map(|b| b.slot()),
         total_stake as f64 * SWITCH_FORK_THRESHOLD,
     );

+ 8 - 17
core/src/consensus/heaviest_subtree_fork_choice.rs

@@ -143,9 +143,9 @@ impl ForkInfo {
         if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor {
             if latest_invalid_ancestor <= newly_valid_ancestor {
                 info!(
-                    "Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was \
-                     duplicate confirmed",
-                    my_key, latest_invalid_ancestor, newly_valid_ancestor
+                    "Fork choice for {my_key:?} clearing latest invalid ancestor \
+                     {latest_invalid_ancestor:?} because {newly_valid_ancestor:?} was duplicate \
+                     confirmed"
                 );
                 self.latest_invalid_ancestor = None;
             }
@@ -936,10 +936,7 @@ impl HeaviestSubtreeForkChoice {
         let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap();
         if is_duplicate_confirmed {
             if !fork_info.is_duplicate_confirmed {
-                info!(
-                    "Fork choice setting {:?} to duplicate confirmed",
-                    slot_hash_key
-                );
+                info!("Fork choice setting {slot_hash_key:?} to duplicate confirmed");
             }
             fork_info.set_duplicate_confirmed();
         }
@@ -1038,8 +1035,8 @@ impl HeaviestSubtreeForkChoice {
                     {
                         assert!(if new_vote_slot == old_latest_vote_slot {
                             warn!(
-                                "Got a duplicate vote for validator: {pubkey}, \
-                                 slot_hash: {new_vote_slot_hash:?}",
+                                "Got a duplicate vote for validator: {pubkey}, slot_hash: \
+                                 {new_vote_slot_hash:?}",
                             );
                             // If the slots are equal, then the new
                             // vote must be for a smaller hash
@@ -1331,10 +1328,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
     }
 
     fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) {
-        info!(
-            "marking fork starting at: {:?} invalid candidate",
-            invalid_slot_hash_key
-        );
+        info!("marking fork starting at: {invalid_slot_hash_key:?} invalid candidate");
         let fork_info = self.fork_infos.get_mut(invalid_slot_hash_key);
         if let Some(fork_info) = fork_info {
             // Should not be marking duplicate confirmed blocks as invalid candidates
@@ -1359,10 +1353,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
     }
 
     fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) -> Vec<SlotHashKey> {
-        info!(
-            "marking fork starting at: {:?} valid candidate",
-            valid_slot_hash_key
-        );
+        info!("marking fork starting at: {valid_slot_hash_key:?} valid candidate");
         let mut newly_duplicate_confirmed_ancestors = vec![];
 
         for ancestor_key in std::iter::once(*valid_slot_hash_key)

+ 2 - 7
core/src/consensus/progress_map.rs

@@ -402,13 +402,8 @@ impl ProgressMap {
     pub fn log_propagated_stats(&self, slot: Slot, bank_forks: &RwLock<BankForks>) {
         if let Some(stats) = self.get_propagated_stats(slot) {
             info!(
-                "Propagated stats: \
-                 total staked: {}, \
-                 observed staked: {}, \
-                 vote pubkeys: {:?}, \
-                 node_pubkeys: {:?}, \
-                 slot: {slot}, \
-                 epoch: {:?}",
+                "Propagated stats: total staked: {}, observed staked: {}, vote pubkeys: {:?}, \
+                 node_pubkeys: {:?}, slot: {slot}, epoch: {:?}",
                 stats.total_epoch_stake,
                 stats.propagated_validators_stake,
                 stats.propagated_validators,

+ 3 - 3
core/src/consensus/tower_storage.rs

@@ -292,7 +292,7 @@ impl TowerStorage for EtcdTowerStorage {
         self.runtime
             .block_on(async { self.client.lock().await.txn(txn).await })
             .map_err(|err| {
-                error!("Failed to acquire etcd instance lock: {}", err);
+                error!("Failed to acquire etcd instance lock: {err}");
                 Self::etdc_to_tower_error(err)
             })?;
 
@@ -308,7 +308,7 @@ impl TowerStorage for EtcdTowerStorage {
             .runtime
             .block_on(async { self.client.lock().await.txn(txn).await })
             .map_err(|err| {
-                error!("Failed to read etcd saved tower: {}", err);
+                error!("Failed to read etcd saved tower: {err}");
                 Self::etdc_to_tower_error(err)
             })?;
 
@@ -353,7 +353,7 @@ impl TowerStorage for EtcdTowerStorage {
             .runtime
             .block_on(async { self.client.lock().await.txn(txn).await })
             .map_err(|err| {
-                error!("Failed to write etcd saved tower: {}", err);
+                error!("Failed to write etcd saved tower: {err}");
                 err
             })
             .map_err(Self::etdc_to_tower_error)?;

+ 5 - 3
core/src/consensus/tower_vote_state.rs

@@ -70,9 +70,11 @@ impl TowerVoteState {
         for (i, v) in self.votes.iter_mut().enumerate() {
             // Don't increase the lockout for this vote until we get more confirmations
             // than the max number of confirmations this vote has seen
-            if stack_depth >
-                i.checked_add(v.confirmation_count() as usize)
-                    .expect("`confirmation_count` and tower_size should be bounded by `MAX_LOCKOUT_HISTORY`")
+            if stack_depth
+                > i.checked_add(v.confirmation_count() as usize).expect(
+                    "`confirmation_count` and tower_size should be bounded by \
+                     `MAX_LOCKOUT_HISTORY`",
+                )
             {
                 v.increase_confirmation_count(1);
             }

+ 1 - 1
core/src/fetch_stage.rs

@@ -238,7 +238,7 @@ impl FetchStage {
                         Error::RecvTimeout(RecvTimeoutError::Timeout) => (),
                         Error::Recv(_) => break,
                         Error::Send => break,
-                        _ => error!("{:?}", e),
+                        _ => error!("{e:?}"),
                     }
                 }
             })

+ 2 - 2
core/src/forwarding_stage.rs

@@ -279,8 +279,8 @@ impl<VoteClient: ForwardingClient, NonVoteClient: ForwardingClient>
             {
                 let Some(packet_data) = packet.data(..) else {
                     unreachable!(
-                        "packet.meta().discard() was already checked. \
-                         If not discarded, packet MUST have data"
+                        "packet.meta().discard() was already checked. If not discarded, packet \
+                         MUST have data"
                     );
                 };
 

+ 3 - 9
core/src/optimistic_confirmation_verifier.rs

@@ -109,15 +109,9 @@ impl OptimisticConfirmationVerifier {
                     .unwrap_or(0);
 
                 error!(
-                    "{}, \
-                     hash: {hash}, \
-                     epoch: {epoch}, \
-                     voted keys: {:?}, \
-                     root: {root}, \
-                     root bank hash: {}, \
-                     voted stake: {voted_stake}, \
-                     total epoch stake: {total_epoch_stake}, \
-                     pct: {}",
+                    "{}, hash: {hash}, epoch: {epoch}, voted keys: {:?}, root: {root}, root bank \
+                     hash: {}, voted stake: {voted_stake}, total epoch stake: \
+                     {total_epoch_stake}, pct: {}",
                     Self::format_optimistic_confirmed_slot_violation_log(*optimistic_slot),
                     r_slot_tracker
                         .as_ref()

+ 3 - 3
core/src/repair/ancestor_hashes_service.rs

@@ -129,7 +129,7 @@ impl AncestorRepairRequestsStats {
 
 
         let repair_total = self.ancestor_requests.count;
         if self.last_report.elapsed().as_secs() > 2 && repair_total > 0 {
-            info!("ancestor_repair_requests_stats: {:?}", slot_to_count);
+            info!("ancestor_repair_requests_stats: {slot_to_count:?}");
             datapoint_info!(
                 "ancestor-repair",
                 ("ancestor-repair-count", self.ancestor_requests.count, i64)
@@ -747,8 +747,8 @@ impl AncestorHashesService {
 
 
         for (slot, request_type) in potential_slot_requests.take(number_of_allowed_requests) {
             warn!(
-                "Cluster froze slot: {slot}, but we marked it as {}. \
-                 Initiating protocol to sample cluster for dead slot ancestors.",
+                "Cluster froze slot: {slot}, but we marked it as {}. Initiating protocol to \
+                 sample cluster for dead slot ancestors.",
                 if request_type.is_pruned() {
                     "pruned"
                 } else {

+ 14 - 13
core/src/repair/cluster_slot_state_verifier.rs

@@ -377,8 +377,8 @@ fn check_duplicate_confirmed_hash_against_bank_status(
             // If the cluster duplicate confirmed some version of this slot, then
             // there's another version of our dead slot
             warn!(
-                "Cluster duplicate confirmed slot {} with hash {}, but we marked slot dead",
-                slot, duplicate_confirmed_hash
+                "Cluster duplicate confirmed slot {slot} with hash {duplicate_confirmed_hash}, \
+                 but we marked slot dead"
             );
             state_changes.push(ResultingStateChange::RepairDuplicateConfirmedVersion(
                 duplicate_confirmed_hash,
@@ -397,8 +397,8 @@ fn check_duplicate_confirmed_hash_against_bank_status(
             // Modify fork choice rule to exclude our version from being voted
             // on and also repair the correct version
             warn!(
-                "Cluster duplicate confirmed slot {} with hash {}, but our version has hash {}",
-                slot, duplicate_confirmed_hash, bank_frozen_hash
+                "Cluster duplicate confirmed slot {slot} with hash {duplicate_confirmed_hash}, \
+                 but our version has hash {bank_frozen_hash}"
             );
             state_changes.push(ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash));
             state_changes.push(ResultingStateChange::RepairDuplicateConfirmedVersion(
@@ -435,8 +435,8 @@ fn check_epoch_slots_hash_against_bank_status(
         BankStatus::Frozen(bank_frozen_hash) => {
             // The epoch slots hash does not match our frozen hash.
             warn!(
-                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \
-                 but our version has hash {bank_frozen_hash:?}",
+                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \
+                 our version has hash {bank_frozen_hash:?}",
             );
             if !is_popular_pruned {
                 // If the slot is not already pruned notify fork choice to mark as invalid
@@ -446,8 +446,8 @@ fn check_epoch_slots_hash_against_bank_status(
         BankStatus::Dead => {
             // Cluster sample found a hash for our dead slot, we must have the wrong version
             warn!(
-                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \
-                 but we marked slot dead",
+                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \
+                 we marked slot dead",
             );
         }
         BankStatus::Unprocessed => {
@@ -456,8 +456,8 @@ fn check_epoch_slots_hash_against_bank_status(
             assert!(is_popular_pruned);
             // The cluster sample found the troublesome slot which caused this fork to be pruned
             warn!(
-                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \
-                 but we have pruned it due to incorrect ancestry"
+                "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \
+                 we have pruned it due to incorrect ancestry"
             );
         }
     }
@@ -645,7 +645,8 @@ fn on_epoch_slots_frozen(
             if epoch_slots_frozen_hash != duplicate_confirmed_hash {
                 warn!(
                     "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, \
-                     but we already saw duplicate confirmation on hash: {duplicate_confirmed_hash:?}",
+                     but we already saw duplicate confirmation on hash: \
+                     {duplicate_confirmed_hash:?}",
                 );
             }
             return vec![];
@@ -857,8 +858,8 @@ pub(crate) fn check_slot_agrees_with_cluster(
     slot_state_update: SlotStateUpdate,
 ) {
     info!(
-        "check_slot_agrees_with_cluster() slot: {}, root: {}, slot_state_update: {:?}",
-        slot, root, slot_state_update
+        "check_slot_agrees_with_cluster() slot: {slot}, root: {root}, slot_state_update: \
+         {slot_state_update:?}"
     );
 
     if slot <= root {

+ 9 - 10
core/src/repair/duplicate_repair_status.rs

@@ -322,9 +322,9 @@ impl AncestorRequestStatus {
                             agreed_response[*mismatch_i];
                         let mismatch_our_frozen_hash = blockstore.get_bank_hash(mismatch_slot);
                         info!(
-                            "When processing the ancestor sample for {}, there was a mismatch \
-                             for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed \
-                             upon {mismatch_agreed_upon_hash}. However for a later ancestor \
+                            "When processing the ancestor sample for {}, there was a mismatch for \
+                             {mismatch_slot}: we had frozen hash {:?} and the cluster agreed upon \
+                             {mismatch_agreed_upon_hash}. However for a later ancestor \
                              {ancestor_slot} we have agreement on {our_frozen_hash} as the bank \
                              hash. This should never be possible, something is wrong or the \
                              cluster sample is invalid. Rejecting and queuing the ancestor hashes \
@@ -360,10 +360,9 @@ impl AncestorRequestStatus {
                             self.requested_mismatched_slot
                         );
                     }
-                    (Some(decision), true) => panic!(
-                        "Programmer error, {:?} should not be set in decision loop",
-                        decision
-                    ),
+                    (Some(decision), true) => {
+                        panic!("Programmer error, {decision:?} should not be set in decision loop")
+                    }
                     (Some(_), false) => { /* Already found a mismatch, descendants continue to mismatch as well */
                     }
                     (None, true) => { /* Mismatch hasn't been found yet */ }
@@ -476,9 +475,9 @@ impl AncestorRequestStatus {
                 // replay dump then repair to fix.
 
                 warn!(
-                    "Blockstore is missing frozen hash for slot {ancestor_slot}, \
-                     which the cluster claims is an ancestor of dead slot {}. Potentially \
-                     our version of the dead slot chains to the wrong fork!",
+                    "Blockstore is missing frozen hash for slot {ancestor_slot}, which the \
+                     cluster claims is an ancestor of dead slot {}. Potentially our version of \
+                     the dead slot chains to the wrong fork!",
                     self.requested_mismatched_slot
                 );
             }

+ 8 - 9
core/src/repair/repair_service.rs

@@ -172,7 +172,7 @@ impl RepairStats {
             .chain(self.orphan.slot_pubkeys.iter())
             .map(|(slot, slot_repairs)| (slot, slot_repairs.pubkey_repairs.values().sum::<u64>()))
             .collect();
-        info!("repair_stats: {:?}", slot_to_count);
+        info!("repair_stats: {slot_to_count:?}");
         if repair_total > 0 {
             let nonzero_num = |x| if x == 0 { None } else { Some(x) };
             datapoint_info!(
@@ -609,10 +609,7 @@ impl RepairService {
             }
         });
         if !popular_pruned_forks.is_empty() {
-            warn!(
-                "Notifying repair of popular pruned forks {:?}",
-                popular_pruned_forks
-            );
+            warn!("Notifying repair of popular pruned forks {popular_pruned_forks:?}");
             popular_pruned_forks_sender
                 .send(popular_pruned_forks)
                 .unwrap_or_else(|err| error!("failed to send popular pruned forks {err}"));
@@ -665,7 +662,9 @@ impl RepairService {
                 Ok(()) => (),
                 Err(SendPktsError::IoError(err, num_failed)) => {
                     error!(
-                        "{} batch_send failed to send {num_failed}/{num_pkts} packets first error {err:?}", repair_info.cluster_info.id()
+                        "{} batch_send failed to send {num_failed}/{num_pkts} packets first error \
+                         {err:?}",
+                        repair_info.cluster_info.id()
                     );
                 }
             }
@@ -1067,7 +1066,7 @@ impl RepairService {
                 debug!("successfully sent repair request to {pubkey} / {address}!");
             }
             Err(SendPktsError::IoError(err, _num_failed)) => {
-                error!("batch_send failed to send packet - error = {:?}", err);
+                error!("batch_send failed to send packet - error = {err:?}");
             }
         }
     }
@@ -1183,8 +1182,8 @@ impl RepairService {
                             Ok(req) => {
                                 if let Err(e) = repair_socket.send_to(&req, repair_addr) {
                                     info!(
-                                        "repair req send_to {} ({}) error {:?}",
-                                        repair_pubkey, repair_addr, e
+                                        "repair req send_to {repair_pubkey} ({repair_addr}) error \
+                                         {e:?}"
                                     );
                                 }
                             }

+ 6 - 12
core/src/repair/repair_weight.rs

@@ -322,13 +322,13 @@ impl RepairWeight {
     pub fn split_off(&mut self, slot: Slot) -> HashSet<Slot> {
         assert!(slot >= self.root);
         if slot == self.root {
-            error!("Trying to orphan root of repair tree {}", slot);
+            error!("Trying to orphan root of repair tree {slot}");
             return HashSet::new();
         }
         match self.slot_to_tree.get(&slot).copied() {
             Some(TreeRoot::Root(subtree_root)) => {
                 if subtree_root == slot {
-                    info!("{} is already orphan, skipping", slot);
+                    info!("{slot} is already orphan, skipping");
                     return HashSet::new();
                 }
                 let subtree = self
@@ -350,10 +350,7 @@ impl RepairWeight {
                 // If not they will once again be attached to the pruned set in
                 // `update_orphan_ancestors`.
 
-                info!(
-                    "Dumping pruned slot {} of tree {} in repair",
-                    slot, subtree_root
-                );
+                info!("Dumping pruned slot {slot} of tree {subtree_root} in repair");
                 let mut subtree = self
                     .pruned_trees
                     .remove(&subtree_root)
@@ -378,10 +375,7 @@ impl RepairWeight {
                 }
             }
             None => {
-                warn!(
-                    "Trying to split off slot {} which doesn't currently exist in repair",
-                    slot
-                );
+                warn!("Trying to split off slot {slot} which doesn't currently exist in repair");
                 HashSet::new()
             }
         }
@@ -440,7 +434,7 @@ impl RepairWeight {
 
 
             // Find all descendants of `self.root` that are not reachable from `new_root`.
             // Prune these out and add to `self.pruned_trees`
-            trace!("pruning tree {} with {}", new_root_tree_root, new_root);
+            trace!("pruning tree {new_root_tree_root} with {new_root}");
             let (removed, pruned) = new_root_tree.purge_prune((new_root, Hash::default()));
             for pruned_tree in pruned {
                 let pruned_tree_root = pruned_tree.tree_root().0;
@@ -471,7 +465,7 @@ impl RepairWeight {
             .drain()
             .flat_map(|(tree_root, mut pruned_tree)| {
                 if tree_root < new_root {
-                    trace!("pruning tree {} with {}", tree_root, new_root);
+                    trace!("pruning tree {tree_root} with {new_root}");
                     let (removed, pruned) = pruned_tree.purge_prune((new_root, Hash::default()));
                     for (slot, _) in removed {
                         self.slot_to_tree.remove(&slot);

+ 3 - 2
core/src/repair/serve_repair.rs

@@ -530,7 +530,7 @@ impl ServeRepair {
     fn report_time_spent(label: &str, time: &Duration, extra: &str) {
         let count = time.as_millis();
         if count > 5 {
-            info!("{} took: {} ms {}", label, count, extra);
+            info!("{label} took: {count} ms {extra}");
         }
     }
 
@@ -1256,7 +1256,8 @@ impl ServeRepair {
                 Ok(()) => (),
                 Err(SendPktsError::IoError(err, num_failed)) => {
                     warn!(
-                        "batch_send failed to send {num_failed}/{num_pkts} packets. First error: {err:?}"
+                        "batch_send failed to send {num_failed}/{num_pkts} packets. First error: \
+                         {err:?}"
                     );
                 }
             }

+ 47 - 76
core/src/replay_stage.rs

@@ -212,9 +212,8 @@ impl PartitionInfo {
     ) {
     ) {
         if self.partition_start_time.is_none() && partition_detected {
         if self.partition_start_time.is_none() && partition_detected {
             warn!(
-                "PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset \
-                 slot: {}",
-                heaviest_slot, last_voted_slot, reset_bank_slot,
+                "PARTITION DETECTED waiting to join heaviest fork: {heaviest_slot} last vote: \
+                 {last_voted_slot:?}, reset slot: {reset_bank_slot}",
             );
             datapoint_info!(
                 "replay_stage-partition-start",
@@ -235,8 +234,8 @@ impl PartitionInfo {
             self.partition_start_time = Some(Instant::now());
         } else if self.partition_start_time.is_some() && !partition_detected {
             warn!(
-                "PARTITION resolved heaviest fork: {} last vote: {:?}, reset slot: {}",
-                heaviest_slot, last_voted_slot, reset_bank_slot
+                "PARTITION resolved heaviest fork: {heaviest_slot} last vote: \
+                 {last_voted_slot:?}, reset slot: {reset_bank_slot}"
             );
             datapoint_info!(
                 "replay_stage-partition-resolved",
@@ -631,18 +630,14 @@ impl ReplayStage {
                     Ok(tower) => tower,
                     Err(err) => {
                         error!(
-                            "Unable to load new tower when attempting to change identity from {} \
-                             to {} on ReplayStage startup, Exiting: {}",
-                            my_old_pubkey, my_pubkey, err
+                            "Unable to load new tower when attempting to change identity from \
+                             {my_old_pubkey} to {my_pubkey} on ReplayStage startup, Exiting: {err}"
                         );
                         // drop(_exit) will set the exit flag, eventually tearing down the entire process
                         return;
                     }
                 };
-                warn!(
-                    "Identity changed during startup from {} to {}",
-                    my_old_pubkey, my_pubkey
-                );
+                warn!("Identity changed during startup from {my_old_pubkey} to {my_pubkey}");
             }
             let (mut progress, mut heaviest_subtree_fork_choice) =
                 Self::initialize_progress_and_fork_choice_with_locked_bank_forks(
@@ -1078,8 +1073,8 @@ impl ReplayStage {
                                 Err(err) => {
                                     error!(
                                         "Unable to load new tower when attempting to change \
-                                         identity from {} to {} on set-identity, Exiting: {}",
-                                        my_old_pubkey, my_pubkey, err
+                                         identity from {my_old_pubkey} to {my_pubkey} on \
+                                         set-identity, Exiting: {err}"
                                     );
                                     // drop(_exit) will set the exit flag, eventually tearing down the entire process
                                     return;
@@ -1088,7 +1083,7 @@ impl ReplayStage {
                             // Ensure the validator can land votes with the new identity before
                             // becoming leader
                             has_new_vote_been_rooted = !wait_for_vote_to_start_leader;
-                            warn!("Identity changed from {} to {}", my_old_pubkey, my_pubkey);
+                            warn!("Identity changed from {my_old_pubkey} to {my_pubkey}");
                         }
 
                         Self::reset_poh_recorder(
@@ -1373,8 +1368,8 @@ impl ReplayStage {
             progress.get_leader_propagation_slot_must_exist(start_slot)
         {
             debug!(
-                "Slot not propagated: start_slot={} latest_leader_slot={}",
-                start_slot, latest_leader_slot
+                "Slot not propagated: start_slot={start_slot} \
+                 latest_leader_slot={latest_leader_slot}"
             );
             Self::maybe_retransmit_unpropagated_slots(
                 "replay_stage-retransmit-timing-based",
@@ -1704,7 +1699,7 @@ impl ReplayStage {
         bank_forks: &RwLock<BankForks>,
         blockstore: &Blockstore,
     ) {
-        warn!("purging slot {}", duplicate_slot);
+        warn!("purging slot {duplicate_slot}");
 
         // Doesn't need to be root bank, just needs a common bank to
         // access the status cache and accounts
@@ -1763,8 +1758,8 @@ impl ReplayStage {
                 // also be a duplicate. In this case we *need* to repair it, so we clear from
                 // blockstore.
                 warn!(
-                    "purging duplicate descendant: {} with slot_id {} and bank hash {}, of slot {}",
-                    slot, slot_id, bank_hash, duplicate_slot
+                    "purging duplicate descendant: {slot} with slot_id {slot_id} and bank hash \
+                     {bank_hash}, of slot {duplicate_slot}"
                 );
                 // Clear the slot-related data in blockstore. This will:
                 // 1) Clear old shreds allowing new ones to be inserted
@@ -1772,7 +1767,7 @@ impl ReplayStage {
                 // this slot
                 blockstore.clear_unconfirmed_slot(slot);
             } else if slot == duplicate_slot {
-                warn!("purging duplicate slot: {} with slot_id {}", slot, slot_id);
+                warn!("purging duplicate slot: {slot} with slot_id {slot_id}");
                 blockstore.clear_unconfirmed_slot(slot);
             } else {
                 // If a descendant was unable to replay and chained from a duplicate, it is not
@@ -2009,10 +2004,7 @@ impl ReplayStage {
                 } else {
                     ""
                 };
-                info!(
-                    "LEADER CHANGE at slot: {} leader: {}{}",
-                    bank_slot, new_leader, msg
-                );
+                info!("LEADER CHANGE at slot: {bank_slot} leader: {new_leader}{msg}");
             }
         }
         current_leader.replace(new_leader.to_owned());
@@ -2107,12 +2099,12 @@ impl ReplayStage {
                     parent_slot,
                 } => (poh_slot, parent_slot),
                 PohLeaderStatus::NotReached => {
-                    trace!("{} poh_recorder hasn't reached_leader_slot", my_pubkey);
+                    trace!("{my_pubkey} poh_recorder hasn't reached_leader_slot");
                     return false;
                 }
             };
 
-        trace!("{} reached_leader_slot", my_pubkey);
+        trace!("{my_pubkey} reached_leader_slot");
 
         let Some(parent) = bank_forks.read().unwrap().get(parent_slot) else {
             warn!(
@@ -2130,15 +2122,10 @@ impl ReplayStage {
         }
 
         if bank_forks.read().unwrap().get(poh_slot).is_some() {
-            warn!("{} already have bank in forks at {}?", my_pubkey, poh_slot);
+            warn!("{my_pubkey} already have bank in forks at {poh_slot}?");
             return false;
         }
-        trace!(
-            "{} poh_slot {} parent_slot {}",
-            my_pubkey,
-            poh_slot,
-            parent_slot
-        );
+        trace!("{my_pubkey} poh_slot {poh_slot} parent_slot {parent_slot}");
 
         if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) {
             if !has_new_vote_been_rooted {
@@ -2146,12 +2133,7 @@ impl ReplayStage {
                 return false;
             }
 
-            trace!(
-                "{} leader {} at poh slot: {}",
-                my_pubkey,
-                next_leader,
-                poh_slot
-            );
+            trace!("{my_pubkey} leader {next_leader} at poh slot: {poh_slot}");
 
             // I guess I missed my slot
             if next_leader != *my_pubkey {
@@ -2198,10 +2180,7 @@ impl ReplayStage {
 
             let root_slot = bank_forks.read().unwrap().root();
             datapoint_info!("replay_stage-my_leader_slot", ("slot", poh_slot, i64),);
-            info!(
-                "new fork:{} parent:{} (leader) root:{}",
-                poh_slot, parent_slot, root_slot
-            );
+            info!("new fork:{poh_slot} parent:{parent_slot} (leader) root:{root_slot}");
 
             let root_distance = poh_slot - root_slot;
             let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY {
@@ -2227,7 +2206,7 @@ impl ReplayStage {
             update_bank_forks_and_poh_recorder_for_new_tpu_bank(bank_forks, poh_recorder, tpu_bank);
             true
         } else {
-            error!("{} No next leader found", my_pubkey);
+            error!("{my_pubkey} No next leader found");
             false
         }
     }
@@ -2509,10 +2488,7 @@ impl ReplayStage {
         }
         let vote_account = match bank.get_vote_account(vote_account_pubkey) {
             None => {
-                warn!(
-                    "Vote account {} does not exist.  Unable to vote",
-                    vote_account_pubkey,
-                );
+                warn!("Vote account {vote_account_pubkey} does not exist.  Unable to vote",);
                 return GenerateVoteTxResult::Failed;
             }
             Some(vote_account) => vote_account,
@@ -2642,7 +2618,8 @@ impl ReplayStage {
             last_vote_refresh_time.last_print_time = Instant::now();
             warn!(
                 "Last landed vote for slot {} in bank {} is greater than the current last vote \
-                 for slot: {} tracked by tower. This indicates a bug in the on chain adoption logic",
+                 for slot: {} tracked by tower. This indicates a bug in the on chain adoption \
+                 logic",
                 latest_landed_vote_slot,
                 heaviest_bank_on_same_fork.slot(),
                 last_voted_slot
@@ -2759,7 +2736,7 @@ impl ReplayStage {
                     tx: vote_tx,
                     last_voted_slot,
                 })
-                .unwrap_or_else(|err| warn!("Error: {:?}", err));
+                .unwrap_or_else(|err| warn!("Error: {err:?}"));
             last_vote_refresh_time.last_refresh_time = Instant::now();
             true
         } else if vote_tx_result.is_non_voting() {
@@ -2805,7 +2782,7 @@ impl ReplayStage {
             tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash);
 
             let saved_tower = SavedTower::new(tower, identity_keypair).unwrap_or_else(|err| {
-                error!("Unable to create saved tower: {:?}", err);
+                error!("Unable to create saved tower: {err:?}");
                 std::process::exit(1);
             });
 
@@ -2816,7 +2793,7 @@ impl ReplayStage {
                     tower_slots,
                     saved_tower: SavedTowerVersions::from(saved_tower),
                 })
-                .unwrap_or_else(|err| warn!("Error: {:?}", err));
+                .unwrap_or_else(|err| warn!("Error: {err:?}"));
         } else if vote_tx_result.is_non_voting() {
             tower.mark_last_vote_tx_blockhash_non_voting();
         }
@@ -2835,7 +2812,7 @@ impl ReplayStage {
             total_stake,
             node_vote_state,
         )) {
-            trace!("lockouts_sender failed: {:?}", e);
+            trace!("lockouts_sender failed: {e:?}");
         }
     }
 
@@ -2915,7 +2892,7 @@ impl ReplayStage {
                         .unwrap_or(false)
                     {
                         // If the fork was marked as dead, don't replay it
-                        debug!("bank_slot {:?} is marked dead", bank_slot);
+                        debug!("bank_slot {bank_slot:?} is marked dead");
                         replay_result.is_slot_dead = true;
                         return replay_result;
                     }
@@ -3008,10 +2985,10 @@ impl ReplayStage {
             replay_result: None,
         };
         let my_pubkey = &my_pubkey.clone();
-        trace!("Replay active bank: slot {}", bank_slot);
+        trace!("Replay active bank: slot {bank_slot}");
         if progress.get(&bank_slot).map(|p| p.is_dead).unwrap_or(false) {
             // If the fork was marked as dead, don't replay it
-            debug!("bank_slot {:?} is marked dead", bank_slot);
+            debug!("bank_slot {bank_slot:?} is marked dead");
             replay_result.is_slot_dead = true;
         } else {
             let bank = bank_forks
@@ -3246,7 +3223,7 @@ impl ReplayStage {
                         is_leader_block,
                     })
                     .unwrap_or_else(|err| {
-                        warn!("cost_update_sender failed sending bank stats: {:?}", err)
+                        warn!("cost_update_sender failed sending bank stats: {err:?}")
                     });
 
                 assert_ne!(bank.hash(), Hash::default());
@@ -3307,7 +3284,7 @@ impl ReplayStage {
                     sender
                         .sender
                         .send(BankNotification::Frozen(bank.clone_without_scheduler()))
-                        .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
+                        .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}"));
                 }
 
                 let bank_hash = bank.hash();
@@ -3399,11 +3376,7 @@ impl ReplayStage {
     ) -> bool /* completed a bank */ {
         let active_bank_slots = bank_forks.read().unwrap().active_bank_slots();
         let num_active_banks = active_bank_slots.len();
-        trace!(
-            "{} active bank(s) to replay: {:?}",
-            num_active_banks,
-            active_bank_slots
-        );
+        trace!("{num_active_banks} active bank(s) to replay: {active_bank_slots:?}");
         if active_bank_slots.is_empty() {
             return false;
         }
@@ -3594,11 +3567,9 @@ impl ReplayStage {
             return;
         }
         info!(
-            "Frozen bank vote state slot {:?} \
-             is newer than our local vote state slot {:?}, \
-             adopting the bank vote state as our own. \
-             Bank votes: {:?}, root: {:?}, \
-             Local votes: {:?}, root: {:?}",
+            "Frozen bank vote state slot {:?} is newer than our local vote state slot {:?}, \
+             adopting the bank vote state as our own. Bank votes: {:?}, root: {:?}, Local votes: \
+             {:?}, root: {:?}",
             bank_vote_state.last_voted_slot(),
             tower.vote_state.last_voted_slot(),
             bank_vote_state.votes,
@@ -3621,8 +3592,8 @@ impl ReplayStage {
                     .votes
                     .retain(|lockout| lockout.slot() > local_root);
                 info!(
-                    "Local root is larger than on chain root, \
-                     overwrote bank root {:?} and updated votes {:?}",
+                    "Local root is larger than on chain root, overwrote bank root {:?} and \
+                     updated votes {:?}",
                     bank_vote_state.root_slot, bank_vote_state.votes
                 );
 
@@ -4076,16 +4047,16 @@ impl ReplayStage {
             sender
                 .sender
                 .send(BankNotification::NewRootBank(root_bank))
-                .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
+                .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}"));
 
             if let Some(new_chain) = rooted_slots_with_parents {
                 sender
                     .sender
                     .send(BankNotification::NewRootedChain(new_chain))
-                    .unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
+                    .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}"));
             }
         }
-        info!("new root {}", new_root);
+        info!("new root {new_root}");
         Ok(())
     }
 
@@ -4114,7 +4085,7 @@ impl ReplayStage {
 
         drop_bank_sender
             .send(removed_banks)
-            .unwrap_or_else(|err| warn!("bank drop failed: {:?}", err));
+            .unwrap_or_else(|err| warn!("bank drop failed: {err:?}"));
 
         // Dropping the bank_forks write lock and reacquiring as a read lock is
         // safe because updates to bank_forks are only made by a single thread.
@@ -4195,7 +4166,7 @@ impl ReplayStage {
                 .expect("missing parent in bank forks");
             for child_slot in children {
                 if forks.get(child_slot).is_some() || new_banks.contains_key(&child_slot) {
-                    trace!("child already active or frozen {}", child_slot);
+                    trace!("child already active or frozen {child_slot}");
                     continue;
                 }
                 let leader = leader_schedule_cache

+ 1 - 1
core/src/sample_performance_service.rs

@@ -67,7 +67,7 @@ impl SamplePerformanceService {
 
                 let highest_slot = snapshot.highest_slot;
                 if let Err(e) = blockstore.write_perf_sample(highest_slot, &perf_sample) {
-                    error!("write_perf_sample failed: slot {:?} {:?}", highest_slot, e);
+                    error!("write_perf_sample failed: slot {highest_slot:?} {e:?}");
                 }
             }
             sleep(SLEEP_INTERVAL);

+ 3 - 3
core/src/sigverify_stage.rs

@@ -415,7 +415,7 @@ impl SigVerifyStage {
                             SigVerifyServiceError::Send(_) => {
                                 break;
                             }
-                            _ => error!("{:?}", e),
+                            _ => error!("{e:?}"),
                         }
                     }
                     if last_print.elapsed().as_secs() > 2 {
@@ -526,7 +526,7 @@ mod tests {
         }
         let mut packet_s = Some(packet_s);
         let mut valid_received = 0;
-        trace!("sent: {}", sent_len);
+        trace!("sent: {sent_len}");
         loop {
             if let Ok(verifieds) = verified_r.recv() {
                 valid_received += verifieds
@@ -544,7 +544,7 @@ mod tests {
                 packet_s.take();
             }
         }
-        trace!("received: {}", valid_received);
+        trace!("received: {valid_received}");
 
         if use_same_tx {
             assert_eq!(valid_received, 1);

+ 4 - 4
core/src/snapshot_packager_service/pending_snapshot_packages.rs

@@ -33,8 +33,8 @@ impl PendingSnapshotPackages {
                             pending_full_snapshot_package,
                         ),
                         Greater,
-                        "full snapshot package must be newer than pending package, \
-                         old: {pending_full_snapshot_package:?}, new: {snapshot_package:?}",
+                        "full snapshot package must be newer than pending package, old: \
+                         {pending_full_snapshot_package:?}, new: {snapshot_package:?}",
                     );
                     info!(
                         "overwrote pending full snapshot package, old slot: {}, new slot: {}",
@@ -55,8 +55,8 @@ impl PendingSnapshotPackages {
                             pending_incremental_snapshot_package,
                         ),
                         Greater,
-                        "incremental snapshot package must be newer than pending package, \
-                         old: {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}",
+                        "incremental snapshot package must be newer than pending package, old: \
+                         {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}",
                     );
                     info!(
                         "overwrote pending incremental snapshot package, old slot: {}, new slot: \

+ 2 - 2
core/src/tpu.rs

@@ -426,8 +426,8 @@ impl Tpu {
         if let Some(tracer_thread_hdl) = self.tracer_thread_hdl {
             if let Err(tracer_result) = tracer_thread_hdl.join()? {
                 error!(
-                    "banking tracer thread returned error after successful thread join: {:?}",
-                    tracer_result
+                    "banking tracer thread returned error after successful thread join: \
+                     {tracer_result:?}"
                 );
             }
         }

+ 24 - 33
core/src/validator.rs

@@ -468,7 +468,7 @@ impl BlockstoreRootScan {
     fn join(self) {
         if let Some(blockstore_root_scan) = self.thread {
             if let Err(err) = blockstore_root_scan.join() {
-                warn!("blockstore_root_scan failed to join {:?}", err);
+                warn!("blockstore_root_scan failed to join {err:?}");
             }
         }
     }
@@ -667,7 +667,7 @@ impl Validator {
         }
 
         for cluster_entrypoint in &cluster_entrypoints {
-            info!("entrypoint: {:?}", cluster_entrypoint);
+            info!("entrypoint: {cluster_entrypoint:?}");
         }
 
         if solana_perf::perf_libs::api().is_some() {
@@ -817,10 +817,7 @@ impl Validator {
             (root_bank.slot(), root_bank.hard_forks())
         };
         let shred_version = compute_shred_version(&genesis_config.hash(), Some(&hard_forks));
-        info!(
-            "shred version: {shred_version}, hard forks: {:?}",
-            hard_forks
-        );
+        info!("shred version: {shred_version}, hard forks: {hard_forks:?}");
 
         if let Some(expected_shred_version) = config.expected_shred_version {
             if expected_shred_version != shred_version {
@@ -921,8 +918,11 @@ impl Validator {
             },
         );
         info!(
-            "Using: block-verification-method: {}, block-production-method: {}, transaction-structure: {}",
-            config.block_verification_method, config.block_production_method, config.transaction_struct
+            "Using: block-verification-method: {}, block-production-method: {}, \
+             transaction-structure: {}",
+            config.block_verification_method,
+            config.block_production_method,
+            config.transaction_struct
         );
 
         let (replay_vote_sender, replay_vote_receiver) = unbounded();
@@ -1469,14 +1469,11 @@ impl Validator {
         };
         let tower = match process_blockstore.process_to_create_tower() {
             Ok(tower) => {
-                info!("Tower state: {:?}", tower);
+                info!("Tower state: {tower:?}");
                 tower
             }
             Err(e) => {
-                warn!(
-                    "Unable to retrieve tower: {:?} creating default tower....",
-                    e
-                );
+                warn!("Unable to retrieve tower: {e:?} creating default tower....");
                 Tower::default()
             }
         };
@@ -1942,7 +1939,7 @@ fn post_process_restored_tower(
             let message =
                 format!("Hard fork is detected; discarding tower restoration result: {tower:?}");
             datapoint_error!("tower_error", ("error", message, String),);
-            error!("{}", message);
+            error!("{message}");
 
             // unconditionally relax tower requirement so that we can always restore tower
             // from root bank.
@@ -1989,8 +1986,7 @@ fn post_process_restored_tower(
             } else {
                 error!(
                     "Rebuilding a new tower from the latest vote account due to failed tower \
-                     restore: {}",
-                    err
+                     restore: {err}"
                 );
             }
 
@@ -2016,7 +2012,7 @@ fn load_genesis(
     assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET);
 
     let genesis_hash = genesis_config.hash();
-    info!("genesis hash: {}", genesis_hash);
+    info!("genesis hash: {genesis_hash}");
 
     if let Some(expected_genesis_hash) = config.expected_genesis_hash {
         if genesis_hash != expected_genesis_hash {
@@ -2056,7 +2052,7 @@ fn load_blockstore(
     ),
     String,
 > {
-    info!("loading ledger from {:?}...", ledger_path);
+    info!("loading ledger from {ledger_path:?}...");
     *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
 
     let blockstore = Blockstore::open_with_options(ledger_path, config.blockstore_options.clone())
@@ -2310,7 +2306,7 @@ fn maybe_warp_slot(
                 working_bank.slot()
             ));
         }
-        info!("warping to slot {}", warp_slot);
+        info!("warping to slot {warp_slot}");
 
         let root_bank = bank_forks.root_bank();
 
@@ -2381,7 +2377,7 @@ fn should_cleanup_blockstore_incorrect_shred_versions(
     let blockstore_min_slot = blockstore.lowest_slot();
     info!(
         "Blockstore contains data from slot {blockstore_min_slot} to {blockstore_max_slot}, the \
-        latest hard fork is {latest_hard_fork}"
+         latest hard fork is {latest_hard_fork}"
     );
 
     if latest_hard_fork < blockstore_min_slot {
@@ -2469,8 +2465,7 @@ fn cleanup_blockstore_incorrect_shred_versions(
     // Backing up the shreds that will be deleted from primary blockstore is
     // not critical, so swallow errors from backup blockstore operations.
     let backup_folder = format!(
-        "{}_backup_{}_{}_{}",
-        BLOCKSTORE_DIRECTORY_ROCKS_LEVEL, incorrect_shred_version, start_slot, end_slot
+        "{BLOCKSTORE_DIRECTORY_ROCKS_LEVEL}_backup_{incorrect_shred_version}_{start_slot}_{end_slot}"
     );
     match Blockstore::open_with_options(
         &blockstore.ledger_path().join(backup_folder),
@@ -2555,8 +2550,8 @@ pub enum ValidatorError {
     GenesisHashMismatch(Hash, Hash),
 
     #[error(
-        "ledger does not have enough data to wait for supermajority: \
-        current slot={0}, needed slot={1}"
+        "ledger does not have enough data to wait for supermajority: current slot={0}, needed \
+         slot={1}"
     )]
     NotEnoughLedgerData(Slot, Slot),
 
@@ -2646,8 +2641,8 @@ fn wait_for_supermajority(
 
                 if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
                     info!(
-                        "Supermajority reached, {}% active stake detected, starting up now.",
-                        gossip_stake_percent,
+                        "Supermajority reached, {gossip_stake_percent}% active stake detected, \
+                         starting up now.",
                     );
                     break;
                 }
@@ -2701,9 +2696,8 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
         if let Some(peer) = peers.get(&vote_state_node_pubkey) {
             if peer.shred_version() == my_shred_version {
                 trace!(
-                    "observed {} in gossip, (activated_stake={})",
-                    vote_state_node_pubkey,
-                    activated_stake
+                    "observed {vote_state_node_pubkey} in gossip, \
+                     (activated_stake={activated_stake})"
                 );
                 online_stake += activated_stake;
             } else {
@@ -2720,10 +2714,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
 
     let online_stake_percentage = (online_stake as f64 / total_activated_stake as f64) * 100.;
     if log {
-        info!(
-            "{:.3}% of active stake visible in gossip",
-            online_stake_percentage
-        );
+        info!("{online_stake_percentage:.3}% of active stake visible in gossip");
 
         if !wrong_shred_nodes.is_empty() {
             info!(

+ 1 - 1
core/src/voting_service.rs

@@ -113,7 +113,7 @@ impl VotingService {
         if let VoteOp::PushVote { saved_tower, .. } = &vote_op {
             let mut measure = Measure::start("tower storage save");
             if let Err(err) = tower_storage.store(saved_tower) {
-                error!("Unable to save tower to storage: {:?}", err);
+                error!("Unable to save tower to storage: {err:?}");
                 std::process::exit(1);
             }
             measure.stop();

+ 2 - 2
core/src/warm_quic_cache_service.rs

@@ -42,8 +42,8 @@ impl WarmQuicCacheService {
                 let conn = connection_cache.get_connection(&addr);
                 if let Err(err) = conn.send_data(&[]) {
                     warn!(
-                        "Failed to warmup QUIC connection to the leader {leader_pubkey:?} at {addr:?}, \
-                        Context: {log_context}, Error: {err:?}"
+                        "Failed to warmup QUIC connection to the leader {leader_pubkey:?} at \
+                         {addr:?}, Context: {log_context}, Error: {err:?}"
                     );
                 }
             }

+ 1 - 1
core/src/window_service.rs

@@ -96,7 +96,7 @@ impl WindowServiceMetrics {
             Error::RecvTimeout(_) => self.num_errors_cross_beam_recv_timeout += 1,
             Error::Blockstore(err) => {
                 self.num_errors_blockstore += 1;
-                error!("blockstore error: {}", err);
+                error!("blockstore error: {err}");
             }
             _ => self.num_errors_other += 1,
         }

+ 2 - 1
core/tests/scheduler_cost_adjustment.rs

@@ -124,7 +124,8 @@ impl TestSetup {
             },
             Err(err) => {
                 unreachable!(
-                    "All test Transactions should be well-formatted for execution and commit, err: '{}'", err
+                    "All test Transactions should be well-formatted for execution and commit, \
+                     err: '{err}'",
                 );
             }
         }

+ 10 - 16
core/tests/snapshots.rs

@@ -396,12 +396,10 @@ fn test_bank_forks_incremental_snapshot() {
     const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1;
 
     info!(
-        "Running bank forks incremental snapshot test, full snapshot interval: {} slots, \
-         incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots",
-        FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        LAST_SLOT,
-        SET_ROOT_INTERVAL
+        "Running bank forks incremental snapshot test, full snapshot interval: \
+         {FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots, incremental snapshot interval: \
+         {INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots, last slot: {LAST_SLOT}, set root \
+         interval: {SET_ROOT_INTERVAL} slots"
     );
 
     let snapshot_test_config = SnapshotTestConfig::new(
@@ -623,18 +621,14 @@ fn test_snapshots_with_background_services(
     const MAX_WAIT_DURATION: Duration = Duration::from_secs(10);
 
     info!("Running snapshots with background services test...");
+    #[rustfmt::skip]
     trace!(
         "Test configuration parameters:\
-         \n\tfull snapshot archive interval: {} slots\
-         \n\tincremental snapshot archive interval: {} slots\
-         \n\tbank snapshot interval: {} slots\
-         \n\tset root interval: {} slots\
-         \n\tlast slot: {}",
-        FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS,
-        BANK_SNAPSHOT_INTERVAL_SLOTS,
-        SET_ROOT_INTERVAL_SLOTS,
-        LAST_SLOT
+         \n\tfull snapshot archive interval: {FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots\
+         \n\tincremental snapshot archive interval: {INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS} slots\
+         \n\tbank snapshot interval: {BANK_SNAPSHOT_INTERVAL_SLOTS} slots\
+         \n\tset root interval: {SET_ROOT_INTERVAL_SLOTS} slots\
+         \n\tlast slot: {LAST_SLOT}"
     );
 
     let snapshot_test_config = SnapshotTestConfig::new(