
ledger: Resolve Rust 1.88 clippy lints and format strings (#6964)

- Ran cargo clippy with Rust 1.88.0 set in rust-toolchain.toml
- Ran cargo fmt with format_strings = true in rustfmt.toml
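For context, the clippy lint behind most of these hunks (`uninlined_format_args`) prefers captured identifiers in format strings over trailing positional arguments. A minimal sketch of the rewrite, assuming the `log` crate macros used throughout the diff; the function below and its arguments are hypothetical:

    use log::warn;

    fn report(slot: u64, err: std::io::Error) {
        // Before (triggers clippy::uninlined_format_args):
        //     warn!("slot {} failed to verify: {}", slot, err);
        // After: the variables are captured directly in the format string.
        warn!("slot {slot} failed to verify: {err}");
    }

The pinned toolchain (`rust-toolchain.toml` set to 1.88.0) keeps the lint set reproducible, and `format_strings = true` in `rustfmt.toml` is what drives the `\`-continued wrapping of long string literals seen in the hunks below.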
steviez, 4 months ago
commit d429f6efcd

+ 1 - 1
ledger/src/bigtable_delete.rs

@@ -44,7 +44,7 @@ pub async fn delete_confirmed_blocks(
     }
 
     measure.stop();
-    info!("{}", measure);
+    info!("{measure}");
     if failures > 0 {
         Err(format!("Incomplete deletion, {failures} operations failed").into())
     } else {

+ 13 - 17
ledger/src/bigtable_upload.rs

@@ -54,10 +54,7 @@ pub async fn upload_confirmed_blocks(
 ) -> Result<Slot, Box<dyn std::error::Error>> {
     let mut measure = Measure::start("entire upload");
 
-    info!(
-        "Loading ledger slots from {} to {}",
-        starting_slot, ending_slot
-    );
+    info!("Loading ledger slots from {starting_slot} to {ending_slot}");
     let blockstore_slots: Vec<_> = blockstore
         .rooted_slot_iterator(starting_slot)
         .map_err(|err| {
@@ -84,8 +81,8 @@ pub async fn upload_confirmed_blocks(
     let bigtable_slots = if !config.force_reupload {
         let mut bigtable_slots = vec![];
         info!(
-            "Loading list of bigtable blocks between slots {} and {}...",
-            first_blockstore_slot, last_blockstore_slot
+            "Loading list of bigtable blocks between slots {first_blockstore_slot} and \
+             {last_blockstore_slot}..."
         );
 
         let mut start_slot = first_blockstore_slot;
@@ -98,7 +95,7 @@ pub async fn upload_confirmed_blocks(
                 {
                     Ok(slots) => break slots,
                     Err(err) => {
-                        error!("get_confirmed_blocks for {} failed: {:?}", start_slot, err);
+                        error!("get_confirmed_blocks for {start_slot} failed: {err:?}");
                         // Consider exponential backoff...
                         tokio::time::sleep(Duration::from_secs(2)).await;
                     }
@@ -135,8 +132,7 @@ pub async fn upload_confirmed_blocks(
 
     if blocks_to_upload.is_empty() {
         info!(
-            "No blocks between {} and {} need to be uploaded to bigtable",
-            starting_slot, ending_slot
+            "No blocks between {starting_slot} and {ending_slot} need to be uploaded to bigtable"
         );
         return Ok(ending_slot);
     }
@@ -185,8 +181,8 @@ pub async fn upload_confirmed_blocks(
                                     }
                                     Err(err) => {
                                         warn!(
-                                            "Failed to get load confirmed block from slot {}: {:?}",
-                                            slot, err
+                                            "Failed to get load confirmed block from slot {slot}: \
+                                             {err:?}"
                                         );
                                         sender.send((slot, None))
                                     }
@@ -217,7 +213,7 @@ pub async fn upload_confirmed_blocks(
 
         let mut measure_upload = Measure::start("Upload");
         let mut num_blocks = blocks.len();
-        info!("Preparing the next {} blocks for upload", num_blocks);
+        info!("Preparing the next {num_blocks} blocks for upload");
 
         let uploads = blocks.into_iter().filter_map(|(slot, block)| match block {
             None => {
@@ -235,20 +231,20 @@ pub async fn upload_confirmed_blocks(
 
         for result in futures::future::join_all(uploads).await {
             if let Err(err) = result {
-                error!("upload_confirmed_block() join failed: {:?}", err);
+                error!("upload_confirmed_block() join failed: {err:?}");
                 failures += 1;
             } else if let Err(err) = result.unwrap() {
-                error!("upload_confirmed_block() upload failed: {:?}", err);
+                error!("upload_confirmed_block() upload failed: {err:?}");
                 failures += 1;
             }
         }
 
         measure_upload.stop();
-        info!("{} for {} blocks", measure_upload, num_blocks);
+        info!("{measure_upload} for {num_blocks} blocks");
     }
 
     measure.stop();
-    info!("{}", measure);
+    info!("{measure}");
 
     let blockstore_results = loader_threads.into_iter().map(|t| t.join());
 
@@ -263,7 +259,7 @@ pub async fn upload_confirmed_blocks(
                 blockstore_load_wallclock = max(stats.elapsed, blockstore_load_wallclock);
             }
             Err(e) => {
-                error!("error joining blockstore thread: {:?}", e);
+                error!("error joining blockstore thread: {e:?}");
                 blockstore_errors += 1;
             }
         }

+ 1 - 1
ledger/src/bigtable_upload_service.rs

@@ -110,7 +110,7 @@ impl BigTableUploadService {
             match result {
                 Ok(last_slot_uploaded) => start_slot = last_slot_uploaded.saturating_add(1),
                 Err(err) => {
-                    warn!("bigtable: upload_confirmed_blocks: {}", err);
+                    warn!("bigtable: upload_confirmed_blocks: {err}");
                     std::thread::sleep(std::time::Duration::from_secs(2));
                     if start_slot == 0 {
                         start_slot = blockstore.get_first_available_block().unwrap_or_default();

+ 1 - 2
ledger/src/bit_vec.rs

@@ -610,8 +610,7 @@ mod tests {
                 .collect();
             assert_eq!(
                 result, expected,
-                "Failed for bounds: start={:?}, end={:?}",
-                start_bound, end_bound
+                "Failed for bounds: start={start_bound:?}, end={end_bound:?}"
             );
         }
     }

+ 37 - 37
ledger/src/blockstore.rs

@@ -392,7 +392,7 @@ impl Blockstore {
 
         // Open the database
         let mut measure = Measure::start("blockstore open");
-        info!("Opening blockstore at {:?}", blockstore_path);
+        info!("Opening blockstore at {blockstore_path:?}");
         let db = Arc::new(Rocks::open(blockstore_path, options)?);
 
         let address_signatures_cf = db.column();
@@ -917,7 +917,7 @@ impl Blockstore {
                         }
                         Err(InsertDataShredError::BlockstoreError(err)) => {
                             metrics.num_data_shreds_blockstore_error += 1;
-                            error!("blockstore error: {}", err);
+                            error!("blockstore error: {err}");
                         }
                         Ok(()) => {
                             if is_repaired {
@@ -1382,11 +1382,10 @@ impl Blockstore {
         // we must retain the chain by preserving `next_slots`.
         match self.purge_slot_cleanup_chaining(slot) {
             Ok(_) => {}
-            Err(BlockstoreError::SlotUnavailable) => error!(
-                "clear_unconfirmed_slot() called on slot {} with no SlotMeta",
-                slot
-            ),
-            Err(e) => panic!("Purge database operations failed {}", e),
+            Err(BlockstoreError::SlotUnavailable) => {
+                error!("clear_unconfirmed_slot() called on slot {slot} with no SlotMeta")
+            }
+            Err(e) => panic!("Purge database operations failed {e}"),
         }
     }
 
@@ -1892,8 +1891,7 @@ impl Blockstore {
                 shred.clone().into_payload(),
             ) {
                 warn!(
-                    "Unable to store conflicting merkle root duplicate proof for {slot} \
-                     {:?} {e}",
+                    "Unable to store conflicting merkle root duplicate proof for {slot} {:?} {e}",
                     shred.erasure_set(),
                 );
             }
@@ -2040,10 +2038,10 @@ impl Blockstore {
                 .map(Cow::into_owned)
         else {
             warn!(
-                "Shred {prev_shred_id:?} indicated by the erasure meta {prev_erasure_meta:?} \
-                 is missing from blockstore. This can happen if you have recently upgraded \
-                 from a version < v1.18.13, or if blockstore cleanup has caught up to the root. \
-                 Skipping the backwards chained merkle root consistency check"
+                "Shred {prev_shred_id:?} indicated by the erasure meta {prev_erasure_meta:?} is \
+                 missing from blockstore. This can happen if you have recently upgraded from a \
+                 version < v1.18.13, or if blockstore cleanup has caught up to the root. Skipping \
+                 the backwards chained merkle root consistency check"
             );
             return true;
         };
@@ -2054,7 +2052,8 @@ impl Blockstore {
             warn!(
                 "Received conflicting chained merkle roots for slot: {slot}, shred {:?} type {:?} \
                  chains to merkle root {chained_merkle_root:?}, however previous fec set coding \
-                 shred {prev_erasure_set:?} has merkle root {merkle_root:?}. Reporting as duplicate",
+                 shred {prev_erasure_set:?} has merkle root {merkle_root:?}. Reporting as \
+                 duplicate",
                 shred.erasure_set(),
                 shred.shred_type(),
             );
@@ -2274,7 +2273,7 @@ impl Blockstore {
             Some(slot_meta),
         );
 
-        trace!("inserted shred into slot {:?} and index {:?}", slot, index);
+        trace!("inserted shred into slot {slot:?} and index {index:?}");
 
         Ok(newly_completed_data_sets)
     }
@@ -2738,8 +2737,8 @@ impl Blockstore {
                     .map(|transaction| {
                         if let Err(err) = transaction.sanitize() {
                             warn!(
-                                "Blockstore::get_block sanitize failed: {:?}, slot: {:?}, {:?}",
-                                err, slot, transaction,
+                                "Blockstore::get_block sanitize failed: {err:?}, slot: {slot:?}, \
+                                 {transaction:?}",
                             );
                         }
                         transaction
@@ -3240,9 +3239,8 @@ impl Blockstore {
             .map(|transaction| {
                 if let Err(err) = transaction.sanitize() {
                     warn!(
-                        "Blockstore::find_transaction_in_slot sanitize failed: {:?}, slot: {:?}, \
-                         {:?}",
-                        err, slot, transaction,
+                        "Blockstore::find_transaction_in_slot sanitize failed: {err:?}, slot: \
+                         {slot:?}, {transaction:?}",
                     );
                 }
                 transaction
@@ -3782,8 +3780,8 @@ impl Blockstore {
         let results = self.check_last_fec_set(slot);
         let Ok(results) = results else {
             warn!(
-                "Unable to check the last fec set for slot {slot} {bank_hash}, \
-                 marking as dead: {results:?}",
+                "Unable to check the last fec set for slot {slot} {bank_hash}, marking as dead: \
+                 {results:?}",
             );
             return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
         };
@@ -3823,7 +3821,11 @@ impl Blockstore {
         #[cfg(test)]
         const_assert_eq!(MINIMUM_INDEX, 31);
         let Some(start_index) = last_shred_index.checked_sub(MINIMUM_INDEX) else {
-            warn!("Slot {slot} has only {} shreds, fewer than the {DATA_SHREDS_PER_FEC_BLOCK} required", last_shred_index + 1);
+            warn!(
+                "Slot {slot} has only {} shreds, fewer than the {DATA_SHREDS_PER_FEC_BLOCK} \
+                 required",
+                last_shred_index + 1
+            );
             return Ok(LastFECSetCheckResults {
                 last_fec_set_merkle_root: None,
                 is_retransmitter_signed: false,
@@ -4242,7 +4244,7 @@ impl Blockstore {
                 if exit.load(Ordering::Relaxed) {
                     return Ok(i * chunk_size);
                 }
-                trace!("{:?}", chunk);
+                trace!("{chunk:?}");
                 self.set_roots(chunk.iter())?;
             }
         } else {
@@ -4280,10 +4282,7 @@ impl Blockstore {
         if root_meta.is_connected() {
             return Ok(());
         }
-        info!(
-            "Marking slot {} and any full children slots as connected",
-            root
-        );
+        info!("Marking slot {root} and any full children slots as connected");
         let mut write_batch = self.get_write_batch()?;
 
         // Mark both connected bits on the root slot so that the flags for this
@@ -5318,8 +5317,8 @@ fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
 
             if cfg!(target_os = "macos") {
                 error!(
-                    "On mac OS you may need to run |sudo launchctl limit maxfiles {} {}| first",
-                    desired_nofile, desired_nofile,
+                    "On mac OS you may need to run |sudo launchctl limit maxfiles \
+                     {desired_nofile} {desired_nofile}| first",
                 );
             }
             if enforce_ulimit_nofile {
@@ -6517,7 +6516,7 @@ pub mod tests {
         let (shreds, _) = make_many_slot_entries(start_slot, num_slots, entries_per_slot);
         blockstore.insert_shreds(shreds, None, false).unwrap();
         for slot in start_slot..start_slot + num_slots {
-            info!("Evaluating slot {}", slot);
+            info!("Evaluating slot {slot}");
             let meta = blockstore.meta(slot).unwrap().unwrap();
             assert!(meta.is_parent_connected());
             assert!(meta.is_connected());
@@ -7193,7 +7192,8 @@ pub mod tests {
                 ShredSource::Repaired,
                 &mut duplicate_shreds,
             ),
-            "Should not insert shred with 'last' flag set and index less than already existing shreds"
+            "Should not insert shred with 'last' flag set and index less than already existing \
+             shreds"
         );
         assert!(blockstore.has_duplicate_shreds_in_slot(0));
         assert_eq!(duplicate_shreds.len(), 1);
@@ -7723,11 +7723,11 @@ pub mod tests {
             .insert_shreds(vec![coding_shred.clone()], None, false)
             .expect("Insertion should succeed");
 
-        assert!(Blockstore::should_insert_coding_shred(
-            &coding_shred,
-            max_root
-        ),
-        "Inserting the same shred again should be allowed since this doesn't check for duplicate index");
+        assert!(
+            Blockstore::should_insert_coding_shred(&coding_shred, max_root),
+            "Inserting the same shred again should be allowed since this doesn't check for \
+             duplicate index"
+        );
 
         assert!(
             Blockstore::should_insert_coding_shred(&code_shreds[1], max_root),

+ 5 - 12
ledger/src/blockstore/blockstore_purge.rs

@@ -56,10 +56,7 @@ impl Blockstore {
             )
         );
         if let Err(e) = purge_result {
-            error!(
-                "Error: {:?}; Purge failed in range {:?} to {:?}",
-                e, from_slot, to_slot
-            );
+            error!("Error: {e:?}; Purge failed in range {from_slot:?} to {to_slot:?}");
         }
     }
 
@@ -104,8 +101,7 @@ impl Blockstore {
             count += 1;
             if last_print.elapsed().as_millis() > 2000 {
                 info!(
-                    "purged: {} slots rewritten: {} retain_time: {}us",
-                    count, rewritten, total_retain_us
+                    "purged: {count} slots rewritten: {rewritten} retain_time: {total_retain_us}us"
                 );
                 count = 0;
                 rewritten = 0;
@@ -178,10 +174,7 @@ impl Blockstore {
             .put_in_batch(&mut write_batch, slot, &slot_meta)?;
 
         self.write_batch(write_batch).inspect_err(|e| {
-            error!(
-                "Error: {:?} while submitting write batch for slot {:?}",
-                e, slot
-            )
+            error!("Error: {e:?} while submitting write batch for slot {slot:?}")
         })?;
         Ok(columns_purged)
     }
@@ -211,8 +204,8 @@ impl Blockstore {
         let mut write_timer = Measure::start("write_batch");
         self.write_batch(write_batch).inspect_err(|e| {
             error!(
-                "Error: {:?} while submitting write batch for purge from_slot {} to_slot {}",
-                e, from_slot, to_slot
+                "Error: {e:?} while submitting write batch for purge from_slot {from_slot} \
+                 to_slot {to_slot}"
             )
         })?;
         write_timer.stop();

+ 3 - 6
ledger/src/blockstore_cleanup_service.rs

@@ -387,7 +387,7 @@ mod tests {
         let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
         blockstore.insert_shreds(shreds, None, false).unwrap();
         first_insert.stop();
-        info!("{}", first_insert);
+        info!("{first_insert}");
 
         let mut last_purge_slot = 0;
         let mut slot = initial_slots;
@@ -400,7 +400,7 @@ mod tests {
                 let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
                 blockstore.insert_shreds(shreds, None, false).unwrap();
                 if i % 100 == 0 {
-                    info!("inserting..{} of {}", i, batches);
+                    info!("inserting..{i} of {batches}");
                 }
             }
             insert_time.stop();
@@ -414,10 +414,7 @@ mod tests {
                 10,
             );
             time.stop();
-            info!(
-                "slot: {} size: {} {} {}",
-                slot, num_slots, insert_time, time
-            );
+            info!("slot: {slot} size: {num_slots} {insert_time} {time}");
             slot += num_slots;
             num_slots *= 2;
         }

+ 1 - 2
ledger/src/blockstore_meta.rs

@@ -1093,8 +1093,7 @@ mod test {
             let result: Vec<_> = index.range((start_bound, end_bound)).collect();
             assert_eq!(
                 result, expected,
-                "Failed for bounds: start={:?}, end={:?}",
-                start_bound, end_bound
+                "Failed for bounds: start={start_bound:?}, end={end_bound:?}"
             );
         }
     }

+ 45 - 60
ledger/src/blockstore_processor.rs

@@ -120,10 +120,7 @@ fn do_get_first_error<T, Tx: SVMTransaction>(
             if first_err.is_none() {
                 first_err = Some((Err(err.clone()), *transaction.signature()));
             }
-            warn!(
-                "Unexpected validator error: {:?}, transaction: {:?}",
-                err, transaction
-            );
+            warn!("Unexpected validator error: {err:?}, transaction: {transaction:?}");
             datapoint_error!(
                 "validator_process_entry_error",
                 (
@@ -644,7 +641,7 @@ pub fn process_entries_for_tests(
         &ignored_prioritization_fee_cache,
     );
 
-    debug!("process_entries: {:?}", batch_timing);
+    debug!("process_entries: {batch_timing:?}");
     result
 }
 
@@ -1002,10 +999,7 @@ pub fn process_blockstore_from_root(
         let bank = bank_forks.read().unwrap().root_bank();
         #[cfg(feature = "dev-context-only-utils")]
         if let Some(hash_overrides) = &opts.hash_overrides {
-            info!(
-                "Will override following slots' hashes: {:#?}",
-                hash_overrides
-            );
+            info!("Will override following slots' hashes: {hash_overrides:#?}");
             bank.set_hash_overrides(hash_overrides.clone());
         }
         if opts.no_block_cost_limits {
@@ -1018,7 +1012,7 @@ pub fn process_blockstore_from_root(
         (bank.slot(), bank.hash())
     };
 
-    info!("Processing ledger from slot {}...", start_slot);
+    info!("Processing ledger from slot {start_slot}...");
     let now = Instant::now();
 
     // Ensure start_slot is rooted for correct replay; also ensure start_slot and
@@ -1035,13 +1029,13 @@ pub fn process_blockstore_from_root(
             .expect("Couldn't mark start_slot as connected during startup")
     } else {
         info!(
-            "Start slot {} isn't a root, and won't be updated due to secondary blockstore access",
-            start_slot
+            "Start slot {start_slot} isn't a root, and won't be updated due to secondary \
+             blockstore access"
         );
     }
 
     if let Ok(Some(highest_slot)) = blockstore.highest_slot() {
-        info!("ledger holds data through slot {}", highest_slot);
+        info!("ledger holds data through slot {highest_slot}");
     }
 
     let mut timing = ExecuteTimings::default();
@@ -1068,10 +1062,7 @@ pub fn process_blockstore_from_root(
         //
         // If the ledger has any data at all, the snapshot was likely taken at
         // a slot that is not within the range of ledger min/max slot(s).
-        warn!(
-            "Starting slot {} is not in Blockstore, unable to process",
-            start_slot
-        );
+        warn!("Starting slot {start_slot} is not in Blockstore, unable to process");
         (0, 0)
     };
 
@@ -1087,7 +1078,7 @@ pub fn process_blockstore_from_root(
         ("forks", bank_forks.read().unwrap().banks().len(), i64),
     );
 
-    info!("ledger processing timing: {:?}", timing);
+    info!("ledger processing timing: {timing:?}");
     {
         let bank_forks = bank_forks.read().unwrap();
         let mut bank_slots = bank_forks.banks().keys().copied().collect::<Vec<_>>();
@@ -1604,8 +1595,8 @@ fn confirm_slot_entries(
                     starting_transaction_index: entry_tx_starting_index,
                 }) {
                     warn!(
-                        "Slot {}, entry {} entry_notification_sender send failed: {:?}",
-                        slot, entry_index, err
+                        "Slot {slot}, entry {entry_index} entry_notification_sender send failed: \
+                         {err:?}"
                     );
                 }
             }
@@ -1617,12 +1608,8 @@ fn confirm_slot_entries(
         })
         .sum::<usize>();
     trace!(
-        "Fetched entries for slot {}, num_entries: {}, num_shreds: {}, num_txs: {}, slot_full: {}",
-        slot,
-        num_entries,
-        num_shreds,
-        num_txs,
-        slot_full,
+        "Fetched entries for slot {slot}, num_entries: {num_entries}, num_shreds: {num_shreds}, \
+         num_txs: {num_txs}, slot_full: {slot_full}",
     );
 
     if !skip_verification {
@@ -1653,7 +1640,7 @@ fn confirm_slot_entries(
             recyclers.clone(),
         );
         if entry_state.status() == EntryVerificationStatus::Failure {
-            warn!("Ledger proof of history failed at slot: {}", slot);
+            warn!("Ledger proof of history failed at slot: {slot}");
             return Err(BlockError::InvalidEntryHash.into());
         }
         Some(entry_state)
@@ -1830,7 +1817,7 @@ fn process_next_slots(
         let next_meta = blockstore
             .meta(*next_slot)
             .map_err(|err| {
-                warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
+                warn!("Failed to load meta for slot {next_slot}: {err:?}");
                 BlockstoreProcessorError::FailedToLoadMeta
             })?
             .unwrap();
@@ -1881,8 +1868,8 @@ fn load_frozen_forks(
     let mut root = bank_forks.read().unwrap().root();
     let max_root = std::cmp::max(root, blockstore_max_root);
     info!(
-        "load_frozen_forks() latest root from blockstore: {}, max_root: {}",
-        blockstore_max_root, max_root,
+        "load_frozen_forks() latest root from blockstore: {blockstore_max_root}, max_root: \
+         {max_root}",
     );
 
     // The total number of slots processed
@@ -2173,15 +2160,14 @@ pub fn process_single_slot(
         Ok(())
     })
     .map_err(|err| {
-        warn!("slot {} failed to verify: {}", slot, err);
+        warn!("slot {slot} failed to verify: {err}");
         if blockstore.is_primary_access() {
             blockstore
                 .set_dead_slot(slot)
                 .expect("Failed to mark slot as dead in blockstore");
         } else {
             info!(
-                "Failed slot {} won't be marked dead due to being secondary blockstore access",
-                slot
+                "Failed slot {slot} won't be marked dead due to being secondary blockstore access"
             );
         }
         err
@@ -2191,15 +2177,21 @@ pub fn process_single_slot(
         result?
     }
 
-    let block_id = blockstore.check_last_fec_set_and_get_block_id(slot, bank.hash(), &bank.feature_set)
+    let block_id = blockstore
+        .check_last_fec_set_and_get_block_id(slot, bank.hash(), &bank.feature_set)
         .inspect_err(|err| {
-            warn!("slot {} failed last fec set checks: {}", slot, err);
+            warn!("slot {slot} failed last fec set checks: {err}");
             if blockstore.is_primary_access() {
-                blockstore.set_dead_slot(slot).expect("Failed to mark slot as dead in blockstore");
+                blockstore
+                    .set_dead_slot(slot)
+                    .expect("Failed to mark slot as dead in blockstore");
             } else {
-                info!("Failed last fec set checks slot {slot} won't be marked dead due to being secondary blockstore access");
+                info!(
+                    "Failed last fec set checks slot {slot} won't be marked dead due to being \
+                     secondary blockstore access"
+                );
             }
-    })?;
+        })?;
     bank.set_block_id(block_id);
     bank.freeze(); // all banks handled by this routine are created from complete slots
 
@@ -2264,11 +2256,7 @@ impl TransactionStatusSender {
                 transaction_indexes,
             }))
         {
-            trace!(
-                "Slot {} transaction_status send batch failed: {:?}",
-                slot,
-                e
-            );
+            trace!("Slot {slot} transaction_status send batch failed: {e:?}");
         }
     }
 
@@ -2278,10 +2266,7 @@ impl TransactionStatusSender {
             .send(TransactionStatusMessage::Freeze(bank.clone()))
         {
             let slot = bank.slot();
-            warn!(
-                "Slot {slot} transaction_status send freeze message failed: {:?}",
-                e
-            );
+            warn!("Slot {slot} transaction_status send freeze message failed: {e:?}");
         }
     }
 }
@@ -2608,7 +2593,7 @@ pub mod tests {
 
         // Create a new ledger with slot 0 full of ticks
         let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
 
         let blockstore = Blockstore::open(ledger_path.path()).unwrap();
 
@@ -2681,7 +2666,7 @@ pub mod tests {
 
         // Create a new ledger with slot 0 full of ticks
         let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
         let mut last_entry_hash = blockhash;
 
         /*
@@ -2722,8 +2707,8 @@ pub mod tests {
             last_slot1_entry_hash,
         );
 
-        info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
-        info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
+        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
+        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");
 
         blockstore.set_roots([0, 1, 4].iter()).unwrap();
 
@@ -2760,7 +2745,7 @@ pub mod tests {
 
         // Create a new ledger with slot 0 full of ticks
         let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
         let mut last_entry_hash = blockhash;
 
         /*
@@ -2801,8 +2786,8 @@ pub mod tests {
             last_slot1_entry_hash,
         );
 
-        info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
-        info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
+        info!("last_fork1_entry.hash: {last_fork1_entry_hash:?}");
+        info!("last_fork2_entry.hash: {last_fork2_entry_hash:?}");
 
         blockstore.set_roots([0, 1].iter()).unwrap();
 
@@ -2848,7 +2833,7 @@ pub mod tests {
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let ticks_per_slot = genesis_config.ticks_per_slot;
         let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
 
         /*
                    slot 0
@@ -2895,7 +2880,7 @@ pub mod tests {
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let ticks_per_slot = genesis_config.ticks_per_slot;
         let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
 
         /*
                    slot 0
@@ -2955,7 +2940,7 @@ pub mod tests {
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let ticks_per_slot = genesis_config.ticks_per_slot;
         let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
 
         /*
                    slot 0
@@ -3116,7 +3101,7 @@ pub mod tests {
         genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
         let (ledger_path, mut last_entry_hash) =
             create_new_tmp_ledger_auto_delete!(&genesis_config);
-        debug!("ledger_path: {:?}", ledger_path);
+        debug!("ledger_path: {ledger_path:?}");
 
         let deducted_from_mint = 3;
         let mut entries = vec![];
@@ -4341,7 +4326,7 @@ pub mod tests {
                     })
                 })
                 .collect();
-            info!("paying iteration {}", i);
+            info!("paying iteration {i}");
             process_entries_for_tests_without_scheduler(&bank, entries).expect("paying failed");
 
             let entries: Vec<_> = (0..NUM_TRANSFERS)
@@ -4364,7 +4349,7 @@ pub mod tests {
                 })
                 .collect();
 
-            info!("refunding iteration {}", i);
+            info!("refunding iteration {i}");
             process_entries_for_tests_without_scheduler(&bank, entries).expect("refunding failed");
 
             // advance to next block

+ 1 - 4
ledger/src/leader_schedule_cache.rs

@@ -176,10 +176,7 @@ impl LeaderScheduleCache {
         // Forbid asking for slots in an unconfirmed epoch
         let bank_epoch = self.epoch_schedule.get_epoch_and_slot_index(slot).0;
         if bank_epoch > *self.max_epoch.read().unwrap() {
-            debug!(
-                "Requested leader in slot: {} of unconfirmed epoch: {}",
-                slot, bank_epoch
-            );
+            debug!("Requested leader in slot: {slot} of unconfirmed epoch: {bank_epoch}");
             return None;
         }
         if cache_result.is_some() {

+ 13 - 13
ledger/src/sigverify_shreds.rs

@@ -53,14 +53,14 @@ pub fn verify_shred_cpu(
     let Some(slot) = shred::layout::get_slot(shred) else {
         return false;
     };
-    trace!("slot {}", slot);
+    trace!("slot {slot}");
     let Some(pubkey) = slot_leaders.get(&slot) else {
         return false;
     };
     let Some(signature) = shred::layout::get_signature(shred) else {
         return false;
     };
-    trace!("signature {}", signature);
+    trace!("signature {signature}");
     let Some(data) = shred::layout::get_signed_data(shred) else {
         return false;
     };
@@ -87,7 +87,7 @@ fn verify_shreds_cpu(
     cache: &RwLock<LruCache>,
 ) -> Vec<Vec<u8>> {
     let packet_count = count_packets_in_batches(batches);
-    debug!("CPU SHRED ECDSA for {}", packet_count);
+    debug!("CPU SHRED ECDSA for {packet_count}");
     let rv = thread_pool.install(|| {
         batches
             .into_par_iter()
@@ -156,8 +156,8 @@ fn slot_key_data_for_gpu(
         offsets.push(key_offsets[&slot] as u32);
     }
     trace!("keyvec.len: {}", keyvec.len());
-    trace!("keyvec: {:?}", keyvec);
-    trace!("offsets: {:?}", offsets);
+    trace!("keyvec: {keyvec:?}");
+    trace!("offsets: {offsets:?}");
     (keyvec, offsets)
 }
 
@@ -317,7 +317,7 @@ pub fn verify_shreds_gpu(
         num: batch.len() as u32,
     }));
     let num_packets = elems.iter().map(|elem| elem.num).sum();
-    trace!("Starting verify num packets: {}", num_packets);
+    trace!("Starting verify num packets: {num_packets}");
     trace!("elem len: {}", elems.len() as u32);
     trace!("packet sizeof: {}", size_of::<Packet>() as u32);
     const USE_NON_DEFAULT_STREAM: u8 = 1;
@@ -336,11 +336,11 @@ pub fn verify_shreds_gpu(
             USE_NON_DEFAULT_STREAM,
         );
         if res != 0 {
-            trace!("RETURN!!!: {}", res);
+            trace!("RETURN!!!: {res}");
         }
     }
     trace!("done verify");
-    trace!("out buf {:?}", out);
+    trace!("out buf {out:?}");
 
     // Each shred has exactly one signature.
     let v_sig_lens = batches
@@ -364,7 +364,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut PacketRefMut) {
         "packet is not large enough for a signature"
     );
     let signature = keypair.sign_message(msg.as_ref());
-    trace!("signature {:?}", signature);
+    trace!("signature {signature:?}");
     let mut buffer = packet
         .data(..)
         .expect("packet should not be discarded")
@@ -376,7 +376,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut PacketRefMut) {
 #[cfg(test)]
 fn sign_shreds_cpu(thread_pool: &ThreadPool, keypair: &Keypair, batches: &mut [PacketBatch]) {
     let packet_count = count_packets_in_batches(batches);
-    debug!("CPU SHRED ECDSA for {}", packet_count);
+    debug!("CPU SHRED ECDSA for {packet_count}");
     thread_pool.install(|| {
         batches.par_iter_mut().for_each(|batch| {
             batch
@@ -441,7 +441,7 @@ fn sign_shreds_gpu(
             .map(move |offset| Some(offset? + shift))
     };
     let offset = pinned_keypair.len() + merkle_roots.len();
-    trace!("offset: {}", offset);
+    trace!("offset: {offset}");
     let (signature_offsets, msg_start_offsets, msg_sizes) =
         shred_gpu_offsets(offset, batches, merkle_roots_offsets, recycler_cache);
     let total_sigs = signature_offsets.len();
@@ -469,7 +469,7 @@ fn sign_shreds_gpu(
         num: batch.len() as u32,
     }));
     let num_packets = elems.iter().map(|elem| elem.num).sum();
-    trace!("Starting verify num packets: {}", num_packets);
+    trace!("Starting verify num packets: {num_packets}");
     trace!("elem len: {}", elems.len() as u32);
     trace!("packet sizeof: {}", size_of::<Packet>() as u32);
     const USE_NON_DEFAULT_STREAM: u8 = 1;
@@ -488,7 +488,7 @@ fn sign_shreds_gpu(
             USE_NON_DEFAULT_STREAM,
         );
         if res != 0 {
-            trace!("RETURN!!!: {}", res);
+            trace!("RETURN!!!: {res}");
         }
     }
     trace!("done sign");

+ 13 - 14
ledger/src/use_snapshot_archives_at_startup.rs

@@ -27,21 +27,20 @@ pub mod cli {
     pub const NAME: &str = "use_snapshot_archives_at_startup";
     pub const LONG_ARG: &str = "use-snapshot-archives-at-startup";
     pub const HELP: &str = "When should snapshot archives be used at startup?";
+    #[rustfmt::skip]
     pub const LONG_HELP: &str = "At startup, when should snapshot archives be extracted \
-        versus using what is already on disk? \
-        \nSpecifying \"always\" will always startup by extracting snapshot archives \
-        and disregard any snapshot-related state already on disk. \
-        Note that starting up from snapshot archives will incur the runtime costs \
-        associated with extracting the archives and rebuilding the local state. \
-        \nSpecifying \"never\" will never startup from snapshot archives \
-        and will only use snapshot-related state already on disk. \
-        If there is no state already on disk, startup will fail. \
-        Note, this will use the latest state available, \
-        which may be newer than the latest snapshot archive. \
-        \nSpecifying \"when-newest\" will use snapshot-related state \
-        already on disk unless there are snapshot archives newer than it. \
-        This can happen if a new snapshot archive is downloaded \
-        while the node is stopped.";
+        versus using what is already on disk?\n\
+        Specifying \"always\" will always startup by extracting snapshot archives and disregard \
+        any snapshot-related state already on disk. Note that starting up from snapshot archives \
+        will incur the runtime costs associated with extracting the archives and rebuilding the \
+        local state.\n\
+        Specifying \"never\" will never startup from snapshot archives and will only use \
+        snapshot-related state already on disk. If there is no state already on disk, startup will \
+        fail. Note, this will use the latest state available, which may be newer than the latest \
+        snapshot archive.\n\
+        Specifying \"when-newest\" will use snapshot-related state already on disk unless there \
+        are snapshot archives newer than it. This can happen if a new snapshot archive is \
+        downloaded while the node is stopped.";
 
     pub const POSSIBLE_VALUES: &[&str] = UseSnapshotArchivesAtStartup::VARIANTS;
 

+ 2 - 2
storage-bigtable/src/access_token.rs

@@ -86,7 +86,7 @@ impl AccessToken {
         credentials: &Credentials,
         scope: &Scope,
     ) -> Result<(Token, Instant), String> {
-        info!("Requesting token for {:?} scope", scope);
+        info!("Requesting token for {scope:?} scope");
         let claims = JwtClaims::new(
             credentials.iss(),
             scope,
@@ -136,7 +136,7 @@ impl AccessToken {
                         let mut token_w = this.token.write().unwrap();
                         *token_w = new_token;
                     }
-                    Err(err) => error!("Failed to fetch new token: {}", err),
+                    Err(err) => error!("Failed to fetch new token: {err}"),
                 },
                 Err(_timeout) => {
                     warn!("Token refresh timeout")

+ 5 - 5
storage-bigtable/src/bigtable.rs

@@ -146,7 +146,7 @@ impl BigTableConnection {
     ) -> Result<Self> {
         match std::env::var("BIGTABLE_EMULATOR_HOST") {
             Ok(endpoint) => {
-                info!("Connecting to bigtable emulator at {}", endpoint);
+                info!("Connecting to bigtable emulator at {endpoint}");
                 Self::new_for_emulator(
                     instance_name,
                     app_profile_id,
@@ -259,7 +259,7 @@ impl BigTableConnection {
                                 .insert("authorization", authorization_header);
                         }
                         Err(err) => {
-                            warn!("Failed to set authorization header: {}", err);
+                            warn!("Failed to set authorization header: {err}");
                         }
                     }
                 }
@@ -366,7 +366,7 @@ impl<F: FnMut(Request<()>) -> InterceptedRequestResult> BigTable<F> {
             for (i, mut chunk) in res.chunks.into_iter().enumerate() {
                 // The comments for `read_rows_response::CellChunk` provide essential details for
                 // understanding how the below decoding works...
-                trace!("chunk {}: {:?}", i, chunk);
+                trace!("chunk {i}: {chunk:?}");
 
                 // Starting a new row?
                 if !chunk.row_key.is_empty() {
@@ -934,7 +934,7 @@ where
 
     let data = decompress(value)?;
     T::decode(&data[..]).map_err(|err| {
-        warn!("Failed to deserialize {}/{}: {}", table, key, err);
+        warn!("Failed to deserialize {table}/{key}: {err}");
         Error::ObjectCorrupt(format!("{table}/{key}"))
     })
 }
@@ -955,7 +955,7 @@ where
 
     let data = decompress(value)?;
     bincode::deserialize(&data).map_err(|err| {
-        warn!("Failed to deserialize {}/{}: {}", table, key, err);
+        warn!("Failed to deserialize {table}/{key}: {err}");
         Error::ObjectCorrupt(format!("{table}/{key}"))
     })
 }

+ 25 - 62
storage-bigtable/src/lib.rs

@@ -105,7 +105,7 @@ fn key_to_slot(key: &str) -> Option<Slot> {
         Ok(slot) => Some(slot),
         Err(err) => {
             // bucket data is probably corrupt
-            warn!("Failed to parse object key as a slot: {}: {}", key, err);
+            warn!("Failed to parse object key as a slot: {key}: {err}");
             None
         }
     }
@@ -529,11 +529,7 @@ impl LedgerStorage {
     /// start_slot: slot to start the search from (inclusive)
     /// limit: stop after this many slots have been found
     pub async fn get_confirmed_blocks(&self, start_slot: Slot, limit: usize) -> Result<Vec<Slot>> {
-        trace!(
-            "LedgerStorage::get_confirmed_blocks request received: {:?} {:?}",
-            start_slot,
-            limit
-        );
+        trace!("LedgerStorage::get_confirmed_blocks request received: {start_slot:?} {limit:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let blocks = bigtable
@@ -552,10 +548,7 @@ impl LedgerStorage {
         &self,
         slots: &'a [Slot],
     ) -> Result<impl Iterator<Item = (Slot, ConfirmedBlock)> + 'a> {
-        trace!(
-            "LedgerStorage::get_confirmed_blocks_with_data request received: {:?}",
-            slots
-        );
+        trace!("LedgerStorage::get_confirmed_blocks_with_data request received: {slots:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let row_keys = slots.iter().copied().map(slot_to_blocks_key);
@@ -579,10 +572,7 @@ impl LedgerStorage {
 
     /// Fetch the confirmed block from the desired slot
     pub async fn get_confirmed_block(&self, slot: Slot) -> Result<ConfirmedBlock> {
-        trace!(
-            "LedgerStorage::get_confirmed_block request received: {:?}",
-            slot
-        );
+        trace!("LedgerStorage::get_confirmed_block request received: {slot:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let block_cell_data = bigtable
@@ -605,10 +595,7 @@ impl LedgerStorage {
 
     /// Does the confirmed block exist in the Bigtable
     pub async fn confirmed_block_exists(&self, slot: Slot) -> Result<bool> {
-        trace!(
-            "LedgerStorage::confirmed_block_exists request received: {:?}",
-            slot
-        );
+        trace!("LedgerStorage::confirmed_block_exists request received: {slot:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
 
@@ -621,10 +608,7 @@ impl LedgerStorage {
 
     /// Fetches a vector of block entries via a multirow fetch
     pub async fn get_entries(&self, slot: Slot) -> Result<impl Iterator<Item = EntrySummary>> {
-        trace!(
-            "LedgerStorage::get_block_entries request received: {:?}",
-            slot
-        );
+        trace!("LedgerStorage::get_block_entries request received: {slot:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let entry_cell_data = bigtable
@@ -639,10 +623,7 @@ impl LedgerStorage {
     }
 
     pub async fn get_signature_status(&self, signature: &Signature) -> Result<TransactionStatus> {
-        trace!(
-            "LedgerStorage::get_signature_status request received: {:?}",
-            signature
-        );
+        trace!("LedgerStorage::get_signature_status request received: {signature:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let transaction_info = bigtable
@@ -660,10 +641,7 @@ impl LedgerStorage {
         &self,
         signatures: &[Signature],
     ) -> Result<Vec<ConfirmedTransactionWithStatusMeta>> {
-        trace!(
-            "LedgerStorage::get_confirmed_transactions request received: {:?}",
-            signatures
-        );
+        trace!("LedgerStorage::get_confirmed_transactions request received: {signatures:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
 
@@ -700,8 +678,8 @@ impl LedgerStorage {
                         .and_then(|tx_with_meta| {
                             if tx_with_meta.transaction_signature().to_string() != *signature {
                                 warn!(
-                                    "Transaction info or confirmed block for {} is corrupt",
-                                    signature
+                                    "Transaction info or confirmed block for {signature} is \
+                                     corrupt"
                                 );
                                 None
                             } else {
@@ -722,10 +700,7 @@ impl LedgerStorage {
         &self,
         signature: &Signature,
     ) -> Result<Option<ConfirmedTransactionWithStatusMeta>> {
-        trace!(
-            "LedgerStorage::get_confirmed_transaction request received: {:?}",
-            signature
-        );
+        trace!("LedgerStorage::get_confirmed_transaction request received: {signature:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
 
@@ -743,15 +718,12 @@ impl LedgerStorage {
         match block.transactions.into_iter().nth(index as usize) {
             None => {
                 // report this somewhere actionable?
-                warn!("Transaction info for {} is corrupt", signature);
+                warn!("Transaction info for {signature} is corrupt");
                 Ok(None)
             }
             Some(tx_with_meta) => {
                 if tx_with_meta.transaction_signature() != signature {
-                    warn!(
-                        "Transaction info or confirmed block for {} is corrupt",
-                        signature
-                    );
+                    warn!("Transaction info or confirmed block for {signature} is corrupt");
                     Ok(None)
                 } else {
                     Ok(Some(ConfirmedTransactionWithStatusMeta {
@@ -782,10 +754,7 @@ impl LedgerStorage {
             u32, /*slot index*/
         )>,
     > {
-        trace!(
-            "LedgerStorage::get_confirmed_signatures_for_address request received: {:?}",
-            address
-        );
+        trace!("LedgerStorage::get_confirmed_signatures_for_address request received: {address:?}");
         self.stats.increment_num_queries();
         let mut bigtable = self.connection.client();
         let address_prefix = format!("{address}/");
@@ -919,10 +888,7 @@ impl LedgerStorage {
         slot: Slot,
         confirmed_block: VersionedConfirmedBlock,
     ) -> Result<()> {
-        trace!(
-            "LedgerStorage::upload_confirmed_block request received: {:?}",
-            slot
-        );
+        trace!("LedgerStorage::upload_confirmed_block request received: {slot:?}");
         self.upload_confirmed_block_with_entries(
             slot,
             VersionedConfirmedBlockWithEntries {
@@ -938,10 +904,7 @@ impl LedgerStorage {
         slot: Slot,
         confirmed_block: VersionedConfirmedBlockWithEntries,
     ) -> Result<()> {
-        trace!(
-            "LedgerStorage::upload_confirmed_block_with_entries request received: {:?}",
-            slot
-        );
+        trace!("LedgerStorage::upload_confirmed_block_with_entries request received: {slot:?}");
         let mut by_addr: HashMap<&Pubkey, Vec<TransactionByAddrInfo>> = HashMap::new();
         let VersionedConfirmedBlockWithEntries {
             block: confirmed_block,
@@ -1159,20 +1122,19 @@ impl LedgerStorage {
                     }
                     Some(Ok(fetched_tx_info)) => {
                         warn!(
-                            "skipped tx row {} because the bigtable entry ({:?}) did not match to {:?}",
-                            signature,
-                            fetched_tx_info,
-                            &expected_tx_info,
+                            "skipped tx row {} because the bigtable entry ({:?}) did not match to \
+                             {:?}",
+                            signature, fetched_tx_info, &expected_tx_info,
                         );
                     }
                     Some(Err(err)) => {
                         warn!(
-                            "skipped tx row {} because the bigtable entry was corrupted: {:?}",
-                            signature, err
+                            "skipped tx row {signature} because the bigtable entry was corrupted: \
+                             {err:?}"
                         );
                     }
                     None => {
-                        warn!("skipped tx row {} because it was not found", signature);
+                        warn!("skipped tx row {signature} because it was not found");
                     }
                 }
             }
@@ -1213,12 +1175,13 @@ impl LedgerStorage {
         }
 
         info!(
-            "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows, {} entry row",
+            "{}deleted ledger data for slot {}: {} transaction rows, {} address slot rows, {} \
+             entry row",
             if dry_run { "[dry run] " } else { "" },
             slot,
             tx_deletion_rows.len(),
             address_slot_rows.len(),
-            if entries_exist { "with" } else {"WITHOUT"}
+            if entries_exist { "with" } else { "WITHOUT" }
         );
 
         Ok(())