
Finish clippy uninlined_format_args cleanup (#7825)

- Run cargo clippy with Rust toolchain 1.88.0
- Run cargo fmt with format_strings = true
- Manually fix up one or two format instances
steviez · 2 months ago
Parent
Current commit
fd07ce286a
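
For context, a minimal before/after sketch (not part of the commit; the function and argument names are hypothetical) of the rewrite driven by clippy's `uninlined_format_args` lint, together with the long-literal wrapping that rustfmt's `format_strings = true` option produces:

    use log::warn;

    fn log_retry(response: &std::io::Error, num_retries: u32) {
        // Before: arguments passed positionally after the format string;
        // this form is flagged by clippy::uninlined_format_args.
        warn!(
            "get_slot_height failure: {:?}. remaining retries {}",
            response, num_retries
        );

        // After: the arguments are inlined into the format string. With
        // rustfmt's `format_strings = true`, over-long literals are split
        // with a trailing `\`, which is the style seen throughout this diff.
        warn!("get_slot_height failure: {response:?}. remaining retries {num_retries}");
    }

Arguments that are more than a bare identifier (for example `e.raw_os_error()` in core/src/mock_alpenglow_consensus.rs) cannot be captured inline, which is why a few call sites below keep the positional form.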

+ 43 - 55
accounts-cluster-bench/src/main.rs

@@ -61,10 +61,7 @@ pub fn poll_slot_height(client: &RpcClient) -> Slot {
             return slot;
         } else {
             num_retries -= 1;
-            warn!(
-                "get_slot_height failure: {:?}. remaining retries {}",
-                response, num_retries
-            );
+            warn!("get_slot_height failure: {response:?}. remaining retries {num_retries}");
         }
         if num_retries == 0 {
             panic!("failed to get_slot_height(), rpc node down?")
@@ -81,10 +78,7 @@ pub fn poll_get_latest_blockhash(client: &RpcClient) -> Option<Hash> {
             return Some(blockhash);
         } else {
             num_retries -= 1;
-            warn!(
-                "get_latest_blockhash failure: {:?}. remaining retries {}",
-                response, num_retries
-            );
+            warn!("get_latest_blockhash failure: {response:?}. remaining retries {num_retries}");
         }
         if num_retries == 0 {
             panic!("failed to get_latest_blockhash(), rpc node down?")
@@ -102,10 +96,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O
             return (Some(fee), message.recent_blockhash);
         } else {
             num_retries -= 1;
-            warn!(
-                "get_fee_for_message failure: {:?}. remaining retries {}",
-                response, num_retries
-            );
+            warn!("get_fee_for_message failure: {response:?}. remaining retries {num_retries}");
 
             let blockhash = poll_get_latest_blockhash(client).expect("blockhash");
             message.recent_blockhash = blockhash;
@@ -119,7 +110,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O
 
 fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> bool {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
-    info!("starting balance {}", starting_balance);
+    info!("starting balance {starting_balance}");
 
     if starting_balance < desired_balance {
         let airdrop_amount = desired_balance - starting_balance;
@@ -143,7 +134,7 @@ fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> b
         let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
             panic!("airdrop error {e}");
         });
-        info!("current balance {}...", current_balance);
+        info!("current balance {current_balance}...");
 
         if current_balance - starting_balance != airdrop_amount {
             info!(
@@ -396,10 +387,10 @@ fn process_get_multiple_accounts(
                 stats.total_errors_time_us += rpc_time.as_us();
                 stats.errors += 1;
                 if last_error.elapsed().as_secs() > 2 {
-                    info!("error: {:?}", e);
+                    info!("error: {e:?}");
                     *last_error = Instant::now();
                 }
-                debug!("error: {:?}", e);
+                debug!("error: {e:?}");
             }
         }
     }
@@ -519,7 +510,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_account_info error: {:?}", e);
+                            info!("get_account_info error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -545,7 +536,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_block error: {:?}", e);
+                            info!("get_block error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -569,7 +560,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_blocks error: {:?}", e);
+                            info!("get_blocks error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -588,7 +579,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_first_available_block error: {:?}", e);
+                            info!("get_first_available_block error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -607,7 +598,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_slot error: {:?}", e);
+                            info!("get_slot error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -626,7 +617,7 @@ fn run_rpc_bench_loop(
                         stats.total_errors_time_us += rpc_time.as_us();
                         stats.errors += 1;
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get_token_supply error: {:?}", e);
+                            info!("get_token_supply error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -659,7 +650,7 @@ fn run_rpc_bench_loop(
                         stats.errors += 1;
                         stats.total_errors_time_us += rpc_time.as_us();
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get-program-accounts error: {:?}", e);
+                            info!("get-program-accounts error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -679,7 +670,7 @@ fn run_rpc_bench_loop(
                         stats.errors += 1;
                         stats.total_errors_time_us += rpc_time.as_us();
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get-token-accounts-by-delegate error: {:?}", e);
+                            info!("get-token-accounts-by-delegate error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -699,7 +690,7 @@ fn run_rpc_bench_loop(
                         stats.errors += 1;
                         stats.total_errors_time_us += rpc_time.as_us();
                         if last_error.elapsed().as_secs() > 2 {
-                            info!("get-token-accounts-by-owner error: {:?}", e);
+                            info!("get-token-accounts-by-owner error: {e:?}");
                             last_error = Instant::now();
                         }
                     }
@@ -781,7 +772,7 @@ fn make_rpc_bench_threads(
                 let transaction_signature_tracker = transaction_signature_tracker.clone();
                 let mint = *mint;
                 Builder::new()
-                    .name(format!("rpc-bench-{}", thread))
+                    .name(format!("rpc-bench-{thread}"))
                     .spawn(move || {
                         start_bench.wait();
                         run_rpc_bench_loop(
@@ -853,7 +844,7 @@ fn run_accounts_bench(
     let transaction_signature_tracker =
         TransactionSignatureTracker(Arc::new(RwLock::new(VecDeque::with_capacity(5000))));
 
-    info!("Starting balance(s): {:?}", balances);
+    info!("Starting balance(s): {balances:?}");
 
     let executor = TransactionExecutor::new_with_rpc_client(client.clone());
 
@@ -917,10 +908,7 @@ fn run_accounts_bench(
                 }
                 last_balance = Instant::now();
                 if *balance < lamports * 2 {
-                    info!(
-                        "Balance {} is less than needed: {}, doing airdrop...",
-                        balance, lamports
-                    );
+                    info!("Balance {balance} is less than needed: {lamports}, doing airdrop...");
                     if !airdrop_lamports(&client, payer_keypairs[i], lamports * 100_000) {
                         warn!("failed airdrop, exiting");
                         return;
@@ -934,7 +922,7 @@ fn run_accounts_bench(
         if sigs_len < batch_size {
             let num_to_create = batch_size - sigs_len;
             if num_to_create >= payer_keypairs.len() {
-                info!("creating {} new", num_to_create);
+                info!("creating {num_to_create} new");
                 let chunk_size = num_to_create / payer_keypairs.len();
                 if chunk_size > 0 {
                     for (i, keypair) in payer_keypairs.iter().enumerate() {
@@ -1018,8 +1006,9 @@ fn run_accounts_bench(
             || max_accounts_met
         {
             info!(
-                "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
-                total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
+                "total_accounts_created: {total_accounts_created} total_accounts_closed: \
+                 {total_accounts_closed} tx_sent_count: {tx_sent_count} loop_count: {count} \
+                 balance(s): {balances:?}"
             );
             last_log = Instant::now();
         }
@@ -1061,9 +1050,9 @@ fn run_accounts_bench(
                     (max_created_seed - max_closed_seed) as usize,
                 );
                 if num_to_close >= payer_keypairs.len() {
-                    info!("closing {} accounts", num_to_close);
+                    info!("closing {num_to_close} accounts");
                     let chunk_size = num_to_close / payer_keypairs.len();
-                    info!("{:?} chunk_size", chunk_size);
+                    info!("{chunk_size:?} chunk_size");
                     if chunk_size > 0 {
                         for (i, keypair) in payer_keypairs.iter().enumerate() {
                             let txs: Vec<_> = (0..chunk_size)
@@ -1101,8 +1090,8 @@ fn run_accounts_bench(
             count += 1;
             if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
                 info!(
-                    "total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
-                    total_accounts_closed, tx_sent_count, count, balances
+                    "total_accounts_closed: {total_accounts_closed} tx_sent_count: \
+                     {tx_sent_count} loop_count: {count} balance(s): {balances:?}"
                 );
                 last_log = Instant::now();
             }
@@ -1150,8 +1139,8 @@ fn main() {
                 .validator(is_url_or_moniker)
                 .conflicts_with("entrypoint")
                 .help(
-                    "URL for Solana's JSON RPC or moniker (or their first letter): \
-                       [mainnet-beta, testnet, devnet, localhost]",
+                    "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \
+                     testnet, devnet, localhost]",
                 ),
         )
         .arg(
@@ -1206,10 +1195,9 @@ fn main() {
                 .takes_value(true)
                 .value_name("BYTES")
                 .help(
-                    "Every `n` batches, create a batch of close transactions for \
-                     the earliest remaining batch of accounts created. \
-                     Note: Should be > 1 to avoid situations where the close \
-                     transactions will be submitted before the corresponding \
+                    "Every `n` batches, create a batch of close transactions for the earliest \
+                     remaining batch of accounts created. Note: Should be > 1 to avoid situations \
+                     where the close transactions will be submitted before the corresponding \
                      create transactions have been confirmed",
                 ),
         )
@@ -1232,7 +1220,10 @@ fn main() {
                 .long("max-accounts")
                 .takes_value(true)
                 .value_name("NUM_ACCOUNTS")
-                .help("Halt after client has created this number of accounts. Does not count closed accounts."),
+                .help(
+                    "Halt after client has created this number of accounts. Does not count closed \
+                     accounts.",
+                ),
         )
         .arg(
             Arg::with_name("check_gossip")
@@ -1273,10 +1264,7 @@ fn main() {
                 .takes_value(true)
                 .value_name("RPC_BENCH_TYPE(S)")
                 .multiple(true)
-                .requires_ifs(&[
-                    ("supply", "mint"),
-                    ("token-accounts-by-owner", "mint"),
-                ])
+                .requires_ifs(&[("supply", "mint"), ("token-accounts-by-owner", "mint")])
                 .help("Spawn a thread which calls a specific RPC method in a loop to benchmark it"),
         )
         .get_matches();
@@ -1333,7 +1321,7 @@ fn main() {
                 Some(
                     solana_net_utils::get_cluster_shred_version(&entrypoint_addr).unwrap_or_else(
                         |err| {
-                            eprintln!("Failed to get shred version: {}", err);
+                            eprintln!("Failed to get shred version: {err}");
                             exit(1);
                         },
                     ),
@@ -1344,7 +1332,7 @@ fn main() {
         };
 
         let rpc_addr = if !skip_gossip {
-            info!("Finding cluster entry: {:?}", entrypoint_addr);
+            info!("Finding cluster entry: {entrypoint_addr:?}");
             let (gossip_nodes, _validators) = discover(
                 None, // keypair
                 Some(&entrypoint_addr),
@@ -1364,7 +1352,7 @@ fn main() {
             info!("done found {} nodes", gossip_nodes.len());
             gossip_nodes[0].rpc().unwrap()
         } else {
-            info!("Using {:?} as the RPC address", entrypoint_addr);
+            info!("Using {entrypoint_addr:?} as the RPC address");
             entrypoint_addr
         };
 
@@ -1498,7 +1486,7 @@ pub mod test {
         );
         let post_txs = client.get_transaction_count().unwrap();
         start.stop();
-        info!("{} pre {} post {}", start, pre_txs, post_txs);
+        info!("{start} pre {pre_txs} post {post_txs}");
     }
 
     #[test]
@@ -1548,7 +1536,7 @@ pub mod test {
         );
         let post_txs = client.get_transaction_count().unwrap();
         start.stop();
-        info!("{} pre {} post {}", start, pre_txs, post_txs);
+        info!("{start} pre {pre_txs} post {post_txs}");
     }
 
     #[test]
@@ -1643,6 +1631,6 @@ pub mod test {
             0,
         );
         start.stop();
-        info!("{}", start);
+        info!("{start}");
     }
 }

+ 13 - 8
banking-bench/src/main.rs

@@ -377,10 +377,7 @@ fn main() {
         .iter()
         .map(|packets_for_single_iteration| packets_for_single_iteration.transactions.len() as u64)
         .sum();
-    info!(
-        "worker threads: {} txs: {}",
-        block_production_num_workers, total_num_transactions
-    );
+    info!("worker threads: {block_production_num_workers} txs: {total_num_transactions}");
 
     // fund all the accounts
     all_packets.iter().for_each(|packets_for_single_iteration| {
@@ -483,7 +480,7 @@ fn main() {
     let collector = solana_pubkey::new_rand();
     let mut total_sent = 0;
     for current_iteration_index in 0..iterations {
-        trace!("RUNNING ITERATION {}", current_iteration_index);
+        trace!("RUNNING ITERATION {current_iteration_index}");
         let now = Instant::now();
         let mut sent = 0;
 
@@ -592,10 +589,18 @@ fn main() {
         .unwrap()
         .working_bank()
         .transaction_count();
-    debug!("processed: {} base: {}", txs_processed, base_tx_count);
+    debug!("processed: {txs_processed} base: {base_tx_count}");
 
-    eprintln!("[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, tx_total_us: {}]",
-            total_sent, base_tx_count, txs_processed, (txs_processed - base_tx_count), total_us, tx_total_us);
+    eprintln!(
+        "[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, \
+         tx_total_us: {}]",
+        total_sent,
+        base_tx_count,
+        txs_processed,
+        (txs_processed - base_tx_count),
+        total_us,
+        tx_total_us
+    );
 
     eprintln!(
         "{{'name': 'banking_bench_total', 'median': '{:.2}'}}",

+ 3 - 3
core/src/banking_stage.rs

@@ -435,9 +435,9 @@ impl BankingStage {
             }
 
             info!(
-                "Spawning new banking stage non-vote threads with block-production-method: {:?} \
-                transaction-structure: {:?} num-workers: {}",
-                block_production_method, transaction_struct, num_workers
+                "Spawning new banking stage non-vote threads with block-production-method: \
+                 {block_production_method:?} transaction-structure: {transaction_struct:?} \
+                 num-workers: {num_workers}"
             );
             context.non_vote_exit_signal.store(false, Ordering::Relaxed);
             Self::new_central_scheduler(

+ 6 - 2
core/src/mock_alpenglow_consensus.rs

@@ -310,7 +310,11 @@ impl MockAlpenglowConsensus {
                             0 // no packets received
                         }
                         _ => {
-                            error!("Got error {:?} in mock alpenglow RX socket operation, exiting thread", e.raw_os_error());
+                            error!(
+                                "Got error {:?} in mock alpenglow RX socket operation, exiting \
+                                 thread",
+                                e.raw_os_error()
+                            );
                             return;
                         }
                     }
@@ -636,7 +640,7 @@ fn prep_and_sign_packet(
 const SIGNATURE: [u8; SIGNATURE_BYTES] = [7u8; SIGNATURE_BYTES];
 
 fn report_collected_votes(peers: HashMap<Pubkey, PeerData>, total_staked: Stake, slot: Slot) {
-    trace!("Reporting statistics for slot {}", slot);
+    trace!("Reporting statistics for slot {slot}");
     let (total_voted_nodes, stake_weighted_delay, percent_collected) =
         compute_stake_weighted_means(&peers, total_staked);
     datapoint_info!(

+ 7 - 7
dos/src/main.rs

@@ -438,9 +438,9 @@ fn get_target(
     } else {
         info!("************ NODE ***********");
         for node in nodes {
-            info!("{:?}", node);
+            info!("{node:?}");
         }
-        info!("ADDR = {}", entrypoint_addr);
+        info!("ADDR = {entrypoint_addr}");
 
         for node in nodes {
             if node.gossip() == Some(entrypoint_addr) {
@@ -652,7 +652,7 @@ fn run_dos<T: 'static + TpsClient + Send + Sync>(
         && params.transaction_params.unique_transactions
     {
         let (_, target_addr) = target.expect("should have target");
-        info!("Targeting {}", target_addr);
+        info!("Targeting {target_addr}");
         run_dos_transactions(
             target_addr,
             iterations,
@@ -664,7 +664,7 @@ fn run_dos<T: 'static + TpsClient + Send + Sync>(
         );
     } else {
         let (target_id, target_addr) = target.expect("should have target");
-        info!("Targeting {}", target_addr);
+        info!("Targeting {target_addr}");
         let mut data = match params.data_type {
             DataType::RepairHighest => {
                 let slot = 100;
@@ -700,7 +700,7 @@ fn run_dos<T: 'static + TpsClient + Send + Sync>(
             }
             DataType::Transaction => {
                 let tp = params.transaction_params;
-                info!("{:?}", tp);
+                info!("{tp:?}");
 
                 let valid_blockhash = tp.valid_blockhash;
                 let payers: Vec<Option<Keypair>> =
@@ -720,7 +720,7 @@ fn run_dos<T: 'static + TpsClient + Send + Sync>(
 
                 let mut transaction_generator = TransactionGenerator::new(tp);
                 let tx = transaction_generator.generate(payer, keypairs_chunk, client.as_ref());
-                info!("{:?}", tx);
+                info!("{tx:?}");
                 bincode::serialize(&tx).unwrap()
             }
             _ => panic!("Unsupported data_type detected"),
@@ -768,7 +768,7 @@ fn main() {
         cmd_params.shred_version = Some(
             solana_net_utils::get_cluster_shred_version(&cmd_params.entrypoint_addr)
                 .unwrap_or_else(|err| {
-                    eprintln!("Failed to get shred version: {}", err);
+                    eprintln!("Failed to get shred version: {err}");
                     exit(1);
                 }),
         );

+ 1 - 1
download-utils/src/lib.rs

@@ -106,7 +106,7 @@ pub fn download_snapshot_archive(
             progress_notify_callback,
         ) {
             Ok(()) => return Ok(()),
-            Err(err) => info!("{}", err),
+            Err(err) => info!("{err}"),
         }
     }
     Err(format!(

+ 6 - 8
entry/src/entry.rs

@@ -49,7 +49,7 @@ pub fn init_poh() {
 fn init(name: &OsStr) {
     static INIT_HOOK: Once = Once::new();
 
-    info!("Loading {:?}", name);
+    info!("Loading {name:?}");
     INIT_HOOK.call_once(|| {
         let path;
         let lib_name = if let Some(perf_libs_path) = solana_perf::perf_libs::locate_perf_libs() {
@@ -887,10 +887,8 @@ impl EntrySlice for [Entry] {
             if entry.is_tick() {
                 if *tick_hash_count != hashes_per_tick {
                     warn!(
-                        "invalid tick hash count!: entry: {:#?}, tick_hash_count: {}, hashes_per_tick: {}",
-                        entry,
-                        tick_hash_count,
-                        hashes_per_tick
+                        "invalid tick hash count!: entry: {entry:#?}, tick_hash_count: \
+                         {tick_hash_count}, hashes_per_tick: {hashes_per_tick}"
                     );
                     return false;
                 }
@@ -1406,7 +1404,7 @@ mod tests {
         for _ in 0..100 {
             let mut time = Measure::start("ticks");
             let num_ticks = thread_rng().gen_range(1..100);
-            info!("create {} ticks:", num_ticks);
+            info!("create {num_ticks} ticks:");
             let mut entries = create_random_ticks(num_ticks, 100, Hash::default());
             time.stop();
 
@@ -1417,12 +1415,12 @@ mod tests {
                 entries[modify_idx].hash = hash(&[1, 2, 3]);
             }
 
-            info!("done.. {}", time);
+            info!("done.. {time}");
             let mut time = Measure::start("poh");
             let res = entries.verify(&Hash::default(), &thread_pool_for_tests());
             assert_eq!(res, !modified);
             time.stop();
-            info!("{} {}", time, res);
+            info!("{time} {res}");
         }
     }
 

+ 1 - 1
entry/src/poh.rs

@@ -148,7 +148,7 @@ impl Poh {
 }
 
 pub fn compute_hash_time(hashes_sample_size: u64) -> Duration {
-    info!("Running {} hashes...", hashes_sample_size);
+    info!("Running {hashes_sample_size} hashes...");
     let mut v = Hash::default();
     let start = Instant::now();
     for _ in 0..hashes_sample_size {

+ 29 - 31
faucet/src/faucet.rs

@@ -124,8 +124,8 @@ impl Faucet {
         if let Some((per_request_cap, per_time_cap)) = per_request_cap.zip(per_time_cap) {
             if per_time_cap < per_request_cap {
                 warn!(
-                    "per_time_cap {} SOL < per_request_cap {} SOL; \
-                    maximum single requests will fail",
+                    "per_time_cap {} SOL < per_request_cap {} SOL; maximum single requests will \
+                     fail",
                     build_balance_message(per_time_cap, false, false),
                     build_balance_message(per_request_cap, false, false),
                 );
@@ -176,7 +176,7 @@ impl Faucet {
         req: FaucetRequest,
         ip: IpAddr,
     ) -> Result<FaucetTransaction, FaucetError> {
-        trace!("build_airdrop_transaction: {:?}", req);
+        trace!("build_airdrop_transaction: {req:?}");
         match req {
             FaucetRequest::GetAirdrop {
                 lamports,
@@ -235,7 +235,7 @@ impl Faucet {
     ) -> Result<Vec<u8>, FaucetError> {
         let req: FaucetRequest = deserialize(bytes)?;
 
-        info!("Airdrop transaction requested...{:?}", req);
+        info!("Airdrop transaction requested...{req:?}");
         let res = self.build_airdrop_transaction(req, ip);
         match res {
             Ok(tx) => {
@@ -245,7 +245,7 @@ impl Faucet {
                         tx
                     }
                     FaucetTransaction::Memo((tx, memo)) => {
-                        warn!("Memo transaction returned: {}", memo);
+                        warn!("Memo transaction returned: {memo}");
                         tx
                     }
                 };
@@ -258,7 +258,7 @@ impl Faucet {
                 Ok(response_vec_with_length)
             }
             Err(err) => {
-                warn!("Airdrop transaction failed: {}", err);
+                warn!("Airdrop transaction failed: {err}");
                 Err(err)
             }
         }
@@ -278,8 +278,8 @@ pub fn request_airdrop_transaction(
     blockhash: Hash,
 ) -> Result<Transaction, FaucetError> {
     info!(
-        "request_airdrop_transaction: faucet_addr={} id={} lamports={} blockhash={}",
-        faucet_addr, id, lamports, blockhash
+        "request_airdrop_transaction: faucet_addr={faucet_addr} id={id} lamports={lamports} \
+         blockhash={blockhash}"
     );
 
     let mut stream = TcpStream::connect_timeout(faucet_addr, Duration::new(3, 0))?;
@@ -295,10 +295,7 @@ pub fn request_airdrop_transaction(
     // Read length of transaction
     let mut buffer = [0; 2];
     stream.read_exact(&mut buffer).map_err(|err| {
-        info!(
-            "request_airdrop_transaction: buffer length read_exact error: {:?}",
-            err
-        );
+        info!("request_airdrop_transaction: buffer length read_exact error: {err:?}");
         err
     })?;
     let transaction_length = u16::from_le_bytes(buffer) as usize;
@@ -311,10 +308,7 @@ pub fn request_airdrop_transaction(
     // Read the transaction
     let mut buffer = vec![0; transaction_length];
     stream.read_exact(&mut buffer).map_err(|err| {
-        info!(
-            "request_airdrop_transaction: buffer read_exact error: {:?}",
-            err
-        );
+        info!("request_airdrop_transaction: buffer read_exact error: {err:?}");
         err
     })?;
 
@@ -360,25 +354,29 @@ pub async fn run_faucet(
 ) {
     let listener = TcpListener::bind(&faucet_addr).await;
     if let Some(sender) = sender {
-        sender.send(
-            listener.as_ref().map(|listener| listener.local_addr().unwrap())
-                .map_err(|err| {
-                    format!(
-                        "Unable to bind faucet to {faucet_addr:?}, check the address is not already in use: {err}"
-                    )
-                })
+        sender
+            .send(
+                listener
+                    .as_ref()
+                    .map(|listener| listener.local_addr().unwrap())
+                    .map_err(|err| {
+                        format!(
+                            "Unable to bind faucet to {faucet_addr:?}, check the address is not \
+                             already in use: {err}"
+                        )
+                    }),
             )
             .unwrap();
     }
 
     let listener = match listener {
         Err(err) => {
-            error!("Faucet failed to start: {}", err);
+            error!("Faucet failed to start: {err}");
             return;
         }
         Ok(listener) => listener,
     };
-    info!("Faucet started. Listening on: {}", faucet_addr);
+    info!("Faucet started. Listening on: {faucet_addr}");
     info!(
         "Faucet account address: {}",
         faucet.lock().unwrap().faucet_keypair.pubkey()
@@ -390,11 +388,11 @@ pub async fn run_faucet(
             Ok((stream, _)) => {
                 tokio::spawn(async move {
                     if let Err(e) = process(stream, faucet).await {
-                        info!("failed to process request; error = {:?}", e);
+                        info!("failed to process request; error = {e:?}");
                     }
                 });
             }
-            Err(e) => debug!("failed to accept socket; error = {:?}", e),
+            Err(e) => debug!("failed to accept socket; error = {e:?}"),
         }
     }
 }
@@ -413,7 +411,7 @@ async fn process(
         .unwrap() as usize
     ];
     while stream.read_exact(&mut request).await.is_ok() {
-        trace!("{:?}", request);
+        trace!("{request:?}");
 
         let response = {
             match stream.peer_addr() {
@@ -423,15 +421,15 @@ async fn process(
                 }
                 Ok(peer_addr) => {
                     let ip = peer_addr.ip();
-                    info!("Request IP: {:?}", ip);
+                    info!("Request IP: {ip:?}");
 
                     match faucet.lock().unwrap().process_faucet_request(&request, ip) {
                         Ok(response_bytes) => {
-                            trace!("Airdrop response_bytes: {:?}", response_bytes);
+                            trace!("Airdrop response_bytes: {response_bytes:?}");
                             response_bytes
                         }
                         Err(e) => {
-                            info!("Error in request: {}", e);
+                            info!("Error in request: {e}");
                             ERROR_RESPONSE.to_vec()
                         }
                     }

+ 5 - 3
genesis-utils/src/lib.rs

@@ -17,7 +17,8 @@ fn check_genesis_hash(
     if let Some(expected_genesis_hash) = expected_genesis_hash {
         if expected_genesis_hash != genesis_hash {
             return Err(format!(
-                "Genesis hash mismatch: expected {expected_genesis_hash} but downloaded genesis hash is {genesis_hash}",
+                "Genesis hash mismatch: expected {expected_genesis_hash} but downloaded genesis \
+                 hash is {genesis_hash}",
             ));
         }
     }
@@ -79,7 +80,7 @@ fn set_and_verify_expected_genesis_hash(
 ) -> Result<(), String> {
     let genesis_hash = genesis_config.hash();
     if expected_genesis_hash.is_none() {
-        info!("Expected genesis hash set to {}", genesis_hash);
+        info!("Expected genesis hash set to {genesis_hash}");
         *expected_genesis_hash = Some(genesis_hash);
     }
     let expected_genesis_hash = expected_genesis_hash.unwrap();
@@ -92,7 +93,8 @@ fn set_and_verify_expected_genesis_hash(
 
     if expected_genesis_hash != rpc_genesis_hash {
         return Err(format!(
-            "Genesis hash mismatch: expected {expected_genesis_hash} but RPC node genesis hash is {rpc_genesis_hash}"
+            "Genesis hash mismatch: expected {expected_genesis_hash} but RPC node genesis hash is \
+             {rpc_genesis_hash}"
         ));
     }
 

+ 12 - 25
gossip/src/push_active_set.rs

@@ -70,7 +70,7 @@ fn get_weight(bucket: u64, alpha: u64) -> u64 {
 fn gossip_interpolate_weight(base: u64, base_squared: u64, alpha: u64) -> u64 {
     let scale = lpf::SCALE.get();
     let t = alpha.saturating_sub(ALPHA_MIN);
-    debug_assert!(t <= scale, "interpolation t={} > SCALE={}", t, scale);
+    debug_assert!(t <= scale, "interpolation t={t} > SCALE={scale}");
     // ((base * (scale - t) + base_squared * t) + scale / 2) / scale
     ((base.saturating_mul(scale.saturating_sub(t))).saturating_add(base_squared.saturating_mul(t)))
         .saturating_add(scale / 2)
@@ -104,7 +104,7 @@ impl PushActiveSet {
             (WeightingMode::Static, WeightingConfigTyped::Static) => (),
             (current_mode, WeightingConfigTyped::Static) => {
                 // Dynamic -> Static: Switch mode
-                info!("Switching mode: {:?} -> Static", current_mode);
+                info!("Switching mode: {current_mode:?} -> Static");
                 self.mode = WeightingMode::Static;
             }
             (
@@ -125,13 +125,13 @@ impl PushActiveSet {
                 }
             }
             (current_mode, WeightingConfigTyped::Dynamic { .. }) => {
-                info!("Switching mode: {:?} -> Dynamic", current_mode);
+                info!("Switching mode: {current_mode:?} -> Dynamic");
                 self.mode = WeightingMode::from(config_type);
                 if let WeightingMode::Dynamic {
                     filter_k, tc_ms, ..
                 } = self.mode
                 {
-                    info!("Initialized filter K = {} (tc_ms = {})", filter_k, tc_ms);
+                    info!("Initialized filter K = {filter_k} (tc_ms = {tc_ms})");
                 }
             }
         }
@@ -622,9 +622,7 @@ mod tests {
         let actual_alpha = alpha_of(&active_set);
         assert!(
             (actual_alpha as i32 - expected_alpha_milli as i32).abs() <= TOLERANCE_MILLI as i32,
-            "alpha={} did not converge to expected alpha={}",
-            actual_alpha,
-            expected_alpha_milli
+            "alpha={actual_alpha} did not converge to expected alpha={expected_alpha_milli}"
         );
 
         // 93% unstaked → alpha_target = 1,000,000 + 93 * 10000 = 1,930,000
@@ -640,9 +638,7 @@ mod tests {
         let actual_alpha = alpha_of(&active_set);
         assert!(
             (actual_alpha as i32 - expected_alpha_milli as i32).abs() <= TOLERANCE_MILLI as i32,
-            "alpha={} did not reconverge to expected alpha={}",
-            actual_alpha,
-            expected_alpha_milli
+            "alpha={actual_alpha} did not reconverge to expected alpha={expected_alpha_milli}"
         );
     }
 
@@ -669,9 +665,7 @@ mod tests {
         let alpha = alpha_of(&active_set);
         assert!(
             (alpha as i32 - expected_alpha_0).abs() <= TOLERANCE_MILLI as i32,
-            "alpha={} did not converge to alpha_0={}",
-            alpha,
-            expected_alpha_0
+            "alpha={alpha} did not converge to alpha_0={expected_alpha_0}"
         );
 
         // 100% unstaked → alpha_target = 2,000,000
@@ -684,9 +678,7 @@ mod tests {
         let alpha = alpha_of(&active_set);
         assert!(
             (alpha as i32 - expected_alpha_100).abs() <= TOLERANCE_MILLI as i32,
-            "alpha={} did not converge to alpha_100={}",
-            alpha,
-            expected_alpha_100
+            "alpha={alpha} did not converge to alpha_100={expected_alpha_100}"
         );
 
         // back to 0% unstaked → alpha_target = 1,000,000
@@ -698,9 +690,7 @@ mod tests {
         let alpha = alpha_of(&active_set);
         assert!(
             (alpha as i32 - expected_alpha_0).abs() <= TOLERANCE_MILLI as i32,
-            "alpha={} did not reconverge to alpha_0={}",
-            alpha,
-            expected_alpha_0
+            "alpha={alpha} did not reconverge to alpha_0={expected_alpha_0}"
         );
     }
 
@@ -727,8 +717,7 @@ mod tests {
             );
             assert_eq!(
                 alpha, *expected as u64,
-                "step {}: alpha did not match expected during convergence down",
-                i
+                "step {i}: alpha did not match expected during convergence down"
             );
         }
 
@@ -747,8 +736,7 @@ mod tests {
             );
             assert_eq!(
                 alpha, *expected as u64,
-                "step {}: alpha did not match expected during convergence up",
-                i
+                "step {i}: alpha did not match expected during convergence up"
             );
         }
 
@@ -767,8 +755,7 @@ mod tests {
             );
             assert_eq!(
                 alpha, *expected as u64,
-                "step {}: alpha did not match expected during final convergence down",
-                i
+                "step {i}: alpha did not match expected during final convergence down"
             );
         }
     }

+ 77 - 50
keygen/src/keygen.rs

@@ -83,7 +83,7 @@ fn grind_parser(grind_type: GrindType) -> ValueParser {
         };
         if v.matches(':').count() != required_div_count || (v.starts_with(':') || v.ends_with(':'))
         {
-            return Err(format!("Expected : between {} and COUNT", prefix_suffix));
+            return Err(format!("Expected : between {prefix_suffix} and COUNT"));
         }
         // `args` is guaranteed to have length at least 1 by the previous if statement
         let mut args: Vec<&str> = v.split(':').collect();
@@ -248,15 +248,16 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .index(2)
                         .value_name("KEYPAIR")
                         .takes_value(true)
-                        .value_parser(
-                            SignerSourceParserBuilder::default().allow_all().build()
-                        )
+                        .value_parser(SignerSourceParserBuilder::default().allow_all().build())
                         .help("Filepath or URL to a keypair"),
-                )
+                ),
         )
         .subcommand(
             Command::new("new")
-                .about("Generate new keypair file from a random seed phrase and optional BIP39 passphrase")
+                .about(
+                    "Generate new keypair file from a random seed phrase and optional BIP39 \
+                     passphrase",
+                )
                 .disable_version_flag(true)
                 .arg(
                     Arg::new("outfile")
@@ -272,19 +273,13 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .long("force")
                         .help("Overwrite the output file if it exists"),
                 )
-                .arg(
-                    Arg::new("silent")
-                        .short('s')
-                        .long("silent")
-                        .help("Do not display seed phrase. Useful when piping output to other programs that prompt for user input, like gpg"),
-                )
-                .arg(
-                    derivation_path_arg()
-                )
+                .arg(Arg::new("silent").short('s').long("silent").help(
+                    "Do not display seed phrase. Useful when piping output to other programs that \
+                     prompt for user input, like gpg",
+                ))
+                .arg(derivation_path_arg())
                 .key_generation_common_args()
-                .arg(no_outfile_arg()
-                    .conflicts_with_all(&["outfile", "silent"])
-                )
+                .arg(no_outfile_arg().conflicts_with_all(&["outfile", "silent"])),
         )
         .subcommand(
             Command::new("grind")
@@ -304,7 +299,11 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .action(ArgAction::Append)
                         .multiple_values(true)
                         .value_parser(grind_parser(GrindType::Starts))
-                        .help("Saves specified number of keypairs whos public key starts with the indicated prefix\nExample: --starts-with sol:4\nPREFIX type is Base58\nCOUNT type is u64"),
+                        .help(
+                            "Saves specified number of keypairs whos public key starts with the \
+                             indicated prefix\nExample: --starts-with sol:4\nPREFIX type is \
+                             Base58\nCOUNT type is u64",
+                        ),
                 )
                 .arg(
                     Arg::new("ends_with")
@@ -315,7 +314,11 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .action(ArgAction::Append)
                         .multiple_values(true)
                         .value_parser(grind_parser(GrindType::Ends))
-                        .help("Saves specified number of keypairs whos public key ends with the indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is Base58\nCOUNT type is u64"),
+                        .help(
+                            "Saves specified number of keypairs whos public key ends with the \
+                             indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is \
+                             Base58\nCOUNT type is u64",
+                        ),
                 )
                 .arg(
                     Arg::new("starts_and_ends_with")
@@ -326,7 +329,12 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .action(ArgAction::Append)
                         .multiple_values(true)
                         .value_parser(grind_parser(GrindType::StartsAndEnds))
-                        .help("Saves specified number of keypairs whos public key starts and ends with the indicated prefix and suffix\nExample: --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is Base58\nCOUNT type is u64"),
+                        .help(
+                            "Saves specified number of keypairs whos public key starts and ends \
+                             with the indicated prefix and suffix\nExample: \
+                             --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is \
+                             Base58\nCOUNT type is u64",
+                        ),
                 )
                 .arg(
                     Arg::new("num_threads")
@@ -337,22 +345,18 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .default_value(num_threads)
                         .help("Specify the number of grind threads"),
                 )
-                .arg(
-                    Arg::new("use_mnemonic")
-                        .long("use-mnemonic")
-                        .help("Generate using a mnemonic key phrase.  Expect a significant slowdown in this mode"),
-                )
-                .arg(
-                    derivation_path_arg()
-                        .requires("use_mnemonic")
-                )
+                .arg(Arg::new("use_mnemonic").long("use-mnemonic").help(
+                    "Generate using a mnemonic key phrase.  Expect a significant slowdown in this \
+                     mode",
+                ))
+                .arg(derivation_path_arg().requires("use_mnemonic"))
                 .key_generation_common_args()
                 .arg(
                     no_outfile_arg()
-                    // Require a seed phrase to avoid generating a keypair
-                    // but having no way to get the private key
-                    .requires("use_mnemonic")
-                )
+                        // Require a seed phrase to avoid generating a keypair
+                        // but having no way to get the private key
+                        .requires("use_mnemonic"),
+                ),
         )
         .subcommand(
             Command::new("pubkey")
@@ -363,9 +367,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .index(1)
                         .value_name("KEYPAIR")
                         .takes_value(true)
-                        .value_parser(
-                            SignerSourceParserBuilder::default().allow_all().build()
-                        )
+                        .value_parser(SignerSourceParserBuilder::default().allow_all().build())
                         .help("Filepath or URL to a keypair"),
                 )
                 .arg(
@@ -386,7 +388,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .short('f')
                         .long("force")
                         .help("Overwrite the output file if it exists"),
-                )
+                ),
         )
         .subcommand(
             Command::new("recover")
@@ -397,7 +399,12 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .index(1)
                         .value_name("KEYPAIR")
                         .takes_value(true)
-                        .value_parser(SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build())
+                        .value_parser(
+                            SignerSourceParserBuilder::default()
+                                .allow_prompt()
+                                .allow_legacy()
+                                .build(),
+                        )
                         .help("`prompt:` URI scheme or `ASK` keyword"),
                 )
                 .arg(
@@ -419,7 +426,6 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
                         .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
                 ),
-
         )
 }
 
@@ -517,8 +523,14 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                 let phrase: &str = mnemonic.phrase();
                 let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
                 println!(
-                    "{}\npubkey: {}\n{}\nSave this seed phrase{} to recover your new keypair:\n{}\n{}",
-                    &divider, keypair.pubkey(), &divider, passphrase_message, phrase, &divider
+                    "{}\npubkey: {}\n{}\nSave this seed phrase{} to recover your new \
+                     keypair:\n{}\n{}",
+                    &divider,
+                    keypair.pubkey(),
+                    &divider,
+                    passphrase_message,
+                    phrase,
+                    &divider
                 );
             }
         }
@@ -600,7 +612,9 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                 && starts_and_ends_with_args.is_empty()
             {
                 return Err(
-                    "Error: No keypair search criteria provided (--starts-with or --ends-with or --starts-and-ends-with)".into()
+                    "Error: No keypair search criteria provided (--starts-with or --ends-with or \
+                     --starts-and-ends-with)"
+                        .into(),
                 );
             }
 
@@ -681,15 +695,21 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                             let mnemonic = Mnemonic::new(mnemonic_type, language);
                             let seed = Seed::new(&mnemonic, &passphrase);
                             let keypair = match derivation_path {
-                                Some(_) => keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path.clone()),
+                                Some(_) => keypair_from_seed_and_derivation_path(
+                                    seed.as_bytes(),
+                                    derivation_path.clone(),
+                                ),
                                 None => keypair_from_seed(seed.as_bytes()),
-                            }.unwrap();
+                            }
+                            .unwrap();
                             (keypair, mnemonic.phrase().to_string())
                         } else {
                             (Keypair::new(), "".to_string())
                         };
                         // Skip keypairs that will never match the user specified prefix
-                        if skip_len_44_pubkeys && keypair.pubkey() >= smallest_length_44_public_key::PUBKEY {
+                        if skip_len_44_pubkeys
+                            && keypair.pubkey() >= smallest_length_44_public_key::PUBKEY
+                        {
                             continue;
                         }
                         let mut pubkey = bs58::encode(keypair.pubkey()).into_string();
@@ -718,7 +738,10 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                                     .count
                                     .fetch_sub(1, Ordering::Relaxed);
                                 if !no_outfile {
-                                    write_keypair_file(&keypair, format!("{}.json", keypair.pubkey()))
+                                    write_keypair_file(
+                                        &keypair,
+                                        format!("{}.json", keypair.pubkey()),
+                                    )
                                     .unwrap();
                                     println!(
                                         "Wrote keypair to {}",
@@ -726,12 +749,16 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                                     );
                                 }
                                 if use_mnemonic {
-                                    let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
+                                    let divider =
+                                        String::from_utf8(vec![b'='; phrase.len()]).unwrap();
                                     println!(
                                         "{}\nFound matching key {}",
-                                        &divider, keypair.pubkey());
+                                        &divider,
+                                        keypair.pubkey()
+                                    );
                                     println!(
-                                        "\nSave this seed phrase{} to recover your new keypair:\n{}\n{}",
+                                        "\nSave this seed phrase{} to recover your new \
+                                         keypair:\n{}\n{}",
                                         passphrase_message, phrase, &divider
                                     );
                                 }

+ 6 - 6
metrics/src/counter.rs

@@ -174,13 +174,12 @@ impl Counter {
         let metricsrate = self.metricsrate.load(Ordering::Relaxed);
 
         if times % lograte == 0 && times > 0 && log_enabled!(level) {
-            log!(level,
-                "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {},  \"now\": {}, \"events\": {}}}",
+            log!(
+                level,
+                "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {times}, \"now\": \
+                 {now}, \"events\": {events}}}",
                 self.name,
                 counts + events,
-                times,
-                now,
-                events,
             );
         }
 
@@ -298,7 +297,8 @@ mod tests {
         assert_eq!(
             Counter::default_log_rate(),
             DEFAULT_LOG_RATE,
-            "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_LOG_RATE environment variable set?",
+            "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_LOG_RATE environment variable \
+             set?",
             Counter::default_log_rate(),
             DEFAULT_LOG_RATE,
         );

+ 12 - 15
metrics/src/metrics.rs

@@ -81,7 +81,7 @@ impl InfluxDbMetricsWriter {
 
     fn build_write_url() -> Result<String, MetricsError> {
         let config = get_metrics_config().map_err(|err| {
-            info!("metrics disabled: {}", err);
+            info!("metrics disabled: {err}");
             err
         })?;
 
@@ -149,7 +149,7 @@ impl MetricsWriter for InfluxDbMetricsWriter {
             let client = match client {
                 Ok(client) => client,
                 Err(err) => {
-                    warn!("client instantiation failed: {}", err);
+                    warn!("client instantiation failed: {err}");
                     return;
                 }
             };
@@ -161,7 +161,7 @@ impl MetricsWriter for InfluxDbMetricsWriter {
                     let text = resp
                         .text()
                         .unwrap_or_else(|_| "[text body empty]".to_string());
-                    warn!("submit response unsuccessful: {} {}", status, text,);
+                    warn!("submit response unsuccessful: {status} {text}",);
                 }
             } else {
                 warn!("submit error: {}", response.unwrap_err());
@@ -226,13 +226,12 @@ impl MetricsAgent {
         let fit_counters = max_points.saturating_sub(points.len());
         let points_written = cmp::min(num_points, max_points);
 
-        debug!("run: attempting to write {} points", num_points);
+        debug!("run: attempting to write {num_points} points");
 
         if num_points > max_points {
             warn!(
-                "Max submission rate of {} datapoints per second exceeded.  Only the \
-                 first {} of {} points will be submitted.",
-                max_points_per_sec, max_points, num_points
+                "Max submission rate of {max_points_per_sec} datapoints per second exceeded. \
+                 Only the first {max_points} of {num_points} points will be submitted."
             );
         }
 
@@ -321,11 +320,11 @@ impl MetricsAgent {
                         barrier.wait();
                     }
                     MetricsCommand::Submit(point, level) => {
-                        log!(level, "{}", point);
+                        log!(level, "{point}");
                         points.push(point);
                     }
                     MetricsCommand::SubmitCounter(counter, _level, bucket) => {
-                        debug!("{:?}", counter);
+                        debug!("{counter:?}");
                         let key = (counter.name, bucket);
                         if let Some(value) = counters.get_mut(&key) {
                             value.count += counter.count;
@@ -351,11 +350,9 @@ impl MetricsAgent {
 
         debug_assert!(
             points.is_empty() && counters.is_empty(),
-            "Controlling `MetricsAgent` is expected to call `flush()` from the `Drop` \n\
-             implementation, before exiting.  So both `points` and `counters` must be empty at \n\
-             this point.\n\
-             `points`: {points:?}\n\
-             `counters`: {counters:?}",
+            "Controlling `MetricsAgent` is expected to call `flush()` from the `Drop` \
+             implementation, before exiting. So both `points` and `counters` must be empty at \
+             this point. `points`: {points:?}, `counters`: {counters:?}",
         );
 
         trace!("run: exit");
@@ -406,7 +403,7 @@ static HOST_ID: std::sync::LazyLock<RwLock<String>> = std::sync::LazyLock::new(|
 });
 
 pub fn set_host_id(host_id: String) {
-    info!("host id: {}", host_id);
+    info!("host id: {host_id}");
     *HOST_ID.write().unwrap() = host_id;
 }
 

+ 2 - 2
net-utils/src/multihomed_sockets.rs

@@ -97,8 +97,8 @@ impl BindIpAddrs {
             for ip in &addrs {
                 if ip.is_loopback() || ip.is_unspecified() || ip.is_multicast() {
                     return Err(format!(
-                        "Invalid configuration: {:?} is not allowed with multiple --bind-address values (loopback, unspecified, or multicast)",
-                        ip
+                        "Invalid configuration: {ip:?} is not allowed with multiple \
+                         --bind-address values (loopback, unspecified, or multicast)"
                     ));
                 }
             }

+ 11 - 12
notifier/src/lib.rs

@@ -112,7 +112,7 @@ impl Default for Notifier {
 
 impl Notifier {
     pub fn new(env_prefix: &str) -> Self {
-        info!("Initializing {}Notifier", env_prefix);
+        info!("Initializing {env_prefix}Notifier");
 
         let mut notifiers = vec![];
 
@@ -143,10 +143,9 @@ impl Notifier {
         if let Ok(log_level) = env::var(format!("{env_prefix}LOG_NOTIFIER_LEVEL")) {
             match Level::from_str(&log_level) {
                 Ok(level) => notifiers.push(NotificationChannel::Log(level)),
-                Err(e) => warn!(
-                    "could not parse specified log notifier level string ({}): {}",
-                    log_level, e
-                ),
+                Err(e) => {
+                    warn!("could not parse specified log notifier level string ({log_level}): {e}")
+                }
             }
         }
 
@@ -170,14 +169,14 @@ impl Notifier {
                         // Discord rate limiting is aggressive, limit to 1 message a second
                         sleep(Duration::from_millis(1000));
 
-                        info!("Sending {}", line);
+                        info!("Sending {line}");
                         let data = json!({ "content": line });
 
                         loop {
                             let response = self.client.post(webhook).json(&data).send();
 
                             if let Err(err) = response {
-                                warn!("Failed to send Discord message: \"{}\": {:?}", line, err);
+                                warn!("Failed to send Discord message: \"{line}\": {err:?}");
                                 break;
                             } else if let Ok(response) = response {
                                 info!("response status: {}", response.status());
@@ -195,7 +194,7 @@ impl Notifier {
                 NotificationChannel::Slack(webhook) => {
                     let data = json!({ "text": msg });
                     if let Err(err) = self.client.post(webhook).json(&data).send() {
-                        warn!("Failed to send Slack message: {:?}", err);
+                        warn!("Failed to send Slack message: {err:?}");
                     }
                 }
                 NotificationChannel::PagerDuty(routing_key) => {
@@ -212,7 +211,7 @@ impl Notifier {
                     let url = "https://events.pagerduty.com/v2/enqueue";
 
                     if let Err(err) = self.client.post(url).json(&data).send() {
-                        warn!("Failed to send PagerDuty alert: {:?}", err);
+                        warn!("Failed to send PagerDuty alert: {err:?}");
                     }
                 }
 
@@ -221,7 +220,7 @@ impl Notifier {
                     let url = format!("https://api.telegram.org/bot{bot_token}/sendMessage");
 
                     if let Err(err) = self.client.post(url).json(&data).send() {
-                        warn!("Failed to send Telegram message: {:?}", err);
+                        warn!("Failed to send Telegram message: {err:?}");
                     }
                 }
 
@@ -236,11 +235,11 @@ impl Notifier {
                     );
                     let params = [("To", to), ("From", from), ("Body", &msg.to_string())];
                     if let Err(err) = self.client.post(url).form(&params).send() {
-                        warn!("Failed to send Twilio message: {:?}", err);
+                        warn!("Failed to send Twilio message: {err:?}");
                     }
                 }
                 NotificationChannel::Log(level) => {
-                    log!(*level, "{}", msg)
+                    log!(*level, "{msg}")
                 }
             }
         }

+ 3 - 2
poh/src/poh_recorder.rs

@@ -584,7 +584,7 @@ impl PohRecorder {
             self.clear_bank();
         }
         if send_result.is_err() {
-            info!("WorkingBank::sender disconnected {:?}", send_result);
+            info!("WorkingBank::sender disconnected {send_result:?}");
             // revert the cache, but clear the working bank
             self.clear_bank();
         } else {
@@ -680,7 +680,8 @@ impl PohRecorder {
     /// leaders needed to be skipped).
     pub fn reached_leader_slot(&self, my_pubkey: &Pubkey) -> PohLeaderStatus {
         trace!(
-            "tick_height {}, start_tick_height {}, leader_first_tick_height {:?}, grace_ticks {}, has_bank {}",
+            "tick_height {}, start_tick_height {}, leader_first_tick_height {:?}, grace_ticks {}, \
+             has_bank {}",
             self.tick_height(),
             self.start_tick_height,
             self.leader_first_tick_height.load(),

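Note that the `trace!` above keeps its positional `{}` arguments: inline capture only works for bare identifiers, so expressions such as `self.tick_height()` or `self.leader_first_tick_height.load()` cannot move into the format string. A minimal sketch of the distinction (illustrative only):

fn main() {
    let heights = vec![1_u64, 2, 3];

    // Fine: `heights` is a plain identifier, so it can be captured inline.
    println!("tick heights: {heights:?}");

    // `heights.len()` is an expression, so it cannot be captured inline;
    // it has to stay as a positional argument, just like the method calls
    // left in place by the hunk above.
    println!("tick count: {}", heights.len());
}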
+ 1 - 1
remote-wallet/src/ledger.rs

@@ -267,7 +267,7 @@ impl LedgerWallet {
         }
         let status =
             ((message[message.len() - 2] as usize) << 8) | (message[message.len() - 1] as usize);
-        trace!("Read status {:x}", status);
+        trace!("Read status {status:x}");
         Self::parse_status(status)?;
         let new_len = message.len() - 2;
         message.truncate(new_len);

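Format specifiers are unaffected by the inlining; only the argument moves inside the braces, so `{status:x}` above still prints hex, `{err:?}` elsewhere still uses `Debug`, and the zero-padded `{thx:02}` thread name later in unified-scheduler-pool keeps its width. A small sketch (illustrative only, values made up):

fn main() {
    let status: usize = 0x6985;
    let thx = 7;
    let err = std::io::Error::from(std::io::ErrorKind::NotFound);

    // Hex formatting survives inlining unchanged.
    assert_eq!(format!("Read status {status:x}"), "Read status 6985");

    // So does zero-padding to a fixed width.
    assert_eq!(format!("solScHandle{thx:02}"), "solScHandle07");

    // And Debug formatting of an error value.
    println!("Failed to send: {err:?}");
}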
+ 4 - 4
remote-wallet/src/remote_wallet.rs

@@ -127,7 +127,7 @@ impl RemoteWalletManager {
                         Ok(info) => {
                             ledger.pretty_path = info.get_pretty_path();
                             let path = device_info.path().to_str().unwrap().to_string();
-                            trace!("Found device: {:?}", info);
+                            trace!("Found device: {info:?}");
                             detected_devices.push(Device {
                                 path,
                                 info,
@@ -135,12 +135,12 @@ impl RemoteWalletManager {
                             })
                         }
                         Err(err) => {
-                            error!("Error connecting to ledger device to read info: {}", err);
+                            error!("Error connecting to ledger device to read info: {err}");
                             errors.push(err)
                         }
                     }
                 }
-                Err(err) => error!("Error connecting to ledger device to read info: {}", err),
+                Err(err) => error!("Error connecting to ledger device to read info: {err}"),
             }
         }
 
@@ -198,7 +198,7 @@ impl RemoteWalletManager {
         while start_time.elapsed() <= *max_polling_duration {
             if let Ok(num_devices) = self.update_devices() {
                 let plural = if num_devices == 1 { "" } else { "s" };
-                trace!("{} Remote Wallet{} found", num_devices, plural);
+                trace!("{num_devices} Remote Wallet{plural} found");
                 return true;
             }
         }

+ 12 - 15
test-validator/src/lib.rs

@@ -347,7 +347,7 @@ impl TestValidatorGenesis {
     {
         let addresses: Vec<Pubkey> = addresses.into_iter().collect();
         for chunk in addresses.chunks(MAX_MULTIPLE_ACCOUNTS) {
-            info!("Fetching {:?} over RPC...", chunk);
+            info!("Fetching {chunk:?} over RPC...");
             let responses = rpc_client
                 .get_multiple_accounts(chunk)
                 .map_err(|err| format!("Failed to fetch: {err}"))?;
@@ -355,7 +355,7 @@ impl TestValidatorGenesis {
                 if let Some(account) = res {
                     self.add_account(*address, transform(address, account)?);
                 } else if skip_missing {
-                    warn!("Could not find {}, skipping.", address);
+                    warn!("Could not find {address}, skipping.");
                 } else {
                     return Err(format!("Failed to fetch {address}"));
                 }
@@ -399,7 +399,7 @@ impl TestValidatorGenesis {
         let mut alt_entries: Vec<Pubkey> = Vec::new();
 
         for chunk in addresses.chunks(MAX_MULTIPLE_ACCOUNTS) {
-            info!("Fetching {:?} over RPC...", chunk);
+            info!("Fetching {chunk:?} over RPC...");
             let responses = rpc_client
                 .get_multiple_accounts(chunk)
                 .map_err(|err| format!("Failed to fetch: {err}"))?;
@@ -571,7 +571,7 @@ impl TestValidatorGenesis {
             json_files.extend(matched_files);
         }
 
-        debug!("account files found: {:?}", json_files);
+        debug!("account files found: {json_files:?}");
 
         let accounts: Vec<_> = json_files
             .iter()
@@ -874,12 +874,9 @@ impl TestValidator {
         let mut feature_set = FeatureSet::default().inactive().clone();
         for feature in &config.deactivate_feature_set {
             if feature_set.remove(feature) {
-                info!("Feature for {:?} deactivated", feature)
+                info!("Feature for {feature:?} deactivated")
             } else {
-                warn!(
-                    "Feature {:?} set for deactivation is not a known Feature public key",
-                    feature,
-                )
+                warn!("Feature {feature:?} set for deactivation is not a known Feature public key",)
             }
         }
 
@@ -1208,13 +1205,13 @@ impl TestValidator {
                             }
                         }
                         Err(err) => {
-                            warn!("get_fee_for_message() failed: {:?}", err);
+                            warn!("get_fee_for_message() failed: {err:?}");
                             break;
                         }
                     }
                 }
                 Err(err) => {
-                    warn!("get_latest_blockhash() failed: {:?}", err);
+                    warn!("get_latest_blockhash() failed: {err:?}");
                     break;
                 }
             }
@@ -1257,13 +1254,13 @@ impl TestValidator {
                 match rpc_client.send_transaction(&transaction).await {
                     Ok(_) => *is_deployed = true,
                     Err(e) => {
-                        if format!("{:?}", e).contains("Program is not deployed") {
-                            debug!("{:?} - not deployed", program_id);
+                        if format!("{e:?}").contains("Program is not deployed") {
+                            debug!("{program_id:?} - not deployed");
                         } else {
                             // Assuming all other other errors could only occur *after*
                             // program is deployed for usability.
                             *is_deployed = true;
-                            debug!("{:?} - Unexpected error: {:?}", program_id, e);
+                            debug!("{program_id:?} - Unexpected error: {e:?}");
                         }
                     }
                 }
@@ -1272,7 +1269,7 @@ impl TestValidator {
                 return;
             }
 
-            println!("Waiting for programs to be fully deployed {} ...", attempt);
+            println!("Waiting for programs to be fully deployed {attempt} ...");
             sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)).await;
         }
         panic!("Timeout waiting for program to become usable");

+ 1 - 1
tps-client/src/lib.rs

@@ -63,7 +63,7 @@ pub trait TpsClient {
                     return Ok(new_blockhash);
                 }
             }
-            debug!("Got same blockhash ({:?}), will retry...", blockhash);
+            debug!("Got same blockhash ({blockhash:?}), will retry...");
 
             // Retry ~twice during a slot
             sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2));

+ 1 - 1
tps-client/src/utils.rs

@@ -23,7 +23,7 @@ fn find_node_activated_stake(
 ) -> Result<(u64, u64), ()> {
     let vote_accounts = rpc_client.get_vote_accounts();
     if let Err(error) = vote_accounts {
-        error!("Failed to get vote accounts, error: {}", error);
+        error!("Failed to get vote accounts, error: {error}");
         return Err(());
     }
 

+ 4 - 5
tpu-client-next/tests/connection_workers_scheduler_test.rs

@@ -327,7 +327,8 @@ async fn test_connection_denied_until_allowed() {
     // Expect at least 2 errors: initial rejection + retry attempts.
     assert!(
         stats.write_error_connection_lost + stats.connection_error_application_closed >= 2,
-        "Expected at least 2 connection errors, got write_error_connection_lost: {}, connection_error_application_closed: {}",
+        "Expected at least 2 connection errors, got write_error_connection_lost: {}, \
+         connection_error_application_closed: {}",
         stats.write_error_connection_lost,
         stats.connection_error_application_closed
     );
@@ -384,8 +385,7 @@ async fn test_connection_pruned_and_reopened() {
     // Proactive detection catches pruning immediately, expect multiple retries.
     assert!(
         stats.connection_error_application_closed + stats.write_error_connection_lost >= 1,
-        "Expected at least 1 connection error from pruning and retries. Stats: {:?}",
-        stats
+        "Expected at least 1 connection error from pruning and retries. Stats: {stats:?}"
     );
 
     // Exit server
@@ -814,8 +814,7 @@ async fn test_proactive_connection_close_detection() {
     // Verify proactive close detection
     assert!(
         stats.connection_error_application_closed > 0 || stats.write_error_connection_lost > 0,
-        "Should detect connection close proactively. Stats: {:?}",
-        stats
+        "Should detect connection close proactively. Stats: {stats:?}"
     );
 
     // Exit server

+ 29 - 24
transaction-dos/src/main.rs

@@ -43,7 +43,7 @@ pub fn airdrop_lamports(
     desired_balance: u64,
 ) -> bool {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
-    info!("starting balance {}", starting_balance);
+    info!("starting balance {starting_balance}");
 
     if starting_balance < desired_balance {
         let airdrop_amount = desired_balance - starting_balance;
@@ -67,14 +67,16 @@ pub fn airdrop_lamports(
                     }
                     if tries >= 5 {
                         panic!(
-                            "Error requesting airdrop: to addr: {faucet_addr:?} amount: {airdrop_amount} {result:?}"
+                            "Error requesting airdrop: to addr: {faucet_addr:?} amount: \
+                             {airdrop_amount} {result:?}"
                         )
                     }
                 }
             }
             Err(err) => {
                 panic!(
-                    "Error requesting airdrop: {err:?} to addr: {faucet_addr:?} amount: {airdrop_amount}"
+                    "Error requesting airdrop: {err:?} to addr: {faucet_addr:?} amount: \
+                     {airdrop_amount}"
                 );
             }
         };
@@ -82,7 +84,7 @@ pub fn airdrop_lamports(
         let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
             panic!("airdrop error {e}");
         });
-        info!("current balance {}...", current_balance);
+        info!("current balance {current_balance}...");
 
         if current_balance - starting_balance != airdrop_amount {
             info!(
@@ -159,7 +161,7 @@ fn run_transactions_dos(
         CommitmentConfig::confirmed(),
     ));
 
-    info!("Targeting {}", entrypoint_addr);
+    info!("Targeting {entrypoint_addr}");
 
     let space = maybe_space.unwrap_or(1000);
 
@@ -266,7 +268,7 @@ fn run_transactions_dos(
         .collect();
     let mut last_balance = Instant::now();
 
-    info!("Starting balance(s): {:?}", balances);
+    info!("Starting balance(s): {balances:?}");
 
     let executor = TransactionExecutor::new(entrypoint_addr);
 
@@ -297,10 +299,7 @@ fn run_transactions_dos(
                 }
                 last_balance = Instant::now();
                 if *balance < lamports * 2 {
-                    info!(
-                        "Balance {} is less than needed: {}, doing aidrop...",
-                        balance, lamports
-                    );
+                    info!("Balance {balance} is less than needed: {lamports}, doing aidrop...");
                     if !airdrop_lamports(
                         &client,
                         &faucet_addr,
@@ -375,7 +374,7 @@ fn run_transactions_dos(
             accounts_created = true;
         } else {
             // Create dos transactions
-            info!("creating new batch of size: {}", batch_size);
+            info!("creating new batch of size: {batch_size}");
             let chunk_size = batch_size / payer_keypairs.len();
             for (i, keypair) in payer_keypairs.iter().enumerate() {
                 let txs: Vec<_> = (0..chunk_size)
@@ -412,8 +411,8 @@ fn run_transactions_dos(
         count += 1;
         if last_log.elapsed().as_secs() > 3 {
             info!(
-                "total_dos_messages_sent: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
-                total_dos_messages_sent, tx_sent_count, count, balances
+                "total_dos_messages_sent: {total_dos_messages_sent} tx_sent_count: \
+                 {tx_sent_count} loop_count: {count} balance(s): {balances:?}"
             );
             last_log = Instant::now();
         }
@@ -474,14 +473,17 @@ fn main() {
                 .takes_value(true)
                 .multiple(true)
                 .value_name("FILE")
-                .help("One or more keypairs to create accounts owned by the program and which the program will write to."),
+                .help(
+                    "One or more keypairs to create accounts owned by the program and which the \
+                     program will write to.",
+                ),
         )
         .arg(
             Arg::with_name("account_groups")
-            .long("account_groups")
-            .takes_value(true)
-            .value_name("NUM")
-            .help("Number of groups of accounts to split the accounts into")
+                .long("account_groups")
+                .takes_value(true)
+                .value_name("NUM")
+                .help("Number of groups of accounts to split the accounts into"),
         )
         .arg(
             Arg::with_name("batch_size")
@@ -516,7 +518,10 @@ fn main() {
                 .long("batch-sleep-ms")
                 .takes_value(true)
                 .value_name("NUM")
-                .help("Sleep for this long the num outstanding transactions is greater than the batch size."),
+                .help(
+                    "Sleep for this long the num outstanding transactions is greater than the \
+                     batch size.",
+                ),
         )
         .arg(
             Arg::with_name("check_gossip")
@@ -561,7 +566,7 @@ fn main() {
             Some(
                 solana_net_utils::get_cluster_shred_version(&entrypoint_addr).unwrap_or_else(
                     |err| {
-                        eprintln!("Failed to get shred version: {}", err);
+                        eprintln!("Failed to get shred version: {err}");
                         exit(1);
                     },
                 ),
@@ -615,7 +620,7 @@ fn main() {
     let account_keypair_refs: Vec<&Keypair> = account_keypairs.iter().collect();
 
     let rpc_addr = if !skip_gossip {
-        info!("Finding cluster entry: {:?}", entrypoint_addr);
+        info!("Finding cluster entry: {entrypoint_addr:?}");
         let (gossip_nodes, _validators) = discover(
             None, // keypair
             Some(&entrypoint_addr),
@@ -635,7 +640,7 @@ fn main() {
         info!("done found {} nodes", gossip_nodes.len());
         gossip_nodes[0].rpc().unwrap()
     } else {
-        info!("Using {:?} as the RPC address", entrypoint_addr);
+        info!("Using {entrypoint_addr:?} as the RPC address");
         entrypoint_addr
     };
 
@@ -694,7 +699,7 @@ pub mod test {
         let blockhash = solana_hash::Hash::default();
         let tx = Transaction::new(&signers, message, blockhash);
         let size = bincode::serialized_size(&tx).unwrap();
-        info!("size:{}", size);
+        info!("size:{size}");
         assert!(size < PACKET_DATA_SIZE as u64);
     }
 
@@ -758,6 +763,6 @@ pub mod test {
             100,
         );
         start.stop();
-        info!("{}", start);
+        info!("{start}");
     }
 }

+ 4 - 6
transaction-view/src/bytes.rs

@@ -301,13 +301,12 @@ mod tests {
             let read_value = read_compressed_u16(&buffer, &mut offset);
 
             // Assert that the read value matches the original value
-            assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value);
+            assert_eq!(read_value, Ok(value), "Value mismatch for: {value}");
 
             // Assert that the offset matches the serialized length
             assert_eq!(
                 offset, serialized_len as usize,
-                "Offset mismatch for: {}",
-                value
+                "Offset mismatch for: {value}"
             );
         }
 
@@ -354,13 +353,12 @@ mod tests {
             let read_value = optimized_read_compressed_u16(&buffer, &mut offset);
 
             // Assert that the read value matches the original value
-            assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value);
+            assert_eq!(read_value, Ok(value), "Value mismatch for: {value}");
 
             // Assert that the offset matches the serialized length
             assert_eq!(
                 offset, serialized_len as usize,
-                "Offset mismatch for: {}",
-                value
+                "Offset mismatch for: {value}"
             );
         }
 

+ 9 - 8
unified-scheduler-pool/src/lib.rs

@@ -201,7 +201,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> BlockProductionSchedulerInner<S
     fn take_pooled(&mut self) -> S::Inner {
         let id = {
             let Self::Pooled(inner) = &self else {
-                panic!("cannot take: {:?}", self)
+                panic!("cannot take: {self:?}")
             };
             inner.id()
         };
@@ -609,8 +609,9 @@ where
                 };
 
                 info!(
-                    "Scheduler pool cleaner: dropped {} idle inners, {} trashed inners, triggered {} timeout listeners",
-                    idle_inner_count, trashed_inner_count, triggered_timeout_listener_count,
+                    "Scheduler pool cleaner: dropped {idle_inner_count} idle inners, \
+                     {trashed_inner_count} trashed inners, triggered \
+                     {triggered_timeout_listener_count} timeout listeners",
                 );
                 sleepless_testing::at(CheckPoint::IdleSchedulerCleaned(idle_inner_count));
                 sleepless_testing::at(CheckPoint::TrashedSchedulerCleaned(trashed_inner_count));
@@ -2328,10 +2329,10 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
                         let current_thread = thread::current();
                         error!("handler thread is panicking: {:?}", current_thread);
                         if sender.send(Err(HandlerPanicked)).is_ok() {
-                            info!("notified a panic from {:?}", current_thread);
+                            info!("notified a panic from {current_thread:?}");
                         } else {
                             // It seems that the scheduler thread has been aborted already...
-                            warn!("failed to notify a panic from {:?}", current_thread);
+                            warn!("failed to notify a panic from {current_thread:?}");
                         }
                     }
                     let mut task = ExecutedTask::new_boxed(task);
@@ -2364,7 +2365,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
             .map({
                 |thx| {
                     thread::Builder::new()
-                        .name(format!("solScHandle{mode_char}{:02}", thx))
+                        .name(format!("solScHandle{mode_char}{thx:02}"))
                         .spawn_tracked(handler_main_loop())
                         .unwrap()
                 }
@@ -2395,13 +2396,13 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
                     (_, Some(s)) => s,
                     (None, None) => "<No panic info>",
                 };
-                panic!("{} (From: {:?})", panic_message, thread);
+                panic!("{panic_message} (From: {thread:?})");
             })
         }
 
         if let Some(scheduler_thread) = self.scheduler_thread.take() {
             for thread in self.handler_threads.drain(..) {
-                debug!("joining...: {:?}", thread);
+                debug!("joining...: {thread:?}");
                 () = join_with_panic_message(thread).unwrap();
             }
             () = join_with_panic_message(scheduler_thread).unwrap();

+ 1 - 1
version/build.rs

@@ -5,7 +5,7 @@ fn main() {
         if git_output.status.success() {
             if let Ok(git_commit_hash) = String::from_utf8(git_output.stdout) {
                 let trimmed_hash = git_commit_hash.trim().to_string();
-                println!("cargo:rustc-env=AGAVE_GIT_COMMIT_HASH={}", trimmed_hash);
+                println!("cargo:rustc-env=AGAVE_GIT_COMMIT_HASH={trimmed_hash}");
             }
         }
     }

+ 37 - 26
watchtower/src/main.rs

@@ -44,7 +44,8 @@ fn get_config() -> Config {
     let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(solana_version::version!())
-        .after_help("ADDITIONAL HELP:
+        .after_help(
+            "ADDITIONAL HELP:
         To receive a Slack, Discord, PagerDuty and/or Telegram notification on sanity failure,
         define environment variables before running `agave-watchtower`:
 
@@ -56,7 +57,8 @@ fn get_config() -> Config {
         export TELEGRAM_BOT_TOKEN=...
         export TELEGRAM_CHAT_ID=...
 
-        PagerDuty requires an Integration Key from the Events API v2 (Add this integration to your PagerDuty service to get this)
+        PagerDuty requires an Integration Key from the Events API v2 (Add this integration to your \
+             PagerDuty service to get this)
 
         export PAGERDUTY_INTEGRATION_KEY=...
 
@@ -64,7 +66,10 @@ fn get_config() -> Config {
         and a sending number owned by that account,
         define environment variable before running `agave-watchtower`:
 
-        export TWILIO_CONFIG='ACCOUNT=<account>,TOKEN=<securityToken>,TO=<receivingNumber>,FROM=<sendingNumber>'")
+        export \
+             TWILIO_CONFIG='ACCOUNT=<account>,TOKEN=<securityToken>,TO=<receivingNumber>,\
+             FROM=<sendingNumber>'",
+        )
         .arg({
             let arg = Arg::with_name("config_file")
                 .short("C")
@@ -96,7 +101,9 @@ fn get_config() -> Config {
                 .multiple(true)
                 .number_of_values(3)
                 .conflicts_with("json_rpc_url")
-                .help("JSON RPC URLs for the cluster (takes exactly 3 values, conflicts with --url)"),
+                .help(
+                    "JSON RPC URLs for the cluster (takes exactly 3 values, conflicts with --url)",
+                ),
         )
         .arg(
             Arg::with_name("rpc_timeout")
@@ -120,7 +127,7 @@ fn get_config() -> Config {
                 .value_name("COUNT")
                 .takes_value(true)
                 .default_value("1")
-                .help("How many consecutive failures must occur to trigger a notification")
+                .help("How many consecutive failures must occur to trigger a notification"),
         )
         .arg(
             Arg::with_name("validator_identities")
@@ -129,7 +136,7 @@ fn get_config() -> Config {
                 .takes_value(true)
                 .validator(is_pubkey_or_keypair)
                 .multiple(true)
-                .help("Validator identities to monitor for delinquency")
+                .help("Validator identities to monitor for delinquency"),
         )
         .arg(
             Arg::with_name("minimum_validator_identity_balance")
@@ -138,19 +145,22 @@ fn get_config() -> Config {
                 .takes_value(true)
                 .default_value("10")
                 .validator(is_parsable::<f64>)
-                .help("Alert when the validator identity balance is less than this amount of SOL")
+                .help("Alert when the validator identity balance is less than this amount of SOL"),
         )
         .arg(
             // Deprecated parameter, now always enabled
             Arg::with_name("no_duplicate_notifications")
                 .long("no-duplicate-notifications")
-                .hidden(hidden_unless_forced())
+                .hidden(hidden_unless_forced()),
         )
         .arg(
             Arg::with_name("monitor_active_stake")
                 .long("monitor-active-stake")
                 .takes_value(false)
-                .help("Alert when the current stake for the cluster drops below the amount specified by --active-stake-alert-threshold"),
+                .help(
+                    "Alert when the current stake for the cluster drops below the amount \
+                     specified by --active-stake-alert-threshold",
+                ),
         )
         .arg(
             Arg::with_name("active_stake_alert_threshold")
@@ -165,10 +175,11 @@ fn get_config() -> Config {
             Arg::with_name("ignore_http_bad_gateway")
                 .long("ignore-http-bad-gateway")
                 .takes_value(false)
-                .help("Ignore HTTP 502 Bad Gateway errors from the JSON RPC URL. \
-                    This flag can help reduce false positives, at the expense of \
-                    no alerting should a Bad Gateway error be a side effect of \
-                    the real problem")
+                .help(
+                    "Ignore HTTP 502 Bad Gateway errors from the JSON RPC URL. This flag can help \
+                     reduce false positives, at the expense of no alerting should a Bad Gateway \
+                     error be a side effect of the real problem",
+                ),
         )
         .arg(
             Arg::with_name("name_suffix")
@@ -176,7 +187,7 @@ fn get_config() -> Config {
                 .value_name("SUFFIX")
                 .takes_value(true)
                 .default_value("")
-                .help("Add this string into all notification messages after \"agave-watchtower\"")
+                .help("Add this string into all notification messages after \"agave-watchtower\""),
         )
         .arg(
             Arg::with_name("acceptable_slot_range")
@@ -185,7 +196,7 @@ fn get_config() -> Config {
                 .takes_value(true)
                 .default_value("50")
                 .validator(is_parsable::<u64>)
-                .help("Acceptable range of slots for endpoints, checked at watchtower startup")
+                .help("Acceptable range of slots for endpoints, checked at watchtower startup"),
         )
         .get_matches();
 
@@ -281,8 +292,8 @@ fn query_endpoint(
 
     match get_cluster_info(config, &endpoint.rpc_client) {
         Ok((transaction_count, recent_blockhash, vote_accounts, validator_balances)) => {
-            info!("Current transaction count: {}", transaction_count);
-            info!("Recent blockhash: {}", recent_blockhash);
+            info!("Current transaction count: {transaction_count}");
+            info!("Recent blockhash: {recent_blockhash}");
             info!("Current validator count: {}", vote_accounts.current.len());
             info!(
                 "Delinquent validator count: {}",
@@ -384,12 +395,12 @@ fn query_endpoint(
             if let client_error::ErrorKind::Reqwest(reqwest_err) = err.kind() {
                 if let Some(client_error::reqwest::StatusCode::BAD_GATEWAY) = reqwest_err.status() {
                     if config.ignore_http_bad_gateway {
-                        warn!("Error suppressed: {}", err);
+                        warn!("Error suppressed: {err}");
                         return Ok(None);
                     }
                 }
             }
-            warn!("rpc-error: {}", err);
+            warn!("rpc-error: {err}");
             Err(err)
         }
     }
@@ -412,8 +423,8 @@ fn validate_endpoints(
         let slot = endpoint.rpc_client.get_slot()?;
         let genesis_hash = endpoint.rpc_client.get_genesis_hash()?;
 
-        info!("Genesis hash: {}", genesis_hash);
-        info!("Current slot: {}", slot);
+        info!("Genesis hash: {genesis_hash}");
+        info!("Current slot: {slot}");
 
         max_slot = max_slot.max(slot);
         min_slot = min_slot.min(slot);
@@ -457,7 +468,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
         .collect();
 
     if let Err(err) = validate_endpoints(&config, &endpoints) {
-        error!("Endpoint validation failed: {}", err);
+        error!("Endpoint validation failed: {err}");
         std::process::exit(1);
     }
 
@@ -509,9 +520,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
             if failures.len() > 1 {
                 failures.clear(); // Ignoring other failures when watchtower is unreliable
 
-                let watchtower_unreliable_msg =
-                    "Watchtower is unreliable, RPC endpoints provide inconsistent information"
-                        .into();
+                let watchtower_unreliable_msg = "Watchtower is unreliable, RPC endpoints provide \
+                                                 inconsistent information"
+                    .into();
                 failures.insert("watchtower-reliability", watchtower_unreliable_msg);
             }
 
@@ -549,7 +560,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                     "All clear after {}",
                     humantime::format_duration(alarm_duration)
                 );
-                info!("{}", all_clear_msg);
+                info!("{all_clear_msg}");
                 notifier.send(
                     &format!("agave-watchtower{}: {}", config.name_suffix, all_clear_msg),
                     &NotificationType::Resolve { incident },