
Clippy cleanup for all targets and nightly rust (also support 1.44.0) (#10445)

* address warnings from 'rustup run beta cargo clippy --workspace'

minor refactoring in:
- cli/src/cli.rs
- cli/src/offline/blockhash_query.rs
- logger/src/lib.rs
- runtime/src/accounts_db.rs

expect some performance improvement in AccountsDB::clean_accounts()
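
A minimal sketch of why clean_accounts() should get a bit faster (illustrative types only, not the real AccountsDB structures): the parallel reduce now merges each worker's partial results by moving them into one accumulator with extend(), instead of cloning every key and value into a freshly built map.

    // Illustrative sketch only; the real reduce closure lives in
    // runtime/src/accounts_db.rs and uses AccountsDB's own types.
    use std::collections::HashMap;

    type Purges = HashMap<u64, Vec<u64>>;

    // Old shape: rebuild a new map by cloning every entry from both sides.
    // New shape: move the right-hand side into the left with extend().
    fn merge_in_place(
        mut left: (Purges, Vec<u64>),
        right: (Purges, Vec<u64>),
    ) -> (Purges, Vec<u64>) {
        left.0.extend(right.0); // moves entries, no per-entry clone
        left.1.extend(right.1);
        left
    }

    fn main() {
        let a = (vec![(1, vec![10])].into_iter().collect(), vec![1]);
        let b = (vec![(2, vec![20])].into_iter().collect(), vec![2]);
        let (purges, purges_in_root) = merge_in_place(a, b);
        assert_eq!(purges.len(), 2);
        assert_eq!(purges_in_root, vec![1, 2]);
    }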

* address warnings from 'rustup run beta cargo clippy --workspace --tests'

* address warnings from 'rustup run nightly cargo clippy --workspace --all-targets'

* rustfmt

* fix warning stragglers

* properly fix clippy warnings in test_vote_subscribe()
replace ref-to-arc with ref parameters where arc not cloned
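
A minimal sketch of the ref-to-Arc pattern being fixed (hypothetical names, not the actual RpcSubscriptions API): when a function never clones the Arc, it can borrow the inner value directly, and callers holding an Arc can still pass it thanks to Deref coercion.

    // Hedged sketch of the pattern; `Subscriptions` stands in for the real
    // RpcSubscriptions type and `process_votes` for the listener code.
    use std::sync::Arc;

    struct Subscriptions;

    impl Subscriptions {
        fn notify_vote(&self, slot: u64) {
            println!("notify subscribers for slot {}", slot);
        }
    }

    // Before: `subscriptions: Arc<Subscriptions>` (or &Arc<_>) forced callers
    // to clone or hold an Arc even though the function never stores it.
    // After: borrow the inner value; &Arc<Subscriptions> coerces via Deref.
    fn process_votes(subscriptions: &Subscriptions, slot: u64) {
        subscriptions.notify_vote(slot);
    }

    fn main() {
        let subscriptions = Arc::new(Subscriptions);
        process_votes(&subscriptions, 42); // no Arc clone needed at the call site
    }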

* Remove lock around JsonRpcRequestProcessor (#10417)

automerge

* make ancestors parameter optional to avoid forcing construction of empty hash maps
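
A minimal sketch of the optional-ancestors idea, assuming simplified types (the diff changes AccountsIndex::get to take the ancestors map as an Option; the visibility rule below is only illustrative): callers with no fork context pass None instead of allocating an empty HashMap for every lookup.

    // Hedged sketch with simplified types, not the real AccountsIndex logic.
    use std::collections::HashMap;

    type Slot = u64;
    type Ancestors = HashMap<Slot, usize>;

    fn is_visible(slot: Slot, max_root: Slot, ancestors: Option<&Ancestors>) -> bool {
        // Rooted slots are always visible; otherwise the caller must have
        // supplied an ancestor set that contains the slot.
        slot <= max_root || ancestors.map_or(false, |a| a.contains_key(&slot))
    }

    fn main() {
        let ancestors: Ancestors = vec![(7, 0)].into_iter().collect();
        assert!(is_visible(3, 5, None)); // rooted; no HashMap had to be built
        assert!(is_visible(7, 5, Some(&ancestors))); // visible through the fork
        assert!(!is_visible(9, 5, None)); // neither rooted nor an ancestor
    }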

Co-authored-by: Greg Fitzgerald <greg@solana.com>
Kristofer Peterson committed 5 years ago
commit e23340d89e
63 changed files with 259 additions and 309 deletions
  1. banking-bench/src/main.rs  +1 -1
  2. cli/src/cli.rs  +11 -18
  3. cli/src/cluster_query.rs  +8 -11
  4. cli/src/offline/blockhash_query.rs  +5 -10
  5. client/src/rpc_client.rs  +3 -4
  6. client/src/rpc_request.rs  +4 -5
  7. core/benches/banking_stage.rs  +4 -5
  8. core/benches/blockstore.rs  +0 -2
  9. core/benches/cluster_info.rs  +2 -2
  10. core/benches/poh_verify.rs  +2 -4
  11. core/benches/retransmit_stage.rs  +4 -2
  12. core/benches/sigverify_stage.rs  +2 -4
  13. core/src/cluster_info.rs  +14 -15
  14. core/src/cluster_info_vote_listener.rs  +11 -17
  15. core/src/consensus.rs  +1 -1
  16. core/src/crds_gossip_push.rs  +2 -2
  17. core/src/crds_value.rs  +3 -3
  18. core/src/gossip_service.rs  +1 -1
  19. core/src/poh_recorder.rs  +2 -2
  20. core/src/replay_stage.rs  +2 -2
  21. core/src/retransmit_stage.rs  +3 -3
  22. core/src/rpc.rs  +2 -2
  23. core/src/rpc_pubsub.rs  +2 -4
  24. core/src/validator.rs  +1 -1
  25. core/src/window_service.rs  +3 -3
  26. core/tests/gossip.rs  +1 -1
  27. faucet/src/faucet.rs  +13 -13
  28. genesis/src/main.rs  +1 -1
  29. install/src/lib.rs  +1 -1
  30. ledger/benches/sigverify_shreds.rs  +2 -2
  31. ledger/src/blockstore.rs  +7 -7
  32. ledger/src/blockstore_processor.rs  +1 -1
  33. ledger/src/sigverify_shreds.rs  +1 -1
  34. ledger/tests/shred.rs  +2 -2
  35. local-cluster/tests/local_cluster.rs  +2 -2
  36. logger/src/lib.rs  +2 -4
  37. metrics/src/metrics.rs  +1 -2
  38. net-utils/src/ip_echo_server.rs  +3 -3
  39. net-utils/src/lib.rs  +3 -4
  40. perf/benches/sigverify.rs  +2 -2
  41. programs/budget/src/budget_expr.rs  +1 -2
  42. runtime/benches/accounts.rs  +2 -2
  43. runtime/benches/accounts_index.rs  +3 -6
  44. runtime/benches/append_vec.rs  +2 -8
  45. runtime/benches/bank.rs  +13 -15
  46. runtime/benches/bloom.rs  +1 -4
  47. runtime/benches/status_cache.rs  +1 -1
  48. runtime/src/accounts.rs  +3 -3
  49. runtime/src/accounts_db.rs  +19 -26
  50. runtime/src/accounts_index.rs  +26 -23
  51. runtime/src/bank.rs  +3 -3
  52. runtime/src/bloom.rs  +8 -3
  53. runtime/src/legacy_system_instruction_processor0.rs  +1 -1
  54. runtime/src/nonce_utils.rs  +2 -2
  55. runtime/src/rent_collector.rs  +10 -5
  56. runtime/src/status_cache.rs  +1 -1
  57. runtime/src/system_instruction_processor.rs  +1 -1
  58. sdk/benches/short_vec.rs  +2 -2
  59. sdk/benches/slot_history.rs  +1 -1
  60. sdk/src/abi_example.rs  +1 -1
  61. sdk/src/nonce/account.rs  +19 -19
  62. validator/src/main.rs  +3 -9
  63. watchtower/src/main.rs  +1 -1

+ 1 - 1
banking-bench/src/main.rs

@@ -209,7 +209,7 @@ fn main() {
         bank.clear_signatures();
     }
 
-    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
+    let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
     let ledger_path = get_tmp_ledger_path!();
     {
         let blockstore = Arc::new(

+ 11 - 18
cli/src/cli.rs

@@ -754,25 +754,18 @@ pub fn parse_command(
         ("airdrop", Some(matches)) => {
             let faucet_port = matches
                 .value_of("faucet_port")
-                .unwrap()
+                .ok_or_else(|| CliError::BadParameter("Missing faucet port".to_string()))?
                 .parse()
-                .or_else(|err| {
-                    Err(CliError::BadParameter(format!(
-                        "Invalid faucet port: {}",
-                        err
-                    )))
-                })?;
-
-            let faucet_host = if let Some(faucet_host) = matches.value_of("faucet_host") {
-                Some(solana_net_utils::parse_host(faucet_host).or_else(|err| {
-                    Err(CliError::BadParameter(format!(
-                        "Invalid faucet host: {}",
-                        err
-                    )))
-                })?)
-            } else {
-                None
-            };
+                .map_err(|err| CliError::BadParameter(format!("Invalid faucet port: {}", err)))?;
+
+            let faucet_host = matches
+                .value_of("faucet_host")
+                .map(|faucet_host| {
+                    solana_net_utils::parse_host(faucet_host).map_err(|err| {
+                        CliError::BadParameter(format!("Invalid faucet host: {}", err))
+                    })
+                })
+                .transpose()?;
             let pubkey = pubkey_of_signer(matches, "to", wallet_manager)?;
             let signers = if pubkey.is_some() {
                 vec![]

+ 8 - 11
cli/src/cluster_query.rs

@@ -659,7 +659,7 @@ pub fn process_get_epoch_info(
     commitment_config: CommitmentConfig,
 ) -> ProcessResult {
     let epoch_info: CliEpochInfo = rpc_client
-        .get_epoch_info_with_commitment(commitment_config.clone())?
+        .get_epoch_info_with_commitment(commitment_config)?
         .into();
     Ok(config.output_format.formatted_string(&epoch_info))
 }
@@ -673,7 +673,7 @@ pub fn process_get_slot(
     rpc_client: &RpcClient,
     commitment_config: CommitmentConfig,
 ) -> ProcessResult {
-    let slot = rpc_client.get_slot_with_commitment(commitment_config.clone())?;
+    let slot = rpc_client.get_slot_with_commitment(commitment_config)?;
     Ok(slot.to_string())
 }
 
@@ -681,7 +681,7 @@ pub fn process_get_epoch(
     rpc_client: &RpcClient,
     commitment_config: CommitmentConfig,
 ) -> ProcessResult {
-    let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
+    let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config)?;
     Ok(epoch_info.epoch.to_string())
 }
 
@@ -868,7 +868,7 @@ pub fn process_supply(
     commitment_config: CommitmentConfig,
     print_accounts: bool,
 ) -> ProcessResult {
-    let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?;
+    let supply_response = rpc_client.supply_with_commitment(commitment_config)?;
     let mut supply: CliSupply = supply_response.value.into();
     supply.print_accounts = print_accounts;
     Ok(config.output_format.formatted_string(&supply))
@@ -878,7 +878,7 @@ pub fn process_total_supply(
     rpc_client: &RpcClient,
     commitment_config: CommitmentConfig,
 ) -> ProcessResult {
-    let total_supply = rpc_client.total_supply_with_commitment(commitment_config.clone())?;
+    let total_supply = rpc_client.total_supply_with_commitment(commitment_config)?;
     Ok(format!("{} SOL", lamports_to_sol(total_supply)))
 }
 
@@ -886,8 +886,7 @@ pub fn process_get_transaction_count(
     rpc_client: &RpcClient,
     commitment_config: CommitmentConfig,
 ) -> ProcessResult {
-    let transaction_count =
-        rpc_client.get_transaction_count_with_commitment(commitment_config.clone())?;
+    let transaction_count = rpc_client.get_transaction_count_with_commitment(commitment_config)?;
     Ok(transaction_count.to_string())
 }
 
@@ -952,10 +951,8 @@ pub fn process_ping(
             Ok(signature) => {
                 let transaction_sent = Instant::now();
                 loop {
-                    let signature_status = rpc_client.get_signature_status_with_commitment(
-                        &signature,
-                        commitment_config.clone(),
-                    )?;
+                    let signature_status = rpc_client
+                        .get_signature_status_with_commitment(&signature, commitment_config)?;
                     let elapsed_time = Instant::now().duration_since(transaction_sent);
                     if let Some(transaction_status) = signature_status {
                         match transaction_status {

+ 5 - 10
cli/src/offline/blockhash_query.rs

@@ -35,16 +35,11 @@ impl Source {
                 Ok(res)
             }
             Self::NonceAccount(ref pubkey) => {
-                let res = nonce::get_account(rpc_client, pubkey)
-                    .and_then(|ref a| nonce::data_from_account(a))
-                    .and_then(|d| {
-                        if d.blockhash == *blockhash {
-                            Ok(Some(d.fee_calculator))
-                        } else {
-                            Ok(None)
-                        }
-                    })?;
-                Ok(res)
+                let res = nonce::get_account(rpc_client, pubkey)?;
+                let res = nonce::data_from_account(&res)?;
+                Ok(Some(res)
+                    .filter(|d| d.blockhash == *blockhash)
+                    .map(|d| d.fee_calculator))
             }
         }
     }

+ 3 - 4
client/src/rpc_client.rs

@@ -664,7 +664,7 @@ impl RpcClient {
     ) -> ClientResult<u64> {
         let now = Instant::now();
         loop {
-            match self.get_balance_with_commitment(&pubkey, commitment_config.clone()) {
+            match self.get_balance_with_commitment(&pubkey, commitment_config) {
                 Ok(bal) => {
                     return Ok(bal.value);
                 }
@@ -699,8 +699,7 @@ impl RpcClient {
     ) -> Option<u64> {
         const LAST: usize = 30;
         for run in 0..LAST {
-            let balance_result =
-                self.poll_get_balance_with_commitment(pubkey, commitment_config.clone());
+            let balance_result = self.poll_get_balance_with_commitment(pubkey, commitment_config);
             if expected_balance.is_none() {
                 return balance_result.ok();
             }
@@ -734,7 +733,7 @@ impl RpcClient {
         let now = Instant::now();
         loop {
             if let Ok(Some(_)) =
-                self.get_signature_status_with_commitment(&signature, commitment_config.clone())
+                self.get_signature_status_with_commitment(&signature, commitment_config)
             {
                 break;
             }

+ 4 - 5
client/src/rpc_request.rs

@@ -136,12 +136,12 @@ mod tests {
     fn test_build_request_json() {
         let test_request = RpcRequest::GetAccountInfo;
         let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
-        let request = test_request.build_request_json(1, json!([addr.clone()]));
+        let request = test_request.build_request_json(1, json!([addr]));
         assert_eq!(request["method"], "getAccountInfo");
         assert_eq!(request["params"], json!([addr]));
 
         let test_request = RpcRequest::GetBalance;
-        let request = test_request.build_request_json(1, json!([addr.clone()]));
+        let request = test_request.build_request_json(1, json!([addr]));
         assert_eq!(request["method"], "getBalance");
 
         let test_request = RpcRequest::GetEpochInfo;
@@ -186,13 +186,12 @@ mod tests {
 
         // Test request with CommitmentConfig and no params
         let test_request = RpcRequest::GetRecentBlockhash;
-        let request = test_request.build_request_json(1, json!([commitment_config.clone()]));
+        let request = test_request.build_request_json(1, json!([commitment_config]));
         assert_eq!(request["params"], json!([commitment_config.clone()]));
 
         // Test request with CommitmentConfig and params
         let test_request = RpcRequest::GetBalance;
-        let request =
-            test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
+        let request = test_request.build_request_json(1, json!([addr, commitment_config]));
         assert_eq!(request["params"], json!([addr, commitment_config]));
     }
 }

+ 4 - 5
core/benches/banking_stage.rs

@@ -109,7 +109,6 @@ fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Tra
 fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
     let progs = 4;
     (0..txes)
-        .into_iter()
         .map(|_| {
             let mut instructions = vec![];
             let from_key = Keypair::new();
@@ -181,7 +180,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
         assert!(r.is_ok(), "sanity parallel execution");
     }
     bank.clear_signatures();
-    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
+    let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
     let ledger_path = get_tmp_ledger_path!();
     {
         let blockstore = Arc::new(
@@ -207,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
         // If it is dropped before poh_service, then poh_service will error when
         // calling send() on the channel.
         let signal_receiver = Arc::new(signal_receiver);
-        let signal_receiver2 = signal_receiver.clone();
+        let signal_receiver2 = signal_receiver;
         bencher.iter(move || {
             let now = Instant::now();
             let mut sent = 0;
@@ -262,7 +261,7 @@ fn simulate_process_entries(
     mint_keypair: &Keypair,
     mut tx_vector: Vec<Transaction>,
     genesis_config: &GenesisConfig,
-    keypairs: &Vec<Keypair>,
+    keypairs: &[Keypair],
     initial_lamports: u64,
     num_accounts: usize,
 ) {
@@ -288,7 +287,7 @@ fn simulate_process_entries(
         hash: next_hash(&bank.last_blockhash(), 1, &tx_vector),
         transactions: tx_vector,
     };
-    process_entries(&bank, &vec![entry], randomize_txs, None).unwrap();
+    process_entries(&bank, &[entry], randomize_txs, None).unwrap();
 }
 
 fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) {

+ 0 - 2
core/benches/blockstore.rs

@@ -1,6 +1,4 @@
 #![feature(test)]
-use rand;
-
 extern crate solana_ledger;
 extern crate test;
 

+ 2 - 2
core/benches/cluster_info.rs

@@ -22,7 +22,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
     solana_logger::setup();
     let leader_pubkey = Pubkey::new_rand();
     let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
-    let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
+    let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
     let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
 
     const NUM_SHREDS: usize = 32;
@@ -37,7 +37,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
     }
     let stakes = Arc::new(stakes);
     let cluster_info = Arc::new(cluster_info);
-    let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
+    let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes));
     let shreds = Arc::new(shreds);
     let last_datapoint = Arc::new(AtomicU64::new(0));
     bencher.iter(move || {

+ 2 - 4
core/benches/poh_verify.rs

@@ -14,7 +14,6 @@ const NUM_ENTRIES: usize = 800;
 fn bench_poh_verify_ticks(bencher: &mut Bencher) {
     let zero = Hash::default();
     let mut cur_hash = hash(&zero.as_ref());
-    let start = *&cur_hash;
 
     let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
     for _ in 0..NUM_ENTRIES {
@@ -22,7 +21,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
     }
 
     bencher.iter(|| {
-        ticks.verify(&start);
+        ticks.verify(&cur_hash);
     })
 }
 
@@ -30,7 +29,6 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
 fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
     let zero = Hash::default();
     let mut cur_hash = hash(&zero.as_ref());
-    let start = *&cur_hash;
 
     let keypair1 = Keypair::new();
     let pubkey1 = keypair1.pubkey();
@@ -42,6 +40,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
     }
 
     bencher.iter(|| {
-        ticks.verify(&start);
+        ticks.verify(&cur_hash);
     })
 }

+ 4 - 2
core/benches/retransmit_stage.rs

@@ -65,7 +65,10 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     let tx = test_tx();
     const NUM_PACKETS: usize = 50;
     let chunk_size = NUM_PACKETS / (4 * NUM_THREADS);
-    let batches = to_packets_chunked(&vec![tx; NUM_PACKETS], chunk_size);
+    let batches = to_packets_chunked(
+        &std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
+        chunk_size,
+    );
     info!("batches: {}", batches.len());
 
     let retransmitter_handles = retransmitter(
@@ -80,7 +83,6 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     bencher.iter(move || {
         let peer_sockets1 = peer_sockets.clone();
         let handles: Vec<_> = (0..NUM_PEERS)
-            .into_iter()
             .map(|p| {
                 let peer_sockets2 = peer_sockets1.clone();
                 let total2 = total.clone();

+ 2 - 4
core/benches/sigverify_stage.rs

@@ -37,16 +37,14 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
         let from_keypair = Keypair::new();
         let to_keypair = Keypair::new();
         let txs: Vec<_> = (0..len)
-            .into_iter()
             .map(|_| {
                 let amount = thread_rng().gen();
-                let tx = system_transaction::transfer(
+                system_transaction::transfer(
                     &from_keypair,
                     &to_keypair.pubkey(),
                     amount,
                     Hash::default(),
-                );
-                tx
+                )
             })
             .collect();
         to_packets_chunked(&txs, chunk_size)

+ 14 - 15
core/src/cluster_info.rs

@@ -438,7 +438,7 @@ impl ClusterInfo {
 
     pub fn update_contact_info<F>(&self, modify: F)
     where
-        F: FnOnce(&mut ContactInfo) -> (),
+        F: FnOnce(&mut ContactInfo),
     {
         let my_id = self.id();
         modify(&mut self.my_contact_info.write().unwrap());
@@ -1917,19 +1917,18 @@ impl ClusterInfo {
             .into_iter()
             .filter_map(|(from, prune_set)| {
                 inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
-                me.lookup_contact_info(&from, |ci| ci.clone())
-                    .and_then(|ci| {
-                        let mut prune_msg = PruneData {
-                            pubkey: self_id,
-                            prunes: prune_set.into_iter().collect(),
-                            signature: Signature::default(),
-                            destination: from,
-                            wallclock: timestamp(),
-                        };
-                        prune_msg.sign(&me.keypair);
-                        let rsp = Protocol::PruneMessage(self_id, prune_msg);
-                        Some((ci.gossip, rsp))
-                    })
+                me.lookup_contact_info(&from, |ci| ci.clone()).map(|ci| {
+                    let mut prune_msg = PruneData {
+                        pubkey: self_id,
+                        prunes: prune_set.into_iter().collect(),
+                        signature: Signature::default(),
+                        destination: from,
+                        wallclock: timestamp(),
+                    };
+                    prune_msg.sign(&me.keypair);
+                    let rsp = Protocol::PruneMessage(self_id, prune_msg);
+                    (ci.gossip, rsp)
+                })
             })
             .collect();
         if rsp.is_empty() {
@@ -2932,7 +2931,7 @@ mod tests {
         assert_eq!(slots.len(), 1);
         assert!(since.is_some());
 
-        let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
+        let (slots, since2) = cluster_info.get_epoch_slots_since(since);
         assert!(slots.is_empty());
         assert_eq!(since2, since);
     }

+ 11 - 17
core/src/cluster_info_vote_listener.rs

@@ -385,7 +385,7 @@ impl ClusterInfoVoteListener {
                 &vote_txs_receiver,
                 &vote_tracker,
                 root_bank.slot(),
-                subscriptions.clone(),
+                &subscriptions,
                 epoch_stakes,
             ) {
                 match e {
@@ -404,9 +404,9 @@ impl ClusterInfoVoteListener {
     #[cfg(test)]
     pub fn get_and_process_votes_for_tests(
         vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
-        vote_tracker: &Arc<VoteTracker>,
+        vote_tracker: &VoteTracker,
         last_root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
     ) -> Result<()> {
         Self::get_and_process_votes(
             vote_txs_receiver,
@@ -419,9 +419,9 @@ impl ClusterInfoVoteListener {
 
     fn get_and_process_votes(
         vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
-        vote_tracker: &Arc<VoteTracker>,
+        vote_tracker: &VoteTracker,
         last_root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
     ) -> Result<()> {
         let timer = Duration::from_millis(200);
@@ -443,7 +443,7 @@ impl ClusterInfoVoteListener {
         vote_tracker: &VoteTracker,
         vote_txs: Vec<Transaction>,
         root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
     ) {
         let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
@@ -574,7 +574,7 @@ impl ClusterInfoVoteListener {
     fn notify_for_stake_change(
         current_stake: u64,
         previous_stake: u64,
-        subscriptions: &Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
         slot: Slot,
     ) {
@@ -804,7 +804,7 @@ mod tests {
             &votes_receiver,
             &vote_tracker,
             0,
-            subscriptions,
+            &subscriptions,
             None,
         )
         .unwrap();
@@ -854,7 +854,7 @@ mod tests {
             &votes_receiver,
             &vote_tracker,
             0,
-            subscriptions,
+            &subscriptions,
             None,
         )
         .unwrap();
@@ -974,13 +974,7 @@ mod tests {
             &validator0_keypairs.vote_keypair,
         )];
 
-        ClusterInfoVoteListener::process_votes(
-            &vote_tracker,
-            vote_tx,
-            0,
-            subscriptions.clone(),
-            None,
-        );
+        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0, &subscriptions, None);
         let ref_count = Arc::strong_count(
             &vote_tracker
                 .keys
@@ -1031,7 +1025,7 @@ mod tests {
             })
             .collect();
 
-        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
+        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, &subscriptions, None);
 
         let ref_count = Arc::strong_count(
             &vote_tracker

+ 1 - 1
core/src/consensus.rs

@@ -165,7 +165,7 @@ impl Tower {
                 let key = all_pubkeys.get_or_insert(&key);
                 lockout_intervals
                     .entry(vote.expiration_slot())
-                    .or_insert_with(|| vec![])
+                    .or_insert_with(Vec::new)
                     .push((vote.slot, key));
             }
 

+ 2 - 2
core/src/crds_gossip_push.rs

@@ -152,7 +152,7 @@ impl CrdsGossipPush {
         let new_value = crds.new_versioned(now, value);
         let value_hash = new_value.value_hash;
         if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
-            received_set.insert(from.clone());
+            received_set.insert(*from);
             return Err(CrdsGossipError::PushMessageAlreadyReceived);
         }
         let old = crds.insert_versioned(new_value);
@@ -160,7 +160,7 @@ impl CrdsGossipPush {
             return Err(CrdsGossipError::PushMessageOldVersion);
         }
         let mut received_set = HashSet::new();
-        received_set.insert(from.clone());
+        received_set.insert(*from);
         self.push_messages.insert(label, value_hash);
         self.received_cache.insert(value_hash, (now, received_set));
         Ok(old.ok().and_then(|opt| opt))

+ 3 - 3
core/src/crds_value.rs

@@ -459,7 +459,7 @@ mod test {
     fn test_keys_and_values() {
         let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().contact_info().unwrap().id;
+        let key = v.contact_info().unwrap().id;
         assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
 
         let v = CrdsValue::new_unsigned(CrdsData::Vote(
@@ -467,7 +467,7 @@ mod test {
             Vote::new(&Pubkey::default(), test_tx(), 0),
         ));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().vote().unwrap().from;
+        let key = v.vote().unwrap().from;
         assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));
 
         let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
@@ -475,7 +475,7 @@ mod test {
             LowestSlot::new(Pubkey::default(), 0, 0),
         ));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().lowest_slot().unwrap().from;
+        let key = v.lowest_slot().unwrap().from;
         assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
     }
 

+ 1 - 1
core/src/gossip_service.rs

@@ -262,7 +262,7 @@ fn make_gossip_node(
         cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
     }
     let cluster_info = Arc::new(cluster_info);
-    let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
+    let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit);
     (gossip_service, ip_echo, cluster_info)
 }
 

+ 2 - 2
core/src/poh_recorder.rs

@@ -721,7 +721,7 @@ mod tests {
             assert_eq!(poh_recorder.tick_height, 5);
             assert!(poh_recorder.working_bank.is_none());
             let mut num_entries = 0;
-            while let Ok(_) = entry_receiver.try_recv() {
+            while entry_receiver.try_recv().is_ok() {
                 num_entries += 1;
             }
             assert_eq!(num_entries, 3);
@@ -1409,7 +1409,7 @@ mod tests {
             for _ in 0..(bank.ticks_per_slot() * 2) {
                 poh_recorder.tick();
             }
-            poh_recorder.set_bank(&bank.clone());
+            poh_recorder.set_bank(&bank);
             assert_eq!(Some(false), bank.check_hash_age(&genesis_hash, 1));
         }
     }

+ 2 - 2
core/src/replay_stage.rs

@@ -1289,11 +1289,11 @@ impl ReplayStage {
         let newly_voted_pubkeys = slot_vote_tracker
             .as_ref()
             .and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates())
-            .unwrap_or_else(|| vec![]);
+            .unwrap_or_else(Vec::new);
 
         let cluster_slot_pubkeys = cluster_slot_pubkeys
             .map(|v| v.read().unwrap().keys().cloned().collect())
-            .unwrap_or_else(|| vec![]);
+            .unwrap_or_else(Vec::new);
 
         Self::update_fork_propagated_threshold_from_votes(
             progress,

+ 3 - 3
core/src/retransmit_stage.rs

@@ -431,7 +431,7 @@ impl RetransmitStage {
             epoch_schedule,
             duplicate_slots_reset_sender,
         };
-        let leader_schedule_cache = leader_schedule_cache.clone();
+        let leader_schedule_cache_clone = leader_schedule_cache.clone();
         let window_service = WindowService::new(
             blockstore,
             cluster_info.clone(),
@@ -440,7 +440,7 @@ impl RetransmitStage {
             repair_socket,
             exit,
             repair_info,
-            &leader_schedule_cache.clone(),
+            leader_schedule_cache,
             move |id, shred, working_bank, last_root| {
                 let is_connected = cfg
                     .as_ref()
@@ -449,7 +449,7 @@ impl RetransmitStage {
                 let rv = should_retransmit_and_persist(
                     shred,
                     working_bank,
-                    &leader_schedule_cache,
+                    &leader_schedule_cache_clone,
                     id,
                     last_root,
                     shred_version,

+ 2 - 2
core/src/rpc.rs

@@ -1171,7 +1171,7 @@ impl RpcSol for RpcSolImpl {
                         leader_schedule.get_slot_leaders().iter().enumerate()
                     {
                         let pubkey = pubkey.to_string();
-                        map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
+                        map.entry(pubkey).or_insert_with(Vec::new).push(slot_index);
                     }
                     map
                 },
@@ -1314,7 +1314,7 @@ impl RpcSol for RpcSolImpl {
         let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
         let pubkey = verify_pubkey(pubkey_str)?;
 
-        let blockhash = meta.bank(commitment.clone())?.confirmed_last_blockhash().0;
+        let blockhash = meta.bank(commitment)?.confirmed_last_blockhash().0;
         let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
             .map_err(|err| {
             info!("request_airdrop_transaction failed: {:?}", err);

+ 2 - 4
core/src/rpc_pubsub.rs

@@ -387,7 +387,7 @@ mod tests {
     };
 
     fn process_transaction_and_notify(
-        bank_forks: &Arc<RwLock<BankForks>>,
+        bank_forks: &RwLock<BankForks>,
         tx: &Transaction,
         subscriptions: &RpcSubscriptions,
         current_slot: Slot,
@@ -921,13 +921,11 @@ mod tests {
         });
 
         // Process votes and check they were notified.
-        // FIX-ME-BETTER-LATER - clone below is required for testcase to pass
-        #[allow(clippy::redundant_clone)]
         ClusterInfoVoteListener::get_and_process_votes_for_tests(
             &votes_receiver,
             &vote_tracker,
             0,
-            rpc.subscriptions.clone(),
+            &rpc.subscriptions,
         )
         .unwrap();
 

+ 1 - 1
core/src/validator.rs

@@ -115,7 +115,7 @@ pub struct ValidatorExit {
 }
 
 impl ValidatorExit {
-    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() -> () + Send + Sync>) {
+    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
         self.exits.push(exit);
     }
 

+ 3 - 3
core/src/window_service.rs

@@ -125,7 +125,7 @@ fn run_insert<F>(
     metrics: &mut BlockstoreInsertionMetrics,
 ) -> Result<()>
 where
-    F: Fn(Shred) -> (),
+    F: Fn(Shred),
 {
     let timer = Duration::from_millis(200);
     let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
@@ -503,8 +503,8 @@ impl WindowService {
 
     fn should_exit_on_error<F, H>(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool
     where
-        F: FnMut() -> (),
-        H: Fn() -> (),
+        F: FnMut(),
+        H: Fn(),
     {
         match e {
             Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,

+ 1 - 1
core/tests/gossip.rs

@@ -33,7 +33,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
 /// tests that actually use this function are below
 fn run_gossip_topo<F>(num: usize, topo: F)
 where
-    F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>) -> (),
+    F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
 {
     let exit = Arc::new(AtomicBool::new(false));
     let listen: Vec<_> = (0..num).map(|_| test_node(&exit)).collect();

+ 13 - 13
faucet/src/faucet.rs

@@ -142,22 +142,22 @@ impl Faucet {
         }
     }
     pub fn process_faucet_request(&mut self, bytes: &BytesMut) -> Result<Bytes, io::Error> {
-        let req: FaucetRequest = deserialize(bytes).or_else(|err| {
-            Err(io::Error::new(
+        let req: FaucetRequest = deserialize(bytes).map_err(|err| {
+            io::Error::new(
                 io::ErrorKind::Other,
                 format!("deserialize packet in faucet: {:?}", err),
-            ))
+            )
         })?;
 
         info!("Airdrop transaction requested...{:?}", req);
         let res = self.build_airdrop_transaction(req);
         match res {
             Ok(tx) => {
-                let response_vec = bincode::serialize(&tx).or_else(|err| {
-                    Err(io::Error::new(
+                let response_vec = bincode::serialize(&tx).map_err(|err| {
+                    io::Error::new(
                         io::ErrorKind::Other,
                         format!("deserialize packet in faucet: {:?}", err),
-                    ))
+                    )
                 })?;
 
                 let mut response_vec_with_length = vec![0; 2];
@@ -205,12 +205,12 @@ pub fn request_airdrop_transaction(
 
     // Read length of transaction
     let mut buffer = [0; 2];
-    stream.read_exact(&mut buffer).or_else(|err| {
+    stream.read_exact(&mut buffer).map_err(|err| {
         info!(
             "request_airdrop_transaction: buffer length read_exact error: {:?}",
             err
         );
-        Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+        Error::new(ErrorKind::Other, "Airdrop failed")
     })?;
     let transaction_length = LittleEndian::read_u16(&buffer) as usize;
     if transaction_length >= PACKET_DATA_SIZE {
@@ -226,19 +226,19 @@ pub fn request_airdrop_transaction(
     // Read the transaction
     let mut buffer = Vec::new();
     buffer.resize(transaction_length, 0);
-    stream.read_exact(&mut buffer).or_else(|err| {
+    stream.read_exact(&mut buffer).map_err(|err| {
         info!(
             "request_airdrop_transaction: buffer read_exact error: {:?}",
             err
         );
-        Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+        Error::new(ErrorKind::Other, "Airdrop failed")
     })?;
 
-    let transaction: Transaction = deserialize(&buffer).or_else(|err| {
-        Err(Error::new(
+    let transaction: Transaction = deserialize(&buffer).map_err(|err| {
+        Error::new(
             ErrorKind::Other,
             format!("request_airdrop_transaction deserialize failure: {:?}", err),
-        ))
+        )
     })?;
     Ok(transaction)
 }

+ 1 - 1
genesis/src/main.rs

@@ -452,7 +452,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     );
 
     let native_instruction_processors =
-        solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(|| vec![]);
+        solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(Vec::new);
     let inflation = solana_genesis_programs::get_inflation(operating_mode, 0).unwrap();
 
     let mut genesis_config = GenesisConfig {

+ 1 - 1
install/src/lib.rs

@@ -255,7 +255,7 @@ pub fn main() -> Result<(), String> {
             let program_arguments = matches
                 .values_of("program_arguments")
                 .map(Iterator::collect)
-                .unwrap_or_else(|| vec![]);
+                .unwrap_or_else(Vec::new);
 
             command::run(config_file, program_name, program_arguments)
         }

+ 2 - 2
ledger/benches/sigverify_shreds.rs

@@ -20,7 +20,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
 
     let mut packets = Packets::default();
     packets.packets.set_pinnable();
-    let slot = 0xdeadc0de;
+    let slot = 0xdead_c0de;
     // need to pin explicitly since the resize will not cause re-allocation
     packets.packets.reserve_and_pin(NUM_PACKETS);
     packets.packets.resize(NUM_PACKETS, Packet::default());
@@ -54,7 +54,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
 #[bench]
 fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
     let mut packets = Packets::default();
-    let slot = 0xdeadc0de;
+    let slot = 0xdead_c0de;
     packets.packets.resize(NUM_PACKETS, Packet::default());
     for p in packets.packets.iter_mut() {
         let shred = Shred::new_from_data(

+ 7 - 7
ledger/src/blockstore.rs

@@ -621,7 +621,7 @@ impl Blockstore {
         metrics: &mut BlockstoreInsertionMetrics,
     ) -> Result<()>
     where
-        F: Fn(Shred) -> (),
+        F: Fn(Shred),
     {
         let mut total_start = Measure::start("Total elapsed");
         let mut start = Measure::start("Blockstore lock");
@@ -918,7 +918,7 @@ impl Blockstore {
         is_recovered: bool,
     ) -> bool
     where
-        F: Fn(Shred) -> (),
+        F: Fn(Shred),
     {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
@@ -1533,7 +1533,7 @@ impl Blockstore {
                 let blockhash = get_last_hash(slot_entries.iter())
                     .unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));
 
-                let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(|| vec![]);
+                let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(Vec::new);
 
                 let block = ConfirmedBlock {
                     previous_blockhash: previous_blockhash.to_string(),
@@ -1743,7 +1743,7 @@ impl Blockstore {
             "blockstore-rpc-api",
             ("method", "get_confirmed_transaction".to_string(), String)
         );
-        if let Some((slot, status)) = self.get_transaction_status(signature.clone())? {
+        if let Some((slot, status)) = self.get_transaction_status(signature)? {
             let transaction = self.find_transaction_in_slot(slot, signature)?
                 .expect("Transaction to exist in slot entries if it exists in statuses and hasn't been cleaned up");
             let encoding = encoding.unwrap_or(TransactionEncoding::Json);
@@ -4948,7 +4948,7 @@ pub mod tests {
 
             // Insert will fail, slot < root
             blockstore
-                .insert_shreds(shreds1.clone()[..].to_vec(), None, false)
+                .insert_shreds(shreds1[..].to_vec(), None, false)
                 .unwrap();
             assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());
 
@@ -5229,7 +5229,7 @@ pub mod tests {
             stakes.insert(keypair.pubkey(), (1 + i as u64, Account::default()));
         }
         let slot_duration = Duration::from_millis(400);
-        let block_time_slot_3 = blockstore.get_block_time(3, slot_duration.clone(), &stakes);
+        let block_time_slot_3 = blockstore.get_block_time(3, slot_duration, &stakes);
 
         let mut total_stake = 0;
         let mut expected_time: u64 = (0..6)
@@ -5246,7 +5246,7 @@ pub mod tests {
         assert_eq!(block_time_slot_3.unwrap().unwrap() as u64, expected_time);
         assert_eq!(
             blockstore
-                .get_block_time(8, slot_duration.clone(), &stakes)
+                .get_block_time(8, slot_duration, &stakes)
                 .unwrap()
                 .unwrap() as u64,
             expected_time + 2 // At 400ms block duration, 5 slots == 2sec

+ 1 - 1
ledger/src/blockstore_processor.rs

@@ -257,7 +257,7 @@ pub enum BlockstoreProcessorError {
 }
 
 /// Callback for accessing bank state while processing the blockstore
-pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;
+pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
 
 #[derive(Default, Clone)]
 pub struct ProcessOptions {

+ 1 - 1
ledger/src/sigverify_shreds.rs

@@ -133,7 +133,7 @@ fn slot_key_data_for_gpu<
             let key = slot_keys.get(slot).unwrap();
             keys_to_slots
                 .entry(*key)
-                .or_insert_with(|| vec![])
+                .or_insert_with(Vec::new)
                 .push(*slot);
         }
     }

+ 2 - 2
ledger/tests/shred.rs

@@ -177,7 +177,7 @@ fn sort_data_coding_into_fec_sets(
         data_slot_and_index.insert(key);
         let fec_entry = fec_data
             .entry(shred.common_header.fec_set_index)
-            .or_insert_with(|| vec![]);
+            .or_insert_with(Vec::new);
         fec_entry.push(shred);
     }
     for shred in coding_shreds {
@@ -188,7 +188,7 @@ fn sort_data_coding_into_fec_sets(
         coding_slot_and_index.insert(key);
         let fec_entry = fec_coding
             .entry(shred.common_header.fec_set_index)
-            .or_insert_with(|| vec![]);
+            .or_insert_with(Vec::new);
         fec_entry.push(shred);
     }
 }

+ 2 - 2
local-cluster/tests/local_cluster.rs

@@ -213,8 +213,8 @@ fn run_cluster_partition<E, F>(
     on_partition_start: E,
     on_partition_resolved: F,
 ) where
-    E: Fn(&mut LocalCluster) -> (),
-    F: Fn(&mut LocalCluster) -> (),
+    E: Fn(&mut LocalCluster),
+    F: Fn(&mut LocalCluster),
 {
     solana_logger::setup();
     info!("PARTITION_TEST!");

+ 2 - 4
logger/src/lib.rs

@@ -23,10 +23,8 @@ impl log::Log for LoggerShim {
 }
 
 fn replace_logger(logger: env_logger::Logger) {
-    let max_level = logger.filter();
-    log::set_max_level(max_level);
-    let mut rw = LOGGER.write().unwrap();
-    std::mem::replace(&mut *rw, logger);
+    log::set_max_level(logger.filter());
+    *LOGGER.write().unwrap() = logger;
     let _ = log::set_boxed_logger(Box::new(LoggerShim {}));
 }
 

+ 1 - 2
metrics/src/metrics.rs

@@ -332,9 +332,8 @@ lazy_static! {
 }
 
 pub fn set_host_id(host_id: String) {
-    let mut rw = HOST_ID.write().unwrap();
     info!("host id: {}", host_id);
-    std::mem::replace(&mut *rw, host_id);
+    *HOST_ID.write().unwrap() = host_id;
 }
 
 /// Submits a new point from any thread.  Note that points are internally queued

+ 3 - 3
net-utils/src/ip_echo_server.rs

@@ -85,11 +85,11 @@ pub fn ip_echo_server(tcp: std::net::TcpListener) -> IpEchoServer {
 
                     bincode::deserialize::<IpEchoServerMessage>(&data[4..])
                         .map(Some)
-                        .or_else(|err| {
-                            Err(io::Error::new(
+                        .map_err(|err| {
+                            io::Error::new(
                                 io::ErrorKind::Other,
                                 format!("Failed to deserialize IpEchoServerMessage: {:?}", err),
-                            ))
+                            )
                         })
                 })
                 .and_then(move |maybe_msg| {

+ 3 - 4
net-utils/src/lib.rs

@@ -372,7 +372,7 @@ pub fn bind_to(ip_addr: IpAddr, port: u16, reuseaddr: bool) -> io::Result<UdpSoc
     let addr = SocketAddr::new(ip_addr, port);
 
     sock.bind(&SockAddr::from(addr))
-        .and_then(|_| Result::Ok(sock.into_udp_socket()))
+        .map(|_| sock.into_udp_socket())
 }
 
 // binds both a UdpSocket and a TcpListener
@@ -385,9 +385,8 @@ pub fn bind_common(
 
     let addr = SocketAddr::new(ip_addr, port);
     let sock_addr = SockAddr::from(addr);
-    sock.bind(&sock_addr).and_then(|_| {
-        TcpListener::bind(&addr).and_then(|listener| Result::Ok((sock.into_udp_socket(), listener)))
-    })
+    sock.bind(&sock_addr)
+        .and_then(|_| TcpListener::bind(&addr).map(|listener| (sock.into_udp_socket(), listener)))
 }
 
 pub fn find_available_port_in_range(ip_addr: IpAddr, range: PortRange) -> io::Result<u16> {

+ 2 - 2
perf/benches/sigverify.rs

@@ -13,7 +13,7 @@ fn bench_sigverify(bencher: &mut Bencher) {
     let tx = test_tx();
 
     // generate packet vector
-    let batches = to_packets(&vec![tx; 128]);
+    let batches = to_packets(&std::iter::repeat(tx).take(128).collect::<Vec<_>>());
 
     let recycler = Recycler::default();
     let recycler_out = Recycler::default();
@@ -28,7 +28,7 @@ fn bench_get_offsets(bencher: &mut Bencher) {
     let tx = test_tx();
 
     // generate packet vector
-    let batches = to_packets(&vec![tx; 1024]);
+    let batches = to_packets(&std::iter::repeat(tx).take(1024).collect::<Vec<_>>());
 
     let recycler = Recycler::default();
     // verify packets

+ 1 - 2
programs/budget/src/budget_expr.rs

@@ -7,7 +7,6 @@ use chrono::prelude::*;
 use serde_derive::{Deserialize, Serialize};
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
-use std::mem;
 
 /// The types of events a payment plan can process.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@@ -256,7 +255,7 @@ impl BudgetExpr {
             _ => None,
         };
         if let Some(expr) = new_expr {
-            mem::replace(self, *expr);
+            *self = *expr;
         }
     }
 }

+ 2 - 2
runtime/benches/accounts.rs

@@ -14,7 +14,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) {
     for t in 0..num {
         let pubkey = Pubkey::new_rand();
         let account = Account::new((t + 1) as u64, 0, &Account::default().owner);
-        pubkeys.push(pubkey.clone());
+        pubkeys.push(pubkey);
         assert!(bank.get_account(&pubkey).is_none());
         bank.deposit(&pubkey, (t + 1) as u64);
         assert_eq!(bank.get_account(&pubkey).unwrap(), account);
@@ -48,7 +48,7 @@ fn test_accounts_squash(bencher: &mut Bencher) {
         &[],
     ));
     let mut pubkeys: Vec<Pubkey> = vec![];
-    deposit_many(&bank1, &mut pubkeys, 250000);
+    deposit_many(&bank1, &mut pubkeys, 250_000);
     bank1.freeze();
 
     // Measures the performance of the squash operation.

+ 3 - 6
runtime/benches/accounts_index.rs

@@ -10,18 +10,15 @@ use test::Bencher;
 #[bench]
 fn bench_accounts_index(bencher: &mut Bencher) {
     const NUM_PUBKEYS: usize = 10_000;
-    let pubkeys: Vec<_> = (0..NUM_PUBKEYS)
-        .into_iter()
-        .map(|_| Pubkey::new_rand())
-        .collect();
+    let pubkeys: Vec<_> = (0..NUM_PUBKEYS).map(|_| Pubkey::new_rand()).collect();
 
     const NUM_FORKS: u64 = 16;
 
     let mut reclaims = vec![];
     let mut index = AccountsIndex::<AccountInfo>::default();
     for f in 0..NUM_FORKS {
-        for _p in 0..NUM_PUBKEYS {
-            index.insert(f, &pubkeys[_p], AccountInfo::default(), &mut reclaims);
+        for pubkey in pubkeys.iter().take(NUM_PUBKEYS) {
+            index.insert(f, pubkey, AccountInfo::default(), &mut reclaims);
         }
     }
 

+ 2 - 8
runtime/benches/append_vec.rs

@@ -32,7 +32,6 @@ fn append_vec_append(bencher: &mut Bencher) {
 
 fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> {
     (0..size)
-        .into_iter()
         .filter_map(|sample| {
             let (meta, account) = create_test_account(sample);
             vec.append_account(meta, &account, Hash::default())
@@ -92,7 +91,7 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
     bencher.iter(|| {
         let len = indexes.lock().unwrap().len();
         let random_index: usize = thread_rng().gen_range(0, len);
-        let (sample, pos) = indexes.lock().unwrap().get(random_index).unwrap().clone();
+        let (sample, pos) = *indexes.lock().unwrap().get(random_index).unwrap();
         let (account, _next) = vec.get_account(pos).unwrap();
         let (_meta, test) = create_test_account(sample);
         assert_eq!(account.data, test.data.as_slice());
@@ -112,12 +111,7 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
             continue;
         }
         let random_index: usize = thread_rng().gen_range(0, len + 1);
-        let (sample, pos) = indexes1
-            .lock()
-            .unwrap()
-            .get(random_index % len)
-            .unwrap()
-            .clone();
+        let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap();
         let (account, _next) = vec1.get_account(pos).unwrap();
         let (_meta, test) = create_test_account(sample);
         assert_eq!(account.data, test.data.as_slice());

+ 13 - 15
runtime/benches/bank.rs

@@ -19,13 +19,13 @@ use std::{sync::Arc, thread::sleep, time::Duration};
 use test::Bencher;
 
 const BUILTIN_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 ];
 
 const NOOP_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
 ];
 
 fn process_instruction(
@@ -43,13 +43,12 @@ pub fn create_builtin_transactions(
     let program_id = Pubkey::new(&BUILTIN_PROGRAM_ID);
 
     (0..4096)
-        .into_iter()
         .map(|_| {
             // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));
 
             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -65,13 +64,12 @@ pub fn create_native_loader_transactions(
     let program_id = Pubkey::new(&NOOP_PROGRAM_ID);
 
     (0..4096)
-        .into_iter()
         .map(|_| {
            // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));
 
             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -80,13 +78,13 @@ pub fn create_native_loader_transactions(
         .collect()
 }
 
-fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &Vec<Transaction>) {
+fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &[Transaction]) {
     let results = bank.process_transactions(&transactions);
     assert!(results.iter().all(Result::is_ok));
 }
 
-fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<Transaction>) {
-    for transaction in transactions.clone() {
+fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Transaction]) {
+    for transaction in transactions.to_owned() {
         bank_client.async_send_transaction(transaction).unwrap();
     }
     for _ in 0..1_000_000_000_u64 {
@@ -98,23 +96,23 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<
         }
         sleep(Duration::from_nanos(1));
     }
-    if !bank
+    if bank
         .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
         .unwrap()
-        .is_ok()
+        .is_err()
     {
         error!(
             "transaction failed: {:?}",
             bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
                 .unwrap()
         );
-        assert!(false);
+        panic!();
     }
 }
 
 fn do_bench_transactions(
     bencher: &mut Bencher,
-    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &Vec<Transaction>),
+    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &[Transaction]),
     create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<Transaction>,
 ) {
     solana_logger::setup();

+ 1 - 4
runtime/benches/bloom.rs

@@ -47,10 +47,7 @@ fn bench_sigs_bloom(bencher: &mut Bencher) {
     // https://hur.st/bloomfilter/?n=1000000&p=1.0E-8&m=&k=
     let blockhash = hash(Hash::default().as_ref());
     //    info!("blockhash = {:?}", blockhash);
-    let keys = (0..27)
-        .into_iter()
-        .map(|i| blockhash.hash_at_index(i))
-        .collect();
+    let keys = (0..27).map(|i| blockhash.hash_at_index(i)).collect();
     let mut sigs: Bloom<Signature> = Bloom::new(38_340_234, keys);
 
     let mut id = blockhash;

+ 1 - 1
runtime/benches/status_cache.rs

@@ -30,6 +30,6 @@ fn test_statuscache_serialize(bencher: &mut Bencher) {
         }
     }
     bencher.iter(|| {
-        let _ = serialize(&status_cache.slot_deltas(&vec![0])).unwrap();
+        let _ = serialize(&status_cache.slot_deltas(&[0])).unwrap();
     });
 }

+ 3 - 3
runtime/src/accounts.rs

@@ -153,12 +153,12 @@ impl Accounts {
                         }
                         let (account, rent) =
                             AccountsDB::load(storage, ancestors, accounts_index, key)
-                                .and_then(|(mut account, _)| {
+                                .map(|(mut account, _)| {
                                     if message.is_writable(i) && !account.executable {
                                         let rent_due = rent_collector.update(&key, &mut account);
-                                        Some((account, rent_due))
+                                        (account, rent_due)
                                     } else {
-                                        Some((account, 0))
+                                        (account, 0)
                                     }
                                 })
                                 .unwrap_or_default();

+ 19 - 26
runtime/src/accounts_db.rs

@@ -617,7 +617,6 @@ impl AccountsDB {
     pub fn clean_accounts(&self) {
         self.report_store_stats();
 
-        let no_ancestors = HashMap::new();
         let mut accounts_scan = Measure::start("accounts_scan");
         let accounts_index = self.accounts_index.read().unwrap();
         let pubkeys: Vec<Pubkey> = accounts_index.account_maps.keys().cloned().collect();
@@ -628,7 +627,7 @@ impl AccountsDB {
                 let mut purges_in_root = Vec::new();
                 let mut purges = HashMap::new();
                 for pubkey in pubkeys {
-                    if let Some((list, index)) = accounts_index.get(pubkey, &no_ancestors) {
+                    if let Some((list, index)) = accounts_index.get(pubkey, None) {
                         let (slot, account_info) = &list[index];
                         if account_info.lamports == 0 {
                             purges.insert(*pubkey, accounts_index.would_purge(pubkey));
@@ -641,16 +640,11 @@ impl AccountsDB {
             })
             .reduce(
                 || (HashMap::new(), Vec::new()),
-                |m1, m2| {
+                |mut m1, m2| {
                     // Collapse down the hashmaps/vecs into one.
-                    let x = m2.0.iter().fold(m1.0, |mut acc, (k, vs)| {
-                        acc.insert(k.clone(), vs.clone());
-                        acc
-                    });
-                    let mut y = vec![];
-                    y.extend(m1.1);
-                    y.extend(m2.1);
-                    (x, y)
+                    m1.0.extend(m2.0);
+                    m1.1.extend(m2.1);
+                    m1
                 },
             );
 
@@ -806,7 +800,6 @@ impl AccountsDB {
         }
 
         let alive_accounts: Vec<_> = {
-            let no_ancestors = HashMap::new();
             let accounts_index = self.accounts_index.read().unwrap();
             stored_accounts
                 .iter()
@@ -819,7 +812,7 @@ impl AccountsDB {
                         (store_id, offset),
                         _write_version,
                     )| {
-                        if let Some((list, _)) = accounts_index.get(pubkey, &no_ancestors) {
+                        if let Some((list, _)) = accounts_index.get(pubkey, None) {
                             list.iter()
                                 .any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset)
                         } else {
@@ -927,7 +920,7 @@ impl AccountsDB {
 
     pub fn scan_accounts<F, A>(&self, ancestors: &Ancestors, scan_func: F) -> A
     where
-        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (),
+        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
         A: Default,
     {
         let mut collector = A::default();
@@ -946,7 +939,7 @@ impl AccountsDB {
 
     pub fn range_scan_accounts<F, A, R>(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A
     where
-        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (),
+        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
         A: Default,
         R: RangeBounds<Pubkey>,
     {
@@ -968,7 +961,7 @@ impl AccountsDB {
     // PERF: Sequentially read each storage entry in parallel
     pub fn scan_account_storage<F, B>(&self, slot: Slot, scan_func: F) -> Vec<B>
     where
-        F: Fn(&StoredAccount, AppendVecId, &mut B) -> () + Send + Sync,
+        F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync,
         B: Send + Default,
     {
         let storage_maps: Vec<Arc<AccountStorageEntry>> = self
@@ -1020,7 +1013,7 @@ impl AccountsDB {
         accounts_index: &AccountsIndex<AccountInfo>,
         pubkey: &Pubkey,
     ) -> Option<(Account, Slot)> {
-        let (lock, index) = accounts_index.get(pubkey, ancestors)?;
+        let (lock, index) = accounts_index.get(pubkey, Some(ancestors))?;
         let slot = lock[index].0;
         //TODO: thread this as a ref
         if let Some(slot_storage) = storage.0.get(&slot) {
@@ -1037,7 +1030,7 @@ impl AccountsDB {
     #[cfg(test)]
     fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash {
         let accounts_index = self.accounts_index.read().unwrap();
-        let (lock, index) = accounts_index.get(pubkey, ancestors).unwrap();
+        let (lock, index) = accounts_index.get(pubkey, Some(ancestors)).unwrap();
         let slot = lock[index].0;
         let storage = self.storage.read().unwrap();
         let slot_storage = storage.0.get(&slot).unwrap();
@@ -1449,7 +1442,7 @@ impl AccountsDB {
         let hashes: Vec<_> = keys
             .par_iter()
             .filter_map(|pubkey| {
-                if let Some((list, index)) = accounts_index.get(pubkey, ancestors) {
+                if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors)) {
                     let (slot, account_info) = &list[index];
                     if account_info.lamports != 0 {
                         storage
@@ -1839,7 +1832,7 @@ impl AccountsDB {
                         };
                         let entry = accum
                             .entry(stored_account.meta.pubkey)
-                            .or_insert_with(|| vec![]);
+                            .or_insert_with(Vec::new);
                         entry.push((stored_account.meta.write_version, account_info));
                     },
                 );
@@ -1847,7 +1840,7 @@ impl AccountsDB {
             let mut accounts_map: HashMap<Pubkey, Vec<(u64, AccountInfo)>> = HashMap::new();
             for accumulator_entry in accumulator.iter() {
                 for (pubkey, storage_entry) in accumulator_entry {
-                    let entry = accounts_map.entry(*pubkey).or_insert_with(|| vec![]);
+                    let entry = accounts_map.entry(*pubkey).or_insert_with(Vec::new);
                     entry.extend(storage_entry.iter().cloned());
                 }
             }
@@ -2118,7 +2111,7 @@ pub mod tests {
             .accounts_index
             .read()
             .unwrap()
-            .get(&key, &ancestors)
+            .get(&key, Some(&ancestors))
             .is_some());
         assert_load_account(&db, unrooted_slot, key, 1);
 
@@ -2139,7 +2132,7 @@ pub mod tests {
             .accounts_index
             .read()
             .unwrap()
-            .get(&key, &ancestors)
+            .get(&key, Some(&ancestors))
             .is_none());
 
         // Test we can store for the same slot again and get the right information
@@ -2188,14 +2181,14 @@ pub mod tests {
         for t in 0..num {
             let pubkey = Pubkey::new_rand();
             let account = Account::new((t + 1) as u64, space, &Account::default().owner);
-            pubkeys.push(pubkey.clone());
+            pubkeys.push(pubkey);
             assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
             accounts.store(slot, &[(&pubkey, &account)]);
         }
         for t in 0..num_vote {
             let pubkey = Pubkey::new_rand();
             let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id());
-            pubkeys.push(pubkey.clone());
+            pubkeys.push(pubkey);
             let ancestors = vec![(slot, 0)].into_iter().collect();
             assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
             accounts.store(slot, &[(&pubkey, &account)]);
@@ -2435,7 +2428,7 @@ pub mod tests {
         let ancestors = vec![(0, 0)].into_iter().collect();
         let id = {
             let index = accounts.accounts_index.read().unwrap();
-            let (list, idx) = index.get(&pubkey, &ancestors).unwrap();
+            let (list, idx) = index.get(&pubkey, Some(&ancestors)).unwrap();
             list[idx].1.store_id
         };
         accounts.add_root(1);
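
The reduce now moves each worker's map and vec into the accumulator with extend() instead of cloning every key and value through a fold into a fresh HashMap. A self-contained sketch of that fold/reduce shape, assuming the rayon crate and using toy u64 keys with an invented even/odd purge rule:

use rayon::prelude::*;
use std::collections::HashMap;

fn main() {
    let keys: Vec<u64> = (0..1_000).collect();

    // Same shape as clean_accounts above: each chunk builds its own
    // (HashMap, Vec) pair and the reducer moves entries over with
    // extend() rather than re-cloning them into a new map.
    let (purges, purges_in_root): (HashMap<u64, u64>, Vec<u64>) = keys
        .par_chunks(128)
        .map(|chunk| {
            let mut purges = HashMap::new();
            let mut purges_in_root = Vec::new();
            for &k in chunk {
                if k % 2 == 0 {
                    purges.insert(k, k * 10);
                } else {
                    purges_in_root.push(k);
                }
            }
            (purges, purges_in_root)
        })
        .reduce(
            || (HashMap::new(), Vec::new()),
            |mut m1, m2| {
                m1.0.extend(m2.0);
                m1.1.extend(m2.1);
                m1
            },
        );

    assert_eq!(purges.len(), 500);
    assert_eq!(purges_in_root.len(), 500);
}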

+ 26 - 23
runtime/src/accounts_index.rs

@@ -24,29 +24,29 @@ pub struct AccountsIndex<T> {
 impl<'a, T: 'a + Clone> AccountsIndex<T> {
     fn do_scan_accounts<F, I>(&self, ancestors: &Ancestors, mut func: F, iter: I)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
         I: Iterator<Item = (&'a Pubkey, &'a AccountMapEntry<T>)>,
     {
         for (pubkey, list) in iter {
             let list_r = &list.1.read().unwrap();
-            if let Some(index) = self.latest_slot(ancestors, &list_r) {
+            if let Some(index) = self.latest_slot(Some(ancestors), &list_r) {
                 func(pubkey, (&list_r[index].1, list_r[index].0));
             }
         }
     }
 
     /// call func with every pubkey and index visible from a given set of ancestors
-    pub fn scan_accounts<F>(&self, ancestors: &Ancestors, func: F)
+    pub(crate) fn scan_accounts<F>(&self, ancestors: &Ancestors, func: F)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
     {
         self.do_scan_accounts(ancestors, func, self.account_maps.iter());
     }
 
     /// call func with every pubkey and index visible from a given set of ancestors with range
-    pub fn range_scan_accounts<F, R>(&self, ancestors: &Ancestors, range: R, func: F)
+    pub(crate) fn range_scan_accounts<F, R>(&self, ancestors: &Ancestors, range: R, func: F)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
         R: RangeBounds<Pubkey>,
     {
         self.do_scan_accounts(ancestors, func, self.account_maps.range(range));
@@ -76,11 +76,14 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
 
     // find the latest slot and T in a slice for a given ancestor
     // returns index into 'slice' if found, None if not.
-    fn latest_slot(&self, ancestors: &Ancestors, slice: SlotSlice<T>) -> Option<usize> {
+    fn latest_slot(&self, ancestors: Option<&Ancestors>, slice: SlotSlice<T>) -> Option<usize> {
         let mut max = 0;
         let mut rv = None;
         for (i, (slot, _t)) in slice.iter().rev().enumerate() {
-            if *slot >= max && (ancestors.contains_key(slot) || self.is_root(*slot)) {
+            if *slot >= max
+                && (ancestors.map_or(false, |ancestors| ancestors.contains_key(slot))
+                    || self.is_root(*slot))
+            {
                 rv = Some((slice.len() - 1) - i);
                 max = *slot;
             }
@@ -90,10 +93,10 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {
 
     /// Get an account
     /// The latest account that appears in `ancestors` or `roots` is returned.
-    pub fn get(
+    pub(crate) fn get(
         &self,
         pubkey: &Pubkey,
-        ancestors: &Ancestors,
+        ancestors: Option<&Ancestors>,
     ) -> Option<(RwLockReadGuard<SlotList<T>>, usize)> {
         self.account_maps.get(pubkey).and_then(|list| {
             let list_r = list.1.read().unwrap();
@@ -245,7 +248,8 @@ mod tests {
         let key = Keypair::new();
         let index = AccountsIndex::<bool>::default();
         let ancestors = HashMap::new();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());
+        assert!(index.get(&key.pubkey(), None).is_none());
 
         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -261,7 +265,8 @@ mod tests {
         assert!(gc.is_empty());
 
         let ancestors = HashMap::new();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());
+        assert!(index.get(&key.pubkey(), None).is_none());
 
         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -277,7 +282,7 @@ mod tests {
         assert!(gc.is_empty());
 
         let ancestors = vec![(1, 1)].into_iter().collect();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());
 
         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -293,7 +298,7 @@ mod tests {
         assert!(gc.is_empty());
 
         let ancestors = vec![(0, 0)].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));
 
         let mut num = 0;
@@ -324,9 +329,8 @@ mod tests {
         index.insert(0, &key.pubkey(), true, &mut gc);
         assert!(gc.is_empty());
 
-        let ancestors = vec![].into_iter().collect();
         index.add_root(0);
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), None).unwrap();
         assert_eq!(list[idx], (0, true));
     }
 
@@ -369,14 +373,14 @@ mod tests {
         let mut gc = Vec::new();
         index.insert(0, &key.pubkey(), true, &mut gc);
         assert!(gc.is_empty());
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));
         drop(list);
 
         let mut gc = Vec::new();
         index.insert(0, &key.pubkey(), false, &mut gc);
         assert_eq!(gc, vec![(0, true)]);
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, false));
     }
 
@@ -391,10 +395,10 @@ mod tests {
         assert!(gc.is_empty());
         index.insert(1, &key.pubkey(), false, &mut gc);
         assert!(gc.is_empty());
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));
         let ancestors = vec![(1, 0)].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (1, false));
     }
 
@@ -413,13 +417,12 @@ mod tests {
         index.add_root(3);
         index.insert(4, &key.pubkey(), true, &mut gc);
         assert_eq!(gc, vec![(0, true), (1, false), (2, true)]);
-        let ancestors = vec![].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), None).unwrap();
         assert_eq!(list[idx], (3, true));
 
         let mut num = 0;
         let mut found_key = false;
-        index.scan_accounts(&ancestors, |pubkey, _index| {
+        index.scan_accounts(&Ancestors::new(), |pubkey, _index| {
             if pubkey == &key.pubkey() {
                 found_key = true;
                 assert_eq!(_index, (&true, 3));
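
Making the ancestors argument Option<&Ancestors> lets callers that only care about rooted slots pass None instead of allocating an empty HashMap, and latest_slot treats None as "no ancestor matches" via map_or(false, ...). A standalone sketch of that lookup rule with toy types; the slot list and root check are invented for the demo:

use std::collections::HashMap;

type Ancestors = HashMap<u64, usize>;

// Pick the newest (slot, value) entry that is either in `ancestors`
// (when provided) or already rooted -- None means "roots only".
fn latest_slot(
    ancestors: Option<&Ancestors>,
    slice: &[(u64, bool)],
    is_root: impl Fn(u64) -> bool,
) -> Option<usize> {
    let mut max = 0;
    let mut rv = None;
    for (i, (slot, _t)) in slice.iter().rev().enumerate() {
        if *slot >= max
            && (ancestors.map_or(false, |ancestors| ancestors.contains_key(slot))
                || is_root(*slot))
        {
            rv = Some((slice.len() - 1) - i);
            max = *slot;
        }
    }
    rv
}

fn main() {
    let list: [(u64, bool); 3] = [(0, true), (1, false), (3, true)];

    // Roots only: slot 3 is rooted, so it wins with no empty map in sight.
    assert_eq!(latest_slot(None, &list, |s| s == 3), Some(2));

    // Ancestors {1} and nothing rooted: only slot 1 is visible.
    let ancestors: Ancestors = vec![(1, 0)].into_iter().collect();
    assert_eq!(latest_slot(Some(&ancestors), &list, |_| false), Some(1));
}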

+ 3 - 3
runtime/src/bank.rs

@@ -150,7 +150,7 @@ impl StatusCacheRc {
     }
 }
 
-pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) -> () + Sync + Send>;
+pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) + Sync + Send>;
 
 pub type TransactionProcessResult = (Result<()>, Option<HashAgeKind>);
 pub struct TransactionResults {
@@ -3854,7 +3854,7 @@ mod tests {
     impl Bank {
         fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
             let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap();
-            let (accounts, _) = accounts_index.get(&pubkey, &ancestors).unwrap();
+            let (accounts, _) = accounts_index.get(&pubkey, Some(&ancestors)).unwrap();
             accounts
                 .iter()
                 .map(|(slot, _)| *slot)
@@ -4988,7 +4988,7 @@ mod tests {
         let (genesis_config, mint_keypair) = create_genesis_config(2_000);
         let bank0 = Arc::new(Bank::new(&genesis_config));
         let initial_state = bank0.hash_internal_state();
-        let bank1 = Bank::new_from_parent(&bank0.clone(), &Pubkey::default(), 1);
+        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
         assert_ne!(bank1.hash_internal_state(), initial_state);
 
         info!("transfer bank1");
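
Dropping the clone in &bank0.clone(): new_from_parent only needs a shared reference to the parent Arc, so cloning it just to take a reference bumps and immediately drops the refcount for nothing. A tiny sketch of the difference, using a made-up child_of helper over Arc<String>:

use std::sync::Arc;

fn child_of(parent: &Arc<String>) -> String {
    // Only reads through the reference and never stores the Arc,
    // so the caller has no reason to bump the refcount first.
    format!("child of {}", parent)
}

fn main() {
    let parent = Arc::new("bank0".to_string());

    // Before: child_of(&parent.clone()) clones the Arc only to borrow it.
    // After: borrow the Arc the caller already owns.
    let child = child_of(&parent);
    assert_eq!(child, "child of bank0");
    assert_eq!(Arc::strong_count(&parent), 1);
}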

+ 8 - 3
runtime/src/bloom.rs

@@ -42,15 +42,20 @@ impl<T: BloomHashIndex> Bloom<T> {
         let keys: Vec<u64> = (0..num_keys).map(|_| rand::thread_rng().gen()).collect();
         Self::new(num_bits, keys)
     }
-    pub fn num_bits(num_items: f64, false_rate: f64) -> f64 {
+    fn num_bits(num_items: f64, false_rate: f64) -> f64 {
         let n = num_items;
         let p = false_rate;
         ((n * p.ln()) / (1f64 / 2f64.powf(2f64.ln())).ln()).ceil()
     }
-    pub fn num_keys(num_bits: f64, num_items: f64) -> f64 {
+    fn num_keys(num_bits: f64, num_items: f64) -> f64 {
         let n = num_items;
         let m = num_bits;
-        1f64.max(((m / n) * 2f64.ln()).round())
+        // infinity as usize is zero in rust 1.43 but 2^64-1 in rust 1.45; ensure it's zero here
+        if n == 0.0 {
+            0.0
+        } else {
+            1f64.max(((m / n) * 2f64.ln()).round())
+        }
     }
     fn pos(&self, key: &T, k: u64) -> u64 {
         key.hash_at_index(k) % self.bits.len()
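
The zero-items guard matters because m / 0.0 is +inf, and the later infinity-to-integer cast changed meaning between toolchains: as the added comment notes, it came out as 0 on Rust 1.43 while 1.45's saturating float casts give 2^64-1. Returning 0.0 keeps the key count stable either way. A small sketch of the guarded helper; the demo bit/item counts are made up:

// The ln()-based formula matches the hunk above; only the asserts are new.
fn num_keys(num_bits: f64, num_items: f64) -> f64 {
    let (n, m) = (num_items, num_bits);
    if n == 0.0 {
        // m / 0.0 would be +inf, and `inf as usize` saturates to usize::MAX
        // on Rust 1.45+ (older compilers happened to give 0), so short-circuit.
        0.0
    } else {
        1f64.max(((m / n) * 2f64.ln()).round())
    }
}

fn main() {
    assert_eq!(num_keys(1024.0, 0.0) as usize, 0);
    assert_eq!(num_keys(1024.0, 100.0), 7.0);
}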

+ 1 - 1
runtime/src/legacy_system_instruction_processor0.rs

@@ -930,7 +930,7 @@ mod tests {
 
     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();
 

+ 2 - 2
runtime/src/nonce_utils.rs

@@ -206,7 +206,7 @@ mod tests {
     fn verify_nonce_ok() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);
@@ -236,7 +236,7 @@ mod tests {
     fn verify_nonce_bad_query_hash_fail() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);

+ 10 - 5
runtime/src/rent_collector.rs

@@ -48,11 +48,16 @@ impl RentCollector {
                 .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1))
                 .sum();
 
-            let (rent_due, exempt) = self.rent.due(
-                account.lamports,
-                account.data.len(),
-                slots_elapsed as f64 / self.slots_per_year,
-            );
+            // avoid infinite rent in rust 1.45
+            let years_elapsed = if self.slots_per_year != 0.0 {
+                slots_elapsed as f64 / self.slots_per_year
+            } else {
+                0.0
+            };
+
+            let (rent_due, exempt) =
+                self.rent
+                    .due(account.lamports, account.data.len(), years_elapsed);
 
             if exempt || rent_due != 0 {
                 if account.lamports > rent_due {
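
This is the same class of fix: with slots_per_year at 0.0 the old division drove years_elapsed to infinity, which the added comment flags as infinite rent on Rust 1.45. A toy sketch of the guard, with the real Rent::due() calculation reduced to one multiplication and an invented rate:

fn years_elapsed(slots_elapsed: u64, slots_per_year: f64) -> f64 {
    if slots_per_year != 0.0 {
        slots_elapsed as f64 / slots_per_year
    } else {
        0.0
    }
}

fn main() {
    let lamports_per_year = 3_480.0;

    // Normal config: half a "year" of slots accrues half a year of rent.
    let due = (lamports_per_year * years_elapsed(100, 200.0)) as u64;
    assert_eq!(due, 1_740);

    // Degenerate config: without the guard this would be inf, which
    // saturates to u64::MAX when cast on Rust 1.45+.
    let due = (lamports_per_year * years_elapsed(100, 0.0)) as u64;
    assert_eq!(due, 0);
}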

+ 1 - 1
runtime/src/status_cache.rs

@@ -271,7 +271,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
                 .or_insert((slot, sig_index, HashMap::new()));
         sig_map.0 = std::cmp::max(slot, sig_map.0);
 
-        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(|| vec![]);
+        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(Vec::new);
         sig_forks.push((slot, res.clone()));
         let slot_deltas = self.slot_deltas.entry(slot).or_default();
         let mut fork_entry = slot_deltas.lock().unwrap();
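
This or_insert_with(Vec::new) change, like the ones in accounts_db.rs above and watchtower below, is clippy's redundant_closure lint: the function item can be passed directly instead of wrapping it in || vec![]. In miniature, with made-up map contents:

use std::collections::HashMap;

fn main() {
    let mut forks: HashMap<&str, Vec<u64>> = HashMap::new();

    // clippy::redundant_closure: `|| vec![]` only forwards to Vec::new,
    // so the function item itself serves as the default constructor.
    forks.entry("sig").or_insert_with(Vec::new).push(7);
    forks.entry("sig").or_insert_with(Vec::new).push(9);

    assert_eq!(forks["sig"], vec![7, 9]);
}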

+ 1 - 1
runtime/src/system_instruction_processor.rs

@@ -933,7 +933,7 @@ mod tests {
 
     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();
 

+ 2 - 2
sdk/benches/short_vec.rs

@@ -8,14 +8,14 @@ use test::Bencher;
 // Return a ShortVec with 127 bytes
 fn create_encoded_short_vec() -> Vec<u8> {
     let mut bytes = vec![127];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }
 
 // Return a Vec with 127 bytes
 fn create_encoded_vec() -> Vec<u8> {
     let mut bytes = vec![127, 0, 0, 0, 0, 0, 0, 0];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }
 

+ 1 - 1
sdk/benches/slot_history.rs

@@ -22,7 +22,7 @@ fn bench_slot_history_add_new(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..5 {
             slot_history.add(slot);
-            slot += 100000;
+            slot += 100_000;
         }
     });
 }

+ 1 - 1
sdk/src/abi_example.rs

@@ -298,7 +298,7 @@ impl<T: AbiExample> AbiExample for Box<T> {
     }
 }
 
-impl<T> AbiExample for Box<dyn Fn(&mut T) -> () + Sync + Send> {
+impl<T> AbiExample for Box<dyn Fn(&mut T) + Sync + Send> {
     fn example() -> Self {
         info!("AbiExample for (Box<T>): {}", type_name::<Self>());
         Box::new(move |_t: &mut T| {})

+ 19 - 19
sdk/src/nonce/account.rs

@@ -211,7 +211,7 @@ mod test {
                 ..nonce::state::Data::default()
             };
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let state = AccountUtilsState::<Versions>::state(keyed_account)
                 .unwrap()
                 .convert_to_current();
@@ -326,7 +326,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(0);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -347,7 +347,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -367,7 +367,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let result = keyed_account.advance_nonce_account(&recent_blockhashes, &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
@@ -384,14 +384,14 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             with_test_keyed_account(42, true, |nonce_authority| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let authorized = *nonce_authority.unsigned_key();
                 nonce_account
                     .initialize_nonce_account(&authorized, &recent_blockhashes, &rent)
                     .unwrap();
                 let mut signers = HashSet::new();
-                signers.insert(nonce_authority.signer_key().unwrap().clone());
+                signers.insert(*nonce_authority.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(31);
                 let result = nonce_account.advance_nonce_account(&recent_blockhashes, &signers);
                 assert_eq!(result, Ok(()));
@@ -409,7 +409,7 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             with_test_keyed_account(42, false, |nonce_authority| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let authorized = *nonce_authority.unsigned_key();
                 nonce_account
@@ -435,7 +435,7 @@ mod test {
             assert_eq!(state, State::Uninitialized);
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports;
                 let expect_nonce_lamports =
@@ -506,7 +506,7 @@ mod test {
             assert_eq!(state, State::Uninitialized);
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let lamports = nonce_keyed.account.borrow().lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
@@ -531,7 +531,7 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports / 2;
                 let nonce_expect_lamports =
@@ -584,7 +584,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_keyed.signer_key().unwrap().clone());
+            signers.insert(*nonce_keyed.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authority = *nonce_keyed.unsigned_key();
             nonce_keyed
@@ -659,7 +659,7 @@ mod test {
                 .unwrap();
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -689,7 +689,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -719,7 +719,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports - min_lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -746,7 +746,7 @@ mod test {
                 .convert_to_current();
             assert_eq!(state, State::Uninitialized);
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(0);
             let authority = *keyed_account.unsigned_key();
             let result =
@@ -773,7 +773,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = RecentBlockhashes::from_iter(vec![].into_iter());
             let authorized = *keyed_account.unsigned_key();
             let result =
@@ -827,7 +827,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = *nonce_account.unsigned_key();
             nonce_account
@@ -857,7 +857,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
         })
@@ -872,7 +872,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = &Pubkey::default().clone();
             nonce_account
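
Every hunk in this file is the same clippy::clone_on_copy fix: signer_key() hands back Option<&Pubkey>, and since Pubkey is Copy, dereferencing with * copies the key into the HashSet without calling .clone() on a reference. A sketch with a stand-in Copy key type in place of Pubkey:

use std::collections::HashSet;

// Stand-in for Pubkey: a small Copy type used as a signer key.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Key([u8; 4]);

// Stand-in for KeyedAccount::signer_key(): returns a reference, not a value.
fn signer_key(key: &Key) -> Option<&Key> {
    Some(key)
}

fn main() {
    let key = Key([1, 2, 3, 4]);
    let mut signers: HashSet<Key> = HashSet::new();

    // clippy::clone_on_copy: `.unwrap().clone()` clones through a
    // reference; `*` copies the Copy value directly.
    signers.insert(*signer_key(&key).unwrap());

    assert!(signers.contains(&key));
}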

+ 3 - 9
validator/src/main.rs

@@ -152,12 +152,7 @@ fn start_gossip_node(
     let cluster_info = Arc::new(cluster_info);
 
     let gossip_exit_flag = Arc::new(AtomicBool::new(false));
-    let gossip_service = GossipService::new(
-        &cluster_info.clone(),
-        None,
-        gossip_socket,
-        &gossip_exit_flag,
-    );
+    let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &gossip_exit_flag);
     (cluster_info, gossip_exit_flag, gossip_service)
 }
 
@@ -955,7 +950,7 @@ pub fn main() {
         .collect();
 
     let snapshot_interval_slots = value_t_or_exit!(matches, "snapshot_interval_slots", u64);
-    let snapshot_path = ledger_path.clone().join("snapshot");
+    let snapshot_path = ledger_path.join("snapshot");
     fs::create_dir_all(&snapshot_path).unwrap_or_else(|err| {
         eprintln!(
             "Failed to create snapshots directory {:?}: {}",
@@ -1235,7 +1230,7 @@ pub fn main() {
                         Ok(())
                     }
                 })
-                .and_then(|_| {
+                .map(|_| {
                     if !validator_config.voting_disabled && !no_check_vote_account {
                         check_vote_account(
                             &rpc_client,
@@ -1254,7 +1249,6 @@ pub fn main() {
                             exit(1);
                         });
                     }
-                    Ok(())
                 });
 
                 if result.is_ok() {
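
The and_then-to-map change is clippy's bind_instead_of_map: once the closure can no longer fail, re-wrapping its result in Ok(()) is redundant and map() preserves the Ok/Err state on its own. A minimal sketch; the error string and slot value are invented:

// clippy::bind_instead_of_map in miniature: the follow-up work cannot
// fail, so map() replaces and_then() and the trailing Ok(()) goes away.
fn check(rpc_reachable: bool) -> Result<(), String> {
    let highest_slot: Result<u64, String> = if rpc_reachable {
        Ok(5)
    } else {
        Err("rpc unreachable".to_string())
    };

    highest_slot.map(|slot| {
        // side-effecting follow-up; no Ok(()) needed at the end
        println!("cluster is at slot {}", slot);
    })
}

fn main() {
    assert!(check(true).is_ok());
    assert!(check(false).is_err());
}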

+ 1 - 1
watchtower/src/main.rs

@@ -125,7 +125,7 @@ fn get_config() -> Config {
     let json_rpc_url =
         value_t!(matches, "json_rpc_url", String).unwrap_or_else(|_| config.json_rpc_url);
     let validator_identity_pubkeys: Vec<_> = pubkeys_of(&matches, "validator_identities")
-        .unwrap_or_else(|| vec![])
+        .unwrap_or_else(Vec::new)
         .into_iter()
         .map(|i| i.to_string())
         .collect();