Browse files

chore: Fix typos (#8681)

chore: typo fixes
steviez 3 days ago
parent
commit
afb60f70a5
92 changed files with 153 additions and 150 deletions
  1. 1 1
      CONTRIBUTING.md
  2. 3 3
      accounts-db/src/accounts_db.rs
  3. 1 1
      accounts-db/src/accounts_db/tests.rs
  4. 1 1
      accounts-db/src/accounts_index.rs
  5. 1 1
      accounts-db/src/accounts_index/in_mem_accounts_index.rs
  6. 4 2
      accounts-db/src/ancient_append_vecs.rs
  7. 2 2
      accounts-db/src/append_vec/meta.rs
  8. 1 1
      accounts-db/src/rolling_bit_field.rs
  9. 1 1
      banking-bench/src/main.rs
  10. 1 1
      bench-tps/src/send_batch.rs
  11. 5 5
      bucket_map/src/bucket.rs
  12. 2 2
      bucket_map/src/restart.rs
  13. 2 2
      builtins-default-costs/src/lib.rs
  14. 1 1
      ci/order-crates-for-publishing.py
  15. 1 1
      core/benches/banking_stage.rs
  16. 2 2
      core/src/banking_stage/consume_worker.rs
  17. 1 1
      core/src/banking_stage/leader_slot_metrics.rs
  18. 1 1
      core/src/banking_stage/transaction_scheduler/greedy_scheduler.rs
  19. 1 1
      core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs
  20. 1 1
      core/src/banking_stage/transaction_scheduler/transaction_state_container.rs
  21. 2 2
      core/src/cluster_slots_service/cluster_slots.rs
  22. 1 1
      core/src/consensus.rs
  23. 2 2
      core/src/consensus/heaviest_subtree_fork_choice.rs
  24. 2 2
      core/src/repair/repair_weight.rs
  25. 7 7
      core/src/replay_stage.rs
  26. 2 2
      core/src/validator.rs
  27. 1 1
      core/src/vortexor_receiver_adapter.rs
  28. 1 1
      core/tests/unified_scheduler.rs
  29. 3 3
      cost-model/src/cost_tracker.rs
  30. 2 2
      curves/curve25519/src/curve_syscall_traits.rs
  31. 1 1
      docs/src/proposals/comprehensive-compute-fees.md
  32. 1 1
      docs/src/proposals/interchain-transaction-verification.md
  33. 1 1
      dos/src/main.rs
  34. 1 1
      feature-set/src/lib.rs
  35. 1 1
      gossip/src/ping_pong.rs
  36. 1 1
      ledger-tool/src/main.rs
  37. 1 1
      ledger-tool/src/program.rs
  38. 6 6
      ledger/src/blockstore.rs
  39. 1 1
      ledger/src/blockstore_db.rs
  40. 4 4
      ledger/src/blockstore_processor.rs
  41. 1 1
      ledger/src/sigverify_shreds.rs
  42. 1 1
      local-cluster/src/integration_tests.rs
  43. 2 2
      metrics/src/datapoint.rs
  44. 1 1
      net-utils/src/token_bucket.rs
  45. 1 1
      perf/src/deduper.rs
  46. 1 1
      perf/src/sigverify.rs
  47. 1 1
      poh/src/poh_recorder.rs
  48. 1 1
      program-runtime/src/cpi.rs
  49. 1 1
      program-runtime/src/execution_budget.rs
  50. 1 1
      quic-client/src/nonblocking/quic_client.rs
  51. 1 1
      quic-client/src/quic_client.rs
  52. 1 1
      rpc-client/src/nonblocking/rpc_client.rs
  53. 1 1
      rpc-client/src/rpc_client.rs
  54. 1 1
      rpc/src/optimistically_confirmed_bank_tracker.rs
  55. 2 2
      runtime/src/bank.rs
  56. 1 1
      runtime/src/bank/accounts_lt_hash.rs
  57. 1 1
      runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs
  58. 1 1
      runtime/src/bank/partitioned_epoch_rewards/mod.rs
  59. 3 3
      runtime/src/bank/tests.rs
  60. 1 1
      runtime/src/bank_forks.rs
  61. 1 1
      runtime/src/bank_hash_cache.rs
  62. 2 2
      runtime/src/inflation_rewards/mod.rs
  63. 2 2
      runtime/src/inflation_rewards/points.rs
  64. 1 1
      runtime/src/serde_snapshot/status_cache.rs
  65. 1 1
      runtime/src/snapshot_bank_utils.rs
  66. 2 2
      runtime/src/snapshot_minimizer.rs
  67. 1 1
      runtime/src/snapshot_utils.rs
  68. 3 3
      scheduler-bindings/src/lib.rs
  69. 1 1
      scheduling-utils/src/error.rs
  70. 1 1
      snapshots/src/hardened_unpack.rs
  71. 1 1
      snapshots/src/snapshot_config.rs
  72. 1 1
      svm/src/account_loader.rs
  73. 3 3
      svm/tests/integration_test.rs
  74. 3 2
      svm/tests/mock_bank.rs
  75. 5 5
      test-validator/src/lib.rs
  76. 1 1
      tpu-client-next/src/connection_worker.rs
  77. 1 1
      transaction-view/src/resolved_transaction_view.rs
  78. 9 9
      unified-scheduler-pool/src/lib.rs
  79. 1 1
      validator/src/bootstrap.rs
  80. 1 1
      validator/src/cli.rs
  81. 1 1
      validator/src/cli/thread_args.rs
  82. 1 1
      validator/src/commands/exit/mod.rs
  83. 1 1
      verified-packet-receiver/Readme.md
  84. 1 1
      vortexor/README.md
  85. 1 1
      vortexor/src/main.rs
  86. 1 1
      vortexor/src/vortexor.rs
  87. 1 1
      votor/src/event.rs
  88. 1 1
      votor/src/event_handler.rs
  89. 2 2
      votor/src/voting_utils.rs
  90. 1 1
      wen-restart/src/wen_restart.rs
  91. 1 1
      xdp-ebpf/README
  92. 2 2
      zk-token-sdk/src/instruction/transfer/mod.rs

+ 1 - 1
CONTRIBUTING.md

@@ -73,7 +73,7 @@ be backported to release branches.
 Duplicate code should generally be avoided.
 
 Features should be activated on testnet before mainnet in the closest configuration to mainnet as possible
-Relevant metrics need to be monitored and approriate follow-up given after feature activation.
+Relevant metrics need to be monitored and appropriate follow-up given after feature activation.
 
 Avoid “hack” or “one-off” solutions, prefer well-architected designs which are not fragile.
 

+ 3 - 3
accounts-db/src/accounts_db.rs

@@ -2593,8 +2593,8 @@ impl AccountsDb {
     /// pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index
     ///    and should not be unref'd. If they exist in the accounts index, they are NEW.
     /// clean_stored_dead_slots - clean_stored_dead_slots iterates through all the pubkeys in the dead
-    ///    slots and unrefs them in the acocunts index if they are not present in
-    ///    pubkeys_removed_from_accounts_index. Skipping clean is the equivilent to
+    ///    slots and unrefs them in the accounts index if they are not present in
+    ///    pubkeys_removed_from_accounts_index. Skipping clean is the equivalent to
     ///    pubkeys_removed_from_accounts_index containing all the pubkeys in the dead slots
     fn process_dead_slots(
         &self,
@@ -6839,7 +6839,7 @@ impl AccountsDb {
 
         // Now that the index is generated, get the total capacity of the in-mem maps
         // across all the bins and set the initial value for the stat.
-        // We do this all at once, at the end, since getting the capacity requries iterating all
+        // We do this all at once, at the end, since getting the capacity requires iterating all
         // the bins and grabbing a read lock, which we try to avoid whenever possible.
         let index_capacity = self
             .accounts_index

+ 1 - 1
accounts-db/src/accounts_db/tests.rs

@@ -2999,7 +2999,7 @@ fn test_reuse_storage_id() {
         let slot = slot as Slot;
         db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice()));
         db.add_root_and_flush_write_cache(slot);
-        // reset next_id to what it was previously to cause us to re-use the same id
+        // reset next_id to what it was previously to cause us to reuse the same id
         db.next_id.store(AccountsFileId::MAX, Ordering::Release);
     });
     let ancestors = Ancestors::default();

+ 1 - 1
accounts-db/src/accounts_index.rs

@@ -1057,7 +1057,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         pubkeys.into_iter().for_each(|pubkey| {
             let bin = self.bin_calculator.bin_from_pubkey(pubkey);
             if bin != last_bin {
-                // cannot re-use lock since next pubkey is in a different bin than previous one
+                // cannot reuse lock since next pubkey is in a different bin than previous one
                 lock = Some(&self.account_maps[bin]);
                 last_bin = bin;
             }

+ 1 - 1
accounts-db/src/accounts_index/in_mem_accounts_index.rs

@@ -1436,7 +1436,7 @@ mod tests {
             .try_write(&pubkey, disk_entry)
             .unwrap();
 
-        // Ensure the entry is not found in meory
+        // Ensure the entry is not found in memory
         let mut found = false;
         accounts_index.get_only_in_mem(&pubkey, false, |entry| {
             found = entry.is_some();

+ 4 - 2
accounts-db/src/ancient_append_vecs.rs

@@ -3413,7 +3413,7 @@ pub mod tests {
                     assert!(storage.is_none());
                     continue;
                 }
-                // any of the several slots could have been chosen to be re-used
+                // any of the several slots could have been chosen to be reused
                 let active_slots = (0..num_slots)
                     .filter_map(|slot| db.storage.get_slot_storage_entry((slot as Slot) + slot1))
                     .count();
@@ -3503,7 +3503,9 @@ pub mod tests {
         create_storages_and_update_index(&db, None, initial_slot, MAX_RECYCLE_STORES, true, None);
         let max_slot_inclusive = initial_slot + (MAX_RECYCLE_STORES as Slot) - 1;
         let range = initial_slot..(max_slot_inclusive + 1);
-        // storages with Arc::strong_count > 1 cannot be pulled out of the recycling bin, so hold refcounts so these storages are never re-used by the actual test code
+        // storages with Arc::strong_count > 1 cannot be pulled out of the
+        // recycling bin, so hold refcounts so these storages are never reused
+        // by the actual test code
         let _storages_hold_to_prevent_recycling = range
             .filter_map(|slot| db.storage.get_slot_storage_entry(slot))
             .collect::<Vec<_>>();

+ 2 - 2
accounts-db/src/append_vec/meta.rs

@@ -8,7 +8,7 @@ use {
 };
 
 /// Meta contains enough context to recover the index from storage itself
-/// This struct will be backed by mmaped and snapshotted data files.
+/// This struct will be backed by mmapped and snapshotted data files.
 /// So the data layout must be stable and consistent across the entire cluster!
 #[derive(Clone, PartialEq, Eq, Debug)]
 #[repr(C)]
@@ -23,7 +23,7 @@ pub struct StoredMeta {
     pub pubkey: Pubkey,
 }
 
-/// This struct will be backed by mmaped and snapshotted data files.
+/// This struct will be backed by mmapped and snapshotted data files.
 /// So the data layout must be stable and consistent across the entire cluster!
 #[derive(Serialize, Deserialize, Clone, Debug, Default, Eq, PartialEq)]
 #[repr(C)]

+ 1 - 1
accounts-db/src/rolling_bit_field.rs

@@ -718,7 +718,7 @@ pub mod tests {
             let width = *width;
             let mut tester = setup_empty(width);
             for start in [0, width * 5].iter().cloned() {
-                // recreate means create empty bitfield with each iteration, otherwise re-use
+                // recreate means create empty bitfield with each iteration, otherwise reuse
                 for recreate in [false, true].iter().cloned() {
                     let max = start + 3;
                     // first root to add

+ 1 - 1
banking-bench/src/main.rs

@@ -581,7 +581,7 @@ fn main() {
 
         // This signature clear may not actually clear the signatures
         // in this chunk, but since we rotate between CHUNKS then
-        // we should clear them by the time we come around again to re-use that chunk.
+        // we should clear them by the time we come around again to reuse that chunk.
         bank.clear_signatures();
         total_us += now.elapsed().as_micros() as u64;
         total_sent += sent;

+ 1 - 1
bench-tps/src/send_batch.rs

@@ -318,7 +318,7 @@ where
             if self.is_empty() {
                 break;
             }
-            info!("Looping verifications");
+            info!("Looping verification");
 
             let verified_txs = verified_txs.load(Ordering::Relaxed);
             let failed_verify = failed_verify.load(Ordering::Relaxed);

+ 5 - 5
bucket_map/src/bucket.rs

@@ -129,7 +129,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket<T> {
         let elem_size = NonZeroU64::new(std::mem::size_of::<IndexEntry<T>>() as u64).unwrap();
         let (index, random, reused_file_at_startup) = reuse_path
             .and_then(|path| {
-                // try to re-use the file this bucket was using last time we were running
+                // try to reuse the file this bucket was using last time we were running
                 restartable_bucket.get().and_then(|(_file_name, random)| {
                     let result = BucketStorage::load_on_restart(
                         path.clone(),
@@ -140,14 +140,14 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket<T> {
                     )
                     .map(|index| (index, random, true /* true = reused file */));
                     if result.is_none() {
-                        // we couldn't re-use it, so delete it
+                        // we couldn't reuse it, so delete it
                         _ = fs::remove_file(path);
                     }
                     result
                 })
             })
             .unwrap_or_else(|| {
-                // no file to re-use, so create a new file
+                // no file to reuse, so create a new file
                 let (index, file_name) = BucketStorage::new(
                     Arc::clone(&drives),
                     1,
@@ -505,7 +505,7 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket<T> {
     ) -> Result<(), BucketMapError> {
         let num_slots = data_len as u64;
         let best_fit_bucket = MultipleSlots::data_bucket_from_num_slots(data_len as u64);
-        // num_slots > 1 becuase we can store num_slots = 0 or num_slots = 1 in the index entry
+        // num_slots > 1 because we can store num_slots = 0 or num_slots = 1 in the index entry
         let requires_data_bucket = num_slots > 1 || ref_count != 1;
         if requires_data_bucket && self.data.get(best_fit_bucket as usize).is_none() {
             // fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated
@@ -1012,7 +1012,7 @@ mod tests {
                         &mut hashed,
                         &mut entries_created,
                         &mut duplicates,
-                        // call re-use code first
+                        // call reuse code first
                         true,
                     );
                     assert_eq!(entries_created, 0);

+ 2 - 2
bucket_map/src/restart.rs

@@ -206,7 +206,7 @@ impl Restart {
 
     /// get one `RestartableBucket` for each bucket.
     /// If a potentially reusable file exists, then put that file's path in `RestartableBucket` for that bucket.
-    /// Delete all files that cannot possibly be re-used.
+    /// Delete all files that cannot possibly be reused.
     pub(crate) fn get_restartable_buckets(
         restart: Option<&Arc<Mutex<Restart>>>,
         drives: &Arc<Vec<PathBuf>>,
@@ -559,7 +559,7 @@ mod test {
         test_get(&restart, buckets, last_offset);
         (4..6).for_each(|offset| test_set_get(&restart, buckets, offset));
         drop(restart);
-        // create a new file without deleting old one. Make sure it is default and not re-used.
+        // create a new file without deleting old one. Make sure it is default and not reused.
         let restart = Arc::new(Mutex::new(Restart::new(&config).unwrap()));
         test_default_restart(&restart, &config);
     }

+ 2 - 2
builtins-default-costs/src/lib.rs

@@ -153,10 +153,10 @@ const fn validate_position(migrating_builtins: &[(Pubkey, BuiltinCost)]) {
         match migrating_builtins[index].1 {
             BuiltinCost::Migrating(MigratingBuiltinCost { position, .. }) => assert!(
                 position == index,
-                "migration feture must exist and at correct position"
+                "migration feature must exist and at correct position"
             ),
             BuiltinCost::NotMigrating => {
-                panic!("migration feture must exist and at correct position")
+                panic!("migration feature must exist and at correct position")
             }
         }
         index += 1;

+ 1 - 1
ci/order-crates-for-publishing.py

@@ -23,7 +23,7 @@ def load_metadata():
 
 # Consider a situation where a crate now wants to use already existing
 # developing-oriented library code for their integration tests and benchmarks,
-# like creating malformed data or omitting signature verifications. Ideally,
+# like creating malformed data or omitting signature verification. Ideally,
 # the code should have been guarded under the special feature
 # `dev-context-only-utils` to avoid accidental misuse for production code path.
 #

+ 1 - 1
core/benches/banking_stage.rs

@@ -298,7 +298,7 @@ fn bench_banking(
 
         // This signature clear may not actually clear the signatures
         // in this chunk, but since we rotate between CHUNKS then
-        // we should clear them by the time we come around again to re-use that chunk.
+        // we should clear them by the time we come around again to reuse that chunk.
         bank.clear_signatures();
         trace!(
             "time: {} checked: {} sent: {}",

+ 2 - 2
core/src/banking_stage/consume_worker.rs

@@ -1501,7 +1501,7 @@ fn active_leader_state_with_timeout(
     // If the initial check above didn't find a bank, we will
     // spin up to some timeout to wait for a bank to execute on.
     // This is conservatively long because transitions between slots
-    // can occassionally be slow.
+    // can occasionally be slow.
     const TIMEOUT: Duration = Duration::from_millis(50);
     let now = Instant::now();
     while now.elapsed() < TIMEOUT {
@@ -1514,7 +1514,7 @@ fn active_leader_state_with_timeout(
     None
 }
 
-/// Returns an active leader state if avaiable, otherwise None.
+/// Returns an active leader state if available, otherwise None.
 fn active_leader_state(
     shared_leader_state: &SharedLeaderState,
 ) -> Option<arc_swap::Guard<Arc<LeaderState>>> {

+ 1 - 1
core/src/banking_stage/leader_slot_metrics.rs

@@ -486,7 +486,7 @@ impl LeaderSlotMetricsTracker {
                 MetricsTrackerAction::ReportAndResetTracker
             }
 
-            // Our leader slot has begain, time to create a new slot tracker
+            // Our leader slot has begun, time to create a new slot tracker
             (None, Some(bank)) => {
                 MetricsTrackerAction::NewTracker(Some(LeaderSlotMetrics::new(bank.slot())))
             }

+ 1 - 1
core/src/banking_stage/transaction_scheduler/greedy_scheduler.rs

@@ -627,7 +627,7 @@ mod test {
         let pubkey2 = Pubkey::new_unique();
         let pubkey3 = Pubkey::new_unique();
 
-        // Dependecy graph:
+        // Dependency graph:
         // 3 --
         //     \
         //       -> 1 -> 0

+ 1 - 1
core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs

@@ -798,7 +798,7 @@ mod tests {
     fn test_schedule_over_full_container() {
         let (mut scheduler, _work_receivers, _finished_work_sender) = create_test_frame(1);
 
-        // set up a container is larger enough that single pass of schedulling will not deplete it.
+        // set up a container is larger enough that single pass of scheduling will not deplete it.
         let capacity = scheduler
             .config
             .max_scanned_transactions_per_scheduling_pass

+ 1 - 1
core/src/banking_stage/transaction_scheduler/transaction_state_container.rs

@@ -246,7 +246,7 @@ pub type SharedBytes = Arc<Vec<u8>>;
 pub(crate) type RuntimeTransactionView = RuntimeTransaction<ResolvedTransactionView<SharedBytes>>;
 pub(crate) type TransactionViewState = TransactionState<RuntimeTransactionView>;
 
-/// A wrapper around `TransactionStateContainer` that allows re-uses
+/// A wrapper around `TransactionStateContainer` that allows reuse of
 /// pre-allocated `Bytes` to copy packet data into and use for serialization.
 /// This is used to avoid allocations in parsing transactions.
 pub struct TransactionViewStateContainer {

+ 2 - 2
core/src/cluster_slots_service/cluster_slots.rs

@@ -439,7 +439,7 @@ impl ClusterSlots {
     #[cfg(test)]
     // patches the given node_id into the internal structures
     // to pretend as if it has submitted epoch slots for a given slot.
-    // If the node was not previosly registered in validator_stakes,
+    // If the node was not previously registered in validator_stakes,
     // an override_stake amount should be provided.
     pub(crate) fn insert_node_id(&self, slot: Slot, node_id: Pubkey) {
         let mut epoch_slot = EpochSlots {
@@ -801,7 +801,7 @@ mod tests {
         assert_eq!(
             cs.lookup(1).unwrap().get_support_by_pubkey(&pk).unwrap(),
             42,
-            "the stake of the node should be commited to the slot"
+            "the stake of the node should be committed to the slot"
         );
     }
 }

+ 1 - 1
core/src/consensus.rs

@@ -881,7 +881,7 @@ impl Tower {
         if last_vote_ancestors.is_empty() {
             // If `last_vote_ancestors` is empty, this means we must have a last vote that is stray. If the `last_voted_slot`
             // is stray, it must be descended from some earlier root than the latest root (the anchor at startup).
-            // The above check also guarentees that the candidate slot is not a descendant of this stray last vote.
+            // The above check also guarantees that the candidate slot is not a descendant of this stray last vote.
             //
             // This gives us a fork graph:
             //     / ------------- stray `last_voted_slot`

+ 2 - 2
core/src/consensus/heaviest_subtree_fork_choice.rs

@@ -2704,8 +2704,8 @@ mod test {
         let (bank, vote_pubkeys) =
             bank_utils::setup_bank_and_vote_pubkeys_for_tests(num_validators, stake);
 
-        // Both voters voted on duplicate_leaves_descended_from_4[1], so thats the heaviest
-        // branch
+        // Both voters voted on duplicate_leaves_descended_from_4[1], so that is
+        // the heaviest branch
         let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![
             (vote_pubkeys[0], duplicate_leaves_descended_from_4[1]),
             (vote_pubkeys[1], duplicate_leaves_descended_from_4[1]),

+ 2 - 2
core/src/repair/repair_weight.rs

@@ -1820,8 +1820,8 @@ mod test {
             TreeRoot::Root(20)
         );
 
-        // Now set root at a slot 30 that doesnt exist in `repair_weight`, but is
-        // higher than the remaining orphan
+        // Now set root at a slot 30 that doesn't exist in `repair_weight`, but
+        // is higher than the remaining orphan
         assert!(!repair_weight.slot_to_tree.contains_key(&30));
         repair_weight.set_root(30);
         check_old_root_purged_verify_new_root(3, 30, &repair_weight);

+ 7 - 7
core/src/replay_stage.rs

@@ -134,9 +134,9 @@ enum ForkReplayMode {
 
 enum GenerateVoteTxResult {
     // non voting validator, not eligible for refresh
-    // until authorized keypair is overriden
+    // until authorized keypair is overridden
     NonVoting,
-    // hot spare validator, not eligble for refresh
+    // hot spare validator, not eligible for refresh
     // until set identity is invoked
     HotSpare,
     // failed generation, eligible for refresh
@@ -2735,7 +2735,7 @@ impl ReplayStage {
             // On the fly adjustments via the cli will be picked up for the next vote.
             BlockhashStatus::NonVoting | BlockhashStatus::HotSpare => return false,
             // In this case we have not voted since restart, our setup is unclear.
-            // We have a vote from our previous restart that is eligble for refresh, we must refresh.
+            // We have a vote from our previous restart that is eligible for refresh, we must refresh.
             BlockhashStatus::Uninitialized => None,
             BlockhashStatus::Blockhash(blockhash) => Some(blockhash),
         };
@@ -8070,7 +8070,7 @@ pub(crate) mod tests {
 
         // Trying to refresh the vote on a sibling bank where:
         // 1) The vote for slot 1 hasn't landed
-        // 2) The blockheight is still eligble for a refresh
+        // 2) The blockheight is still eligible for a refresh
         // This will still not refresh because `MAX_VOTE_REFRESH_INTERVAL_MILLIS` has not expired yet
         let expired_bank_sibling = {
             let mut parent_bank = bank2.clone();
@@ -9296,7 +9296,7 @@ pub(crate) mod tests {
             last_hash =
                 fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
         }
-        // Artifically root 3 and 4
+        // Artificially root 3 and 4
         blockstore.set_roots([3, 4].iter()).unwrap();
 
         // Set up bank0
@@ -9554,7 +9554,7 @@ pub(crate) mod tests {
 
         assert!(!duplicate_confirmed_slots.contains_key(&0));
 
-        // Mark 5 as duplicate confirmed, should suceed
+        // Mark 5 as duplicate confirmed, should succeed
         let bank_hash_5 = bank_forks.read().unwrap().bank_hash(5).unwrap();
         let confirmed_slots = [(5, bank_hash_5)];
 
@@ -9693,7 +9693,7 @@ pub(crate) mod tests {
             .is_duplicate_confirmed(&(5, bank_hash_5))
             .unwrap_or(false));
 
-        // Mark 5 and 6 as duplicate confirmed, should suceed
+        // Mark 5 and 6 as duplicate confirmed, should succeed
         let bank_hash_6 = bank_forks.read().unwrap().bank_hash(6).unwrap();
         if same_batch {
             sender

+ 2 - 2
core/src/validator.rs

@@ -335,7 +335,7 @@ pub struct ValidatorConfig {
     pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>, // Empty = repair with all
     pub gossip_validators: Option<HashSet<Pubkey>>, // None = gossip with all
     pub max_genesis_archive_unpacked_size: u64,
-    /// Run PoH, transaction signature and other transaction verifications during blockstore
+    /// Run PoH, transaction signature and other transaction verification during blockstore
     /// processing.
     pub run_verification: bool,
     pub require_tower: bool,
@@ -563,7 +563,7 @@ pub struct ValidatorTpuConfig {
     pub vote_use_quic: bool,
     /// Controls the connection cache pool size
     pub tpu_connection_pool_size: usize,
-    /// Controls if to enable UDP for TPU tansactions.
+    /// Controls if to enable UDP for TPU transactions.
     pub tpu_enable_udp: bool,
     /// QUIC server config for regular TPU
     pub tpu_quic_server_config: SwQosQuicStreamerConfig,

+ 1 - 1
core/src/vortexor_receiver_adapter.rs

@@ -56,7 +56,7 @@ impl VortexorReceiverAdapter {
                     packets_sender,
                     forward_stage_sender,
                 ) {
-                    info!("Quiting VortexorReceiverAdapter: {msg}");
+                    info!("Quitting VortexorReceiverAdapter: {msg}");
                 }
             })
             .unwrap();

+ 1 - 1
core/tests/unified_scheduler.rs

@@ -117,7 +117,7 @@ fn test_scheduler_waited_by_drop_bank_service() {
         genesis_config.hash(),
     ));
 
-    // Delay transaction execution to ensure transaction execution happens after termintion has
+    // Delay transaction execution to ensure transaction execution happens after termination has
    // been started
    let lock_to_stall = LOCK_TO_STALL.lock().unwrap();
    pruned_bank

+ 3 - 3
cost-model/src/cost_tracker.rs

@@ -933,7 +933,7 @@ mod tests {
         );
 
         let test_update_cost_tracker =
-            |execution_cost_adjust: i64, loaded_acounts_data_size_cost_adjust: i64| {
+            |execution_cost_adjust: i64, loaded_accounts_data_size_cost_adjust: i64| {
                 let mut cost_tracker = CostTracker::default();
                 assert!(cost_tracker.try_add(&tx_cost).is_ok());
 
@@ -941,10 +941,10 @@ mod tests {
                     (estimated_programs_execution_cost as i64 + execution_cost_adjust) as u64;
                 let actual_loaded_accounts_data_size_cost =
                     (estimated_loaded_accounts_data_size_cost as i64
-                        + loaded_acounts_data_size_cost_adjust) as u64;
+                        + loaded_accounts_data_size_cost_adjust) as u64;
                 let expected_cost = (estimated_tx_cost as i64
                     + execution_cost_adjust
-                    + loaded_acounts_data_size_cost_adjust)
+                    + loaded_accounts_data_size_cost_adjust)
                     as u64;
 
                 cost_tracker.update_execution_cost(

+ 2 - 2
curves/curve25519/src/curve_syscall_traits.rs

@@ -31,7 +31,7 @@ pub trait GroupOperations {
 
     /// Subtracts two curve points: P_0 - P_1.
     ///
-    /// NOTE: Altneratively, one can consider replacing this with a `negate` function that maps a
+    /// NOTE: Alternatively, one can consider replacing this with a `negate` function that maps a
     /// curve point P -> -P. Then subtraction can be computed by combining `negate` and `add`
     /// syscalls. However, `subtract` is a much more widely used function than `negate`.
     fn subtract(left_point: &Self::Point, right_point: &Self::Point) -> Option<Self::Point>;
@@ -44,7 +44,7 @@ pub trait MultiScalarMultiplication {
     type Scalar;
     type Point;
 
-    /// Given a vector of scalsrs S_1, ..., S_N, and curve points P_1, ..., P_N, computes the
+    /// Given a vector of scalars S_1, ..., S_N, and curve points P_1, ..., P_N, computes the
     /// "inner product": S_1*P_1 + ... + S_N*P_N.
     ///
     /// NOTE: This operation can be represented by combining `add` and `multiply` functions in
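
A minimal sketch of that reduction, written against a hypothetical stand-in trait (the names and signatures below are illustrative only, not this crate's actual API):

// Hypothetical stand-in trait for illustration; not the crate's real traits.
trait Group {
    type Scalar;
    type Point;
    fn add(left: &Self::Point, right: &Self::Point) -> Option<Self::Point>;
    fn multiply(scalar: &Self::Scalar, point: &Self::Point) -> Option<Self::Point>;
}

// Computes the "inner product" S_1*P_1 + ... + S_N*P_N by folding `multiply`
// results through `add`; None on empty or mismatched inputs or a failed op.
fn naive_msm<G: Group>(scalars: &[G::Scalar], points: &[G::Point]) -> Option<G::Point> {
    if scalars.len() != points.len() {
        return None;
    }
    let mut terms = scalars.iter().zip(points.iter());
    let (s0, p0) = terms.next()?;
    let init = G::multiply(s0, p0)?;
    terms.try_fold(init, |acc, (s, p)| G::add(&acc, &G::multiply(s, p)?))
}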

+ 1 - 1
docs/src/proposals/comprehensive-compute-fees.md

@@ -9,7 +9,7 @@ a validator to process a transaction.  The fee structure is only based on the
 number of signatures in a transaction but is meant to account for the work that
 the validator must perform to validate each transaction.  The validator performs
 a lot more user-defined work than just signature verification.  Processing a
-transaction typically includes signature verifications, account locking, account
+transaction typically includes signature verification, account locking, account
 loading, and instruction processing.
 
 ## Proposed Solution

+ 1 - 1
docs/src/proposals/interchain-transaction-verification.md

@@ -100,6 +100,6 @@ Store Headers in program sub-accounts indexed by Public address:
 Linked List of multiple sub-accounts storing headers:
 
 - Maintain sequential index of storage accounts, many headers per storage account
-- Max 2 account data lookups for &gt;99.9% of verifications \(1 for most\)
+- Max 2 account data lookups for &gt;99.9% of verification \(1 for most\)
 - Compact sequential data address format allows any number of confirmations and fast lookups
 - Facilitates network-wide header duplication inefficiencies

+ 1 - 1
dos/src/main.rs

@@ -250,7 +250,7 @@ struct TransactionBatchMsg {
 
 /// Creates thread which receives batches of transactions from tx_receiver
 /// and sends them to the target.
-/// If `iterations` is 0, it works indefenetely.
+/// If `iterations` is 0, it works indefinitely.
 /// Otherwise, it sends at least `iterations` number of transactions
 fn create_sender_thread(
     tx_receiver: Receiver<TransactionBatchMsg>,
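
The doc comment above describes the loop contract; a hedged sketch of that shape (the helper name and batch type here are hypothetical, not the file's actual code):

// iterations == 0 -> run until the channel closes; otherwise stop once at
// least `iterations` transactions have been sent.
fn sender_loop(
    tx_receiver: crossbeam_channel::Receiver<Vec<u8>>, // stand-in batch type
    iterations: usize,
    send_batch: impl Fn(&[u8]) -> usize, // hypothetical: returns number sent
) {
    let mut sent = 0;
    while let Ok(batch) = tx_receiver.recv() {
        sent += send_batch(&batch);
        if iterations != 0 && sent >= iterations {
            break;
        }
    }
}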

+ 1 - 1
feature-set/src/lib.rs

@@ -2183,7 +2183,7 @@ pub struct FullInflationFeaturePair {
     pub enable_id: Pubkey, // Feature to enable full inflation by the candidate
 }
 
-/// Set of feature pairs that once enabled will trigger full inflationi
+/// Set of feature pairs that once enabled will trigger full inflation
 pub static FULL_INFLATION_FEATURE_PAIRS: LazyLock<AHashSet<FullInflationFeaturePair>> =
     LazyLock::new(|| {
         [FullInflationFeaturePair {

+ 1 - 1
gossip/src/ping_pong.rs

@@ -227,7 +227,7 @@ impl<const N: usize> PingCache<N> {
     }
 
     /// Returns true if the remote node has responded to a ping message.
-    /// Removes expired pong messages. In order to extend verifications before
+    /// Removes expired pong messages. In order to extend verification before
     /// expiration, if the pong message is not too recent, and the node has not
     /// been pinged recently, calls the given function to generates a new ping
     /// message, records current timestamp and hash of ping token, and returns

+ 1 - 1
ledger-tool/src/main.rs

@@ -1903,7 +1903,7 @@ fn main() {
 
                     if let Some(mut slot_recorder_config) = slot_recorder_config {
                         // Drop transaction_status_sender to break transaction_recorder
-                        // out of its' recieve loop
+                        // out of its' receive loop
                         let transaction_status_sender =
                             slot_recorder_config.transaction_status_sender.take();
                         drop(transaction_status_sender);

+ 1 - 1
ledger-tool/src/program.rs

@@ -166,7 +166,7 @@ and the following fields are required
                             "Mode of execution, where 'interpreter' runs \
                             "Mode of execution, where 'interpreter' runs \
                              the program in the virtual machine's interpreter, 'debugger' is the same as 'interpreter' \
                              the program in the virtual machine's interpreter, 'debugger' is the same as 'interpreter' \
                              but hosts a GDB interface, and 'jit' precompiles the program to native machine code \
                              but hosts a GDB interface, and 'jit' precompiles the program to native machine code \
-                             before execting it in the virtual machine.",
+                             before executing it in the virtual machine.",
                         )
                         )
                         .short("e")
                         .short("e")
                         .long("mode")
                         .long("mode")

+ 6 - 6
ledger/src/blockstore.rs

@@ -1374,7 +1374,7 @@ impl Blockstore {
         }
     }
 
-    // Bypasses erasure recovery becuase it is called from broadcast stage
+    // Bypasses erasure recovery because it is called from broadcast stage
     // when inserting own shreds during leader slots.
     pub fn insert_cow_shreds<'a>(
         &self,
@@ -1863,7 +1863,7 @@ impl Blockstore {
                 .map(Cow::into_owned)
             else {
                 error!(
-                    "Shred {shred_id:?} indiciated by merkle root meta {merkle_root_meta:?} is \
+                    "Shred {shred_id:?} indicated by merkle root meta {merkle_root_meta:?} is \
                      missing from blockstore. This should only happen in extreme cases where \
                      blockstore cleanup has caught up to the root. Skipping the merkle root \
                      consistency check"
@@ -2100,7 +2100,7 @@ impl Blockstore {
                     .map(Cow::into_owned)
                 else {
                     error!(
-                        "Last index data shred {shred_id:?} indiciated by slot meta {slot_meta:?} \
+                        "Last index data shred {shred_id:?} indicated by slot meta {slot_meta:?} \
                          is missing from blockstore. This should only happen in extreme cases \
                          where blockstore cleanup has caught up to the root. Skipping data shred \
                          insertion"
@@ -2150,7 +2150,7 @@ impl Blockstore {
                     .map(Cow::into_owned)
                 else {
                     error!(
-                        "Last received data shred {shred_id:?} indiciated by slot meta \
+                        "Last received data shred {shred_id:?} indicated by slot meta \
                          {slot_meta:?} is missing from blockstore. This should only happen in \
                          extreme cases where blockstore cleanup has caught up to the root. \
                          Skipping data shred insertion"
@@ -3873,7 +3873,7 @@ impl Blockstore {
         if let Some(prev_value) = self.bank_hash_cf.get(slot).unwrap() {
             if prev_value.frozen_hash() == frozen_hash && prev_value.is_duplicate_confirmed() {
                 // Don't overwrite is_duplicate_confirmed == true with is_duplicate_confirmed == false,
-                // which may happen on startup when procesing from blockstore processor because the
+                // which may happen on startup when processing from blockstore processor because the
                 // blocks may not reflect earlier observed gossip votes from before the restart.
                 return;
             }
@@ -7647,7 +7647,7 @@ pub mod tests {
             blockstore.insert_shreds(slot_shreds, None, false).unwrap();
         }
 
-        // Slot doesnt exist, iterator should be empty
+        // Slot doesn't exist, iterator should be empty
         let shred_iter = blockstore.slot_data_iterator(5, 0).unwrap();
         let result: Vec<_> = shred_iter.collect();
         assert_eq!(result, vec![]);

+ 1 - 1
ledger/src/blockstore_db.rs

@@ -281,7 +281,7 @@ impl Rocks {
         // opposed to manual compaction requests on a range.
         // - Periodic compaction operates on individual files once the file
         //   has reached a certain (configurable) age. See comments at
-        //   PERIODIC_COMPACTION_SECONDS for some more deatil.
+        //   PERIODIC_COMPACTION_SECONDS for some more detail.
         // - Manual compaction operates on a range and could end up propagating
         //   through several files and/or levels of the db.
         //

+ 4 - 4
ledger/src/blockstore_processor.rs

@@ -244,7 +244,7 @@ pub fn execute_batch<'a>(
             batch.sanitized_transactions(),
         ))
     } else {
-        // Unified scheduler block production wihout metadata recording
+        // Unified scheduler block production without metadata recording
         Ok(vec![])
     };
     check_block_costs_elapsed.stop();
@@ -829,7 +829,7 @@ pub type ProcessSlotCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
 
 #[derive(Default, Clone)]
 pub struct ProcessOptions {
-    /// Run PoH, transaction signature and other transaction verifications on the entries.
+    /// Run PoH, transaction signature and other transaction verification on the entries.
     pub run_verification: bool,
     pub full_leader_cache: bool,
     pub halt_at_slot: Option<Slot>,
@@ -1092,7 +1092,7 @@ fn verify_ticks(
 
         // If the bank is in the alpenglow epoch, but the parent is from an epoch
         // where the feature flag is not active, we must verify ticks that correspond
-        // to the epoch in which PoH is active. This verification is criticial, as otherwise
+        // to the epoch in which PoH is active. This verification is critical, as otherwise
         // a leader could jump the gun and publish a block in the alpenglow epoch without waiting
         // the appropriate time as determined by PoH in the prior epoch.
         if bank.slot() >= first_alpenglow_slot && next_bank_tick_height == max_bank_tick_height {
@@ -5198,7 +5198,7 @@ pub mod tests {
             }),
         );
 
-        // pre_commit_callback() should alwasy be called regardless of tx_result
+        // pre_commit_callback() should always be called regardless of tx_result
         assert!(is_called);
 
         if should_commit {

+ 1 - 1
ledger/src/sigverify_shreds.rs

@@ -448,7 +448,7 @@ mod tests {
                 .collect()
         };
         let mut packets = make_packets(&mut rng, &shreds);
-        // Assert that initially all signatrues are invalid.
+        // Assert that initially all signatures are invalid.
         assert_eq!(
             verify_shreds(&thread_pool, &packets, &pubkeys, &cache),
             packets

+ 1 - 1
local-cluster/src/integration_tests.rs

@@ -182,7 +182,7 @@ pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore, is_tr
     }
 }

-/// Computes the numbr of milliseconds `num_blocks` blocks will take given
+/// Computes the number of milliseconds `num_blocks` blocks will take given
 /// each slot contains `ticks_per_slot`
 pub fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
     (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
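
As a worked instance of the formula above (a standalone sketch; the two constants mirror the commonly used defaults of 64 ticks per slot and 400 ms per slot, and are assumptions rather than values taken from this diff):

    const DEFAULT_TICKS_PER_SLOT: u64 = 64; // assumed default
    const DEFAULT_MS_PER_SLOT: u64 = 400;   // assumed default

    fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
        // div_ceil rounds up, so a fraction of a tick still costs a full millisecond
        (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
    }

    fn main() {
        assert_eq!(ms_for_n_slots(2, 64), 800); // (64 * 400 * 2) / 64
        assert_eq!(ms_for_n_slots(3, 8), 150);  // (8 * 400 * 3) / 64
    }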

+ 2 - 2
metrics/src/datapoint.rs

@@ -9,7 +9,7 @@
 //! - datapoint_info!
 //! - datapoint_debug!
 //!
-//! The matric macro consists of the following three main parts:
+//! The metric macro consists of the following three main parts:
 //!  - name: the name of the metric.
 //!
 //!  - tags (optional): when a metric sample is reported with tags, you can use
@@ -20,7 +20,7 @@
 //!
 //!  - fields (optional): fields are the main content of a metric sample. The
 //!    macro supports four different types of fields: bool, i64, f64, and String.
-//!    Here're their syntax:
+//!    Here is their syntax:
 //!
 //!    - ("field-name", "field-value", bool)
 //!    - ("field-name", "field-value", i64)
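
Putting the parts together, a hedged sketch of a call site using the field syntax listed above (the macro is assumed to be exported from the solana_metrics crate; tags omitted):

    use solana_metrics::datapoint_info;

    fn report(batch_len: usize, elapsed_us: u64) {
        datapoint_info!(
            "banking_stage-batch",                  // name
            ("batch_len", batch_len as i64, i64),   // i64 field
            ("elapsed_us", elapsed_us as i64, i64), // i64 field
            ("saturated", false, bool)              // bool field
        );
    }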

+ 1 - 1
net-utils/src/token_bucket.rs

@@ -202,7 +202,7 @@ impl<K> KeyedRateLimiter<K>
 where
     K: Hash + Eq,
 {
-    /// Creates a new KeyedRateLimiter with a specified taget capacity and shard amount for the
+    /// Creates a new KeyedRateLimiter with a specified target capacity and shard amount for the
     /// underlying DashMap. This uses a LazyLRU style eviction policy, so actual memory consumption
     /// will be 2 * target_capacity.
     ///

+ 1 - 1
perf/src/deduper.rs

@@ -1,4 +1,4 @@
-//! Utility to deduplicate baches of incoming network packets.
+//! Utility to deduplicate batches of incoming network packets.

 use {
     crate::packet::PacketBatch,

+ 1 - 1
perf/src/sigverify.rs

@@ -93,7 +93,7 @@ impl std::convert::From<std::num::TryFromIntError> for PacketError {
     }
 }

-/// Returns true if the signatrue on the packet verifies.
+/// Returns true if the signature on the packet verifies.
 /// Caller must do packet.set_discard(true) if this returns false.
 #[must_use]
 fn verify_packet(packet: &mut PacketRefMut, reject_non_vote: bool) -> bool {
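
The contract in this doc comment (return false, caller discards) is exercised roughly as in the sketch below; Packet here is a hypothetical stand-in type, not the real solana-perf packet:

    struct Packet {
        discard: bool, // plus payload, signatures, flags, ...
    }

    impl Packet {
        fn set_discard(&mut self, discard: bool) {
            self.discard = discard;
        }
    }

    // Stub standing in for the real signature check.
    fn verify_packet_stub(_packet: &mut Packet, _reject_non_vote: bool) -> bool {
        true
    }

    fn sift(packets: &mut [Packet]) {
        for packet in packets.iter_mut() {
            if !verify_packet_stub(packet, false) {
                // Per the doc comment, discarding is the caller's job.
                packet.set_discard(true);
            }
        }
    }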

+ 1 - 1
poh/src/poh_recorder.rs

@@ -1725,7 +1725,7 @@ mod tests {
         assert!(poh_recorder.working_bank.is_none());

         // Even though we ticked much further than working_bank.max_tick_height,
-        // the `start_slot` is still the slot of the last workign bank set by
+        // the `start_slot` is still the slot of the last working bank set by
         // the earlier call to `poh_recorder.set_bank()`
         assert_eq!(poh_recorder.start_slot(), bank.slot());
     }

+ 1 - 1
program-runtime/src/cpi.rs

@@ -229,7 +229,7 @@ pub struct CallerAccount<'a> {
     pub lamports: &'a mut u64,
     pub owner: &'a mut Pubkey,
     // The original data length of the account at the start of the current
-    // instruction. We use this to determine wether an account was shrunk or
+    // instruction. We use this to determine whether an account was shrunk or
     // grown before or after CPI, and to derive the vm address of the realloc
     // region.
     pub original_data_len: usize,

+ 1 - 1
program-runtime/src/execution_budget.rs

@@ -241,7 +241,7 @@ impl SVMTransactionExecutionCost {
     ///
     /// 61*n^2 + 542
     ///
-    /// Which aproximates the results of benchmarks of light-posiedon
+    /// Which approximates the results of benchmarks of light-poseidon
     /// library[0]. These results assume 1 CU per 33 ns. Examples:
     ///
     /// * 1 input
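
Plugging small input counts into the documented quadratic (61*n^2 + 542 CUs, under the stated 1 CU per 33 ns assumption):

    const fn poseidon_cost(n: u64) -> u64 {
        61 * n * n + 542
    }

    fn main() {
        assert_eq!(poseidon_cost(1), 603);  // 61 + 542
        assert_eq!(poseidon_cost(2), 786);  // 244 + 542
        assert_eq!(poseidon_cost(3), 1091); // 549 + 542
    }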

+ 1 - 1
quic-client/src/nonblocking/quic_client.rs

@@ -401,7 +401,7 @@ impl QuicClient {
                         .prepare_connection_us
                         .fetch_add(measure_prepare_connection.as_us(), Ordering::Relaxed);
                     trace!(
-                        "Succcessfully sent to {} with id {}, thread: {:?}, data len: {}, \
+                        "Successfully sent to {} with id {}, thread: {:?}, data len: {}, \
                          send_packet_us: {} prepare_connection_us: {}",
                         self.addr,
                         connection.stable_id(),

+ 1 - 1
quic-client/src/quic_client.rs

@@ -24,7 +24,7 @@ pub const MAX_OUTSTANDING_TASK: u64 = 2000;
 const SEND_DATA_TIMEOUT: Duration = Duration::from_secs(10);

 /// A semaphore used for limiting the number of asynchronous tasks spawned to the
-/// runtime. Before spawnning a task, use acquire. After the task is done (be it
+/// runtime. Before spawning a task, use acquire. After the task is done (be it
 /// success or failure), call release.
 struct AsyncTaskSemaphore {
     /// Keep the counter info about the usage
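
The acquire-before-spawn, release-on-completion discipline described above is the classic bounded-spawn pattern; a minimal sketch using tokio's stock Semaphore as a substitute for illustration (not this crate's hand-rolled counter):

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main]
    async fn main() {
        // Cap in-flight tasks, in the spirit of MAX_OUTSTANDING_TASK above.
        let semaphore = Arc::new(Semaphore::new(2000));

        for job in 0..10u32 {
            // Acquire before spawning a task...
            let permit = semaphore.clone().acquire_owned().await.unwrap();
            tokio::spawn(async move {
                let _ = job; // do the send, be it success or failure
                drop(permit); // ...release once the task is done
            });
        }
    }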

+ 1 - 1
rpc-client/src/nonblocking/rpc_client.rs

@@ -626,7 +626,7 @@ impl RpcClient {
     /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
     ///
     /// After sending the transaction, this method polls in a loop for the
-    /// status of the transaction until it has ben confirmed.
+    /// status of the transaction until it has been confirmed.
     ///
     /// # Errors
     ///

+ 1 - 1
rpc-client/src/rpc_client.rs

@@ -713,7 +713,7 @@ impl RpcClient {
     /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
     ///
     /// After sending the transaction, this method polls in a loop for the
-    /// status of the transaction until it has ben confirmed.
+    /// status of the transaction until it has been confirmed.
     ///
     /// # Errors
     ///
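
Both clients document the same poll-until-confirmed loop; a hedged usage sketch of the blocking variant (crate and method paths as commonly exported by the rpc-client crate; building and signing the transaction is elided):

    use solana_rpc_client::rpc_client::RpcClient;
    use solana_sdk::transaction::Transaction;

    // Sends `tx` and blocks until the cluster confirms it, or returns an error.
    fn send_it(client: &RpcClient, tx: &Transaction) {
        match client.send_and_confirm_transaction(tx) {
            Ok(signature) => println!("confirmed: {signature}"),
            Err(err) => eprintln!("failed: {err}"),
        }
    }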

+ 1 - 1
rpc/src/optimistically_confirmed_bank_tracker.rs

@@ -74,7 +74,7 @@ impl std::fmt::Debug for BankNotification {

 pub type BankNotificationWithDependencyWork = (
     BankNotification,
-    Option<u64>, // dependecy work id
+    Option<u64>, // dependency work id
 );

 pub type BankNotificationReceiver = Receiver<BankNotificationWithDependencyWork>;

+ 2 - 2
runtime/src/bank.rs

@@ -606,7 +606,7 @@ impl PartialEq for Bank {
             && *stakes_cache.stakes() == *other.stakes_cache.stakes()
             && epoch_stakes == &other.epoch_stakes
             && is_delta.load(Relaxed) == other.is_delta.load(Relaxed)
-            // No deadlock is possbile, when Arc::ptr_eq() returns false, because of being
+            // No deadlock is possible, when Arc::ptr_eq() returns false, because of being
             // different Mutexes.
             && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) ||
                 *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap())
@@ -2468,7 +2468,7 @@ impl Bank {
     /// Recalculates the bank hash
     ///
     /// This is used by ledger-tool when creating a snapshot, which
-    /// recalcuates the bank hash.
+    /// recalculates the bank hash.
     ///
     /// Note that the account state is *not* allowed to change by rehashing.
     /// If modifying accounts in ledger-tool is needed, create a new bank.

+ 1 - 1
runtime/src/bank/accounts_lt_hash.rs

@@ -368,7 +368,7 @@ pub struct Stats {
 /// The initial state of an account prior to being modified in this slot/transaction
 #[derive(Debug, Clone, PartialEq)]
 pub enum InitialStateOfAccount {
-    /// The account was initiall dead
+    /// The account was initially dead
     Dead,
     /// The account was initially alive
     Alive(AccountSharedData),

+ 1 - 1
runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs

@@ -12,7 +12,7 @@ pub(in crate::bank::partitioned_epoch_rewards) fn hash_rewards_into_partitions(
     let mut indices = vec![vec![]; num_partitions];

     for (i, reward) in stake_rewards.enumerated_rewards_iter() {
-        // clone here so the hasher's state is re-used on each call to `hash_address_to_partition`.
+        // clone here so the hasher's state is reused on each call to `hash_address_to_partition`.
         // This prevents us from re-hashing the seed each time.
         // The clone is explicit (as opposed to an implicit copy) so it is clear this is intended.
         let partition_index = hasher
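
The clone-instead-of-reseed trick reads roughly like this self-contained sketch (a toy FNV-1a hasher stands in for the hasher the rewards code actually uses):

    // Deriving Clone lets one seeded state be reused for every address.
    #[derive(Clone)]
    struct Fnv1a(u64);

    impl Fnv1a {
        fn seeded(seed: &[u8]) -> Self {
            let mut hasher = Fnv1a(0xcbf29ce484222325);
            hasher.write(seed); // pay for hashing the seed once...
            hasher
        }

        fn write(&mut self, bytes: &[u8]) {
            for &byte in bytes {
                self.0 = (self.0 ^ byte as u64).wrapping_mul(0x100000001b3);
            }
        }
    }

    fn main() {
        let seeded = Fnv1a::seeded(b"epoch-rewards-seed");
        let num_partitions = 4u64;
        for address in [b"addr1".as_slice(), b"addr2".as_slice()] {
            // ...then clone per address instead of re-hashing the seed.
            let mut hasher = seeded.clone();
            hasher.write(address);
            println!("partition: {}", hasher.0 % num_partitions);
        }
    }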

+ 1 - 1
runtime/src/bank/partitioned_epoch_rewards/mod.rs

@@ -954,7 +954,7 @@ mod tests {
                 let _ = bank_forks.write().unwrap().set_root(slot - 1, None, None);
                 assert_eq!(curr_bank.get_epoch_rewards_cache_len(), 0);
             } else if slot == SLOTS_PER_EPOCH + 2 {
-                // When curr_slot == SLOTS_PER_EPOCH + 2, the 3nd block of
+                // When curr_slot == SLOTS_PER_EPOCH + 2, the 3rd block of
                 // epoch 1, reward distribution should happen in this block.
                 // however, all stake rewards are paid at this block
                 // therefore reward_status should have transitioned to inactive.

+ 3 - 3
runtime/src/bank/tests.rs

@@ -3082,7 +3082,7 @@ fn test_is_empty() {
     let (bank0, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let key1 = Keypair::new();

-    // The zeroth bank is empty becasue there are no transactions
+    // The zeroth bank is empty because there are no transactions
     assert!(bank0.is_empty());

     // Set is_delta to true, bank is no longer empty
@@ -4122,7 +4122,7 @@ fn test_nonce_transaction() {
     let new_nonce = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
     assert_ne!(nonce_hash, new_nonce);

-    /* Nonce re-use fails */
+    /* Nonce reuse fails */
     let nonce_tx = Transaction::new_signed_with_payer(
         &[
             system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),
@@ -4249,7 +4249,7 @@ fn test_nonce_transaction_with_tx_wide_caps() {
     let new_nonce = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap();
     assert_ne!(nonce_hash, new_nonce);

-    /* Nonce re-use fails */
+    /* Nonce reuse fails */
     let nonce_tx = Transaction::new_signed_with_payer(
         &[
             system_instruction::advance_nonce_account(&nonce_pubkey, &nonce_pubkey),

+ 1 - 1
runtime/src/bank_forks.rs

@@ -355,7 +355,7 @@ impl BankForks {
         self.dumped_slot_subscribers.push(notifier);
     }

-    /// Clears associated banks from BankForks and notifies subscribers that a dump has occured.
+    /// Clears associated banks from BankForks and notifies subscribers that a dump has occurred.
     pub fn dump_slots<'a, I>(&mut self, slots: I) -> (Vec<(Slot, BankId)>, Vec<BankWithScheduler>)
     where
         I: Iterator<Item = &'a Slot>,

+ 1 - 1
runtime/src/bank_hash_cache.rs

@@ -55,7 +55,7 @@ impl BankHashCache {
     /// Should only be used after `slots_dumped` is acquired from `dumped_slot_subscription` to
     /// guarantee synchronicity with `self.bank_forks`. Multiple calls to `hash` will only be
     /// consistent with each other if `slots_dumped` was not released in between, as otherwise a dump
-    /// could have occurred inbetween.
+    /// could have occurred in between.
     pub fn hash(&mut self, slot: Slot, slots_dumped: &mut MutexGuard<bool>) -> Option<Hash> {
         if **slots_dumped {
             // We could be smarter and keep a fork cache to only clear affected slots from the cache,

+ 2 - 2
runtime/src/inflation_rewards/mod.rs

@@ -575,10 +575,10 @@ mod tests {
             )
         );

-        // credits_observed is auto-rewinded when vote_state credits are assumed to have been
+        // credits_observed is auto-rewound when vote_state credits are assumed to have been
         // recreated
         stake.credits_observed = 1000;
-        // this is new behavior 1; return the post-recreation rewinded credits from the vote account
+        // this is new behavior 1; return the post-recreation rewound credits from the vote account
         assert_eq!(
             CalculatedStakePoints {
                 points: 0,

+ 2 - 2
runtime/src/inflation_rewards/points.rs

@@ -55,7 +55,7 @@ pub enum SkippedReason {
     ZeroReward,
     ZeroCreditsAndReturnZero,
     ZeroCreditsAndReturnCurrent,
-    ZeroCreditsAndReturnRewinded,
+    ZeroCreditsAndReturnRewound,
 }

 impl From<SkippedReason> for InflationPointCalculationEvent {
@@ -116,7 +116,7 @@ pub(crate) fn calculate_stake_points_and_credits(
     match credits_in_vote.cmp(&credits_in_stake) {
         Ordering::Less => {
             if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() {
-                inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnRewinded.into());
+                inflation_point_calc_tracer(&SkippedReason::ZeroCreditsAndReturnRewound.into());
             }
             // Don't adjust stake.activation_epoch for simplicity:
             //  - generally fast-forwarding stake.activation_epoch forcibly (for

+ 1 - 1
runtime/src/serde_snapshot/status_cache.rs

@@ -25,7 +25,7 @@ type SerdeStatus<T> = ahash::HashMap<Hash, (usize, Vec<(KeySlice, T)>)>;

 /// Serializes the status cache's `slot_deltas` to file at `status_cache_path`
 ///
-/// This fn serializes the status cache into the binary format requried by snapshots.
+/// This fn serializes the status cache into the binary format required by snapshots.
 pub fn serialize_status_cache(
     slot_deltas: &[BankSlotDelta],
     status_cache_path: &Path,

+ 1 - 1
runtime/src/snapshot_bank_utils.rs

@@ -615,7 +615,7 @@ fn _verify_epoch_stakes(
 ) -> std::result::Result<(), VerifyEpochStakesError> {
     // Ensure epoch stakes from the snapshot does not contain entries for invalid epochs.
     // Since epoch stakes are computed for the leader schedule epoch (usually `epoch + 1`),
-    // the snapshot's epoch stakes therefor can have entries for epochs at-or-below the
+    // the snapshot's epoch stakes therefore can have entries for epochs at-or-below the
     // leader schedule epoch.
     let max_epoch = *required_epochs.end();
     if let Some(invalid_epoch) = epoch_stakes_map.keys().find(|epoch| **epoch > max_epoch) {

+ 2 - 2
runtime/src/snapshot_minimizer.rs

@@ -498,12 +498,12 @@ mod tests {
             programdata_address,
         };

-        let non_program_acount = AccountSharedData::new(1, 0, &non_program_id);
+        let non_program_account = AccountSharedData::new(1, 0, &non_program_id);
         let mut program_account =
             AccountSharedData::new_data(40, &program, &bpf_loader_upgradeable::id()).unwrap();
         program_account.set_executable(true);

-        bank.store_account(&non_program_id, &non_program_acount);
+        bank.store_account(&non_program_id, &non_program_account);
         bank.store_account(&program_id, &program_account);

         // Non-program account does not add any additional keys

+ 1 - 1
runtime/src/snapshot_utils.rs

@@ -1426,7 +1426,7 @@ pub fn rebuild_storages_from_snapshot_dir(
             .join(ACCOUNTS_RUN_DIR);
         if !account_run_paths.contains(&account_run_path) {
             // The appendvec from the bank snapshot storage does not match any of the provided account_paths set.
-            // The accout paths have changed so the snapshot is no longer usable.
+            // The account paths have changed so the snapshot is no longer usable.
             return Err(SnapshotError::AccountPathsMismatch);
         }
         // Generate hard-links to make the account files available in the main accounts/, and let the new appendvec

+ 3 - 3
scheduler-bindings/src/lib.rs

@@ -119,7 +119,7 @@ pub struct SharableTransactionBatchRegion {
 pub struct TransactionResponseRegion {
     /// Tag indicating the type of message.
     /// See [`worker_message_types`] for details.
-    /// All inner messages/responses per trasaction will be of the same type.
+    /// All inner messages/responses per transaction will be of the same type.
     pub tag: u8,
     /// The number of transactions in the original message.
     /// This corresponds to the number of inner response
@@ -440,7 +440,7 @@ pub mod worker_message_types {
         pub const PROGRAM_CACHE_HIT_MAX_LIMIT: u8 = 101;

         // This error in agave is only internal, and to avoid updating the sdk
-        // it is re-used for mapping into `ALL_OR_NOTHING_BATCH_FAILURE`.
+        // it is reused for mapping into `ALL_OR_NOTHING_BATCH_FAILURE`.
         // /// Commit cancelled internally.
         // pub const COMMIT_CANCELLED: u8 = 102;
     }
@@ -527,7 +527,7 @@ pub mod worker_message_types {
         /// Set only if [`resolve_flags::PERFORMED`] is set,
         /// otherwise the value is undefined.
         /// Resolved pubkeys - writable then readonly.
-        /// Freeing this memory is the responsiblity of the external
+        /// Freeing this memory is the responsibility of the external
         /// pack process.
         pub resolved_pubkeys: SharablePubkeys,
     }

+ 1 - 1
scheduling-utils/src/error.rs

@@ -88,7 +88,7 @@ pub fn transaction_error_to_not_included_reason(error: &TransactionError) -> u8
             not_included_reasons::PROGRAM_CACHE_HIT_MAX_LIMIT
         }

-        // SPECIAL CASE - CommitCancelled is an internal error re-used to avoid breaking sdk
+        // SPECIAL CASE - CommitCancelled is an internal error reused to avoid breaking sdk
         TransactionError::CommitCancelled => not_included_reasons::ALL_OR_NOTHING_BATCH_FAILURE,
     }
 }

+ 1 - 1
snapshots/src/hardened_unpack.rs

@@ -33,7 +33,7 @@ pub type Result<T> = std::result::Result<T, UnpackError>;

 // 64 TiB; some safe margin to the max 128 TiB in amd64 linux userspace VmSize
 // (ref: https://unix.stackexchange.com/a/386555/364236)
-// note that this is directly related to the mmaped data size
+// note that this is directly related to the mmapped data size
 // so protect against insane value
 // This is the file size including holes for sparse files
 const MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE: u64 = 64 * 1024 * 1024 * 1024 * 1024;

+ 1 - 1
snapshots/src/snapshot_config.rs

@@ -18,7 +18,7 @@ pub const DEFAULT_MAX_INCREMENTAL_SNAPSHOT_ARCHIVES_TO_RETAIN: NonZeroUsize =
 /// Snapshot configuration and runtime information
 #[derive(Clone, Debug)]
 pub struct SnapshotConfig {
-    /// Specifies the ways thats snapshots are allowed to be used
+    /// Specifies the ways that snapshots are allowed to be used
     pub usage: SnapshotUsage,

     /// Generate a new full snapshot archive every this many slots

+ 1 - 1
svm/src/account_loader.rs

@@ -2989,7 +2989,7 @@ mod tests {
             &expected_hit_account.data_clone()
             &expected_hit_account.data_clone()
         ));
         ));
 
 
-        // reload doesnt affect this
+        // reload doesn't affect this
         account_loader.load_account(&hit_address);
         account_loader.load_account(&hit_address);
         let actual_hit_account = account_loader.loaded_accounts.get(&hit_address);
         let actual_hit_account = account_loader.loaded_accounts.get(&hit_address);
 
 

+ 3 - 3
svm/tests/integration_test.rs

@@ -355,7 +355,7 @@ pub struct SvmTestEntry {
     // enables drop on failure processing (transactions without Ok status have no state effect)
     pub drop_on_failure: bool,

-    // enables all or nothing processing (if not all transactions can be commited then none are)
+    // enables all or nothing processing (if not all transactions can be committed then none are)
     pub all_or_nothing: bool,

     // programs to deploy to the new svm
@@ -1053,7 +1053,7 @@ fn simple_nonce(fee_paying_nonce: bool) -> Vec<SvmTestEntry> {
     // there are four cases of fee_paying_nonce and fake_fee_payer:
     // * false/false: normal nonce account with rent minimum, normal fee payer account with 1sol
     // * true/false: normal nonce account used to pay fees with rent minimum plus 1sol
-    // * false/true: normal nonce account with rent minimum, fee payer doesnt exist
+    // * false/true: normal nonce account with rent minimum, fee payer doesn't exist
     // * true/true: same account for both which does not exist
     // we also provide a side door to bring a fee-paying nonce account below rent-exemption
     let mk_nonce_transaction = |test_entry: &mut SvmTestEntry,
@@ -1137,7 +1137,7 @@ fn simple_nonce(fee_paying_nonce: bool) -> Vec<SvmTestEntry> {
             .copy_from_slice(nonce_info.account().data());
     }

-    // 1: non-executing nonce transaction (fee payer doesnt exist) regardless of features
+    // 1: non-executing nonce transaction (fee payer doesn't exist) regardless of features
     {
         let (transaction, _fee_payer, nonce_info) =
             mk_nonce_transaction(&mut test_entry, real_program_id, true, false);

+ 3 - 2
svm/tests/mock_bank.rs

@@ -161,8 +161,9 @@ impl MockBankCallback {
             .unwrap()
             .insert(Rent::id(), account_data);

-        // SystemInstruction::AdvanceNonceAccount asserts RecentBlockhashes is non-empty
-        // but then just gets the blockhash from InvokeContext. so the sysvar doesnt need real entries
+        // SystemInstruction::AdvanceNonceAccount asserts RecentBlockhashes is
+        // non-empty but then just gets the blockhash from InvokeContext. So,
+        // the sysvar doesn't need real entries
         #[allow(deprecated)]
         let recent_blockhashes = vec![BlockhashesEntry::default()];


+ 5 - 5
test-validator/src/lib.rs

@@ -1225,7 +1225,7 @@ impl TestValidator {
     }

     /// programs added to genesis ain't immediately usable. Actively check "Program
-    /// is not deployed" error for their availibility.
+    /// is not deployed" error for their availability.
     async fn wait_for_upgradeable_programs_deployed(
         &self,
         upgradeable_programs: &[&Pubkey],
@@ -1502,10 +1502,10 @@ mod test {
 
         // The first one, where we provided `--deactivate-feature`, should be
         // the account we provided.
-        let overriden_account = our_accounts[0].as_ref().unwrap();
-        assert_eq!(overriden_account.lamports, 100_000);
-        assert_eq!(overriden_account.data.len(), 0);
-        assert_eq!(overriden_account.owner, owner);
+        let overridden_account = our_accounts[0].as_ref().unwrap();
+        assert_eq!(overridden_account.lamports, 100_000);
+        assert_eq!(overridden_account.data.len(), 0);
+        assert_eq!(overridden_account.owner, owner);

         // The second one should be a feature account.
         let feature_account = our_accounts[1].as_ref().unwrap();

+ 1 - 1
tpu-client-next/src/connection_worker.rs

@@ -28,7 +28,7 @@ use {

 /// The maximum connection handshake timeout for QUIC connections.
 /// This is set to 2 seconds, which was the earlier shorter connection idle timeout
-/// which was also used by QUINN to timemout connection handshake.
+/// which was also used by QUINN to time out the connection handshake.
 pub(crate) const DEFAULT_MAX_CONNECTION_HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(2);

 /// Interval between retry attempts for creating a new connection. This value is

+ 1 - 1
transaction-view/src/resolved_transaction_view.rs

@@ -26,7 +26,7 @@ use {
 /// A parsed and sanitized transaction view that has had all address lookups
 /// resolved.
 pub struct ResolvedTransactionView<D: TransactionData> {
-    /// The parsed and sanitized transction view.
+    /// The parsed and sanitized transaction view.
     view: TransactionView<true, D>,
     /// The resolved address lookups.
     resolved_addresses: Option<LoadedAddresses>,

+ 9 - 9
unified-scheduler-pool/src/lib.rs

@@ -326,13 +326,13 @@ clone_trait_object!(BankingPacketHandler);
 /// This block-production struct is expected to be shared across the scheduler thread and its
 /// handler threads because all of them need to handle task creation unlike block verification.
 ///
-/// Particularly, usage_queue_loader is desired to be shared across hanlders so that task creation
+/// Particularly, usage_queue_loader is desired to be shared across handlers so that task creation
 /// can be processed in the multi-threaded way. For more details, see
 /// solana_core::banking_stage::unified_scheduler module doc.
 #[derive(Debug)]
 pub struct BankingStageHelper {
     usage_queue_loader: UsageQueueLoaderInner,
-    // Supplemental identification for tasks of identical priority, alloted according to FIFO of
+    // Supplemental identification for tasks of identical priority, allotted according to FIFO of
     // batch granularity, resulting in the total order over the set of available tasks,
     // collectively.
     next_task_id: AtomicUsize,
@@ -345,7 +345,7 @@ pub struct BankingStageHelper {
 // Note that this concern is of theoretical matter. As such, we introduce rather a naive limit with
 // great safety margin, considering relatively frequent check interval (a single session, usually a
 // slot). Regardless the aforementioned interval precondition, it's exceedingly hard to conceive
-// task id is alloted more than half of usize. That's because we'd still need to be running for
+// task id is allotted more than half of usize. That's because we'd still need to be running for
 // almost 300 years continuously to index BANKING_STAGE_MAX_TASK_ID txs at the rate of
 // 1_000_000_000/secs ingestion.
 // For the completeness of discussion, the existence of this check will alleviate the concern of
@@ -1104,7 +1104,7 @@ impl TaskHandler for DefaultTaskHandler {
         };
         let transaction_indexes = match scheduling_context.mode() {
             BlockVerification => {
-                // Blcok verification's task_id should always be within usize.
+                // Block verification's task_id should always be within usize.
                 vec![task_id.try_into().unwrap()]
             }
             BlockProduction => {
@@ -1525,7 +1525,7 @@ fn disconnected<T>() -> Receiver<T> {
 /// Timeouts are for rare conditions where there are abandoned-yet-unpruned banks in the
 /// [`BankForks`](solana_runtime::bank_forks::BankForks) under forky (unsteady rooting) cluster
 /// conditions. The pool's background cleaner thread (`solScCleaner`) triggers the timeout-based
-/// out-of-pool (i.e. _taken_) scheduler reclaimation with prior coordination of
+/// out-of-pool (i.e. _taken_) scheduler reclamation with prior coordination of
 /// [`BankForks::insert()`](solana_runtime::bank_forks::BankForks::insert) via
 /// [`InstalledSchedulerPool::register_timeout_listener`].
 ///
@@ -1551,7 +1551,7 @@ fn disconnected<T>() -> Receiver<T> {
 ///         Aborted --> if_usable: Dropped (BankForks-pruning by solReplayStage)
 ///         if_usable --> Pooled: IF !overgrown && !aborted
 ///         Active --> Aborted: Errored on TX execution
-///         Aborted --> Stale: !Droppped after TIMEOUT_DURATION since taken
+///         Aborted --> Stale: !Dropped after TIMEOUT_DURATION since taken
 ///         Active --> Stale: No new TX after TIMEOUT_DURATION since taken
 ///         Stale --> if_usable: Returned (Timeout-triggered by solScCleaner)
 ///         Pooled --> Active: Taken (New bank by solReplayStage)
@@ -2038,7 +2038,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {
             //
             // That's because it could be the most notable bottleneck of throughput in the future
             // when there are ~100 handler threads. Unified scheduler's overall throughput is
-            // largely dependant on its ultra-low latency characteristic, which is the most
+            // largely dependent on its ultra-low latency characteristic, which is the most
             // important design goal of the scheduler in order to reduce the transaction
             // confirmation latency for end users.
             //
@@ -2531,7 +2531,7 @@ impl<S: SpawnableScheduler<TH>, TH: TaskHandler> ThreadManager<S, TH> {

         if nonblocking {
             // Bail out session ending bookkeeping under this special case codepath for block
-            // production. This means skipping the `abort_detected`-dependant thread joining step
+            // production. This means skipping the `abort_detected`-dependent thread joining step
             // as well; Otherwise, we could be dead-locked around poh, because we would technically
             // wait for joining handler threads in _the poh thread_, which holds the poh lock (This
             // `nonblocking` special case is called by the thread).
@@ -5130,7 +5130,7 @@ mod tests {
             Box::new(SimpleBankingMinitor),
         );

-        // By now, there shuold be a bufferd transaction. Let's discard it.
+        // By now, there should be a buffered transaction. Let's discard it.
         *START_DISCARD.lock().unwrap() = true;

         sleepless_testing::at(TestCheckPoint::AfterDiscarded);

+ 1 - 1
validator/src/bootstrap.rs

@@ -862,7 +862,7 @@ type KnownSnapshotHashes = HashMap<(Slot, Hash), HashSet<(Slot, Hash)>>;
 /// queried for their individual snapshot hashes, their results will be checked against this
 /// map to verify correctness.
 ///
-/// NOTE: Only a single snashot hash is allowed per slot.  If somehow two known validators have
+/// NOTE: Only a single snapshot hash is allowed per slot.  If somehow two known validators have
 /// a snapshot hash with the same slot and _different_ hashes, the second will be skipped.
 /// This applies to both full and incremental snapshot hashes.
 fn get_snapshot_hashes_from_known_validators(

+ 1 - 1
validator/src/cli.rs

@@ -647,7 +647,7 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App<
                 .help(
                     "Use when running a validator behind a NAT. DNS name or IP address for this \
                      validator to advertise in gossip. This address will be used as the target \
-                     desination address for peers trying to contact this node. [default: the \
+                     destination address for peers trying to contact this node. [default: the \
                      first --bind-address, or ask --entrypoint when --bind-address is not \
                      provided, or 127.0.0.1 when --entrypoint is not provided]. Note: this \
                      argument cannot be used in a multihoming context (when multiple \

+ 1 - 1
validator/src/cli/thread_args.rs

@@ -312,7 +312,7 @@ impl ThreadArg for TpuTransactionForwardReceiveThreadArgs {
     const NAME: &'static str = "tpu_transaction_forward_receive_threads";
     const LONG_NAME: &'static str = "tpu-transaction-forward-receive-threads";
     const HELP: &'static str =
-        "Number of threads to use for receiving transactions on the TPU fowards port";
+        "Number of threads to use for receiving transactions on the TPU forwards port";

     fn default() -> usize {
         solana_streamer::quic::default_num_tpu_transaction_forward_receive_threads()

+ 1 - 1
validator/src/commands/exit/mod.rs

@@ -157,7 +157,7 @@ pub fn execute(matches: &ArgMatches, ledger_path: &Path) -> Result<()> {
     //
     // Additionally, only check the pid() RPC call result if it will be used.
     // In an upgrade scenario, it is possible that a binary that calls pid()
-    // will be initating exit against a process that doesn't support pid().
+    // will be initiating exit against a process that doesn't support pid().
     const WAIT_FOR_EXIT_UNSUPPORTED_ERROR: &str = "remote process exit cannot be waited on. \
                                                    `--wait-for-exit` is not supported by the \
                                                    remote process";

+ 1 - 1
verified-packet-receiver/Readme.md

@@ -1,6 +1,6 @@
 # Introduction
 The Vortexor is a service that can offload the tasks of receiving transactions
-from the public, performing signature verifications, and deduplications from the
+from the public, performing signature verification, and deduplication from the
 core validator, enabling it to focus on processing and executing the
 transactions. The verified and filtered transactions will then be forwarded to
 the validators linked with the Vortexor. This setup makes the TPU transaction

+ 1 - 1
vortexor/README.md

@@ -3,7 +3,7 @@

 # Introduction
 The Vortexor is a service that can offload the tasks of receiving transactions
-from the public, performing signature verifications, and deduplications from the
+from the public, performing signature verification, and deduplication from the
 core validator, enabling it to focus on processing and executing the
 transactions. The verified and filtered transactions will then be forwarded to
 the validators linked with the Vortexor. This setup makes the TPU transaction

+ 1 - 1
vortexor/src/main.rs

@@ -96,7 +96,7 @@ pub fn main() {
     );

     let (banking_tracer, _) = BankingTracer::new(
-        None, // Not interesed in banking tracing
+        None, // Not interested in banking tracing
     )
     .unwrap();


+ 1 - 1
vortexor/src/vortexor.rs

@@ -154,7 +154,7 @@ impl Vortexor {
         )
         .unwrap();

-        // Fot TPU forward -- we disallow unstaked connections. Allocate all connection resources
+        // For TPU forward -- we disallow unstaked connections. Allocate all connection resources
         // for staked connections:
         quic_fwd_server_params.qos_config.max_staked_connections = max_fwd_staked_connections;
         quic_fwd_server_params.qos_config.max_unstaked_connections = max_fwd_unstaked_connections;

+ 1 - 1
votor/src/event.rs

@@ -58,7 +58,7 @@ pub enum VotorEvent {
     /// Produce the window
     ProduceWindow(LeaderWindowInfo),

-    /// The block has received a slow or fast finalization certificate and is eligble for rooting
+    /// The block has received a slow or fast finalization certificate and is eligible for rooting
     /// The second bool indicates whether the block is a fast finalization
     Finalized(Block, bool),


+ 1 - 1
votor/src/event_handler.rs

@@ -639,7 +639,7 @@ impl EventHandler {
     }

     /// Checks the pending blocks that have completed replay to see if they
-    /// are eligble to be voted on now
+    /// are eligible to be voted on now
     fn check_pending_blocks(
         my_pubkey: &Pubkey,
         pending_blocks: &mut PendingBlocks,

+ 2 - 2
votor/src/voting_utils.rs

@@ -31,9 +31,9 @@ use {
 pub enum GenerateVoteTxResult {
     // The following are transient errors
     // non voting validator, not eligible for refresh
-    // until authorized keypair is overriden
+    // until authorized keypair is overridden
     NonVoting,
-    // hot spare validator, not eligble for refresh
+    // hot spare validator, not eligible for refresh
     // until set identity is invoked
     HotSpare,
     // The hash verification at startup has not completed

+ 1 - 1
wen-restart/src/wen_restart.rs

@@ -509,7 +509,7 @@ pub(crate) fn generate_snapshot(
     }

     // Snapshot generation calls AccountsDb background tasks (flush/clean/shrink).
-    // These cannot run conncurrent with each other, so we must shutdown
+    // These cannot run concurrently with each other, so we must shut down
     // AccountsBackgroundService before proceeding.
     abs_status.stop();
     info!("Waiting for AccountsBackgroundService to stop");

+ 1 - 1
xdp-ebpf/README

@@ -29,7 +29,7 @@ To verify that the bytecode loaded by agave via crates.io matches the bytecode
 generated by the source included in this package, from the monorepo root run:

     # This will rebuild and print whether the hashes have changed. If you haven't made any
-    # modications, the hashes must match.
+    # modifications, the hashes must match.
     scripts/build-agave-xdp.sh

     # If the hashes match, build agave as usual and verify that the final code also matches

+ 2 - 2
zk-token-sdk/src/instruction/transfer/mod.rs

@@ -50,7 +50,7 @@ pub fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) {

 /// Takes in a 64-bit number `amount` and a bit length `bit_length`. It returns:
 /// - the `bit_length` low bits of `amount` interpreted as u64
-/// - the `(64 - bit_length)` high bits of `amount` interpretted as u64
+/// - the `(64 - bit_length)` high bits of `amount` interpreted as u64
 #[cfg(not(target_os = "solana"))]
 pub fn try_split_u64(amount: u64, bit_length: usize) -> Result<(u64, u64), InstructionError> {
     match bit_length {
@@ -81,7 +81,7 @@ pub fn combine_lo_hi_u64(amount_lo: u64, amount_hi: u64, bit_length: usize) -> u
     }
 }

-/// Combine two numbers that are interpretted as the low and high bits of a target number. The
+/// Combine two numbers that are interpreted as the low and high bits of a target number. The
 /// `bit_length` parameter specifies the number of bits that `amount_hi` is to be shifted by.
 #[cfg(not(target_os = "solana"))]
 pub fn try_combine_lo_hi_u64(
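
Concretely, for the documented semantics (the low `bit_length` bits, then the remaining high bits), a standalone sketch with a worked value; this illustrates the math only, without the overflow checks the sdk's try_* variants perform, so 0 < bit_length < 64 is assumed:

    fn split_u64(amount: u64, bit_length: usize) -> (u64, u64) {
        let lo = amount & ((1u64 << bit_length) - 1); // low `bit_length` bits
        let hi = amount >> bit_length;                // remaining high bits
        (lo, hi)
    }

    fn combine_lo_hi_u64(lo: u64, hi: u64, bit_length: usize) -> u64 {
        lo | (hi << bit_length)
    }

    fn main() {
        let (lo, hi) = split_u64(0xABCD_1234, 16);
        assert_eq!((lo, hi), (0x1234, 0xABCD));
        assert_eq!(combine_lo_hi_u64(lo, hi, 16), 0xABCD_1234);
    }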