
clippy: manual_is_multiple_of (#8959)

* clippy: manual_is_multiple_of

warning: manual implementation of `.is_multiple_of()`

* fmt
Brooks · 2 weeks ago
commit 0f761dca1d
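
For context: clippy's `manual_is_multiple_of` lint suggests replacing `x % n == 0` with `x.is_multiple_of(n)` on unsigned integers, which is what every hunk below does. As a minimal sketch (not part of the diff; `offset` and `PAGE_SIZE` are illustrative names only), assuming a recent stable Rust where `u64::is_multiple_of` is available:

    fn main() {
        const PAGE_SIZE: u64 = 4 * 1024;
        let offset: u64 = 4096;

        // Before: the manual modulo check that the lint flags.
        assert!(offset % PAGE_SIZE == 0);

        // After: the equivalent form suggested by the lint.
        assert!(offset.is_multiple_of(PAGE_SIZE));

        // Negated checks become "not a multiple of".
        assert!(!7_u64.is_multiple_of(2));
    }

One behavioral note: unlike `%`, `is_multiple_of(0)` does not panic; it simply returns whether the value itself is zero.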

+ 1 - 1
accounts-db/src/ancient_append_vecs.rs

@@ -1099,7 +1099,7 @@ pub const fn get_ancient_append_vec_capacity() -> u64 {
     );
     const PAGE_SIZE: u64 = 4 * 1024;
     const _: () = assert!(
-        RESULT % PAGE_SIZE == 0,
+        RESULT.is_multiple_of(PAGE_SIZE),
         "ancient append vec size should be a multiple of PAGE_SIZE"
     );
 

+ 1 - 1
accounts-db/src/tiered_storage/byte_block.rs

@@ -159,7 +159,7 @@ pub unsafe fn read_type<T>(byte_block: &[u8], offset: usize) -> Option<&T> {
         return None;
     }
     let ptr = byte_block[offset..].as_ptr().cast();
-    debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0);
+    debug_assert!((ptr as usize).is_multiple_of(std::mem::align_of::<T>()));
     // SAFETY: The caller ensures it is safe to cast bytes to T,
     // we ensure the size is safe by querying T directly,
     // and we just checked above to ensure the ptr is aligned for T.

+ 1 - 1
accounts-db/src/tiered_storage/hot.rs

@@ -130,7 +130,7 @@ impl HotAccountOffset {
         }
 
         // Hot accounts are aligned based on HOT_ACCOUNT_ALIGNMENT.
-        if offset % HOT_ACCOUNT_ALIGNMENT != 0 {
+        if !offset.is_multiple_of(HOT_ACCOUNT_ALIGNMENT) {
             return Err(TieredStorageError::OffsetAlignmentError(
                 offset,
                 HOT_ACCOUNT_ALIGNMENT,

+ 1 - 1
accounts-db/src/tiered_storage/mmap_utils.rs

@@ -20,7 +20,7 @@ pub fn get_pod<T: bytemuck::AnyBitPattern>(mmap: &Mmap, offset: usize) -> io::Re
 pub unsafe fn get_type<T>(mmap: &Mmap, offset: usize) -> io::Result<(&T, usize)> {
     let (data, next) = get_slice(mmap, offset, std::mem::size_of::<T>())?;
     let ptr = data.as_ptr().cast();
-    debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0);
+    debug_assert!((ptr as usize).is_multiple_of(std::mem::align_of::<T>()));
     // SAFETY: The caller ensures it is safe to cast bytes to T,
     // we ensure the size is safe by querying T directly,
     // and we just checked above to ensure the ptr is aligned for T.

+ 2 - 2
accounts-db/src/tiered_storage/test_utils.rs

@@ -24,8 +24,8 @@ pub(super) fn create_test_account(seed: u64) -> (Pubkey, AccountSharedData) {
         data: std::iter::repeat_n(data_byte, seed as usize).collect(),
         // this will allow some test account sharing the same owner.
         owner: [owner_byte; 32].into(),
-        executable: seed % 2 > 0,
-        rent_epoch: if seed % 3 > 0 {
+        executable: !seed.is_multiple_of(2),
+        rent_epoch: if !seed.is_multiple_of(3) {
             seed
         } else {
             RENT_EXEMPT_RENT_EPOCH

+ 2 - 2
bucket_map/src/bucket_storage.rs

@@ -357,7 +357,7 @@ impl<O: BucketOccupied> BucketStorage<O> {
         };
         let ptr = {
             let ptr = slice.as_ptr().cast();
-            debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0);
+            debug_assert!((ptr as usize).is_multiple_of(std::mem::align_of::<T>()));
             ptr
         };
         unsafe { std::slice::from_raw_parts(ptr, len as usize) }
@@ -382,7 +382,7 @@ impl<O: BucketOccupied> BucketStorage<O> {
         };
         let ptr = {
             let ptr = slice.as_mut_ptr().cast();
-            debug_assert!(ptr as usize % std::mem::align_of::<T>() == 0);
+            debug_assert!((ptr as usize).is_multiple_of(std::mem::align_of::<T>()));
             ptr
         };
         unsafe { std::slice::from_raw_parts_mut(ptr, len as usize) }

+ 2 - 2
bucket_map/src/index_entry.rs

@@ -488,14 +488,14 @@ impl<T: Copy + PartialEq + 'static> IndexEntryPlaceInBucket<T> {
 fn get_from_bytes<T>(item_slice: &[u8]) -> &T {
     debug_assert!(std::mem::size_of::<T>() <= item_slice.len());
     let item = item_slice.as_ptr().cast();
-    debug_assert!(item as usize % std::mem::align_of::<T>() == 0);
+    debug_assert!((item as usize).is_multiple_of(std::mem::align_of::<T>()));
     unsafe { &*item }
 }
 
 fn get_mut_from_bytes<T>(item_slice: &mut [u8]) -> &mut T {
     debug_assert!(std::mem::size_of::<T>() <= item_slice.len());
     let item = item_slice.as_mut_ptr().cast();
-    debug_assert!(item as usize % std::mem::align_of::<T>() == 0);
+    debug_assert!((item as usize).is_multiple_of(std::mem::align_of::<T>()));
     unsafe { &mut *item }
 }
 

+ 1 - 1
compute-budget-instruction/src/compute_budget_instruction_details.rs

@@ -190,7 +190,7 @@ impl ComputeBudgetInstructionDetails {
 
     #[inline]
     fn sanitize_requested_heap_size(bytes: u32) -> bool {
-        (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes % 1024 == 0
+        (MIN_HEAP_FRAME_BYTES..=MAX_HEAP_FRAME_BYTES).contains(&bytes) && bytes.is_multiple_of(1024)
     }
 
     fn calculate_default_compute_unit_limit(&self, feature_set: &FeatureSet) -> u32 {

+ 1 - 1
core/src/mock_alpenglow_consensus.rs

@@ -556,7 +556,7 @@ impl MockAlpenglowConsensus {
             return false;
         }
 
-        slot % interval == 0
+        slot.is_multiple_of(interval)
     }
 
     pub(crate) fn signal_new_slot(&mut self, slot: Slot, root_bank: &Bank) {

+ 1 - 1
core/src/repair/malicious_repair_handler.rs

@@ -46,7 +46,7 @@ impl RepairHandler for MaliciousRepairHandler {
         if self
             .config
             .bad_shred_slot_frequency
-            .is_some_and(|freq| slot % freq == 0)
+            .is_some_and(|freq| slot.is_multiple_of(freq))
         {
             // Change some random piece of data
             shred[Self::BAD_DATA_INDEX] = shred[Self::BAD_DATA_INDEX].wrapping_add(1);

+ 1 - 1
core/src/repair/repair_service.rs

@@ -1437,7 +1437,7 @@ mod test {
             // between shreds received; So this should either insert the
             // last shred in each slot, or exclude missing shreds after the
             // last inserted shred from expected repairs.
-            if index % nth == 0 || index + 1 == num_shreds_per_slot {
+            if index.is_multiple_of(nth) || index + 1 == num_shreds_per_slot {
                 shreds_to_write.insert(0, shreds.remove(i as usize));
             } else if i < num_shreds_per_slot {
                 missing_indexes_per_slot.insert(0, index);

+ 1 - 1
gossip/src/epoch_slots.rs

@@ -30,7 +30,7 @@ impl Sanitize for Uncompressed {
         if self.num >= MAX_SLOTS_PER_ENTRY {
             return Err(SanitizeError::ValueOutOfBounds);
         }
-        if self.slots.len() % 8 != 0 {
+        if !self.slots.len().is_multiple_of(8) {
             // Uncompressed::new() ensures the length is always a multiple of 8
             return Err(SanitizeError::ValueOutOfBounds);
         }

+ 1 - 1
gossip/tests/crds_gossip.rs

@@ -458,7 +458,7 @@ fn network_run_push(
                 }
             }
         }
-        if now % CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS == 0 && now > 0 {
+        if now.is_multiple_of(CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS) && now > 0 {
             network_values.par_iter().for_each(|node| {
                 node.gossip.refresh_push_active_set(
                     &node.keypair,

+ 1 - 1
keygen/src/keygen.rs

@@ -749,7 +749,7 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                             break;
                         }
                         let attempts = attempts.fetch_add(1, Ordering::Relaxed);
-                        if attempts % 1_000_000 == 0 {
+                        if attempts.is_multiple_of(1_000_000) {
                             println!(
                                 "Searched {} keypairs in {}s. {} matches found.",
                                 attempts,

+ 3 - 3
ledger/src/blockstore.rs

@@ -5784,14 +5784,14 @@ pub mod tests {
             assert_eq!(blockstore.get_slot_entries(slot, 0).unwrap(), vec![]);
 
             let meta = blockstore.meta(slot).unwrap().unwrap();
-            if num_shreds % 2 == 0 {
+            if num_shreds.is_multiple_of(2) {
                 assert_eq!(meta.received, num_shreds);
             } else {
                 trace!("got here");
                 assert_eq!(meta.received, num_shreds - 1);
             }
             assert_eq!(meta.consumed, 0);
-            if num_shreds % 2 == 0 {
+            if num_shreds.is_multiple_of(2) {
                 assert_eq!(meta.last_index, Some(num_shreds - 1));
             } else {
                 assert_eq!(meta.last_index, None);
@@ -9357,7 +9357,7 @@ pub mod tests {
             .is_empty());
 
         // Fetch all signatures for address 0, three at a time
-        assert!(all0.len() % 3 == 0);
+        assert!(all0.len().is_multiple_of(3));
        for i in (0..all0.len()).step_by(3) {
             let results = blockstore
                 .get_confirmed_signatures_for_address2(

+ 2 - 2
ledger/src/shred.rs

@@ -837,7 +837,7 @@ where
 fn check_fixed_fec_set(index: u32, fec_set_index: u32) -> bool {
     index >= fec_set_index
         && index < fec_set_index + DATA_SHREDS_PER_FEC_BLOCK as u32
-        && fec_set_index % DATA_SHREDS_PER_FEC_BLOCK as u32 == 0
+        && fec_set_index.is_multiple_of(DATA_SHREDS_PER_FEC_BLOCK as u32)
 }
 
 /// Returns true if `index` of the last data shred is valid under the assumption that
@@ -848,7 +848,7 @@ fn check_fixed_fec_set(index: u32, fec_set_index: u32) -> bool {
 /// This currently is checked post insert in `Blockstore::check_last_fec_set`, but in the
 /// future it can be solely checked during ingest
 fn check_last_data_shred_index(index: u32) -> bool {
-    (index + 1) % (DATA_SHREDS_PER_FEC_BLOCK as u32) == 0
+    (index + 1).is_multiple_of(DATA_SHREDS_PER_FEC_BLOCK as u32)
 }
 
 pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {

+ 1 - 1
merkle-tree/src/merkle_tree.rs

@@ -157,7 +157,7 @@ impl MerkleTree {
             if lsib.is_some() || rsib.is_some() {
                 path.push(ProofEntry::new(target, lsib, rsib));
             }
-            if node_index % 2 == 0 {
+            if node_index.is_multiple_of(2) {
                 lsib = None;
                 rsib = if node_index + 1 < level.len() {
                     Some(&level[node_index + 1])

+ 1 - 1
metrics/src/counter.rs

@@ -173,7 +173,7 @@ impl Counter {
         let lograte = self.lograte.load(Ordering::Relaxed);
         let metricsrate = self.metricsrate.load(Ordering::Relaxed);
 
-        if times % lograte == 0 && times > 0 && log_enabled!(level) {
+        if times.is_multiple_of(lograte) && times > 0 && log_enabled!(level) {
             log!(
                 level,
                 "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {times}, \"now\": \

+ 5 - 2
runtime/src/accounts_background_service.rs

@@ -840,9 +840,12 @@ mod test {
 
                 // Since we're not using `BankForks::set_root()`, we have to handle sending the
                 // correct snapshot requests ourself.
-                if bank.block_height() % FULL_SNAPSHOT_INTERVAL == 0 {
+                if bank.block_height().is_multiple_of(FULL_SNAPSHOT_INTERVAL) {
                     send_snapshot_request(Arc::clone(&bank), SnapshotRequestKind::FullSnapshot);
-                } else if bank.block_height() % INCREMENTAL_SNAPSHOT_INTERVAL == 0 {
+                } else if bank
+                    .block_height()
+                    .is_multiple_of(INCREMENTAL_SNAPSHOT_INTERVAL)
+                {
                     send_snapshot_request(
                         Arc::clone(&bank),
                         SnapshotRequestKind::IncrementalSnapshot,

+ 2 - 2
runtime/src/snapshot_utils.rs

@@ -1838,7 +1838,7 @@ pub fn should_take_full_snapshot(
     block_height: Slot,
     full_snapshot_archive_interval_slots: Slot,
 ) -> bool {
-    block_height % full_snapshot_archive_interval_slots == 0
+    block_height.is_multiple_of(full_snapshot_archive_interval_slots)
 }
 
 pub fn should_take_incremental_snapshot(
@@ -1846,7 +1846,7 @@ pub fn should_take_incremental_snapshot(
     incremental_snapshot_archive_interval_slots: Slot,
     latest_full_snapshot_slot: Option<Slot>,
 ) -> bool {
-    block_height % incremental_snapshot_archive_interval_slots == 0
+    block_height.is_multiple_of(incremental_snapshot_archive_interval_slots)
         && latest_full_snapshot_slot.is_some()
 }
 

+ 1 - 1
scheduling-utils/src/handshake/client.rs

@@ -161,7 +161,7 @@ pub fn setup_session(
 
     // Ensure worker_fds length matches expectations.
     if worker_fds.is_empty()
-        || worker_fds.len() % 2 != 0
+        || !worker_fds.len().is_multiple_of(2)
        || worker_fds.len() / 2 != logon.worker_count
     {
         return Err(ClientHandshakeError::ProtocolViolation);

+ 1 - 1
transaction-dos/src/main.rs

@@ -174,7 +174,7 @@ fn run_transactions_dos(
 
     let account_groups = maybe_account_groups.unwrap_or(1);
 
-    assert!(account_keypairs.len() % account_groups == 0);
+    assert!(account_keypairs.len().is_multiple_of(account_groups));
 
     let account_group_size = account_keypairs.len() / account_groups;
 

+ 1 - 1
turbine/src/broadcast_stage/broadcast_duplicates_run.rs

@@ -123,7 +123,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
         let last_entries = {
             if last_tick_height == bank.max_tick_height()
                 && bank.slot() > MINIMUM_DUPLICATE_SLOT
-                && self.num_slots_broadcasted % DUPLICATE_RATE == 0
+                && self.num_slots_broadcasted.is_multiple_of(DUPLICATE_RATE)
                 && self.recent_blockhash.is_some()
             {
                 let entry_batch_len = receive_results.entries.len();

+ 2 - 2
zk-token-sdk/src/range_proof/inner_product.rs

@@ -387,14 +387,14 @@ impl InnerProductProof {
     /// * any of 2 scalars are not canonical scalars modulo Ristretto group order.
     pub fn from_bytes(slice: &[u8]) -> Result<InnerProductProof, RangeProofVerificationError> {
         let b = slice.len();
-        if b % 32 != 0 {
+        if !b.is_multiple_of(32) {
             return Err(RangeProofVerificationError::Deserialization);
         }
         let num_elements = b / 32;
         if num_elements < 2 {
             return Err(RangeProofVerificationError::Deserialization);
         }
-        if (num_elements - 2) % 2 != 0 {
+        if !(num_elements - 2).is_multiple_of(2) {
             return Err(RangeProofVerificationError::Deserialization);
         }
         let lg_n = (num_elements - 2) / 2;

+ 1 - 1
zk-token-sdk/src/range_proof/mod.rs

@@ -375,7 +375,7 @@ impl RangeProof {
     // Following the dalek rangeproof library signature for now. The exact method signature can be
     // changed.
     pub fn from_bytes(slice: &[u8]) -> Result<RangeProof, RangeProofVerificationError> {
-        if slice.len() % 32 != 0 {
+        if !slice.len().is_multiple_of(32) {
             return Err(RangeProofVerificationError::Deserialization);
         }
         if slice.len() < 7 * 32 {
         if slice.len() < 7 * 32 {