
status-cache: add shuttle tests (#8275)

Alessandro Decina, 1 month ago
parent commit 21d88de4b6
5 changed files with 219 additions and 10 deletions
  1. Cargo.lock (+1 -0)
  2. ci/test-shuttle.sh (+1 -0)
  3. runtime/Cargo.toml (+2 -0)
  4. runtime/src/serde_snapshot/status_cache.rs (+5 -5)
  5. runtime/src/status_cache.rs (+210 -5)
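
shuttle (https://github.com/awslabs/shuttle) runs a test body many times under a controlled scheduler: check_random samples thread interleavings at random, while check_dfs enumerates them depth-first, optionally up to a bound. A minimal sketch of the pattern these new tests follow, independent of this commit:

    use shuttle::sync::{Arc, Mutex};

    fn body() {
        let n = Arc::new(Mutex::new(0u64));
        let th = {
            let n = n.clone();
            // spawn/lock/join are scheduling points the explorer controls
            shuttle::thread::spawn(move || *n.lock().unwrap() += 1)
        };
        *n.lock().unwrap() += 1;
        th.join().unwrap();
        assert_eq!(*n.lock().unwrap(), 2);
    }

    #[test]
    fn explore() {
        shuttle::check_dfs(body, None); // every interleaving
        shuttle::check_random(body, 1000); // 1000 sampled schedules
    }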

+ 1 - 0
Cargo.lock

@@ -10341,6 +10341,7 @@ dependencies = [
  "serde_derive",
  "serde_json",
  "serde_with",
+ "shuttle",
  "solana-account",
  "solana-account-info",
  "solana-accounts-db",

+ 1 - 0
ci/test-shuttle.sh

@@ -5,3 +5,4 @@ set -eo pipefail
 source ci/_
 
 cargo nextest run --profile ci  --manifest-path="svm/Cargo.toml" --features="shuttle-test" --test concurrent_tests --release --jobs 1
+cargo nextest run --release --profile ci  --manifest-path="runtime/Cargo.toml" --features="shuttle-test" shuttle_tests
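
The trailing shuttle_tests argument is a nextest name filter: only tests whose fully qualified names contain that string are run, which selects the module added to runtime/src/status_cache.rs below. The module is also feature gated, so a regular cargo nextest run without --features shuttle-test never even compiles it. The shape of the gating, abridged from the full module below:

    #[cfg(all(test, feature = "shuttle-test"))]
    mod shuttle_tests {
        // test names seen by the filter look like
        // status_cache::shuttle_tests::test_shuttle_concurrent_inserts_dfs
    }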

+ 2 - 0
runtime/Cargo.toml

@@ -47,6 +47,7 @@ frozen-abi = [
     "solana-vote/frozen-abi",
     "solana-vote-program/frozen-abi",
 ]
+shuttle-test = ["dep:shuttle"]
 
 [dependencies]
 agave-feature-set = { workspace = true }
@@ -89,6 +90,7 @@ serde = { workspace = true, features = ["rc"] }
 serde_derive = { workspace = true }
 serde_json = { workspace = true }
 serde_with = { workspace = true }
+shuttle = { workspace = true, optional = true }
 solana-account = { workspace = true }
 solana-account-info = { workspace = true }
 solana-accounts-db = { workspace = true }
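
A note on the feature wiring: the dep:shuttle syntax enables the optional dependency without also exposing an implicit shuttle feature, so shuttle-test is the only public switch. Crate code then branches on the feature; the imports added in the next two files all follow this shape:

    #[cfg(feature = "shuttle-test")]
    use shuttle::sync::Mutex;
    #[cfg(not(feature = "shuttle-test"))]
    use std::sync::Mutex;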

+ 5 - 5
runtime/src/serde_snapshot/status_cache.rs

@@ -1,5 +1,9 @@
 //! Serialize and deserialize the status cache for snapshots
 
+#[cfg(feature = "shuttle-test")]
+use shuttle::sync::Mutex;
+#[cfg(not(feature = "shuttle-test"))]
+use std::sync::Mutex;
 use {
     crate::{bank::BankSlotDelta, snapshot_utils, status_cache::KeySlice},
     bincode::{self, Options as _},
@@ -7,11 +11,7 @@ use {
     solana_hash::Hash,
     solana_instruction::error::InstructionError,
     solana_transaction_error::TransactionError,
-    std::{
-        collections::HashMap,
-        path::Path,
-        sync::{Arc, Mutex},
-    },
+    std::{collections::HashMap, path::Path, sync::Arc},
 };
 
 #[cfg_attr(
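
The import swap is load-bearing: shuttle can only preempt threads at operations on its own primitives, so a std::sync::Mutex would be invisible to its scheduler and races guarded by one would never be explored. A sketch of what the swap buys, assuming a shuttle-test build:

    use shuttle::sync::Mutex;

    fn bump(m: &Mutex<u64>) {
        // lock() is a scheduling point: shuttle may preempt here and run
        // another thread before this one acquires the guard
        let mut guard = m.lock().unwrap();
        *guard += 1;
        // dropping the guard at the end of scope is typically another such point
    }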

+ 210 - 5
runtime/src/status_cache.rs

@@ -1,15 +1,18 @@
+#[cfg(feature = "shuttle-test")]
+use shuttle::sync::{Arc, Mutex};
 use {
     ahash::{HashMap, HashMapExt as _},
     log::*,
-    rand::{thread_rng, Rng},
     serde::Serialize,
     solana_accounts_db::ancestors::Ancestors,
     solana_clock::{Slot, MAX_RECENT_BLOCKHASHES},
     solana_hash::Hash,
-    std::{
-        collections::{hash_map::Entry, HashSet},
-        sync::{Arc, Mutex},
-    },
+    std::collections::{hash_map::Entry, HashSet},
+};
+#[cfg(not(feature = "shuttle-test"))]
+use {
+    rand::{thread_rng, Rng},
+    std::sync::{Arc, Mutex},
 };
 
 pub const MAX_CACHE_ENTRIES: usize = MAX_RECENT_BLOCKHASHES;
@@ -187,6 +190,10 @@ impl<T: Serialize + Clone> StatusCache<T> {
         // Get the cache entry for this blockhash.
         let (max_slot, key_index, hash_map) =
             self.cache.entry(*transaction_blockhash).or_insert_with(|| {
+                // DFS tests need deterministic behavior
+                #[cfg(feature = "shuttle-test")]
+                let key_index = 0;
+                #[cfg(not(feature = "shuttle-test"))]
                 let key_index = thread_rng().gen_range(0..max_key_index + 1);
                 (slot, key_index, HashMap::new())
             });
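
Pinning key_index to 0 is what the new comment means by deterministic behavior: check_dfs systematically enumerates scheduling decisions and assumes the test behaves identically when the same decisions are replayed, so randomness drawn outside the scheduler (such as thread_rng here) would make replays diverge. If per-blockhash variation were still wanted, shuttle ships its own schedule-controlled rand shim; a hypothetical alternative, not what this commit does, assuming shuttle::rand mirrors the rand API:

    // hypothetical: shuttle::rand stays deterministic under replay
    #[cfg(feature = "shuttle-test")]
    use shuttle::rand::{thread_rng, Rng};

    #[cfg(feature = "shuttle-test")]
    fn random_key_index(max_key_index: usize) -> usize {
        thread_rng().gen_range(0..max_key_index + 1)
    }
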
@@ -546,3 +553,201 @@ mod tests {
         }
     }
 }
+
+#[cfg(all(test, feature = "shuttle-test"))]
+mod shuttle_tests {
+    use {super::*, shuttle::sync::RwLock};
+
+    type BankStatusCache = RwLock<StatusCache<()>>;
+
+    const CLEAR_DFS_ITERATIONS: Option<usize> = None;
+    const CLEAR_RANDOM_ITERATIONS: usize = 20000;
+    const PURGE_DFS_ITERATIONS: Option<usize> = None;
+    const PURGE_RANDOM_ITERATIONS: usize = 8000;
+    const INSERT_DFS_ITERATIONS: Option<usize> = Some(20000);
+    const INSERT_RANDOM_ITERATIONS: usize = 20000;
+
+    fn do_test_shuttle_clear_slots_blockhash_overlap() {
+        let status_cache = Arc::new(BankStatusCache::default());
+
+        let blockhash1 = Hash::new_from_array([1; 32]);
+
+        let key1 = Hash::new_from_array([3; 32]);
+        let key2 = Hash::new_from_array([4; 32]);
+
+        status_cache
+            .write()
+            .unwrap()
+            .insert(&blockhash1, key1, 1, ());
+        let th_clear = shuttle::thread::spawn({
+            let status_cache = status_cache.clone();
+            move || {
+                status_cache.write().unwrap().clear_slot_entries(1);
+            }
+        });
+
+        let th_insert = shuttle::thread::spawn({
+            let status_cache = status_cache.clone();
+            move || {
+                // insert an entry for slot 2 while slot 1 is concurrently cleared; it must survive
+                status_cache
+                    .write()
+                    .unwrap()
+                    .insert(&blockhash1, key2, 2, ());
+            }
+        });
+
+        th_clear.join().unwrap();
+        th_insert.join().unwrap();
+
+        let mut ancestors2 = Ancestors::default();
+        ancestors2.insert(2, 0);
+
+        assert!(status_cache
+            .read()
+            .unwrap()
+            .get_status(key2, &blockhash1, &ancestors2)
+            .is_some());
+    }
+
+    #[test]
+    fn test_shuttle_clear_slots_blockhash_overlap_random() {
+        shuttle::check_random(
+            do_test_shuttle_clear_slots_blockhash_overlap,
+            CLEAR_RANDOM_ITERATIONS,
+        );
+    }
+
+    #[test]
+    fn test_shuttle_clear_slots_blockhash_overlap_dfs() {
+        shuttle::check_dfs(
+            do_test_shuttle_clear_slots_blockhash_overlap,
+            CLEAR_DFS_ITERATIONS,
+        );
+    }
+
+    // unlike clear_slot_entries(), purge_slots() can't overlap with regular blockhashes since
+    // they'd have expired by the time roots are old enough to be purged. However, nonces don't
+    // expire, so they can overlap.
+    fn do_test_shuttle_purge_nonce_overlap() {
+        let status_cache = Arc::new(BankStatusCache::default());
+        // fill the cache so that the next add_root() will purge the oldest root
+        for i in 0..MAX_CACHE_ENTRIES {
+            status_cache.write().unwrap().add_root(i as u64);
+        }
+
+        let blockhash1 = Hash::new_from_array([1; 32]);
+
+        let key1 = Hash::new_from_array([3; 32]);
+        let key2 = Hash::new_from_array([4; 32]);
+
+        // this slot/key is going to get purged when the th_purge thread calls add_root()
+        status_cache
+            .write()
+            .unwrap()
+            .insert(&blockhash1, key1, 0, ());
+
+        let th_purge = shuttle::thread::spawn({
+            let status_cache = status_cache.clone();
+            move || {
+                status_cache
+                    .write()
+                    .unwrap()
+                    .add_root(MAX_CACHE_ENTRIES as Slot + 1);
+            }
+        });
+
+        let th_insert = shuttle::thread::spawn({
+            let status_cache = status_cache.clone();
+            move || {
+                // insert an entry for a blockhash that gets concurrently purged
+                status_cache.write().unwrap().insert(
+                    &blockhash1,
+                    key2,
+                    MAX_CACHE_ENTRIES as Slot + 2,
+                    (),
+                );
+            }
+        });
+        th_purge.join().unwrap();
+        th_insert.join().unwrap();
+
+        let mut ancestors2 = Ancestors::default();
+        ancestors2.insert(MAX_CACHE_ENTRIES as Slot + 2, 0);
+
+        assert!(status_cache
+            .read()
+            .unwrap()
+            .get_status(key1, &blockhash1, &ancestors2)
+            .is_none());
+        assert!(status_cache
+            .read()
+            .unwrap()
+            .get_status(key2, &blockhash1, &ancestors2)
+            .is_some());
+    }
+
+    #[test]
+    fn test_shuttle_purge_nonce_overlap_random() {
+        shuttle::check_random(do_test_shuttle_purge_nonce_overlap, PURGE_RANDOM_ITERATIONS);
+    }
+
+    #[test]
+    fn test_shuttle_purge_nonce_overlap_dfs() {
+        shuttle::check_dfs(do_test_shuttle_purge_nonce_overlap, PURGE_DFS_ITERATIONS);
+    }
+
+    fn do_test_shuttle_concurrent_inserts() {
+        let status_cache = Arc::new(BankStatusCache::default());
+        let blockhash1 = Hash::new_from_array([42; 32]);
+        let blockhash2 = Hash::new_from_array([43; 32]);
+        const N_INSERTS: u8 = 50;
+
+        let mut handles = Vec::with_capacity(N_INSERTS as usize);
+        for i in 0..N_INSERTS {
+            let status_cache = status_cache.clone();
+            let slot = (i % 3) + 1;
+            let bh = if i % 2 == 0 { blockhash1 } else { blockhash2 };
+            handles.push(shuttle::thread::spawn(move || {
+                let key = Hash::new_from_array([i; 32]);
+                status_cache
+                    .write()
+                    .unwrap()
+                    .insert(&bh, key, slot as Slot, ());
+            }));
+        }
+
+        for handle in handles {
+            handle.join().unwrap();
+        }
+
+        let mut ancestors = Ancestors::default();
+        ancestors.insert(1, 0);
+        ancestors.insert(2, 0);
+        ancestors.insert(3, 0);
+
+        // verify all N_INSERTS inserts are visible
+        for i in 0..N_INSERTS {
+            let key = Hash::new_from_array([i; 32]);
+            let bh = if i % 2 == 0 { blockhash1 } else { blockhash2 };
+            assert!(
+                status_cache
+                    .read()
+                    .unwrap()
+                    .get_status(key, &bh, &ancestors)
+                    .is_some(),
+                "missing key {}",
+                i
+            );
+        }
+    }
+
+    #[test]
+    fn test_shuttle_concurrent_inserts_dfs() {
+        shuttle::check_dfs(do_test_shuttle_concurrent_inserts, INSERT_DFS_ITERATIONS);
+    }
+
+    #[test]
+    fn test_shuttle_concurrent_inserts_random() {
+        shuttle::check_random(do_test_shuttle_concurrent_inserts, INSERT_RANDOM_ITERATIONS);
+    }
+}
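
On the budgets at the top of the module: check_dfs takes an Option<usize> bound, so None (the clear and purge tests) means run until every interleaving is exhausted, while Some(20000) caps the insert test, whose 50 spawned threads make exhaustive DFS infeasible; check_random always runs exactly the given number of sampled schedules. When a check fails, shuttle panics with an encoded schedule that can be replayed deterministically while debugging. A sketch, with a placeholder schedule string, assuming shuttle's replay API:

    #[test]
    fn replay_found_failure() {
        // paste the schedule string from shuttle's panic message here
        shuttle::replay(do_test_shuttle_concurrent_inserts, "<schedule from panic>");
    }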