فهرست منبع

Gossip: Add dynamic stake weighting based on fraction of unstaked nodes - Fixed Point Math (#7098)

* add dynamic weighting based on fraction of unstaked nodes. fixed point math

* Add setup for account control

* address comments - add explicit types, bundle config into Dynamic, refactor, etc

* update to vanity key: gosW... (gossip weight)

* agave-unstable-api implementation

* refactor add apply_cfg tests

* refactor and address comments

* update interpolate. change lpf

* fix deserialization bug; don't forget to remove logs

* update WeightingConfig to match record program

* address comments and make default setting static for PushActiveSet
Greg Cusack 3 ماه پیش
والد
کامیت
299fd879a8

+ 6 - 0
Cargo.lock

@@ -8676,6 +8676,7 @@ dependencies = [
  "serde_derive",
  "serial_test",
  "siphasher 1.0.1",
+ "solana-account",
  "solana-bloom",
  "solana-clap-utils",
  "solana-client",
@@ -8689,6 +8690,7 @@ dependencies = [
  "solana-keypair",
  "solana-ledger",
  "solana-logger 3.0.0",
+ "solana-low-pass-filter",
  "solana-measure",
  "solana-metrics",
  "solana-native-token",
@@ -9165,6 +9167,10 @@ dependencies = [
  "signal-hook",
 ]
 
+[[package]]
+name = "solana-low-pass-filter"
+version = "3.0.0"
+
 [[package]]
 name = "solana-measure"
 version = "3.0.0"

+ 2 - 0
Cargo.toml

@@ -51,6 +51,7 @@ members = [
     "ledger-tool",
     "local-cluster",
     "log-analyzer",
+    "low-pass-filter",
     "measure",
     "memory-management",
     "merkle-tree",
@@ -452,6 +453,7 @@ solana-loader-v4-interface = "2.2.1"
 solana-loader-v4-program = { path = "programs/loader-v4", version = "=3.0.0" }
 solana-local-cluster = { path = "local-cluster", version = "=3.0.0" }
 solana-logger = "3.0.0"
+solana-low-pass-filter = { path = "low-pass-filter", version = "=3.0.0" }
 solana-measure = { path = "measure", version = "=3.0.0" }
 solana-merkle-tree = { path = "merkle-tree", version = "=3.0.0" }
 solana-message = "2.4.0"

+ 3 - 0
gossip/Cargo.toml

@@ -33,6 +33,7 @@ frozen-abi = [
     "solana-vote/frozen-abi",
     "solana-vote-program/frozen-abi",
 ]
+agave-unstable-api = ["solana-low-pass-filter/agave-unstable-api"]
 
 [dependencies]
 agave-feature-set = { workspace = true }
@@ -56,6 +57,7 @@ serde-big-array = { workspace = true }
 serde_bytes = { workspace = true }
 serde_derive = { workspace = true }
 siphasher = { workspace = true }
+solana-account = { workspace = true }
 solana-bloom = { workspace = true }
 solana-clap-utils = { workspace = true }
 solana-client = { workspace = true }
@@ -73,6 +75,7 @@ solana-hash = "=2.3.0"
 solana-keypair = "=2.2.1"
 solana-ledger = { workspace = true, features = ["agave-unstable-api"] }
 solana-logger = "=3.0.0"
+solana-low-pass-filter = { workspace = true, features = ["agave-unstable-api"] }
 solana-measure = { workspace = true }
 solana-metrics = { workspace = true }
 solana-native-token = "=2.2.2"

+ 15 - 3
gossip/src/cluster_info.rs

@@ -62,7 +62,7 @@ use {
     },
     solana_pubkey::Pubkey,
     solana_rayon_threadlimit::get_thread_count,
-    solana_runtime::bank_forks::BankForks,
+    solana_runtime::{bank::Bank, bank_forks::BankForks},
     solana_sanitize::Sanitize,
     solana_signature::Signature,
     solana_signer::Signer,
@@ -130,6 +130,8 @@ pub const DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS: u64 = 10_000;
 pub const DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS: u64 = 60_000;
 // Limit number of unique pubkeys in the crds table.
 pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 8192;
+// Interval between push active set refreshes.
+pub const REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS: u64 = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2;
 
 // Must have at least one socket to monitor the TVU port
 pub const MINIMUM_NUM_TVU_RECEIVE_SOCKETS: NonZeroUsize = NonZeroUsize::new(1).unwrap();
@@ -214,6 +216,7 @@ impl ClusterInfo {
         stakes: &HashMap<Pubkey, u64>,
         gossip_validators: Option<&HashSet<Pubkey>>,
         sender: &impl ChannelSend<PacketBatch>,
+        maybe_bank_ref: Option<&Bank>,
     ) {
         let shred_version = self.my_contact_info.read().unwrap().shred_version();
         let self_keypair: Arc<Keypair> = self.keypair().clone();
@@ -226,6 +229,7 @@ impl ClusterInfo {
             &self.ping_cache,
             &mut pings,
             &self.socket_addr_space,
+            maybe_bank_ref,
         );
         let pings = pings
             .into_iter()
@@ -1448,7 +1452,7 @@ impl ClusterInfo {
             .thread_name(|i| format!("solGossipRun{i:02}"))
             .build()
             .unwrap();
-        let mut epoch_specs = bank_forks.map(EpochSpecs::from);
+        let mut epoch_specs = bank_forks.clone().map(EpochSpecs::from);
         Builder::new()
             .name("solGossip".to_string())
             .spawn(move || {
@@ -1504,13 +1508,19 @@ impl ClusterInfo {
                     entrypoints_processed = entrypoints_processed || self.process_entrypoints();
                     //TODO: possibly tune this parameter
                     //we saw a deadlock passing an self.read().unwrap().timeout into sleep
-                    if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 {
+                    if start - last_push > REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS {
+                        let maybe_bank = bank_forks
+                            .as_ref()
+                            .and_then(|bf| bf.read().ok())
+                            .map(|forks| forks.root_bank());
+                        let maybe_bank_ref = maybe_bank.as_deref();
                         self.refresh_my_gossip_contact_info();
                         self.refresh_push_active_set(
                             &recycler,
                             &stakes,
                             gossip_validators.as_ref(),
                             &sender,
+                            maybe_bank_ref,
                         );
                         last_push = timestamp();
                     }
@@ -2831,6 +2841,7 @@ mod tests {
             &cluster_info.ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
         let mut reqs = cluster_info.generate_new_gossip_requests(
             &thread_pool,
@@ -2972,6 +2983,7 @@ mod tests {
             &cluster_info.ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
         //check that all types of gossip messages are signed correctly
         cluster_info.flush_push_queue();

+ 4 - 0
gossip/src/crds_gossip.rs

@@ -27,6 +27,7 @@ use {
     solana_keypair::Keypair,
     solana_ledger::shred::Shred,
     solana_pubkey::Pubkey,
+    solana_runtime::bank::Bank,
     solana_signer::Signer,
     solana_streamer::socket::SocketAddrSpace,
     solana_time_utils::timestamp,
@@ -186,6 +187,7 @@ impl CrdsGossip {
         ping_cache: &Mutex<PingCache>,
         pings: &mut Vec<(SocketAddr, Ping)>,
         socket_addr_space: &SocketAddrSpace,
+        maybe_bank_ref: Option<&Bank>,
     ) {
         self.push.refresh_push_active_set(
             &self.crds,
@@ -196,6 +198,7 @@ impl CrdsGossip {
             ping_cache,
             pings,
             socket_addr_space,
+            maybe_bank_ref,
         )
     }
 
@@ -445,6 +448,7 @@ mod test {
             &ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
         let now = timestamp();
         //incorrect dest

+ 32 - 1
gossip/src/crds_gossip_push.rs

@@ -21,10 +21,12 @@ use {
         protocol::{Ping, PingCache},
         push_active_set::PushActiveSet,
         received_cache::ReceivedCache,
+        stake_weighting_config::{get_gossip_config_from_account, WeightingConfig},
     },
     itertools::Itertools,
     solana_keypair::Keypair,
     solana_pubkey::Pubkey,
+    solana_runtime::bank::Bank,
     solana_signer::Signer,
     solana_streamer::socket::SocketAddrSpace,
     solana_time_utils::timestamp,
@@ -49,6 +51,7 @@ const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
 const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
 const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
 const CRDS_GOSSIP_PUSH_ACTIVE_SET_SIZE: usize = CRDS_GOSSIP_PUSH_FANOUT + 3;
+const CONFIG_REFRESH_INTERVAL_MS: u64 = 60_000;
 
 pub struct CrdsGossipPush {
     /// Active set of validators for push
@@ -65,12 +68,13 @@ pub struct CrdsGossipPush {
     pub num_total: AtomicUsize,
     pub num_old: AtomicUsize,
     pub num_pushes: AtomicUsize,
+    last_cfg_poll_ms: Mutex<u64>,
 }
 
 impl Default for CrdsGossipPush {
     fn default() -> Self {
         Self {
-            active_set: RwLock::default(),
+            active_set: RwLock::new(PushActiveSet::new_static()),
             crds_cursor: Mutex::default(),
             received_cache: Mutex::new(ReceivedCache::new(2 * CRDS_UNIQUE_PUBKEY_CAPACITY)),
             push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
@@ -79,6 +83,7 @@ impl Default for CrdsGossipPush {
             num_total: AtomicUsize::default(),
             num_old: AtomicUsize::default(),
             num_pushes: AtomicUsize::default(),
+            last_cfg_poll_ms: Mutex::new(0),
         }
     }
 }
@@ -238,6 +243,22 @@ impl CrdsGossipPush {
         active_set.prune(self_pubkey, peer, origins, stakes);
     }
 
+    fn maybe_refresh_weighting_config(
+        &self,
+        maybe_bank_ref: Option<&Bank>,
+        now_ms: u64,
+    ) -> Option<WeightingConfig> {
+        let bank = maybe_bank_ref?;
+        {
+            let mut last = self.last_cfg_poll_ms.lock().unwrap();
+            if now_ms.saturating_sub(*last) < CONFIG_REFRESH_INTERVAL_MS {
+                return None;
+            }
+            *last = now_ms;
+        }
+        get_gossip_config_from_account(bank)
+    }
+
     /// Refresh the push active set.
     #[allow(clippy::too_many_arguments)]
     pub(crate) fn refresh_push_active_set(
@@ -250,6 +271,7 @@ impl CrdsGossipPush {
         ping_cache: &Mutex<PingCache>,
         pings: &mut Vec<(SocketAddr, Ping)>,
         socket_addr_space: &SocketAddrSpace,
+        maybe_bank_ref: Option<&Bank>,
     ) {
         let mut rng = rand::thread_rng();
         // Active and valid gossip nodes with matching shred-version.
@@ -280,13 +302,18 @@ impl CrdsGossipPush {
             return;
         }
         let cluster_size = crds.read().unwrap().num_pubkeys().max(stakes.len());
+        let maybe_cfg = self.maybe_refresh_weighting_config(maybe_bank_ref, timestamp());
         let mut active_set = self.active_set.write().unwrap();
+        if let Some(cfg) = maybe_cfg {
+            active_set.apply_cfg(&cfg);
+        }
         active_set.rotate(
             &mut rng,
             CRDS_GOSSIP_PUSH_ACTIVE_SET_SIZE,
             cluster_size,
             &nodes,
             stakes,
+            &self_keypair.pubkey(),
         )
     }
 }
@@ -447,6 +474,7 @@ mod tests {
             &ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
 
         let new_msg = CrdsValue::new_unsigned(CrdsData::from(ContactInfo::new_localhost(
@@ -514,6 +542,7 @@ mod tests {
             &ping_cache,
             &mut Vec::new(),
             &SocketAddrSpace::Unspecified,
+            None,
         );
 
         // push 3's contact info to 1 and 2 and 3
@@ -557,6 +586,7 @@ mod tests {
             &ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
 
         let new_msg = CrdsValue::new_unsigned(CrdsData::from(ContactInfo::new_localhost(
@@ -605,6 +635,7 @@ mod tests {
             &ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
 
         let mut ci = ContactInfo::new_localhost(&solana_pubkey::new_rand(), 0);

+ 1 - 0
gossip/src/lib.rs

@@ -41,6 +41,7 @@ mod protocol;
 mod push_active_set;
 mod received_cache;
 pub mod restart_crds_values;
+pub mod stake_weighting_config;
 pub mod weighted_shuffle;
 
 #[macro_use]

+ 777 - 35
gossip/src/push_active_set.rs

@@ -1,8 +1,13 @@
 use {
-    crate::weighted_shuffle::WeightedShuffle,
+    crate::{
+        cluster_info::REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS,
+        stake_weighting_config::{TimeConstant, WeightingConfig, WeightingConfigTyped},
+        weighted_shuffle::WeightedShuffle,
+    },
     indexmap::IndexMap,
     rand::Rng,
     solana_bloom::bloom::{Bloom, ConcurrentBloom},
+    solana_low_pass_filter::api as lpf,
     solana_native_token::LAMPORTS_PER_SOL,
     solana_pubkey::Pubkey,
     std::collections::HashMap,
@@ -10,12 +15,128 @@ use {
 
 const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25;
 
+const ALPHA_MIN: u64 = lpf::SCALE.get();
+const ALPHA_MAX: u64 = 2 * lpf::SCALE.get();
+const DEFAULT_ALPHA: u64 = ALPHA_MAX;
+// Low pass filter convergence time (ms)
+const DEFAULT_TC_MS: u64 = 30_000;
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum WeightingMode {
+    // alpha = 2.0 -> Quadratic
+    Static,
+    // alpha in [1.0, 2.0], smoothed over time, scaled up by 1,000,000 to avoid floating-point math
+    Dynamic {
+        alpha: u64,    // current alpha (fixed-point, 1,000,000–2,000,000)
+        filter_k: u64, // default: 611,015
+        tc_ms: u64,    // IIR time-constant (ms)
+    },
+}
+
+impl From<WeightingConfigTyped> for WeightingMode {
+    fn from(cfg: WeightingConfigTyped) -> Self {
+        match cfg {
+            WeightingConfigTyped::Static => WeightingMode::Static,
+            WeightingConfigTyped::Dynamic { tc } => {
+                let tc_ms = match tc {
+                    TimeConstant::Value(ms) => ms,
+                    TimeConstant::Default => DEFAULT_TC_MS,
+                };
+                let filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, tc_ms);
+                WeightingMode::Dynamic {
+                    alpha: DEFAULT_ALPHA,
+                    filter_k,
+                    tc_ms,
+                }
+            }
+        }
+    }
+}
+
+#[inline]
+fn get_weight(bucket: u64, alpha: u64) -> u64 {
+    debug_assert!((ALPHA_MIN..=ALPHA_MIN + lpf::SCALE.get()).contains(&alpha));
+    let b = bucket + 1;
+    let b_squared = b.saturating_mul(b);
+    gossip_interpolate_weight(b, b_squared, alpha)
+}
+
+/// Approximates `base^alpha` rounded to nearest integer using
+/// integer-only linear interpolation between `base^1` and `base^2`.
+///
+/// Note: This function is most accurate when `base` is small e.g. < ~25.
+#[inline]
+#[allow(clippy::arithmetic_side_effects)]
+fn gossip_interpolate_weight(base: u64, base_squared: u64, alpha: u64) -> u64 {
+    let scale = lpf::SCALE.get();
+    let t = alpha.saturating_sub(ALPHA_MIN);
+    debug_assert!(t <= scale, "interpolation t={} > SCALE={}", t, scale);
+    // ((base * (scale - t) + base_squared * t) + scale / 2) / scale
+    ((base.saturating_mul(scale.saturating_sub(t))).saturating_add(base_squared.saturating_mul(t)))
+        .saturating_add(scale / 2)
+        / scale
+}
+
 // Each entry corresponds to a stake bucket for
 //     min stake of { this node, crds value owner }
 // The entry represents set of gossip nodes to actively
 // push to for crds values belonging to the bucket.
-#[derive(Default)]
-pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]);
+pub(crate) struct PushActiveSet {
+    entries: [PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES],
+    mode: WeightingMode,
+}
+
+impl PushActiveSet {
+    pub(crate) fn new(mode: WeightingMode) -> Self {
+        Self {
+            entries: Default::default(),
+            mode,
+        }
+    }
+
+    pub(crate) fn new_static() -> Self {
+        Self::new(WeightingMode::Static)
+    }
+
+    pub(crate) fn apply_cfg(&mut self, cfg: &WeightingConfig) {
+        let config_type = WeightingConfigTyped::from(cfg);
+        match (&mut self.mode, config_type) {
+            (WeightingMode::Static, WeightingConfigTyped::Static) => (),
+            (current_mode, WeightingConfigTyped::Static) => {
+                // Dynamic -> Static: Switch mode
+                info!("Switching mode: {:?} -> Static", current_mode);
+                self.mode = WeightingMode::Static;
+            }
+            (
+                WeightingMode::Dynamic {
+                    filter_k, tc_ms, ..
+                },
+                WeightingConfigTyped::Dynamic { tc },
+            ) => {
+                // Dynamic -> Dynamic: Update parameters if needed
+                let new_tc_ms = match tc {
+                    TimeConstant::Value(ms) => ms,
+                    TimeConstant::Default => DEFAULT_TC_MS,
+                };
+                if *tc_ms != new_tc_ms {
+                    *filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, new_tc_ms);
+                    *tc_ms = new_tc_ms;
+                    info!("Recomputed filter K = {} (tc_ms = {})", *filter_k, *tc_ms);
+                }
+            }
+            (current_mode, WeightingConfigTyped::Dynamic { .. }) => {
+                info!("Switching mode: {:?} -> Dynamic", current_mode);
+                self.mode = WeightingMode::from(config_type);
+                if let WeightingMode::Dynamic {
+                    filter_k, tc_ms, ..
+                } = self.mode
+                {
+                    info!("Initialized filter K = {} (tc_ms = {})", filter_k, tc_ms);
+                }
+            }
+        }
+    }
+}
 
 // Keys are gossip nodes to push messages to.
 // Values are which origins the node has pruned.
@@ -68,7 +189,11 @@ impl PushActiveSet {
         // Gossip nodes to be sampled for each push active set.
         nodes: &[Pubkey],
         stakes: &HashMap<Pubkey, u64>,
+        self_pubkey: &Pubkey,
     ) {
+        if nodes.is_empty() {
+            return;
+        }
         let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS);
         // Active set of nodes to push to are sampled from these gossip nodes,
         // using sampling probabilities obtained from the stake bucket of each
@@ -77,32 +202,76 @@ impl PushActiveSet {
             .iter()
             .map(|node| get_stake_bucket(stakes.get(node)))
             .collect();
-        // (k, entry) represents push active set where the stake bucket of
-        //     min stake of {this node, crds value owner}
-        // is equal to `k`. The `entry` maintains set of gossip nodes to
-        // actively push to for crds values belonging to this bucket.
-        for (k, entry) in self.0.iter_mut().enumerate() {
-            let weights: Vec<u64> = buckets
-                .iter()
-                .map(|&bucket| {
-                    // bucket <- get_stake_bucket(min stake of {
-                    //  this node, crds value owner and gossip peer
-                    // })
-                    // weight <- (bucket + 1)^2
-                    // min stake of {...} is a proxy for how much we care about
-                    // the link, and tries to mirror similar logic on the
-                    // receiving end when pruning incoming links:
-                    // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105
-                    let bucket = bucket.min(k) as u64;
-                    bucket.saturating_add(1).saturating_pow(2)
-                })
-                .collect();
-            entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights);
+
+        match self.mode {
+            WeightingMode::Static => {
+                // alpha = 2.0 → weight = (bucket + 1)^2
+                // (k, entry) represents push active set where the stake bucket of
+                //     min stake of {this node, crds value owner}
+                // is equal to `k`. The `entry` maintains set of gossip nodes to
+                // actively push to for crds values belonging to this bucket.
+                for (k, entry) in self.entries.iter_mut().enumerate() {
+                    let weights: Vec<u64> = buckets
+                        .iter()
+                        .map(|&bucket| {
+                            // bucket <- get_stake_bucket(min stake of {
+                            //  this node, crds value owner and gossip peer
+                            // })
+                            // weight <- (bucket + 1)^2
+                            // min stake of {...} is a proxy for how much we care about
+                            // the link, and tries to mirror similar logic on the
+                            // receiving end when pruning incoming links:
+                            // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105
+                            let bucket = bucket.min(k) as u64;
+                            bucket.saturating_add(1).saturating_pow(2)
+                        })
+                        .collect();
+                    entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights);
+                }
+            }
+            WeightingMode::Dynamic {
+                ref mut alpha,
+                filter_k,
+                tc_ms: _,
+            } => {
+                // Need to take into account this node's stake bucket when calculating fraction of unstaked nodes.
+                let self_bucket = get_stake_bucket(stakes.get(self_pubkey));
+                let num_unstaked = buckets
+                    .iter()
+                    .filter(|&&b| b == 0)
+                    .count()
+                    .saturating_add(if self_bucket == 0 { 1 } else { 0 });
+                let total_nodes = nodes.len().saturating_add(1);
+
+                let f_scaled = ((num_unstaked.saturating_mul(lpf::SCALE.get() as usize))
+                    .saturating_add(total_nodes / 2))
+                    / total_nodes;
+                let alpha_target = ALPHA_MIN.saturating_add(f_scaled as u64);
+                *alpha = lpf::filter_alpha(
+                    *alpha,
+                    alpha_target,
+                    lpf::FilterConfig {
+                        output_range: ALPHA_MIN..ALPHA_MAX,
+                        k: filter_k,
+                    },
+                );
+
+                for (k, entry) in self.entries.iter_mut().enumerate() {
+                    let weights: Vec<u64> = buckets
+                        .iter()
+                        .map(|&bucket| {
+                            let bucket = bucket.min(k) as u64;
+                            get_weight(bucket, *alpha)
+                        })
+                        .collect();
+                    entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights);
+                }
+            }
         }
     }
 
     fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry {
-        &self.0[get_stake_bucket(stake)]
+        &self.entries[get_stake_bucket(stake)]
     }
 }
 
@@ -184,10 +353,42 @@ fn get_stake_bucket(stake: Option<&u64>) -> usize {
 #[cfg(test)]
 mod tests {
     use {
-        super::*, itertools::iproduct, rand::SeedableRng, rand_chacha::ChaChaRng,
+        super::*,
+        crate::stake_weighting_config::{WEIGHTING_MODE_DYNAMIC, WEIGHTING_MODE_STATIC},
+        itertools::iproduct,
+        rand::SeedableRng,
+        rand_chacha::ChaChaRng,
         std::iter::repeat_with,
     };
 
+    const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL;
+
+    fn push_active_set_new_dynamic() -> PushActiveSet {
+        PushActiveSet::new(WeightingMode::from(WeightingConfigTyped::Dynamic {
+            tc: TimeConstant::Default,
+        }))
+    }
+
+    // Helper to generate a stake map given unstaked count
+    fn make_stakes(
+        nodes: &[Pubkey],
+        num_unstaked: usize,
+        rng: &mut ChaChaRng,
+    ) -> HashMap<Pubkey, u64> {
+        nodes
+            .iter()
+            .enumerate()
+            .map(|(i, node)| {
+                let stake = if i < num_unstaked {
+                    0
+                } else {
+                    rng.gen_range(1..=MAX_STAKE)
+                };
+                (*node, stake)
+            })
+            .collect()
+    }
+
     #[test]
     fn test_get_stake_bucket() {
         assert_eq!(get_stake_bucket(None), 0);
@@ -212,21 +413,20 @@ mod tests {
     }
 
     #[test]
-    fn test_push_active_set() {
+    fn test_push_active_set_static_weighting() {
         const CLUSTER_SIZE: usize = 117;
-        const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL;
         let mut rng = ChaChaRng::from_seed([189u8; 32]);
         let pubkey = Pubkey::new_unique();
         let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect();
         let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE));
         let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect();
         stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE));
-        let mut active_set = PushActiveSet::default();
-        assert!(active_set.0.iter().all(|entry| entry.0.is_empty()));
-        active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes);
-        assert!(active_set.0.iter().all(|entry| entry.0.len() == 5));
+        let mut active_set = PushActiveSet::new(WeightingMode::Static);
+        assert!(active_set.entries.iter().all(|entry| entry.0.is_empty()));
+        active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &pubkey);
+        assert!(active_set.entries.iter().all(|entry| entry.0.len() == 5));
         // Assert that for all entries, each filter already prunes the key.
-        for entry in &active_set.0 {
+        for entry in &active_set.entries {
             for (node, filter) in entry.0.iter() {
                 assert!(filter.contains(node));
             }
@@ -248,8 +448,8 @@ mod tests {
         assert!(active_set
             .get_nodes(&pubkey, other, |_| false, &stakes)
             .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k])));
-        active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes);
-        assert!(active_set.0.iter().all(|entry| entry.0.len() == 7));
+        active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes, &pubkey);
+        assert!(active_set.entries.iter().all(|entry| entry.0.len() == 7));
         assert!(active_set
             .get_nodes(&pubkey, origin, |_| false, &stakes)
             .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k])));
@@ -268,6 +468,63 @@ mod tests {
             .eq([16, 7, 11].into_iter().map(|k| &nodes[k])));
     }
 
+    #[test]
+    fn test_push_active_set_dynamic_weighting() {
+        const CLUSTER_SIZE: usize = 117;
+        let mut rng = ChaChaRng::from_seed([14u8; 32]);
+        let pubkey = Pubkey::new_unique();
+        let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect();
+        let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE));
+        let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect();
+        stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE));
+        let mut active_set = push_active_set_new_dynamic();
+        assert!(active_set.entries.iter().all(|entry| entry.0.is_empty()));
+        active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &pubkey);
+        assert!(active_set.entries.iter().all(|entry| entry.0.len() == 5));
+        // Assert that for all entries, each filter already prunes the key.
+        for entry in &active_set.entries {
+            for (node, filter) in entry.0.iter() {
+                assert!(filter.contains(node));
+            }
+        }
+        let other = &nodes[6];
+        let origin = &nodes[17];
+        assert!(active_set
+            .get_nodes(&pubkey, origin, |_| false, &stakes)
+            .eq([7, 6, 2, 4, 12].into_iter().map(|k| &nodes[k])));
+        assert!(active_set
+            .get_nodes(&pubkey, other, |_| false, &stakes)
+            .eq([7, 2, 4, 12].into_iter().map(|k| &nodes[k])));
+
+        active_set.prune(&pubkey, &nodes[6], &[*origin], &stakes);
+        active_set.prune(&pubkey, &nodes[11], &[*origin], &stakes);
+        active_set.prune(&pubkey, &nodes[4], &[*origin], &stakes);
+        assert!(active_set
+            .get_nodes(&pubkey, origin, |_| false, &stakes)
+            .eq([7, 2, 12].into_iter().map(|k| &nodes[k])));
+        assert!(active_set
+            .get_nodes(&pubkey, other, |_| false, &stakes)
+            .eq([7, 2, 4, 12].into_iter().map(|k| &nodes[k])));
+        active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes, &pubkey);
+        assert!(active_set.entries.iter().all(|entry| entry.0.len() == 7));
+        assert!(active_set
+            .get_nodes(&pubkey, origin, |_| false, &stakes)
+            .eq([2, 12, 15, 14, 16].into_iter().map(|k| &nodes[k])));
+        assert!(active_set
+            .get_nodes(&pubkey, other, |_| false, &stakes)
+            .eq([2, 4, 12, 15, 14, 16].into_iter().map(|k| &nodes[k])));
+        let origins = [*origin, *other];
+        active_set.prune(&pubkey, &nodes[2], &origins, &stakes);
+        active_set.prune(&pubkey, &nodes[12], &origins, &stakes);
+        active_set.prune(&pubkey, &nodes[14], &origins, &stakes);
+        assert!(active_set
+            .get_nodes(&pubkey, origin, |_| false, &stakes)
+            .eq([15, 16].into_iter().map(|k| &nodes[k])));
+        assert!(active_set
+            .get_nodes(&pubkey, other, |_| false, &stakes)
+            .eq([4, 15, 16].into_iter().map(|k| &nodes[k])));
+    }
+
     #[test]
     fn test_push_active_set_entry() {
         const NUM_BLOOM_FILTER_ITEMS: usize = 100;
@@ -329,4 +586,489 @@ mod tests {
         let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]];
         assert!(entry.0.keys().eq(keys));
     }
+
+    /// Test helper: returns the current smoothed `alpha` of the active set.
+    /// Panics if the set is in `Static` mode (tests using it assume Dynamic).
+    fn alpha_of(pas: &PushActiveSet) -> u64 {
+        match pas.mode {
+            WeightingMode::Dynamic { alpha, .. } => alpha,
+            WeightingMode::Static => panic!("test assumed Dynamic mode but found Static"),
+        }
+    }
+
+    #[test]
+    fn test_alpha_converges_to_expected_target() {
+        // After enough `rotate()` calls, the filtered alpha must settle within
+        // ±1% of the target implied by the fraction of unstaked nodes.
+        const CLUSTER_SIZE: usize = 415;
+        const TOLERANCE_MILLI: u64 = lpf::SCALE.get() / 100; // ±1% of alpha
+
+        let mut rng = ChaChaRng::from_seed([77u8; 32]);
+        let mut nodes: Vec<Pubkey> = repeat_with(Pubkey::new_unique).take(CLUSTER_SIZE).collect();
+
+        // 39% unstaked → alpha_target = 1,000,000 + 39 * 10000 = 1,390,000
+        let percent_unstaked = 39;
+        let num_unstaked = (CLUSTER_SIZE * percent_unstaked + 50) / 100;
+        let expected_alpha_milli = 1_000_000 + (percent_unstaked as u64 * 10_000);
+
+        let stakes = make_stakes(&nodes, num_unstaked, &mut rng);
+        let my_pubkey = nodes.pop().unwrap();
+
+        let mut active_set = push_active_set_new_dynamic();
+
+        // Simulate repeated calls to `rotate()` (as would happen every 7.5s)
+        // 8 calls (60s) should be enough to converge to the expected target alpha.
+        // We converge in about 4 calls (30s).
+        for _ in 0..8 {
+            active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey);
+        }
+
+        let actual_alpha = alpha_of(&active_set);
+        // u64::abs_diff avoids the lossy `as i32` narrowing casts.
+        assert!(
+            actual_alpha.abs_diff(expected_alpha_milli) <= TOLERANCE_MILLI,
+            "alpha={} did not converge to expected alpha={}",
+            actual_alpha,
+            expected_alpha_milli
+        );
+
+        // 93% unstaked → alpha_target = 1,000,000 + 93 * 10000 = 1,930,000
+        let percent_unstaked = 93;
+        let num_unstaked = (CLUSTER_SIZE * percent_unstaked + 50) / 100;
+        let expected_alpha_milli = 1_000_000 + (percent_unstaked as u64 * 10_000);
+
+        let stakes = make_stakes(&nodes, num_unstaked, &mut rng);
+        for _ in 0..8 {
+            active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey);
+        }
+
+        let actual_alpha = alpha_of(&active_set);
+        assert!(
+            actual_alpha.abs_diff(expected_alpha_milli) <= TOLERANCE_MILLI,
+            "alpha={} did not reconverge to expected alpha={}",
+            actual_alpha,
+            expected_alpha_milli
+        );
+    }
+
+    #[test]
+    fn test_alpha_converges_up_and_down() {
+        // Alpha must track the target in both directions: down to ALPHA_MIN,
+        // up to ALPHA_MAX, and back down again.
+        const CLUSTER_SIZE: usize = 415;
+        const TOLERANCE_MILLI: u64 = lpf::SCALE.get() / 100; // ±1% of alpha
+        const ROTATE_CALLS: usize = 8;
+
+        let mut rng = ChaChaRng::from_seed([99u8; 32]);
+        let mut nodes: Vec<Pubkey> = repeat_with(Pubkey::new_unique).take(CLUSTER_SIZE).collect();
+
+        let mut active_set = push_active_set_new_dynamic();
+
+        // 0% unstaked → alpha_target = 1,000,000
+        let num_unstaked = 0;
+        let expected_alpha_0: u64 = 1_000_000;
+        let stakes = make_stakes(&nodes, num_unstaked, &mut rng);
+        let my_pubkey = nodes.pop().unwrap();
+
+        for _ in 0..ROTATE_CALLS {
+            active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey);
+        }
+        let alpha = alpha_of(&active_set);
+        // u64::abs_diff avoids the lossy `as i32` narrowing casts.
+        assert!(
+            alpha.abs_diff(expected_alpha_0) <= TOLERANCE_MILLI,
+            "alpha={} did not converge to alpha_0={}",
+            alpha,
+            expected_alpha_0
+        );
+
+        // 100% unstaked → alpha_target = 2,000,000
+        let num_unstaked = CLUSTER_SIZE;
+        let expected_alpha_100: u64 = 2_000_000;
+        let stakes = make_stakes(&nodes, num_unstaked, &mut rng);
+        for _ in 0..ROTATE_CALLS {
+            active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey);
+        }
+        let alpha = alpha_of(&active_set);
+        assert!(
+            alpha.abs_diff(expected_alpha_100) <= TOLERANCE_MILLI,
+            "alpha={} did not converge to alpha_100={}",
+            alpha,
+            expected_alpha_100
+        );
+
+        // back to 0% unstaked → alpha_target = 1,000,000
+        let num_unstaked = 0;
+        let stakes = make_stakes(&nodes, num_unstaked, &mut rng);
+        for _ in 0..ROTATE_CALLS {
+            active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey);
+        }
+        let alpha = alpha_of(&active_set);
+        assert!(
+            alpha.abs_diff(expected_alpha_0) <= TOLERANCE_MILLI,
+            "alpha={} did not reconverge to alpha_0={}",
+            alpha,
+            expected_alpha_0
+        );
+    }
+
+    #[test]
+    fn test_alpha_progression_matches_expected() {
+        // Pins the exact fixed-point trajectory of the IIR filter, stepping
+        // down to ALPHA_MIN, up to ALPHA_MAX, and down again.
+        let mut alpha = ALPHA_MAX;
+        let target_down = ALPHA_MIN;
+        let target_up = ALPHA_MAX;
+        let filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, DEFAULT_TC_MS);
+        // Hoist the (non-Copy) filter config out of the loops; clone per call.
+        let config = lpf::FilterConfig {
+            output_range: ALPHA_MIN..ALPHA_MAX,
+            k: filter_k,
+        };
+        let step = |alpha: u64, target: u64| lpf::filter_alpha(alpha, target, config.clone());
+
+        // Expected values from rotating to 1,000,000 from 2,000,000.
+        // Arrays are typed u64 so no `as u64` cast is needed per element.
+        let expected_down: [u64; 8] = [
+            1_388_985, 1_151_309, 1_058_856, 1_022_894, 1_008_905, 1_003_463, 1_001_347, 1_000_523,
+        ];
+        for (i, expected) in expected_down.iter().enumerate() {
+            alpha = step(alpha, target_down);
+            assert_eq!(
+                alpha, *expected,
+                "step {}: alpha did not match expected during convergence down",
+                i
+            );
+        }
+
+        // Rotate upward from current alpha (1,000,523) to 2,000,000
+        let expected_up: [u64; 8] = [
+            1_611_218, 1_848_769, 1_941_173, 1_977_117, 1_991_098, 1_996_537, 1_998_652, 1_999_475,
+        ];
+        for (i, expected) in expected_up.iter().enumerate() {
+            alpha = step(alpha, target_up);
+            assert_eq!(
+                alpha, *expected,
+                "step {}: alpha did not match expected during convergence up",
+                i
+            );
+        }
+
+        // Rotate downward again from current alpha (1,999,475) to 1,000,000
+        let expected_down2: [u64; 8] = [
+            1_388_780, 1_151_229, 1_058_825, 1_022_882, 1_008_900, 1_003_461, 1_001_346, 1_000_523,
+        ];
+        for (i, expected) in expected_down2.iter().enumerate() {
+            alpha = step(alpha, target_down);
+            assert_eq!(
+                alpha, *expected,
+                "step {}: alpha did not match expected during final convergence down",
+                i
+            );
+        }
+    }
+
+    #[test]
+    fn test_record_size() {
+        // 58 = 1 (version) + 32 (authority) + 1 (weighting_mode) + 8 (tc_ms)
+        //    + 16 (reserved); must stay in sync with the on-chain record layout.
+        assert_eq!(
+            bincode::serialized_size(&WeightingConfig::default()).unwrap(),
+            58
+        );
+    }
+
+    #[test]
+    fn test_apply_cfg_static_to_static() {
+        // Static -> Static: No change
+        // (tc_ms argument of 0 is irrelevant in Static mode)
+        let mut active_set = PushActiveSet::new(WeightingMode::Static);
+        assert_eq!(active_set.mode, WeightingMode::Static);
+
+        active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0));
+        assert_eq!(active_set.mode, WeightingMode::Static);
+    }
+
+    #[test]
+    fn test_apply_cfg_dynamic_to_static() {
+        // Dynamic -> Static: Mode switch
+        // (any accumulated dynamic state — alpha, filter_k — is dropped)
+        let mut active_set = push_active_set_new_dynamic();
+        assert!(matches!(active_set.mode, WeightingMode::Dynamic { .. }));
+
+        active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0));
+        assert_eq!(active_set.mode, WeightingMode::Static);
+    }
+
+    #[test]
+    fn test_apply_cfg_static_to_dynamic() {
+        // Static -> Dynamic: Mode switch
+        let mut active_set = PushActiveSet::new(WeightingMode::Static);
+        assert_eq!(active_set.mode, WeightingMode::Static);
+
+        // tc_ms == 0 in the config means "use the default time constant".
+        let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0);
+        active_set.apply_cfg(&config);
+
+        match active_set.mode {
+            WeightingMode::Dynamic {
+                alpha,
+                filter_k,
+                tc_ms,
+            } => {
+                // Fresh Dynamic state: default alpha, default tc, and filter_k
+                // derived from the refresh interval and tc.
+                assert_eq!(alpha, DEFAULT_ALPHA);
+                assert_eq!(tc_ms, DEFAULT_TC_MS);
+                assert_eq!(
+                    filter_k,
+                    lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, DEFAULT_TC_MS)
+                );
+            }
+            WeightingMode::Static => panic!("Expected Dynamic mode after config change"),
+        }
+    }
+
+    #[test]
+    fn test_apply_cfg_dynamic_to_dynamic_same_tc() {
+        // Dynamic -> Dynamic (same tc): No change
+        let mut active_set = push_active_set_new_dynamic();
+        let original_mode = active_set.mode;
+
+        // tc_ms == 0 resolves to the default tc, which the set already uses.
+        let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0);
+        active_set.apply_cfg(&config);
+
+        // Mode should be unchanged since tc is the same
+        assert_eq!(active_set.mode, original_mode);
+    }
+
+    #[test]
+    fn test_apply_cfg_dynamic_to_dynamic_different_tc() {
+        // Dynamic -> Dynamic (different tc): Update filter parameters
+        let mut active_set = push_active_set_new_dynamic();
+
+        // Change to a different tc value
+        let new_tc_ms = 45_000;
+        let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, new_tc_ms);
+        active_set.apply_cfg(&config);
+
+        match active_set.mode {
+            WeightingMode::Dynamic {
+                alpha,
+                filter_k,
+                tc_ms,
+            } => {
+                // alpha is reset to the default; filter_k is recomputed for
+                // the new time constant.
+                assert_eq!(alpha, DEFAULT_ALPHA);
+                assert_eq!(tc_ms, new_tc_ms);
+                assert_eq!(
+                    filter_k,
+                    lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, new_tc_ms)
+                );
+            }
+            WeightingMode::Static => panic!("Expected Dynamic mode"),
+        }
+    }
+
+    #[test]
+    fn test_apply_cfg_multiple_transitions() {
+        // Test multiple config changes in sequence; each transition must be
+        // independent of how the set arrived at its current mode.
+        let mut active_set = PushActiveSet::new(WeightingMode::Static);
+
+        // Static -> Dynamic
+        active_set.apply_cfg(&WeightingConfig::new_for_test(
+            WEIGHTING_MODE_DYNAMIC,
+            20_000,
+        ));
+        assert!(matches!(
+            active_set.mode,
+            WeightingMode::Dynamic { tc_ms: 20_000, .. }
+        ));
+
+        // Dynamic -> Dynamic (change tc)
+        active_set.apply_cfg(&WeightingConfig::new_for_test(
+            WEIGHTING_MODE_DYNAMIC,
+            40_000,
+        ));
+        assert!(matches!(
+            active_set.mode,
+            WeightingMode::Dynamic { tc_ms: 40_000, .. }
+        ));
+
+        // Dynamic -> Static
+        active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0));
+        assert_eq!(active_set.mode, WeightingMode::Static);
+
+        // Static -> Dynamic (with default tc; tc_ms == 0 selects the default)
+        active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0));
+        assert!(
+            matches!(active_set.mode, WeightingMode::Dynamic { tc_ms, .. } if tc_ms == DEFAULT_TC_MS)
+        );
+    }
+
+    #[test]
+    fn test_apply_cfg_filter_k_computation() {
+        // Verify that filter_k is correctly computed for different tc values.
+        // The first iteration also exercises the Static -> Dynamic transition.
+        let mut active_set = PushActiveSet::new(WeightingMode::Static);
+
+        let test_cases = [10_000, 30_000, 60_000, 120_000];
+
+        for tc_ms in test_cases {
+            let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, tc_ms);
+            active_set.apply_cfg(&config);
+
+            let expected_filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, tc_ms);
+
+            match active_set.mode {
+                WeightingMode::Dynamic {
+                    filter_k,
+                    tc_ms: actual_tc_ms,
+                    ..
+                } => {
+                    assert_eq!(actual_tc_ms, tc_ms);
+                    assert_eq!(filter_k, expected_filter_k);
+                }
+                WeightingMode::Static => panic!("Expected Dynamic mode"),
+            }
+        }
+    }
+
+    #[test]
+    fn test_interpolate_t_zero() {
+        // When alpha = ALPHA_MIN (t = 0), should return base
+        // (interpolation blends between base and base^2 as alpha sweeps
+        //  from ALPHA_MIN to ALPHA_MAX — see test_interpolate_t_max).
+        assert_eq!(gossip_interpolate_weight(100, 100 * 100, ALPHA_MIN), 100);
+        assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MIN), 0);
+        assert_eq!(
+            gossip_interpolate_weight(1_000_000, 1_000_000 * 1_000_000, ALPHA_MIN),
+            1_000_000
+        );
+    }
+
+    #[test]
+    fn test_interpolate_t_max() {
+        // When alpha = ALPHA_MAX (t = SCALE), should return base^2
+        let base = 100;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MAX);
+        assert_eq!(result, base * base);
+
+        let base2 = 1000;
+        let result = gossip_interpolate_weight(base2, base2 * base2, ALPHA_MAX);
+        assert_eq!(result, base2 * base2);
+    }
+
+    #[test]
+    fn test_interpolate_values() {
+        // Linear interpolation between base and base^2, rounded to nearest:
+        // e.g. base=3: 3 + t*(9-3) → 3.6→4 (t=0.1), 6.0 (t=0.5), 7.5→8 (t=0.75).
+        let t_10 = lpf::SCALE.get() / 10; // 10%
+        let t_50 = lpf::SCALE.get() / 2; // 50%
+        let t_75 = lpf::SCALE.get() * 3 / 4; // 75%
+
+        let base = 3;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10);
+        assert_eq!(result, 4);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50);
+        assert_eq!(result, 6);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75);
+        assert_eq!(result, 8);
+
+        let base = 15;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10);
+        assert_eq!(result, 36);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50);
+        assert_eq!(result, 120);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75);
+        assert_eq!(result, 173);
+
+        let base = 24;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10);
+        assert_eq!(result, 79);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50);
+        assert_eq!(result, 300);
+
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75);
+        assert_eq!(result, 438);
+    }
+
+    #[test]
+    fn test_interpolate_large_base() {
+        // With base^2 = 10^18 (near u64 range) the midpoint must still land
+        // strictly between base and base^2 without overflowing.
+        let base = 1_000_000_000u64;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + lpf::SCALE.get() / 2);
+        assert!(result >= base);
+        assert!(result < base * base);
+    }
+
+    #[test]
+    fn test_interpolate_edge_cases() {
+        // Test with base = 1 (fixed point of squaring: 1^2 == 1, so the
+        // result is 1 for every alpha)
+        assert_eq!(gossip_interpolate_weight(1, 1, ALPHA_MIN), 1);
+        assert_eq!(gossip_interpolate_weight(1, 1, ALPHA_MAX), 1);
+        assert_eq!(
+            gossip_interpolate_weight(1, 1, ALPHA_MIN + (lpf::SCALE.get() / 2)),
+            1
+        );
+
+        // Test with base = 0
+        assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MIN), 0);
+        assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MAX), 0);
+        assert_eq!(
+            gossip_interpolate_weight(0, 0, ALPHA_MIN + (lpf::SCALE.get() / 2)),
+            0
+        );
+    }
+
+    #[test]
+    fn test_interpolate_rounding() {
+        // t = SCALE/3 is not exactly representable in fixed point; only bound
+        // the result to [base, base^2] rather than pinning an exact value.
+        let base = 3;
+        let t = lpf::SCALE.get() / 3;
+        let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t);
+
+        assert!(result >= 3);
+        assert!(result <= 9);
+    }
+
+    #[test]
+    fn test_integration_filter_and_interpolate() {
+        // Test using filtered alpha with interpolate
+        // Alpha range is [SCALE, 2*SCALE] as used in push_active_set
+        let alpha_min = lpf::SCALE.get();
+        let alpha_max = 2 * lpf::SCALE.get();
+
+        let config = lpf::FilterConfig {
+            output_range: alpha_min..alpha_max,
+            k: lpf::SCALE.get() / 10, // 10%
+        };
+
+        let prev_alpha = alpha_min + lpf::SCALE.get() / 4; // 1.25 * SCALE
+        let target_alpha = alpha_min + lpf::SCALE.get() / 2; // 1.5 * SCALE
+        // filter_alpha = 0.1 * 1.5*SCALE + 0.9 * 1.25*SCALE = 1.275 * SCALE
+        let filtered_alpha = lpf::filter_alpha(prev_alpha, target_alpha, config);
+
+        let base = 2;
+        let result = gossip_interpolate_weight(base, base * base, filtered_alpha);
+
+        assert!(result >= base);
+        assert!(result <= base * base);
+
+        assert!(filtered_alpha >= alpha_min);
+        assert!(filtered_alpha <= alpha_max);
+    }
+
+    #[test]
+    fn test_get_weight_specific_values() {
+        // Test get_weight with specific bucket=15 and alpha=1118676
+        let bucket = 15;
+        let alpha = 1118676;
+
+        // Verify alpha is in the valid range
+        assert!(alpha >= ALPHA_MIN);
+        assert!(alpha <= ALPHA_MAX);
+
+        let result = get_weight(bucket, alpha);
+
+        // Expected calculation:
+        // b = bucket + 1 = 16
+        // t = alpha - ALPHA_MIN = 1118676 - 1000000 = 118676
+        // interpolate(16, 118676) should return 44
+        // (16 + 0.118676 * (256 - 16) ≈ 44.48, rounded to 44)
+        assert_eq!(result, 44);
+    }
 }

+ 80 - 0
gossip/src/stake_weighting_config.rs

@@ -0,0 +1,80 @@
+use {
+    serde::{Deserialize, Serialize},
+    solana_account::ReadableAccount,
+    solana_runtime::bank::Bank,
+};
+
+/// Time-constant selection for the dynamic-weighting IIR filter.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub(crate) enum TimeConstant {
+    /// IIR time-constant (ms)
+    Value(u64),
+    /// Use the default time constant.
+    Default,
+}
+
+/// Actual on-chain state that controls the weighting of gossip nodes.
+/// Decoded with bincode, which encodes fields in declaration order — keep
+/// this layout in sync with the Record program account (58 bytes total,
+/// pinned by `test_record_size`).
+#[derive(Serialize, Deserialize, Debug, Default)]
+// NOTE(review): repr(C) fixes the in-memory layout only; the wire format is
+// determined by serde/bincode field order, not by repr.
+#[repr(C)]
+pub(crate) struct WeightingConfig {
+    _version: u8,           // This is part of Record program header
+    _authority: [u8; 32],   // This is part of Record program header
+    pub weighting_mode: u8, // 0 = Static, 1 = Dynamic
+    pub tc_ms: u64,         // IIR time constant in milliseconds; 0 = default
+    _future_use: [u8; 16],  // Reserved for future use
+}
+
+// Wire values of `WeightingConfig::weighting_mode`.
+pub const WEIGHTING_MODE_STATIC: u8 = 0;
+pub const WEIGHTING_MODE_DYNAMIC: u8 = 1;
+
+/// Typed, validated view of the raw on-chain [`WeightingConfig`].
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub(crate) enum WeightingConfigTyped {
+    Static,
+    Dynamic { tc: TimeConstant },
+}
+
+impl From<&WeightingConfig> for WeightingConfigTyped {
+    fn from(raw: &WeightingConfig) -> Self {
+        match raw.weighting_mode {
+            WEIGHTING_MODE_STATIC => WeightingConfigTyped::Static,
+            WEIGHTING_MODE_DYNAMIC => {
+                // tc_ms == 0 is the sentinel for "use the default".
+                let tc = if raw.tc_ms == 0 {
+                    TimeConstant::Default
+                } else {
+                    TimeConstant::Value(raw.tc_ms)
+                };
+                WeightingConfigTyped::Dynamic { tc }
+            }
+            // Unknown/unsupported mode bytes fail safe to Static weighting.
+            _ => WeightingConfigTyped::Static,
+        }
+    }
+}
+
+impl WeightingConfig {
+    /// Builds a config with zeroed Record-program header and reserved bytes;
+    /// only `weighting_mode` and `tc_ms` matter to the tests.
+    #[cfg(test)]
+    pub(crate) fn new_for_test(weighting_mode: u8, tc_ms: u64) -> Self {
+        Self {
+            _version: 0,
+            _authority: [0; 32],
+            weighting_mode,
+            tc_ms,
+            _future_use: [0; 16],
+        }
+    }
+}
+
+/// Address of the control account holding the serialized `WeightingConfig`
+/// ("goSw…" vanity key — gossip weight).
+mod weighting_config_control_pubkey {
+    solana_pubkey::declare_id!("goSwVUizoqNYKEaaiTjkgdN2RgLpvsTvFt1MEVGibY9");
+}
+
+/// Reads the gossip weighting config from the control account, if present.
+///
+/// Returns `None` when the account does not exist in the bank's ancestors,
+/// or when its data fails to deserialize (errors are swallowed via `.ok()`,
+/// so a malformed account silently disables config-driven weighting).
+pub(crate) fn get_gossip_config_from_account(bank: &Bank) -> Option<WeightingConfig> {
+    let data = bank
+        .accounts()
+        .accounts_db
+        .load_account_with(
+            &bank.ancestors,
+            &weighting_config_control_pubkey::id(),
+            |_| true, // no additional filtering on the loaded account
+        )?
+        .0;
+    bincode::deserialize::<WeightingConfig>(data.data()).ok()
+}

+ 3 - 0
gossip/tests/crds_gossip.rs

@@ -296,6 +296,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
             &node.ping_cache,
             &mut Vec::new(), // pings
             &SocketAddrSpace::Unspecified,
+            None,
         );
     });
     let mut total_bytes = bytes_tx;
@@ -468,6 +469,7 @@ fn network_run_push(
                     &node.ping_cache,
                     &mut Vec::new(), // pings
                     &SocketAddrSpace::Unspecified,
+                    None,
                 );
             });
         }
@@ -796,6 +798,7 @@ fn test_prune_errors() {
         &ping_cache,
         &mut Vec::new(), // pings
         &SocketAddrSpace::Unspecified,
+        None,
     );
     let now = timestamp();
     let stakes = HashMap::<Pubkey, u64>::default();

+ 17 - 0
low-pass-filter/Cargo.toml

@@ -0,0 +1,17 @@
+[package]
+name = "solana-low-pass-filter"
+description = "Low Pass Filter"
+version = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+publish = false
+
+[lib]
+path = "src/lib.rs"
+
+[features]
+agave-unstable-api = []
+
+[dependencies]

+ 217 - 0
low-pass-filter/src/lib.rs

@@ -0,0 +1,217 @@
+#![cfg(feature = "agave-unstable-api")]
+//! Fixed-point IIR filter for smoothing `alpha` updates.
+//!
+//! This is equivalent to a discrete-time Butterworth filter of order 1
+//! Implements:
+//!   alpha_new = K * target + (1 - K) * previous
+//!
+//! All math is unsigned integer fixed-point with `SCALE = 1,000,000`
+//!
+//! The filter constant K is derived from:
+//!     K = Wc / (1 + Wc), where Wc = 2π * Ts / Tc (normalized cutoff, radians/sample)
+//!     Fc = 1 / Tc  (cutoff frequency)
+//!     Ts = refresh interval (sample period)
+pub mod api {
+    use std::num::NonZeroU64;
+
+    // Fixed point scale for K and `alpha` calculation
+    pub const SCALE: NonZeroU64 = NonZeroU64::new(1_000_000).unwrap();
+    // 2 * pi * SCALE
+    const TWO_PI_SCALED: u64 = (2.0 * std::f64::consts::PI * SCALE.get() as f64) as u64;
+
+    /// Parameters for one `filter_alpha` step.
+    #[derive(Clone)]
+    pub struct FilterConfig {
+        /// Clamp bounds for the filter output.
+        /// NOTE(review): `end` is used as an *inclusive* upper bound by
+        /// `filter_alpha`, despite `Range`'s usual end-exclusive convention.
+        pub output_range: std::ops::Range<u64>,
+        /// Filter constant K scaled by `SCALE` (0 = output frozen,
+        /// SCALE = output jumps straight to target).
+        pub k: u64,
+    }
+
+    /// Computes the filter constant `K` for a given sample period and
+    /// time‑constant, both in **milliseconds**.
+    ///
+    /// Returns `K` scaled by `SCALE` (0–1,000,000).
+    /// Returns 0 when `tc_ms == 0` (the filter output never moves).
+    #[allow(clippy::arithmetic_side_effects)]
+    pub fn compute_k(fs_ms: u64, tc_ms: u64) -> u64 {
+        if tc_ms == 0 {
+            return 0;
+        }
+        let scale = SCALE.get();
+        // wc_scaled = 2π * fs_ms / tc_ms in SCALE fixed point; saturating ops
+        // guard against overflow on extreme inputs (K is capped at SCALE).
+        let wc_scaled = (TWO_PI_SCALED.saturating_mul(fs_ms)).saturating_div(tc_ms);
+        // ((wc_scaled * scale + scale / 2) / (scale + wc_scaled)).min(scale) rounded to nearest integer
+        ((wc_scaled
+            .saturating_mul(scale)
+            .saturating_add(scale.saturating_div(2)))
+        .saturating_div(scale.saturating_add(wc_scaled)))
+        .min(scale)
+    }
+
+    /// Updates alpha with a first-order low-pass filter.
+    /// ### Convergence Characteristics (w/ K = 0.611):
+    ///
+    /// - From a step change in target, `alpha` reaches:
+    ///   - ~61% of the way to target after 1 update
+    ///   - ~85% after 2
+    ///   - ~94% after 3
+    ///   - ~98% after 4
+    ///   - ~99% after 5
+    ///
+    /// Note: Each update is `fs_ms` apart. `fs_ms` is 7500ms for push_active_set.
+    ///
+    /// If future code changes make `alpha_target` jump larger, we must retune
+    /// `TC`/`K` or use a higher‑order filter to avoid lag/overshoot.
+    /// Returns `alpha_new = K * target + (1 - K) * prev`, rounded and clamped.
+    #[allow(clippy::arithmetic_side_effects)]
+    pub fn filter_alpha(prev: u64, target: u64, filter_config: FilterConfig) -> u64 {
+        let scale = SCALE.get();
+        // (k * target + (scale - k) * prev) / scale
+        // Saturating mul/add means huge inputs degrade to u64::MAX / scale
+        // rather than wrapping (see overflow-protection tests).
+        let next = (filter_config.k.saturating_mul(target))
+            .saturating_add((scale.saturating_sub(filter_config.k)).saturating_mul(prev))
+            .saturating_div(scale);
+        // Clamps into [start, end] — the range end is treated as inclusive.
+        next.clamp(
+            filter_config.output_range.start,
+            filter_config.output_range.end,
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::api::*;
+
+    #[test]
+    fn test_compute_k_zero_tc() {
+        // When time constant is 0, K should be 0
+        assert_eq!(compute_k(100, 0), 0);
+        assert_eq!(compute_k(1000, 0), 0);
+        assert_eq!(compute_k(u64::MAX, 0), 0);
+    }
+
+    #[test]
+    fn test_compute_k_zero_fs() {
+        // When sample frequency is 0, K should be 0
+        // (fs_ms == 0 makes wc_scaled == 0, so the rounded quotient is 0)
+        assert_eq!(compute_k(0, 100), 0);
+        assert_eq!(compute_k(0, 1000), 0);
+        assert_eq!(compute_k(0, u64::MAX), 0);
+    }
+
+    #[test]
+    fn test_compute_k_large_values() {
+        // K should never exceed SCALE, even when the intermediate
+        // saturating multiplications hit u64::MAX
+        let k = compute_k(u64::MAX, 1);
+        assert!(k <= SCALE.get());
+
+        let k = compute_k(1000000, 1);
+        assert!(k <= SCALE.get());
+
+        let k = compute_k(u64::MAX / 2, u64::MAX / 4);
+        assert!(k <= SCALE.get());
+
+        let k = compute_k(500000000, 1000000000);
+        assert!(k <= SCALE.get());
+    }
+
+    #[test]
+    fn test_compute_k_normal_cases() {
+        // Test some normal cases; K grows as the sample period grows
+        // relative to the time constant
+        let k1 = compute_k(100, 1000);
+        assert_eq!(k1, 385869);
+
+        let k2 = compute_k(1000, 100);
+        assert_eq!(k2, 984333);
+        assert!(k2 > k1);
+
+        let k3 = compute_k(1000, 1000);
+        assert_eq!(k3, 862697);
+    }
+
+    #[test]
+    fn test_filter_alpha_k_zero() {
+        // When K=0, alpha should not change
+        let config = FilterConfig {
+            output_range: 0..1000000,
+            k: 0,
+        };
+
+        assert_eq!(filter_alpha(100, 500, config.clone()), 100);
+        assert_eq!(filter_alpha(0, 1000000, config.clone()), 0);
+        assert_eq!(filter_alpha(999999, 0, config), 999999);
+    }
+
+    #[test]
+    fn test_filter_alpha_k_max() {
+        // When K=SCALE, alpha should equal target value (clamped to range)
+        let config = FilterConfig {
+            output_range: 0..1000000,
+            k: SCALE.get(),
+        };
+
+        assert_eq!(filter_alpha(100, 500, config.clone()), 500);
+        assert_eq!(filter_alpha(0, 1000000, config), 1000000);
+
+        // Test clamping - target outside range
+        let config = FilterConfig {
+            output_range: 100..900,
+            k: SCALE.get(),
+        };
+        assert_eq!(filter_alpha(200, 50, config.clone()), 100);
+        assert_eq!(filter_alpha(200, 1000, config), 900);
+    }
+
+    #[test]
+    fn test_filter_alpha_clamping() {
+        // Test output range clamping
+        let config = FilterConfig {
+            output_range: 100..900,
+            k: SCALE.get() / 2,
+        };
+
+        // This should be within range
+        let result = filter_alpha(950, 50, config);
+        assert_eq!(result, 500);
+
+        // Test extreme clamping: 0.25*1,000,000 + 0.75*0 = 250,000, which
+        // clamps to 501 — the Range end is an inclusive bound here.
+        let config_narrow = FilterConfig {
+            output_range: 500..501,
+            k: SCALE.get() / 4,
+        };
+        let result = filter_alpha(0, 1000000, config_narrow);
+        assert_eq!(result, 501);
+    }
+
+    #[test]
+    fn test_filter_alpha_overflow_protection() {
+        // Test with large values that might cause overflow
+        let config = FilterConfig {
+            output_range: 0..u64::MAX,
+            k: SCALE.get() / 2,
+        };
+
+        // Both products saturate at u64::MAX, so the result degrades to
+        // u64::MAX / SCALE = 18_446_744_073_709 instead of wrapping.
+        let result = filter_alpha(u64::MAX / 2, u64::MAX / 2, config.clone());
+        assert_eq!(result, 18446744073709);
+
+        let result2 = filter_alpha(u64::MAX - 1000, u64::MAX - 2000, config);
+        assert_eq!(result2, 18446744073709);
+    }
+
+    #[test]
+    fn test_filter_alpha_mathematical_correctness() {
+        let config = FilterConfig {
+            output_range: 0..u64::MAX,
+            k: SCALE.get() / 4, // 25%
+        };
+
+        // 0.25*400 + 0.75*800 = 700
+        let prev = 800;
+        let target = 400;
+        let result = filter_alpha(prev, target, config);
+        assert_eq!(result, 700);
+
+        let config = FilterConfig {
+            output_range: 0..u64::MAX,
+            k: SCALE.get() * 60 / 100, // 60%
+        };
+
+        // 0.6*222222 + 0.4*111111 = 177777 (with fixed-point truncation)
+        let prev = 111111;
+        let target = 222222;
+        let result = filter_alpha(prev, target, config);
+        assert_eq!(result, 177777);
+    }
+}

+ 6 - 0
programs/sbf/Cargo.lock

@@ -6732,6 +6732,7 @@ dependencies = [
  "serde_bytes",
  "serde_derive",
  "siphasher 1.0.1",
+ "solana-account",
  "solana-bloom",
  "solana-clap-utils",
  "solana-client",
@@ -6743,6 +6744,7 @@ dependencies = [
  "solana-keypair",
  "solana-ledger",
  "solana-logger 3.0.0",
+ "solana-low-pass-filter",
  "solana-measure",
  "solana-metrics",
  "solana-native-token",
@@ -7103,6 +7105,10 @@ dependencies = [
  "signal-hook",
 ]
 
+[[package]]
+name = "solana-low-pass-filter"
+version = "3.0.0"
+
 [[package]]
 name = "solana-measure"
 version = "3.0.0"