@@ -251,6 +251,7 @@ pub fn run_kill_partition_switch_threshold<C>(
         on_before_partition_resolved,
         on_partition_resolved,
         ticks_per_slot,
+        true,
         vec![],
     )
 }
@@ -287,13 +288,17 @@ pub fn create_custom_leader_schedule_with_random_keys(
 }
 
 /// This function runs a network, initiates a partition based on a
-/// configuration, resolve the partition, then checks that the network
-/// continues to achieve consensus
-/// # Arguments
+/// configuration, resolves the partition, then checks that the network continues
+/// to achieve consensus.
+///
+/// # Arguments:
 /// * `partitions` - A slice of partition configurations, where each partition
 ///   configuration is a usize representing a node's stake
 /// * `leader_schedule` - An option that specifies whether the cluster should
 ///   run with a fixed, predetermined leader schedule
+/// * `no_wait_for_vote_to_start_leader` - provides the option to only allow the
+///   bootstrap node to build blocks at first, to minimize forking during cluster
+///   startup.
 #[allow(clippy::cognitive_complexity)]
 pub fn run_cluster_partition<C>(
     partitions: &[usize],
@@ -303,6 +308,7 @@ pub fn run_cluster_partition<C>(
     on_before_partition_resolved: impl FnOnce(&mut LocalCluster, &mut C),
     on_partition_resolved: impl FnOnce(&mut LocalCluster, &mut C),
     ticks_per_slot: Option<u64>,
+    no_wait_for_vote_to_start_leader: bool,
     additional_accounts: Vec<(Pubkey, AccountSharedData)>,
 ) {
     agave_logger::setup_with_default(RUST_LOG_FILTER);
@@ -315,9 +321,23 @@ pub fn run_cluster_partition<C>(
     assert_eq!(node_stakes.len(), num_nodes);
     let mint_lamports = node_stakes.iter().sum::<u64>() * 2;
     let turbine_disabled = Arc::new(AtomicBool::new(false));
+    let wait_for_supermajority = if no_wait_for_vote_to_start_leader {
+        // This helps nodes get a little more in sync by waiting for
+        // supermajority to observe slot 0. It still doesn't provide perfect
+        // synchronization because there is quite a bit of work to do after the
+        // sync point before nodes are fully operational.
+        Some(0)
+    } else {
+        // If we sync nodes on a slot, this overrides the flag to wait on
+        // building blocks until the node votes. But waiting for the bootstrap
+        // to build a block provides greater synchronization (less
+        // partitioning), so let that take precedence for these tests.
+        None
+    };
     let mut validator_config = ValidatorConfig {
         turbine_disabled: turbine_disabled.clone(),
-        wait_for_supermajority: Some(0),
+        wait_for_supermajority,
+        no_wait_for_vote_to_start_leader,
         ..ValidatorConfig::default_for_test()
     };
 
@@ -344,11 +364,17 @@ pub fn run_cluster_partition<C>(
         }
     };
 
+    // Always ensure at least one node is allowed to build blocks.
+    let mut validator_configs = make_identical_validator_configs(&validator_config, num_nodes);
+    validator_configs
+        .first_mut()
+        .unwrap()
+        .no_wait_for_vote_to_start_leader = true;
     let slots_per_epoch = 2048;
     let mut config = ClusterConfig {
         mint_lamports,
         node_stakes,
-        validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
+        validator_configs,
         validator_keys: Some(
             validator_keys
                 .into_iter()
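
To make the relationship between the new `no_wait_for_vote_to_start_leader` argument and `wait_for_supermajority` concrete, here is a minimal, self-contained sketch of the selection logic added above. The helper name `startup_sync_settings` and the standalone `main` are illustrative assumptions, not part of the change itself:

// Sketch only: mirrors the branch added to `run_cluster_partition`; the
// function name is hypothetical.
fn startup_sync_settings(no_wait_for_vote_to_start_leader: bool) -> (Option<u64>, bool) {
    let wait_for_supermajority = if no_wait_for_vote_to_start_leader {
        // Every node may build blocks immediately, so rough startup
        // synchronization comes from waiting for supermajority at slot 0.
        Some(0)
    } else {
        // Non-bootstrap nodes wait for their votes to land before building
        // blocks; setting wait_for_supermajority here would override that.
        None
    };
    (wait_for_supermajority, no_wait_for_vote_to_start_leader)
}

fn main() {
    assert_eq!(startup_sync_settings(true), (Some(0), true));
    assert_eq!(startup_sync_settings(false), (None, false));
}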