Brennan 3 months ago
parent
commit
37fa8c8cb2
100 files changed, 9187 additions, 1069 deletions
  1. .buildkite/alpenglow/pipeline.sh (+46, -0)
  2. .github/dependabot.yml (+0, -16)
  3. Cargo.lock (+322, -19)
  4. Cargo.toml (+18, -0)
  5. build-alpenglow-vote/Cargo.toml (+25, -0)
  6. build-alpenglow-vote/build.rs (+159, -0)
  7. build-alpenglow-vote/src/lib.rs (+12, -0)
  8. ci/docker-run.sh (+7, -1)
  9. ci/stable/run-local-cluster-partially.sh (+9, -13)
  10. ci/stable/run-localnet.sh (+1, -1)
  11. ci/stable/run-partition.sh (+17, -13)
  12. clap-utils/Cargo.toml (+1, -0)
  13. clap-utils/src/input_parsers.rs (+32, -0)
  14. cli/Cargo.toml (+7, -2)
  15. cli/src/cli.rs (+5, -0)
  16. cli/src/stake.rs (+20, -1)
  17. cli/src/vote.rs (+243, -87)
  18. cli/tests/stake.rs (+1, -0)
  19. cli/tests/vote.rs (+2, -0)
  20. core/Cargo.toml (+6, -0)
  21. core/benches/consumer.rs (+3, -11)
  22. core/benches/receive_and_buffer_utils.rs (+2, -1)
  23. core/benches/sigverify_stage.rs (+1, -1)
  24. core/src/admin_rpc_post_init.rs (+2, -0)
  25. core/src/alpenglow_consensus/bls_vote_transaction.rs (+56, -0)
  26. core/src/alpenglow_consensus/skip_pool.rs (+604, -0)
  27. core/src/alpenglow_consensus/transaction.rs (+25, -0)
  28. core/src/banking_simulation.rs (+7, -0)
  29. core/src/banking_stage.rs (+79, -4)
  30. core/src/banking_stage/consumer.rs (+110, -1)
  31. core/src/banking_stage/decision_maker.rs (+119, -9)
  32. core/src/banking_stage/leader_slot_metrics.rs (+7, -2)
  33. core/src/block_creation_loop.rs (+720, -0)
  34. core/src/cluster_info_vote_listener.rs (+75, -65)
  35. core/src/commitment_service.rs (+75, -26)
  36. core/src/consensus.rs (+47, -33)
  37. core/src/fetch_stage.rs (+38, -3)
  38. core/src/lib.rs (+3, -1)
  39. core/src/repair/ancestor_hashes_service.rs (+10, -0)
  40. core/src/repair/certificate_service.rs (+124, -0)
  41. core/src/repair/cluster_slot_state_verifier.rs (+1, -1)
  42. core/src/repair/mod.rs (+1, -0)
  43. core/src/repair/repair_service.rs (+12, -7)
  44. core/src/replay_stage.rs (+606, -453)
  45. core/src/sigverifier/bls_sigverifier.rs (+427, -0)
  46. core/src/sigverifier/bls_sigverifier/stats.rs (+107, -0)
  47. core/src/sigverifier/ed25519_sigverifier.rs (+0, -0)
  48. core/src/sigverifier/mod.rs (+2, -0)
  49. core/src/sigverify_stage.rs (+10, -5)
  50. core/src/staked_validators_cache.rs (+887, -0)
  51. core/src/tpu.rs (+34, -2)
  52. core/src/tvu.rs (+58, -5)
  53. core/src/validator.rs (+274, -65)
  54. core/src/vote_simulator.rs (+23, -18)
  55. core/src/voting_service.rs (+409, -37)
  56. core/src/window_service.rs (+20, -3)
  57. core/tests/unified_scheduler.rs (+19, -22)
  58. curves/bls12-381/Cargo.toml (+25, -0)
  59. curves/bls12-381/src/errors.rs (+13, -0)
  60. curves/bls12-381/src/g1.rs (+370, -0)
  61. curves/bls12-381/src/g2.rs (+406, -0)
  62. curves/bls12-381/src/lib.rs (+7, -0)
  63. curves/bls12-381/src/scalar.rs (+5, -0)
  64. curves/curve-traits/Cargo.toml (+15, -0)
  65. curves/curve-traits/src/lib.rs (+20, -2)
  66. curves/curve25519/Cargo.toml (+1, -0)
  67. curves/curve25519/src/edwards.rs (+2, -5)
  68. curves/curve25519/src/lib.rs (+0, -1)
  69. curves/curve25519/src/ristretto.rs (+2, -5)
  70. genesis/Cargo.toml (+4, -0)
  71. genesis/src/lib.rs (+1, -0)
  72. genesis/src/main.rs (+102, -12)
  73. gossip/Cargo.toml (+1, -1)
  74. gossip/src/cluster_info.rs (+26, -18)
  75. gossip/src/contact_info.rs (+18, -0)
  76. gossip/tests/gossip.rs (+1, -0)
  77. keygen/Cargo.toml (+4, -0)
  78. keygen/src/keygen.rs (+67, -0)
  79. ledger-tool/src/main.rs (+8, -2)
  80. ledger/Cargo.toml (+4, -0)
  81. ledger/src/blockstore.rs (+64, -5)
  82. ledger/src/blockstore/blockstore_purge.rs (+8, -0)
  83. ledger/src/blockstore/column.rs (+18, -0)
  84. ledger/src/blockstore_db.rs (+3, -1)
  85. ledger/src/blockstore_meta.rs (+38, -1)
  86. ledger/src/blockstore_processor.rs (+148, -3)
  87. ledger/src/genesis_utils.rs (+1, -0)
  88. ledger/src/leader_schedule_utils.rs (+21, -0)
  89. local-cluster/Cargo.toml (+7, -1)
  90. local-cluster/src/cluster_tests.rs (+148, -17)
  91. local-cluster/src/integration_tests.rs (+148, -20)
  92. local-cluster/src/local_cluster.rs (+62, -6)
  93. local-cluster/src/validator_configs.rs (+5, -5)
  94. local-cluster/tests/local_cluster.rs (+1418, -29)
  95. metrics/src/datapoint.rs (+8, -0)
  96. multinode-demo/setup.sh (+3, -0)
  97. multinode-demo/validator.sh (+11, -2)
  98. net-utils/src/sockets.rs (+2, -2)
  99. net/net.sh (+26, -3)
  100. net/remote/remote-node.sh (+16, -0)

+ 46 - 0
.buildkite/alpenglow/pipeline.sh

@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+cat <<EOF | tee /dev/tty | buildkite-agent pipeline upload
+steps:
+  - name: "checks"
+    command: "ci/docker-run-default-image.sh ci/test-checks.sh"
+    timeout_in_minutes: 20
+    agents:
+      queue: "default"
+
+  - name: "frozen-abi"
+    command: "ci/docker-run-default-image.sh ./test-abi.sh"
+    timeout_in_minutes: 15
+    agents:
+      queue: "default"
+
+  - wait
+
+  - group: "stable"
+    steps:
+      - name: "partitions"
+        command: "ci/docker-run-default-image.sh ci/stable/run-partition.sh"
+        timeout_in_minutes: 40
+        agents:
+          queue: "default"
+        parallelism: 3
+        retry:
+          automatic:
+            - limit: 3
+
+      - name: "local-cluster"
+        command: "ci/docker-run-default-image.sh ci/stable/run-local-cluster-partially.sh"
+        timeout_in_minutes: 30
+        agents:
+          queue: "default"
+        parallelism: 4
+        retry:
+          automatic:
+            - limit: 3
+
+      - name: "localnet"
+        command: "ci/docker-run-default-image.sh ci/stable/run-localnet.sh"
+        timeout_in_minutes: 30
+        agents:
+          queue: "default"
+EOF

+ 0 - 16
.github/dependabot.yml

@@ -1,16 +0,0 @@
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
-
-version: 2
-updates:
-- package-ecosystem: cargo
-  directory: "/"
-  schedule:
-    interval: daily
-    time: "01:00"
-    timezone: America/Los_Angeles
-  #labels:
-  #  - "automerge"
-  open-pull-requests-limit: 6

+ 322 - 19
Cargo.lock

@@ -112,7 +112,7 @@ dependencies = [
  "tar",
  "tempfile",
  "tokio",
- "toml 0.8.12",
+ "toml 0.8.20",
 ]
 
 [[package]]
@@ -352,9 +352,11 @@ dependencies = [
  "solana-account-info",
  "solana-big-mod-exp",
  "solana-blake3-hasher",
+ "solana-bls12-381",
  "solana-bn254",
  "solana-clock",
  "solana-cpi",
+ "solana-curve-traits",
  "solana-curve25519 3.0.0",
  "solana-epoch-rewards",
  "solana-epoch-schedule",
@@ -408,7 +410,7 @@ dependencies = [
  "solana-metrics",
  "thread-priority",
  "tokio",
- "toml 0.8.12",
+ "toml 0.8.20",
  "tower 0.5.2",
 ]
 
@@ -515,6 +517,7 @@ dependencies = [
  "solana-validator-exit",
  "solana-version",
  "solana-vote-program",
+ "solana-votor",
  "spl-generic-token",
  "spl-token-2022",
  "symlink",
@@ -630,6 +633,31 @@ version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
 
+[[package]]
+name = "alpenglow-vote"
+version = "0.1.0"
+source = "git+https://github.com/solana-program/alpenglow-vote.git?rev=cdae90a#cdae90a3c3f57f56bfa6f4c7e05d5b77b83926f1"
+dependencies = [
+ "bincode",
+ "bitflags 2.9.1",
+ "bitvec",
+ "bytemuck",
+ "either",
+ "num-derive",
+ "num-traits",
+ "num_enum",
+ "serde",
+ "serde_derive",
+ "solana-account",
+ "solana-bls-signatures",
+ "solana-hash",
+ "solana-program",
+ "solana-signature",
+ "solana-vote-interface",
+ "spl-pod",
+ "thiserror 2.0.12",
+]
+
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -1304,6 +1332,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
 dependencies = [
  "funty",
  "radium",
+ "serde",
  "tap",
  "wyz",
 ]
@@ -1361,6 +1390,34 @@ dependencies = [
  "byte-tools",
 ]
 
+[[package]]
+name = "blst"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d"
+dependencies = [
+ "cc",
+ "glob",
+ "threadpool",
+ "zeroize",
+]
+
+[[package]]
+name = "blstrs"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29"
+dependencies = [
+ "blst",
+ "byte-slice-cast",
+ "ff",
+ "group",
+ "pairing",
+ "rand_core 0.6.4",
+ "serde",
+ "subtle",
+]
+
 [[package]]
 name = "borsh"
 version = "0.10.3"
@@ -1486,6 +1543,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "build-print"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a2128d00b7061b82b72844a351e80acd29e05afc60e9261e2ac90dca9ecc2ac"
+
 [[package]]
 name = "bumpalo"
 version = "3.12.0"
@@ -1502,6 +1565,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "byte-slice-cast"
+version = "1.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d"
+
 [[package]]
 name = "byte-tools"
 version = "0.3.1"
@@ -2606,9 +2675,9 @@ dependencies = [
 
 [[package]]
 name = "either"
-version = "1.11.0"
+version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
+checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d"
 
 [[package]]
 name = "encode_unicode"
@@ -2803,6 +2872,17 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da"
 
+[[package]]
+name = "ff"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393"
+dependencies = [
+ "bitvec",
+ "rand_core 0.6.4",
+ "subtle",
+]
+
 [[package]]
 name = "fiat-crypto"
 version = "0.2.9"
@@ -3174,9 +3254,9 @@ checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
 
 [[package]]
 name = "glob"
-version = "0.3.0"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
+checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
 
 [[package]]
 name = "globset"
@@ -3230,6 +3310,19 @@ dependencies = [
  "spinning_top",
 ]
 
+[[package]]
+name = "group"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
+dependencies = [
+ "ff",
+ "rand 0.8.5",
+ "rand_core 0.6.4",
+ "rand_xorshift 0.3.0",
+ "subtle",
+]
+
 [[package]]
 name = "h2"
 version = "0.3.26"
@@ -4927,6 +5020,15 @@ version = "3.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"
 
+[[package]]
+name = "pairing"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f"
+dependencies = [
+ "group",
+]
+
 [[package]]
 name = "parity-tokio-ipc"
 version = "0.9.0"
@@ -5372,7 +5474,7 @@ dependencies = [
  "num-traits",
  "rand 0.9.0",
  "rand_chacha 0.9.0",
- "rand_xorshift",
+ "rand_xorshift 0.4.0",
  "regex-syntax",
  "rusty-fork",
  "tempfile",
@@ -5706,6 +5808,15 @@ dependencies = [
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "rand_xorshift"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f"
+dependencies = [
+ "rand_core 0.6.4",
+]
+
 [[package]]
 name = "rand_xorshift"
 version = "0.4.0"
@@ -6385,9 +6496,9 @@ dependencies = [
 
 [[package]]
 name = "serde_spanned"
-version = "0.6.5"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
+checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
 dependencies = [
  "serde",
 ]
@@ -7239,6 +7350,43 @@ dependencies = [
  "solana-time-utils",
 ]
 
+[[package]]
+name = "solana-bls-signatures"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af089f712fb5cbef2d73ac7ffee9cad05ada6cd1fd5c812338df040bcd3c410b"
+dependencies = [
+ "base64 0.22.1",
+ "blst",
+ "blstrs",
+ "bytemuck",
+ "cfg_eval",
+ "ff",
+ "group",
+ "rand 0.8.5",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-signature",
+ "solana-signer",
+ "subtle",
+ "thiserror 2.0.12",
+]
+
+[[package]]
+name = "solana-bls12-381"
+version = "3.0.0"
+dependencies = [
+ "blst",
+ "bytemuck",
+ "bytemuck_derive",
+ "solana-curve-traits",
+ "solana-define-syscall",
+ "thiserror 2.0.12",
+]
+
 [[package]]
 name = "solana-bn254"
 version = "2.2.2"
@@ -7276,11 +7424,19 @@ dependencies = [
  "rand 0.8.5",
  "solana-account",
  "solana-bincode",
+ "solana-blake3-hasher",
+ "solana-bls12-381",
+ "solana-bn254",
  "solana-bpf-loader-program",
  "solana-clock",
+ "solana-compute-budget",
+ "solana-cpi",
+ "solana-curve-traits",
+ "solana-curve25519 3.0.0",
  "solana-epoch-rewards",
  "solana-epoch-schedule",
  "solana-fee-calculator",
+ "solana-hash",
  "solana-instruction",
  "solana-last-restart-slot",
  "solana-loader-v3-interface",
@@ -7346,6 +7502,16 @@ dependencies = [
  "tempfile",
 ]
 
+[[package]]
+name = "solana-build-alpenglow-vote"
+version = "3.0.0"
+dependencies = [
+ "build-print",
+ "glob",
+ "serde",
+ "toml 0.8.20",
+]
+
 [[package]]
 name = "solana-builtins"
 version = "3.0.0"
@@ -7426,6 +7592,7 @@ dependencies = [
  "chrono",
  "clap 2.33.3",
  "rpassword",
+ "solana-bls-signatures",
  "solana-clock",
  "solana-cluster-type",
  "solana-commitment-config",
@@ -7486,6 +7653,7 @@ version = "3.0.0"
 dependencies = [
  "agave-feature-set",
  "agave-syscalls",
+ "alpenglow-vote",
  "assert_matches",
  "bincode",
  "bs58",
@@ -7508,7 +7676,9 @@ dependencies = [
  "solana-account",
  "solana-account-decoder",
  "solana-address-lookup-table-interface",
+ "solana-bls-signatures",
  "solana-borsh",
+ "solana-bpf-loader-program",
  "solana-clap-utils",
  "solana-cli-config",
  "solana-cli-output",
@@ -7569,7 +7739,9 @@ dependencies = [
  "solana-transaction-status-client-types",
  "solana-udp-client",
  "solana-version",
+ "solana-vote",
  "solana-vote-program",
+ "solana-votor-messages",
  "spl-memo",
  "tempfile",
  "test-case",
@@ -7914,12 +8086,14 @@ dependencies = [
  "agave-transaction-view",
  "agave-verified-packet-receiver",
  "ahash 0.8.11",
+ "alpenglow-vote",
  "anyhow",
  "arrayvec",
  "assert_matches",
  "async-trait",
  "base64 0.22.1",
  "bincode",
+ "bitvec",
  "bs58",
  "bytes",
  "chrono",
@@ -7957,6 +8131,7 @@ dependencies = [
  "solana-address-lookup-table-interface",
  "solana-bincode",
  "solana-bloom",
+ "solana-bls-signatures",
  "solana-bpf-loader-program",
  "solana-builtins-default-costs",
  "solana-client",
@@ -8039,6 +8214,8 @@ dependencies = [
  "solana-version",
  "solana-vote",
  "solana-vote-program",
+ "solana-votor",
+ "solana-votor-messages",
  "solana-wen-restart",
  "spl-memo",
  "static_assertions",
@@ -8115,6 +8292,10 @@ dependencies = [
  "solana-stable-layout",
 ]
 
+[[package]]
+name = "solana-curve-traits"
+version = "3.0.0"
+
 [[package]]
 name = "solana-curve25519"
 version = "2.2.15"
@@ -8136,6 +8317,7 @@ dependencies = [
  "bytemuck",
  "bytemuck_derive",
  "curve25519-dalek 4.1.3",
+ "solana-curve-traits",
  "solana-define-syscall",
  "subtle",
  "thiserror 2.0.12",
@@ -8519,6 +8701,7 @@ name = "solana-genesis"
 version = "3.0.0"
 dependencies = [
  "agave-feature-set",
+ "alpenglow-vote",
  "base64 0.22.1",
  "bincode",
  "clap 2.33.3",
@@ -8528,6 +8711,7 @@ dependencies = [
  "serde_yaml 0.9.34+deprecated",
  "solana-account",
  "solana-accounts-db",
+ "solana-bls-signatures",
  "solana-borsh",
  "solana-clap-utils",
  "solana-cli-config",
@@ -8556,7 +8740,9 @@ dependencies = [
  "solana-stake-program",
  "solana-time-utils",
  "solana-version",
+ "solana-vote",
  "solana-vote-program",
+ "solana-votor-messages",
  "tempfile",
 ]
 
@@ -8805,11 +8991,13 @@ dependencies = [
 name = "solana-keygen"
 version = "3.0.0"
 dependencies = [
+ "alpenglow-vote",
  "bs58",
  "clap 3.2.23",
  "dirs-next",
  "num_cpus",
  "serde_json",
+ "solana-bls-signatures",
  "solana-clap-v3-utils",
  "solana-cli-config",
  "solana-derivation-path",
@@ -8821,6 +9009,8 @@ dependencies = [
  "solana-seed-derivable",
  "solana-signer",
  "solana-version",
+ "solana-vote",
+ "solana-votor-messages",
  "tempfile",
  "tiny-bip39",
 ]
@@ -8876,6 +9066,7 @@ version = "3.0.0"
 dependencies = [
  "agave-feature-set",
  "agave-reserved-account-keys",
+ "alpenglow-vote",
  "anyhow",
  "assert_matches",
  "bincode",
@@ -8963,8 +9154,11 @@ dependencies = [
  "solana-transaction-status",
  "solana-vote",
  "solana-vote-program",
+ "solana-votor-messages",
  "spl-generic-token",
  "spl-pod",
+ "spl-token",
+ "spl-token-2022",
  "static_assertions",
  "strum",
  "strum_macros",
@@ -9052,6 +9246,7 @@ name = "solana-local-cluster"
 version = "3.0.0"
 dependencies = [
  "assert_matches",
+ "bincode",
  "crossbeam-channel",
  "fs_extra",
  "gag",
@@ -9062,10 +9257,13 @@ dependencies = [
  "serial_test",
  "solana-account",
  "solana-accounts-db",
+ "solana-bls-signatures",
+ "solana-build-alpenglow-vote",
  "solana-client",
  "solana-client-traits",
  "solana-clock",
  "solana-commitment-config",
+ "solana-connection-cache",
  "solana-core",
  "solana-download-utils",
  "solana-entry",
@@ -9107,6 +9305,8 @@ dependencies = [
  "solana-vote",
  "solana-vote-interface",
  "solana-vote-program",
+ "solana-votor",
+ "solana-votor-messages",
  "static_assertions",
  "strum",
  "tempfile",
@@ -9371,6 +9571,7 @@ dependencies = [
  "solana-transaction",
  "solana-vote",
  "solana-vote-program",
+ "solana-votor-messages",
  "test-case",
  "tikv-jemallocator",
 ]
@@ -10204,6 +10405,7 @@ dependencies = [
  "num-traits",
  "num_cpus",
  "num_enum",
+ "parking_lot 0.12.3",
  "percentage",
  "qualifier_attr",
  "rand 0.7.3",
@@ -10219,8 +10421,10 @@ dependencies = [
  "solana-account-info",
  "solana-accounts-db",
  "solana-address-lookup-table-interface",
+ "solana-bls-signatures",
  "solana-bpf-loader-program",
  "solana-bucket-map",
+ "solana-build-alpenglow-vote",
  "solana-builtins",
  "solana-client-traits",
  "solana-clock",
@@ -10261,6 +10465,7 @@ dependencies = [
  "solana-perf",
  "solana-poh-config",
  "solana-precompile-error",
+ "solana-program",
  "solana-program-runtime",
  "solana-pubkey",
  "solana-rayon-threadlimit",
@@ -10300,6 +10505,7 @@ dependencies = [
  "solana-vote",
  "solana-vote-interface",
  "solana-vote-program",
+ "solana-votor-messages",
  "spl-generic-token",
  "static_assertions",
  "strum",
@@ -10339,7 +10545,9 @@ dependencies = [
  "solana-system-transaction",
  "solana-transaction",
  "solana-transaction-error",
+ "solana-vote",
  "solana-vote-interface",
+ "solana-votor-messages",
  "thiserror 2.0.12",
 ]
 
@@ -10678,6 +10886,7 @@ name = "solana-stake-program"
 version = "3.0.0"
 dependencies = [
  "agave-feature-set",
+ "alpenglow-vote",
  "assert_matches",
  "bincode",
  "criterion",
@@ -10706,8 +10915,10 @@ dependencies = [
  "solana-sysvar-id",
  "solana-transaction-context",
  "solana-type-overrides",
+ "solana-vote",
  "solana-vote-interface",
  "solana-vote-program",
+ "solana-votor-messages",
  "test-case",
 ]
 
@@ -11115,6 +11326,7 @@ dependencies = [
  "serde_json",
  "solana-account",
  "solana-accounts-db",
+ "solana-bls-signatures",
  "solana-cli-output",
  "solana-clock",
  "solana-cluster-type",
@@ -11585,6 +11797,7 @@ dependencies = [
  "solana-tls-utils",
  "solana-transaction",
  "solana-transaction-error",
+ "solana-votor",
  "static_assertions",
  "test-case",
  "thiserror 2.0.12",
@@ -11759,13 +11972,20 @@ dependencies = [
  "arbitrary",
  "bencher",
  "bincode",
+ "bitvec",
+ "bytemuck",
  "itertools 0.12.1",
  "log",
+ "num-derive",
+ "num-traits",
+ "num_enum",
  "rand 0.8.5",
  "serde",
  "serde_derive",
+ "serial_test",
  "solana-account",
  "solana-bincode",
+ "solana-bls-signatures",
  "solana-clock",
  "solana-frozen-abi",
  "solana-frozen-abi-macro",
@@ -11774,6 +11994,7 @@ dependencies = [
  "solana-keypair",
  "solana-logger",
  "solana-packet",
+ "solana-program",
  "solana-pubkey",
  "solana-sdk-ids",
  "solana-serialize-utils",
@@ -11781,9 +12002,13 @@ dependencies = [
  "solana-signature",
  "solana-signer",
  "solana-svm-transaction",
+ "solana-system-interface",
  "solana-transaction",
  "solana-vote-interface",
+ "solana-votor-messages",
+ "spl-pod",
  "static_assertions",
+ "test-case",
  "thiserror 2.0.12",
 ]
 
@@ -11854,6 +12079,74 @@ dependencies = [
  "thiserror 2.0.12",
 ]
 
+[[package]]
+name = "solana-votor"
+version = "3.0.0"
+dependencies = [
+ "anyhow",
+ "bincode",
+ "bitvec",
+ "bs58",
+ "crossbeam-channel",
+ "dashmap",
+ "etcd-client",
+ "itertools 0.12.1",
+ "log",
+ "parking_lot 0.12.3",
+ "qualifier_attr",
+ "rayon",
+ "serde",
+ "serde_bytes",
+ "serde_derive",
+ "solana-accounts-db",
+ "solana-bloom",
+ "solana-bls-signatures",
+ "solana-clock",
+ "solana-entry",
+ "solana-epoch-schedule",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-gossip",
+ "solana-hash",
+ "solana-keypair",
+ "solana-ledger",
+ "solana-logger",
+ "solana-measure",
+ "solana-metrics",
+ "solana-pubkey",
+ "solana-rpc",
+ "solana-runtime",
+ "solana-signature",
+ "solana-signer",
+ "solana-time-utils",
+ "solana-transaction",
+ "solana-vote",
+ "solana-vote-program",
+ "solana-votor-messages",
+ "test-case",
+ "thiserror 2.0.12",
+]
+
+[[package]]
+name = "solana-votor-messages"
+version = "3.0.0"
+dependencies = [
+ "bitvec",
+ "bytemuck",
+ "num_enum",
+ "serde",
+ "solana-account",
+ "solana-bls-signatures",
+ "solana-clock",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-hash",
+ "solana-logger",
+ "solana-program",
+ "solana-vote-interface",
+ "spl-pod",
+]
+
 [[package]]
 name = "solana-wen-restart"
 version = "3.0.0"
@@ -11933,6 +12226,7 @@ dependencies = [
  "bs58",
  "clap 3.2.23",
  "dirs-next",
+ "solana-bls-signatures",
  "solana-clap-v3-utils",
  "solana-pubkey",
  "solana-remote-wallet",
@@ -12853,6 +13147,15 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
 [[package]]
 name = "tikv-jemalloc-sys"
 version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
@@ -13098,21 +13401,21 @@ dependencies = [
 
 [[package]]
 name = "toml"
-version = "0.8.12"
+version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
+checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148"
 dependencies = [
  "serde",
  "serde_spanned",
  "toml_datetime",
- "toml_edit 0.22.12",
+ "toml_edit 0.22.24",
 ]
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
 dependencies = [
  "serde",
 ]
@@ -13130,15 +13433,15 @@ dependencies = [
 
 [[package]]
 name = "toml_edit"
-version = "0.22.12"
+version = "0.22.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
+checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474"
 dependencies = [
  "indexmap 2.10.0",
  "serde",
  "serde_spanned",
  "toml_datetime",
- "winnow 0.6.13",
+ "winnow 0.7.4",
 ]
 
 [[package]]
@@ -14144,9 +14447,9 @@ dependencies = [
 
 [[package]]
 name = "winnow"
-version = "0.6.13"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1"
+checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36"
 dependencies = [
  "memchr",
 ]

+ 18 - 0
Cargo.toml

@@ -16,6 +16,7 @@ members = [
     "bench-vote",
     "bloom",
     "bucket_map",
+    "build-alpenglow-vote",
     "builtins",
     "builtins-default-costs",
     "cargo-registry",
@@ -31,6 +32,8 @@ members = [
     "connection-cache",
     "core",
     "cost-model",
+    "curves/bls12-381",
+    "curves/curve-traits",
     "curves/curve25519",
     "dos",
     "download-utils",
@@ -136,6 +139,8 @@ members = [
     "version",
     "vortexor",
     "vote",
+    "votor",
+    "votor-messages",
     "watchtower",
     "wen-restart",
     "xdp",
@@ -188,6 +193,7 @@ agave-transaction-view = { path = "transaction-view", version = "=3.0.0" }
 agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.0.0" }
 agave-xdp = { path = "xdp", version = "=3.0.0" }
 ahash = "0.8.11"
+alpenglow-vote = { git = "https://github.com/solana-program/alpenglow-vote.git", rev = "cdae90a", features = ["serde"] }
 anyhow = "1.0.98"
 aquamarine = "0.6.0"
 arbitrary = "1.4.1"
@@ -212,7 +218,10 @@ base64 = "0.22.1"
 bencher = "0.1.5"
 bincode = "1.3.3"
 bitflags = { version = "2.9.1" }
+bitvec = { version = "1.0.1", features = ["serde"] }
 blake3 = "1.8.2"
+blst = "0.3.14"
+blstrs = "0.7.1"
 borsh = { version = "1.5.7", features = ["derive", "unstable__schema"] }
 borsh0-10 = { package = "borsh", version = "0.10.3" }
 bs58 = { version = "0.5.1", default-features = false }
@@ -260,6 +269,7 @@ env_logger = "0.11.8"
 etcd-client = "0.11.1"
 fast-math = "0.1"
 fd-lock = "3.0.13"
+ff = "0.13.1"
 five8_const = "0.1.4"
 flate2 = "1.0.31"
 fnv = "1.0.7"
@@ -271,6 +281,7 @@ gethostname = "0.2.3"
 getrandom = "0.3.3"
 goauth = "0.13.1"
 governor = "0.6.3"
+group = "0.13.0"
 hex = "0.4.3"
 hidapi = { version = "2.6.3", default-features = false }
 histogram = "0.6.9"
@@ -378,6 +389,7 @@ solana-account-decoder-client-types = { path = "account-decoder-client-types", v
 solana-account-info = "2.3.0"
 solana-accounts-db = { path = "accounts-db", version = "=3.0.0" }
 solana-address-lookup-table-interface = "2.2.2"
+solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.3.0" }
 solana-atomic-u64 = "2.2.1"
 solana-banks-client = { path = "banks-client", version = "=3.0.0" }
 solana-banks-interface = { path = "banks-interface", version = "=3.0.0" }
@@ -387,10 +399,13 @@ solana-big-mod-exp = "2.2.1"
 solana-bincode = "2.2.1"
 solana-blake3-hasher = "2.2.1"
 solana-bloom = { path = "bloom", version = "=3.0.0" }
+solana-bls-signatures = { version = "0.1.0", features = ["serde"] }
+solana-bls12-381 = { path = "curves/bls12-381", version = "=3.0.0" }
 solana-bn254 = "2.2.2"
 solana-borsh = "2.2.1"
 solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=3.0.0" }
 solana-bucket-map = { path = "bucket_map", version = "=3.0.0" }
+solana-build-alpenglow-vote = { path = "build-alpenglow-vote" }
 solana-builtins = { path = "builtins", version = "=3.0.0" }
 solana-builtins-default-costs = { path = "builtins-default-costs", version = "=3.0.0" }
 solana-clap-utils = { path = "clap-utils", version = "=3.0.0" }
@@ -413,6 +428,7 @@ solana-connection-cache = { path = "connection-cache", version = "=3.0.0", defau
 solana-core = { path = "core", version = "=3.0.0" }
 solana-cost-model = { path = "cost-model", version = "=3.0.0" }
 solana-cpi = "2.2.1"
+solana-curve-traits = { path = "curves/curve-traits", version = "=3.0.0" }
 solana-curve25519 = { path = "curves/curve25519", version = "=3.0.0" }
 solana-define-syscall = "2.3.0"
 solana-derivation-path = "2.2.1"
@@ -558,6 +574,8 @@ solana-version = { path = "version", version = "=3.0.0" }
 solana-vote = { path = "vote", version = "=3.0.0" }
 solana-vote-interface = "2.2.6"
 solana-vote-program = { path = "programs/vote", version = "=3.0.0", default-features = false }
+solana-votor = { path = "votor", version = "=3.0.0" }
+solana-votor-messages = { path = "votor-messages", version = "=3.0.0" }
 solana-wen-restart = { path = "wen-restart", version = "=3.0.0" }
 solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=3.0.0" }
 solana-zk-keygen = { path = "zk-keygen", version = "=3.0.0" }

+ 25 - 0
build-alpenglow-vote/Cargo.toml

@@ -0,0 +1,25 @@
+[package]
+name = "solana-build-alpenglow-vote"
+description = "Build alpenglow-vote"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+crate-type = ["lib"]
+name = "build_alpenglow_vote"
+
+[build-dependencies]
+build-print = "0.1.1"
+glob = "0.3.2"
+serde = { workspace = true }
+toml = { workspace = true }
+
+[lints]
+workspace = true

+ 159 - 0
build-alpenglow-vote/build.rs

@@ -0,0 +1,159 @@
+use std::{
+    env, fs,
+    path::{Path, PathBuf},
+};
+
+fn fetch_shared_object_path(manifest_path: &Path) -> PathBuf {
+    manifest_path
+        .parent()
+        .unwrap()
+        .to_owned()
+        .join("spl-alpenglow_vote.so")
+}
+
+fn generate_github_rev(rev: &str) -> PathBuf {
+    // Form the glob that searches for the git repo's manifest path under ~/.cargo/git/checkouts
+    let git_checkouts_path = PathBuf::from(env::var("CARGO_HOME").unwrap())
+        .join("git")
+        .join("checkouts");
+
+    let glob_str = format!(
+        "{}/alpenglow-vote-*/{}/Cargo.toml",
+        git_checkouts_path.to_str().unwrap(),
+        rev
+    );
+
+    // Find the manifest path
+    let manifest_path = glob::glob(&glob_str)
+        .unwrap_or_else(|_| panic!("Failed to read glob: {}", &glob_str))
+        .filter_map(Result::ok)
+        .next()
+        .unwrap_or_else(|| {
+            panic!(
+                "Couldn't find path to git repo with glob {} and revision {}",
+                &glob_str, rev
+            )
+        });
+
+    fetch_shared_object_path(&manifest_path)
+}
+
+fn generate_local_checkout(path: &str) -> PathBuf {
+    let err = || {
+        format!("Local checkout path must be of the form: /x/y/z/alpenglow-vote-project-path/program. In particular, alpenglow-vote-project-path is the local checkout, which might typically just be called alpenglow-vote. Current checkout path: {}", path)
+    };
+    let path = PathBuf::from(path);
+
+    // Ensure that path ends with "program"
+    if path
+        .file_name()
+        .and_then(|p| p.to_str())
+        .unwrap_or_else(|| panic!("{}", err()))
+        != "program"
+    {
+        panic!("{}", err());
+    }
+
+    // If this is a relative path, then make it absolute by determining the relative path with
+    // respect to the project directory, and not the current CARGO_MANIFEST_DIR.
+    let path = if path.is_relative() {
+        PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap())
+            .parent()
+            .unwrap()
+            .to_owned()
+            .join(path)
+    } else {
+        path
+    };
+
+    // Turn the path into an absolute path
+    let path = std::path::absolute(path).unwrap();
+    let manifest_path = path.parent().unwrap().to_owned().join("Cargo.toml");
+
+    fetch_shared_object_path(&manifest_path)
+}
+
+fn main() {
+    // Get the project's Cargo.toml
+    let cargo_manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
+    let project_cargo_toml_path = PathBuf::from(&cargo_manifest_dir)
+        .join("..")
+        .join("Cargo.toml");
+
+    // Parse the Cargo file.
+    let project_cargo_toml_contents =
+        fs::read_to_string(&project_cargo_toml_path).expect("Couldn't read root Cargo.toml.");
+
+    let project_cargo_toml = project_cargo_toml_contents
+        .parse::<toml::Value>()
+        .expect("Couldn't parse root Cargo.toml into a valid toml::Value.");
+
+    // Find alpenglow-vote
+    let workspace_dependencies = &project_cargo_toml["workspace"]["dependencies"];
+
+    let err = "alpenglow-vote must either be of form: (1) if you're trying to fetch from a git repo: { git = \"...\", rev = \"...\" } or (2) if you're trying to use a local checkout of alpenglow-vote : { path = \"...\" }";
+
+    let alpenglow_vote = workspace_dependencies
+        .get("alpenglow-vote")
+        .expect("Couldn't find alpenglow-vote under workspace.dependencies in root Cargo.toml.")
+        .as_table()
+        .expect(err);
+
+    // Are we trying to build alpenglow-vote from Github or a local checkout?
+    let so_src_path = if alpenglow_vote.contains_key("git") && alpenglow_vote.contains_key("rev") {
+        build_print::custom_println!(
+            "Compiling",
+            green,
+            "spl-alpenglow_vote.so: building from github rev: {:?}",
+            &alpenglow_vote
+        );
+        generate_github_rev(alpenglow_vote["rev"].as_str().unwrap())
+    } else if alpenglow_vote.contains_key("path") {
+        build_print::custom_println!(
+            "Compiling",
+            green,
+            "spl-alpenglow_vote.so: building from local checkout: {:?}",
+            &alpenglow_vote
+        );
+        generate_local_checkout(alpenglow_vote["path"].as_str().unwrap())
+    } else {
+        panic!("{}", err);
+    };
+
+    // Copy the .so to project_dir/target/tmp/
+    let so_dest_path = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap())
+        .parent()
+        .unwrap()
+        .to_owned()
+        .join("target")
+        .join("alpenglow-vote-so")
+        .join("spl_alpenglow-vote.so");
+
+    fs::create_dir_all(so_dest_path.parent().unwrap())
+        .unwrap_or_else(|_| panic!("Couldn't create path: {:?}", &so_dest_path));
+
+    fs::copy(&so_src_path, &so_dest_path).unwrap_or_else(|err| {
+        panic!(
+            "Couldn't copy alpenglow_vote from {:?} to {:?}:\n{}",
+            &so_src_path, &so_dest_path, err
+        )
+    });
+
+    build_print::custom_println!(
+        "[build-alpenglow-vote]",
+        green,
+        "spl-alpenglow_vote.so: successfully built alpenglow_vote! Copying {} -> {}",
+        so_src_path.display(),
+        so_dest_path.display(),
+    );
+
+    // Save the destination path as an environment variable that can later be invoked in Rust code
+    println!(
+        "cargo:rustc-env=ALPENGLOW_VOTE_SO_PATH={}",
+        so_dest_path.display()
+    );
+
+    // Re-build if we detect a change in either (1) the alpenglow-vote src or (2) this build script
+    println!("cargo::rerun-if-changed={}", so_src_path.display());
+    println!("cargo::rerun-if-changed=build.rs");
+}

+ 12 - 0
build-alpenglow-vote/src/lib.rs

@@ -0,0 +1,12 @@
+/// Path to the alpenglow-vote shared object
+pub const ALPENGLOW_VOTE_SO_PATH: &str = env!("ALPENGLOW_VOTE_SO_PATH");
+
+#[cfg(test)]
+mod tests {
+    use {crate::ALPENGLOW_VOTE_SO_PATH, std::path::Path};
+
+    #[test]
+    pub fn ensure_alpenglow_vote_so_path_exists() {
+        assert!(Path::new(ALPENGLOW_VOTE_SO_PATH).exists());
+    }
+}
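
For downstream crates, the compile-time constant above is all that is needed to locate the prebuilt program. A minimal consumer sketch follows; only ALPENGLOW_VOTE_SO_PATH comes from this commit, while the helper function itself is hypothetical:

    // Hypothetical consumer of the path exported by build.rs.
    // `build_alpenglow_vote` is the lib target name declared in
    // build-alpenglow-vote/Cargo.toml above.
    use {build_alpenglow_vote::ALPENGLOW_VOTE_SO_PATH, std::fs};

    fn load_alpenglow_vote_elf() -> Vec<u8> {
        // build.rs copies the .so into target/alpenglow-vote-so/ and embeds
        // the absolute path at compile time, so a plain read suffices here.
        fs::read(ALPENGLOW_VOTE_SO_PATH)
            .unwrap_or_else(|e| panic!("failed to read {ALPENGLOW_VOTE_SO_PATH}: {e}"))
    }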

+ 7 - 1
ci/docker-run.sh

@@ -96,7 +96,12 @@ fi
 
 # Ensure files are created with the current host uid/gid
 if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
-  ARGS+=(--user "$(id -u):$(id -g)")
+  ARGS+=(
+    --user "$(id -u):$(id -g)"
+    --volume "/etc/passwd:/etc/passwd:ro"
+    --volume "/etc/group:/etc/group:ro"
+    --volume "/var/lib/buildkite-agent:/var/lib/buildkite-agent"
+  )
 fi
 
 if [[ -n $SOLANA_ALLOCATE_TTY ]]; then
@@ -122,6 +127,7 @@ ARGS+=(
   --env CI_PULL_REQUEST
   --env CI_REPO_SLUG
   --env CRATES_IO_TOKEN
+  --env CARGO_NET_GIT_FETCH_WITH_CLI
 )
 
 # Also propagate environment variables needed for codecov

+ 9 - 13
ci/stable/run-local-cluster-partially.sh

@@ -1,17 +1,6 @@
 #!/usr/bin/env bash
 set -e
 
-CURRENT=$1
-: "${CURRENT:?}"
-
-TOTAL=$2
-: "${TOTAL:?}"
-
-if [ "$CURRENT" -gt "$TOTAL" ]; then
-  echo "Error: The value of CURRENT (\$1) cannot be greater than the value of TOTAL (\$2)."
-  exit 1
-fi
-
 here="$(dirname "$0")"
 
 #shellcheck source=ci/common/shared-functions.sh
@@ -20,10 +9,17 @@ source "$here"/../common/shared-functions.sh
 #shellcheck source=ci/stable/common.sh
 source "$here"/common.sh
 
+INDEX=${1:-"$BUILDKITE_PARALLEL_JOB"}
+: "${INDEX:?}"
+
+LIMIT=${2:-"$BUILDKITE_PARALLEL_JOB_COUNT"}
+: "${LIMIT:?}"
+
 _ cargo nextest run \
   --profile ci \
+  --config-file ./nextest.toml \
   --package solana-local-cluster \
   --test local_cluster \
-  --partition hash:"$CURRENT/$TOTAL" \
+  --partition hash:"$((INDEX + 1))/$LIMIT" \
   --test-threads=1 \
-  --no-tests=warn
+  --no-tests=warn

+ 1 - 1
ci/stable/run-localnet.sh

@@ -9,4 +9,4 @@ echo --- ci/localnet-sanity.sh
 "$here"/../localnet-sanity.sh -x
 
 echo --- ci/run-sanity.sh
-"$here"/../run-sanity.sh -x
+"$here"/../run-sanity.sh -x

+ 17 - 13
ci/stable/run-partition.sh

@@ -1,17 +1,6 @@
 #!/usr/bin/env bash
 set -eo pipefail
 
-CURRENT=$1
-: "${CURRENT:?}"
-
-TOTAL=$2
-: "${TOTAL:?}"
-
-if [ "$CURRENT" -gt "$TOTAL" ]; then
-  echo "Error: The value of CURRENT (\$1) cannot be greater than the value of TOTAL (\$2)."
-  exit 1
-fi
-
 here="$(dirname "$0")"
 
 #shellcheck source=ci/common/shared-functions.sh
@@ -23,15 +12,30 @@ source "$here"/../common/limit-threads.sh
 #shellcheck source=ci/stable/common.sh
 source "$here"/common.sh
 
+# check partition info
+INDEX=${1:-"$BUILDKITE_PARALLEL_JOB"} # BUILDKITE_PARALLEL_JOB from 0 to (BUILDKITE_PARALLEL_JOB_COUNT - 1)
+: "${INDEX:?}"
+
+# if LIMIT = 3, the valid INDEX values are 0 through 2
+LIMIT=${2:-"$BUILDKITE_PARALLEL_JOB_COUNT"}
+: "${LIMIT:?}"
+
+if [ ! "$LIMIT" -gt "$INDEX" ]; then
+  echo "LIMIT(\$2) should be greater than INDEX(\$1)"
+  exit 1
+fi
+
 ARGS=(
   --profile ci
+  --config-file ./nextest.toml
   --workspace
   --tests
   --jobs "$JOBS"
-  --partition hash:"$CURRENT/$TOTAL"
+  --partition hash:"$((INDEX + 1))/$LIMIT"
   --verbose
   --exclude solana-local-cluster
+  --exclude solana-cargo-build-sbf
   --no-tests=warn
 )
 
-_ cargo nextest run "${ARGS[@]}"
+_ cargo nextest run "${ARGS[@]}"

+ 1 - 0
clap-utils/Cargo.toml

@@ -19,6 +19,7 @@ name = "solana_clap_utils"
 chrono = { workspace = true, features = ["default"] }
 clap = "2.33.0"
 rpassword = { workspace = true }
+solana-bls-signatures = { workspace = true }
 solana-clock = { workspace = true }
 solana-cluster-type = { workspace = true }
 solana-commitment-config = { workspace = true }

+ 32 - 0
clap-utils/src/input_parsers.rs

@@ -5,6 +5,7 @@ use {
     },
     chrono::DateTime,
     clap::ArgMatches,
+    solana_bls_signatures::Pubkey as BLSPubkey,
     solana_clock::UnixTimestamp,
     solana_cluster_type::ClusterType,
     solana_commitment_config::CommitmentConfig,
@@ -104,6 +105,19 @@ pub fn pubkeys_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Pubkey>> {
     })
 }
 
+pub fn bls_pubkeys_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<BLSPubkey>> {
+    matches.values_of(name).map(|values| {
+        values
+            .map(|value| {
+                BLSPubkey::from_str(value).unwrap_or_else(|_| {
+                    //TODO(wen): support reading BLS keypair files
+                    panic!("Failed to parse BLS public key from value: {}", value)
+                })
+            })
+            .collect()
+    })
+}
+
 // Return pubkey/signature pairs for a string of the form pubkey=signature
 pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> {
     matches.values_of(name).map(|values| {
@@ -253,6 +267,7 @@ mod tests {
     use {
         super::*,
         clap::{App, Arg},
+        solana_bls_signatures::{keypair::Keypair as BLSKeypair, Pubkey as BLSPubkey},
         solana_keypair::write_keypair_file,
         std::fs,
     };
@@ -416,6 +431,23 @@ mod tests {
         assert_ne!(lamports_of_sol(&matches, "single"), Some(502_500_000));
     }
 
+    #[test]
+    fn test_bls_pubkeys_of() {
+        let bls_pubkey1: BLSPubkey = BLSKeypair::new().public.into();
+        let bls_pubkey2: BLSPubkey = BLSKeypair::new().public.into();
+        let matches = app().get_matches_from(vec![
+            "test",
+            "--multiple",
+            &bls_pubkey1.to_string(),
+            "--multiple",
+            &bls_pubkey2.to_string(),
+        ]);
+        assert_eq!(
+            bls_pubkeys_of(&matches, "multiple"),
+            Some(vec![bls_pubkey1, bls_pubkey2])
+        );
+    }
+
     #[test]
     fn test_lamports_of_sol() {
         let matches = app().get_matches_from(vec!["test", "--single", "50"]);

+ 7 - 2
cli/Cargo.toml

@@ -19,6 +19,7 @@ path = "src/main.rs"
 [dependencies]
 agave-feature-set = { workspace = true }
 agave-syscalls = { workspace = true }
+alpenglow-vote = { workspace = true }
 bincode = { workspace = true }
 bs58 = { workspace = true }
 clap = { workspace = true }
@@ -40,7 +41,9 @@ serde_json = { workspace = true }
 solana-account = "=2.2.1"
 solana-account-decoder = { workspace = true }
 solana-address-lookup-table-interface = { workspace = true }
-solana-borsh = "=2.2.1"
+solana-bls-signatures = { workspace = true, features = ["solana-signer-derive"] }
+solana-borsh = { workspace = true }
+solana-bpf-loader-program = { workspace = true }
 solana-clap-utils = { workspace = true }
 solana-cli-config = { workspace = true }
 solana-cli-output = { workspace = true }
@@ -95,8 +98,10 @@ solana-transaction-status = { workspace = true }
 solana-transaction-status-client-types = { workspace = true }
 solana-udp-client = { workspace = true }
 solana-version = { workspace = true }
+solana-vote = { workspace = true }
 solana-vote-program = { workspace = true }
-spl-memo = { version = "=6.0.0", features = ["no-entrypoint"] }
+solana-votor-messages = { workspace = true }
+spl-memo = { workspace = true, features = ["no-entrypoint"] }
 thiserror = { workspace = true }
 tiny-bip39 = { workspace = true }
 

+ 5 - 0
cli/src/cli.rs

@@ -338,6 +338,7 @@ pub enum CliCommand {
         memo: Option<String>,
         fee_payer: SignerIndex,
         compute_unit_price: Option<u64>,
+        is_alpenglow: bool,
     },
     ShowVoteAccount {
         pubkey: Pubkey,
@@ -1474,6 +1475,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             memo,
             fee_payer,
             compute_unit_price,
+            is_alpenglow,
         } => process_create_vote_account(
             &rpc_client,
             config,
@@ -1491,6 +1493,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             memo.as_ref(),
             *fee_payer,
             *compute_unit_price,
+            *is_alpenglow,
         ),
         CliCommand::ShowVoteAccount {
             pubkey: vote_account_pubkey,
@@ -2151,6 +2154,7 @@ mod tests {
             memo: None,
             fee_payer: 0,
             compute_unit_price: None,
+            is_alpenglow: false,
         };
         config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
         let result = process_command(&config);
@@ -2430,6 +2434,7 @@ mod tests {
             memo: None,
             fee_payer: 0,
             compute_unit_price: None,
+            is_alpenglow: false,
         };
         config.signers = vec![&keypair, &bob_keypair, &identity_keypair];
         assert!(process_command(&config).is_err());

+ 20 - 1
cli/src/stake.rs

@@ -1747,7 +1747,26 @@ pub fn process_deactivate_stake_account(
             &vote_account_address,
             rpc_client.commitment(),
         )?;
-        if !eligible_for_deactivate_delinquent(&vote_state.epoch_credits, current_epoch) {
+
+        let is_eligible_for_deactivate_delinquent = match vote_state {
+            crate::vote::VoteStateWrapper::VoteState(ref vote_state) => {
+                eligible_for_deactivate_delinquent(&vote_state.epoch_credits, current_epoch)
+            }
+            crate::vote::VoteStateWrapper::AlpenglowVoteState(ref vote_state) => {
+                let credits = vote_state.epoch_credits().credits();
+                let prev_credits = vote_state.epoch_credits().prev_credits();
+
+                let mut epoch_credits = vec![];
+
+                if credits != 0 && prev_credits != 0 {
+                    epoch_credits.push((current_epoch, credits, prev_credits));
+                };
+
+                eligible_for_deactivate_delinquent(epoch_credits.as_slice(), current_epoch)
+            }
+        };
+
+        if !is_eligible_for_deactivate_delinquent {
             return Err(CliError::BadParameter(format!(
                 "Stake has not been delinquent for {} epochs",
                 stake::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION,

+ 243 - 87
cli/src/vote.rs

@@ -15,6 +15,7 @@ use {
     },
     clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand},
     solana_account::Account,
+    solana_bls_signatures::{keypair::Keypair as BLSKeypair, Pubkey as BLSPubkey},
     solana_clap_utils::{
         compute_budget::{compute_unit_price_arg, ComputeUnitLimit, COMPUTE_UNIT_PRICE_ARG},
         fee_payer::{fee_payer_arg, FEE_PAYER_ARG},
@@ -40,12 +41,18 @@ use {
     solana_system_interface::error::SystemError,
     solana_transaction::Transaction,
     solana_vote_program::{
+        authorized_voters::AuthorizedVoters,
         vote_error::VoteError,
         vote_instruction::{self, withdraw, CreateVoteAccountConfig},
         vote_state::{
-            VoteAuthorize, VoteInit, VoteState, VoteStateVersions, VOTE_CREDITS_MAXIMUM_PER_SLOT,
+            BlockTimestamp, VoteAuthorize, VoteInit, VoteState, VoteStateVersions,
+            VOTE_CREDITS_MAXIMUM_PER_SLOT,
         },
     },
+    solana_votor_messages::{
+        bls_message::BLS_KEYPAIR_DERIVE_SEED, instruction::InitializeAccountInstructionData,
+        state::VoteState as AlpenglowVoteState,
+    },
     std::rc::Rc,
 };
 
@@ -119,6 +126,15 @@ impl VoteSubCommands for App<'_, '_> {
                              will be at a derived address of the VOTE ACCOUNT pubkey",
                         ),
                 )
+                .arg(
+                    Arg::with_name("alpenglow")
+                        .long("alpenglow")
+                        .takes_value(false)
+                        .help(
+                            "When enabled, creates an Alpenglow vote account. When disabled, \
+                             creates a POH vote account.",
+                        ),
+                )
                 .offline_args()
                 .nonce_args(false)
                 .arg(fee_payer_arg())
@@ -474,6 +490,7 @@ pub fn parse_create_vote_account(
         signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
     let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
     let compute_unit_price = value_of(matches, COMPUTE_UNIT_PRICE_ARG.name);
+    let is_alpenglow = matches.is_present("alpenglow");
 
     if !allow_unsafe {
         if authorized_withdrawer == vote_account_pubkey.unwrap() {
@@ -515,6 +532,7 @@ pub fn parse_create_vote_account(
             memo,
             fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
             compute_unit_price,
+            is_alpenglow,
         },
         signers: signer_info.signers,
     })
@@ -808,6 +826,7 @@ pub fn process_create_vote_account(
     memo: Option<&String>,
     fee_payer: SignerIndex,
     compute_unit_price: Option<u64>,
+    is_alpenglow: bool,
 ) -> ProcessResult {
     let vote_account = config.signers[vote_account];
     let vote_account_pubkey = vote_account.pubkey();
@@ -829,48 +848,85 @@ pub fn process_create_vote_account(
     )?;
 
     let required_balance = rpc_client
-        .get_minimum_balance_for_rent_exemption(VoteState::size_of())?
+        .get_minimum_balance_for_rent_exemption(if is_alpenglow {
+            solana_votor_messages::state::VoteState::size()
+        } else {
+            VoteState::size_of()
+        })?
         .max(1);
+
     let amount = SpendAmount::Some(required_balance);
 
     let fee_payer = config.signers[fee_payer];
     let nonce_authority = config.signers[nonce_authority];
-    let space = VoteStateVersions::vote_state_size_of(true) as u64;
-
     let compute_unit_limit = match blockhash_query {
         BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default,
         BlockhashQuery::All(_) => ComputeUnitLimit::Simulated,
     };
+
     let build_message = |lamports| {
-        let vote_init = VoteInit {
-            node_pubkey: identity_pubkey,
-            authorized_voter: authorized_voter.unwrap_or(identity_pubkey),
-            authorized_withdrawer,
-            commission,
-        };
-        let mut create_vote_account_config = CreateVoteAccountConfig {
-            space,
-            ..CreateVoteAccountConfig::default()
-        };
-        let to = if let Some(seed) = seed {
-            create_vote_account_config.with_seed = Some((&vote_account_pubkey, seed));
-            &vote_account_address
+        let node_pubkey = identity_pubkey;
+        let authorized_voter = authorized_voter.unwrap_or(identity_pubkey);
+
+        let from_pubkey = &config.signers[0].pubkey();
+        let to_pubkey = &vote_account_address;
+
+        let mut ixs = if is_alpenglow {
+            let bls_keypair =
+                BLSKeypair::derive_from_signer(&identity_account, BLS_KEYPAIR_DERIVE_SEED).unwrap();
+            let bls_pubkey: BLSPubkey = bls_keypair.public.into();
+            let initialize_account_ixn_meta = InitializeAccountInstructionData {
+                node_pubkey,
+                authorized_voter,
+                authorized_withdrawer,
+                commission,
+                bls_pubkey,
+            };
+
+            let create_ix = solana_system_interface::instruction::create_account(
+                from_pubkey,
+                to_pubkey,
+                lamports,
+                solana_votor_messages::state::VoteState::size() as u64,
+                &solana_votor_messages::id(),
+            );
+
+            let init_ix = solana_votor_messages::instruction::initialize_account(
+                *to_pubkey,
+                &initialize_account_ixn_meta,
+            );
+
+            vec![create_ix, init_ix]
         } else {
-            &vote_account_pubkey
+            let vote_init = VoteInit {
+                node_pubkey,
+                authorized_voter,
+                authorized_withdrawer,
+                commission,
+            };
+            let mut create_vote_account_config = CreateVoteAccountConfig {
+                space: VoteStateVersions::vote_state_size_of(true) as u64,
+                ..CreateVoteAccountConfig::default()
+            };
+            if let Some(seed) = seed {
+                create_vote_account_config.with_seed = Some((&vote_account_pubkey, seed));
+            }
+
+            vote_instruction::create_account_with_config(
+                from_pubkey,
+                to_pubkey,
+                &vote_init,
+                lamports,
+                create_vote_account_config,
+            )
         };
 
-        let ixs = vote_instruction::create_account_with_config(
-            &config.signers[0].pubkey(),
-            to,
-            &vote_init,
-            lamports,
-            create_vote_account_config,
-        )
-        .with_memo(memo)
-        .with_compute_unit_config(&ComputeUnitConfig {
-            compute_unit_price,
-            compute_unit_limit,
-        });
+        ixs = ixs
+            .with_memo(memo)
+            .with_compute_unit_config(&ComputeUnitConfig {
+                compute_unit_price,
+                compute_unit_limit,
+            });
 
         if let Some(nonce_account) = &nonce_account {
             Message::new_with_nonce(
@@ -976,15 +1032,15 @@ pub fn process_vote_authorize(
             if let Some(vote_state) = vote_state {
                 let current_epoch = rpc_client.get_epoch_info()?.epoch;
                 let current_authorized_voter = vote_state
-                    .authorized_voters()
                     .get_authorized_voter(current_epoch)
                     .ok_or_else(|| {
                         CliError::RpcRequestError(
                             "Invalid vote account state; no authorized voters found".to_string(),
                         )
                     })?;
+
                 check_current_authority(
-                    &[current_authorized_voter, vote_state.authorized_withdrawer],
+                    &[current_authorized_voter, vote_state.authorized_withdrawer()],
                     &authorized.pubkey(),
                 )?;
                 if let Some(signer) = new_authorized_signer {
@@ -1004,7 +1060,10 @@ pub fn process_vote_authorize(
                 (new_authorized_pubkey, "new_authorized_pubkey".to_string()),
             )?;
             if let Some(vote_state) = vote_state {
-                check_current_authority(&[vote_state.authorized_withdrawer], &authorized.pubkey())?
+                check_current_authority(
+                    &[vote_state.authorized_withdrawer()],
+                    &authorized.pubkey(),
+                )?
             }
         }
     }
@@ -1257,11 +1316,71 @@ pub fn process_vote_update_commission(
     }
 }
 
+#[allow(clippy::large_enum_variant)]
+pub(crate) enum VoteStateWrapper {
+    VoteState(VoteState),
+    AlpenglowVoteState(AlpenglowVoteState),
+}
+
+impl VoteStateWrapper {
+    pub fn get_authorized_voter(&self, epoch: u64) -> Option<Pubkey> {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.get_authorized_voter(epoch),
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => {
+                vote_state.get_authorized_voter(epoch)
+            }
+        }
+    }
+
+    pub fn authorized_withdrawer(&self) -> Pubkey {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.authorized_withdrawer,
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => *vote_state.authorized_withdrawer(),
+        }
+    }
+
+    pub fn node_pubkey(&self) -> Pubkey {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.node_pubkey,
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => *vote_state.node_pubkey(),
+        }
+    }
+
+    pub fn credits(&self) -> u64 {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.credits(),
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => {
+                vote_state.epoch_credits().credits()
+            }
+        }
+    }
+
+    pub fn commission(&self) -> u8 {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.commission,
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => vote_state.commission(),
+        }
+    }
+
+    pub fn last_timestamp(&self) -> BlockTimestamp {
+        match self {
+            VoteStateWrapper::VoteState(vote_state) => vote_state.last_timestamp.clone(),
+            VoteStateWrapper::AlpenglowVoteState(vote_state) => BlockTimestamp {
+                slot: vote_state.latest_timestamp_legacy_format().slot,
+                timestamp: vote_state.latest_timestamp_legacy_format().timestamp,
+            },
+        }
+    }
+}
+
+const SOLANA_VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
+const ALPENGLOW_VOTE_PROGRAM_ID: Pubkey = solana_votor_messages::id();
+
 pub(crate) fn get_vote_account(
     rpc_client: &RpcClient,
     vote_account_pubkey: &Pubkey,
     commitment_config: CommitmentConfig,
-) -> Result<(Account, VoteState), Box<dyn std::error::Error>> {
+) -> Result<(Account, VoteStateWrapper), Box<dyn std::error::Error>> {
     let vote_account = rpc_client
         .get_account_with_commitment(vote_account_pubkey, commitment_config)?
         .value
@@ -1269,19 +1388,32 @@ pub(crate) fn get_vote_account(
             CliError::RpcRequestError(format!("{vote_account_pubkey:?} account does not exist"))
         })?;
 
-    if vote_account.owner != solana_vote_program::id() {
-        return Err(CliError::RpcRequestError(format!(
-            "{vote_account_pubkey:?} is not a vote account"
-        ))
-        .into());
-    }
-    let vote_state = VoteState::deserialize(&vote_account.data).map_err(|_| {
-        CliError::RpcRequestError(
-            "Account data could not be deserialized to vote state".to_string(),
-        )
-    })?;
+    let vote_state_wrapper = match vote_account.owner {
+        SOLANA_VOTE_PROGRAM_ID => VoteStateWrapper::VoteState(
+            VoteState::deserialize(&vote_account.data).map_err(|_| {
+                CliError::RpcRequestError(
+                    "Account data could not be deserialized to vote state".to_string(),
+                )
+            })?,
+        ),
+
+        ALPENGLOW_VOTE_PROGRAM_ID => VoteStateWrapper::AlpenglowVoteState(
+            *AlpenglowVoteState::deserialize(&vote_account.data).map_err(|_| {
+                CliError::RpcRequestError(
+                    "Account data could not be deserialized to vote state".to_string(),
+                )
+            })?,
+        ),
+
+        _ => {
+            return Err(CliError::RpcRequestError(format!(
+                "{vote_account_pubkey:?} is not a vote account"
+            ))
+            .into())
+        }
+    };
 
-    Ok((vote_account, vote_state))
+    Ok((vote_account, vote_state_wrapper))
 }
 
 pub fn process_show_vote_account(
@@ -1303,55 +1435,73 @@ pub fn process_show_vote_account(
 
     let mut votes: Vec<CliLandedVote> = vec![];
     let mut epoch_voting_history: Vec<CliEpochVotingHistory> = vec![];
-    if !vote_state.votes.is_empty() {
-        for vote in &vote_state.votes {
-            votes.push(vote.into());
+    let mut epoch_rewards = None;
+
+    // TODO: handle Alpenglow case
+    if let VoteStateWrapper::VoteState(ref vote_state) = vote_state {
+        if !vote_state.votes.is_empty() {
+            for vote in &vote_state.votes {
+                votes.push(vote.into());
+            }
+            for (epoch, credits, prev_credits) in vote_state.epoch_credits().iter().copied() {
+                let credits_earned = credits.saturating_sub(prev_credits);
+                let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
+                let is_tvc_active = tvc_activation_epoch.map(|e| epoch >= e).unwrap_or_default();
+                let max_credits_per_slot = if is_tvc_active {
+                    VOTE_CREDITS_MAXIMUM_PER_SLOT
+                } else {
+                    1
+                };
+                epoch_voting_history.push(CliEpochVotingHistory {
+                    epoch,
+                    slots_in_epoch,
+                    credits_earned,
+                    credits,
+                    prev_credits,
+                    max_credits_per_slot,
+                });
+            }
         }
-        for (epoch, credits, prev_credits) in vote_state.epoch_credits().iter().copied() {
-            let credits_earned = credits.saturating_sub(prev_credits);
-            let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
-            let is_tvc_active = tvc_activation_epoch.map(|e| epoch >= e).unwrap_or_default();
-            let max_credits_per_slot = if is_tvc_active {
-                VOTE_CREDITS_MAXIMUM_PER_SLOT
-            } else {
-                1
-            };
-            epoch_voting_history.push(CliEpochVotingHistory {
-                epoch,
-                slots_in_epoch,
-                credits_earned,
-                credits,
-                prev_credits,
-                max_credits_per_slot,
+
+        epoch_rewards =
+            with_rewards.and_then(|num_epochs| {
+                match crate::stake::fetch_epoch_rewards(
+                    rpc_client,
+                    vote_account_address,
+                    num_epochs,
+                    starting_epoch,
+                ) {
+                    Ok(rewards) => Some(rewards),
+                    Err(error) => {
+                        eprintln!("Failed to fetch epoch rewards: {error:?}");
+                        None
+                    }
+                }
             });
-        }
     }
 
-    let epoch_rewards =
-        with_rewards.and_then(|num_epochs| {
-            match crate::stake::fetch_epoch_rewards(
-                rpc_client,
-                vote_account_address,
-                num_epochs,
-                starting_epoch,
-            ) {
-                Ok(rewards) => Some(rewards),
-                Err(error) => {
-                    eprintln!("Failed to fetch epoch rewards: {error:?}");
-                    None
-                }
-            }
-        });
+    let authorized_voters = match vote_state {
+        VoteStateWrapper::VoteState(ref vote_state) => vote_state.authorized_voters(),
+        // TODO: implement this properly for AlpenglowVoteState
+        VoteStateWrapper::AlpenglowVoteState(_) => &AuthorizedVoters::default(),
+    };
+
+    let root_slot = match vote_state {
+        VoteStateWrapper::VoteState(ref vote_state) => vote_state.root_slot,
+        // TODO: Alpenglow has no real equivalent for the root slot; we should
+        // rework process_show_vote_account properly
+        VoteStateWrapper::AlpenglowVoteState(_) => None,
+    };
 
     let vote_account_data = CliVoteAccount {
         account_balance: vote_account.lamports,
-        validator_identity: vote_state.node_pubkey.to_string(),
-        authorized_voters: vote_state.authorized_voters().into(),
-        authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
+        validator_identity: vote_state.node_pubkey().to_string(),
+        authorized_voters: authorized_voters.into(),
+        authorized_withdrawer: vote_state.authorized_withdrawer().to_string(),
         credits: vote_state.credits(),
-        commission: vote_state.commission,
-        root_slot: vote_state.root_slot,
-        recent_timestamp: vote_state.last_timestamp.clone(),
+        commission: vote_state.commission(),
+        root_slot,
+        recent_timestamp: vote_state.last_timestamp(),
         votes,
         epoch_voting_history,
         use_lamports_unit,
@@ -1853,6 +2003,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),
@@ -1887,6 +2038,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),
@@ -1928,6 +2080,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),
@@ -1981,6 +2134,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),
@@ -2024,6 +2178,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),
@@ -2063,6 +2218,7 @@ mod tests {
                     memo: None,
                     fee_payer: 0,
                     compute_unit_price: None,
+                    is_alpenglow: false,
                 },
                 signers: vec![
                     Box::new(read_keypair_file(&default_keypair_file).unwrap()),

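The `VoteStateWrapper` introduced above lets CLI call sites stay agnostic of which on-chain program owns a vote account. A minimal usage sketch, assuming an `rpc_client`, `vote_pubkey`, and `commitment` already in scope (those names are illustrative, not part of the diff):

    let (account, state) = get_vote_account(&rpc_client, &vote_pubkey, commitment)?;
    println!(
        "identity: {}, credits: {}, commission: {}%",
        state.node_pubkey(),
        state.credits(),
        state.commission()
    );
    // Only the legacy tower state carries landed votes; Alpenglow state does not,
    // which is why process_show_vote_account branches on the wrapper variant.
    if let VoteStateWrapper::VoteState(ref tower) = state {
        println!("landed votes: {}", tower.votes.len());
    }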
+ 1 - 0
cli/tests/stake.rs

@@ -92,6 +92,7 @@ fn test_stake_delegation_force() {
         memo: None,
         fee_payer: 0,
         compute_unit_price: None,
+        is_alpenglow: false,
     };
     process_command(&config).unwrap();
 

+ 2 - 0
cli/tests/vote.rs

@@ -58,6 +58,7 @@ fn test_vote_authorize_and_withdraw(compute_unit_price: Option<u64>) {
         memo: None,
         fee_payer: 0,
         compute_unit_price,
+        is_alpenglow: false,
     };
     process_command(&config).unwrap();
     let vote_account = rpc_client
@@ -286,6 +287,7 @@ fn test_offline_vote_authorize_and_withdraw(compute_unit_price: Option<u64>) {
         memo: None,
         fee_payer: 0,
         compute_unit_price,
+        is_alpenglow: false,
     };
     process_command(&config_payer).unwrap();
     let vote_account = rpc_client

+ 6 - 0
core/Cargo.toml

@@ -24,6 +24,7 @@ frozen-abi = [
     "dep:solana-frozen-abi-macro",
     "solana-accounts-db/frozen-abi",
     "solana-bloom/frozen-abi",
+    "solana-bls-signatures/frozen-abi",
     "solana-compute-budget/frozen-abi",
     "solana-cost-model/frozen-abi",
     "solana-frozen-abi/frozen-abi",
@@ -46,12 +47,14 @@ agave-feature-set = { workspace = true }
 agave-transaction-view = { workspace = true }
 agave-verified-packet-receiver = { workspace = true }
 ahash = { workspace = true }
+alpenglow-vote = { workspace = true, features = ["serde"] }
 anyhow = { workspace = true }
 arrayvec = { workspace = true }
 assert_matches = { workspace = true }
 async-trait = { workspace = true }
 base64 = { workspace = true }
 bincode = { workspace = true }
+bitvec = { workspace = true }
 bs58 = { workspace = true }
 bytes = { workspace = true }
 chrono = { workspace = true, features = ["default", "serde"] }
@@ -85,6 +88,7 @@ solana-accounts-db = { workspace = true }
 solana-address-lookup-table-interface = { workspace = true }
 solana-bincode = { workspace = true }
 solana-bloom = { workspace = true }
+solana-bls-signatures = { workspace = true }
 solana-builtins-default-costs = { workspace = true }
 solana-client = { workspace = true }
 solana-clock = { workspace = true }
@@ -163,6 +167,8 @@ solana-validator-exit = { workspace = true }
 solana-version = { workspace = true }
 solana-vote = { workspace = true }
 solana-vote-program = { workspace = true }
+solana-votor = { workspace = true }
+solana-votor-messages = { workspace = true }
 solana-wen-restart = { workspace = true }
 static_assertions = { workspace = true }
 strum = { workspace = true, features = ["derive"] }

+ 3 - 11
core/benches/consumer.rs

@@ -2,16 +2,14 @@
 #![feature(test)]
 
 use {
-    crossbeam_channel::{unbounded, Receiver},
+    crossbeam_channel::Receiver,
     rayon::{
         iter::IndexedParallelIterator,
         prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
     },
     solana_account::{Account, ReadableAccount},
     solana_clock::Epoch,
-    solana_core::banking_stage::{
-        committer::Committer, consumer::Consumer, qos_service::QosService,
-    },
+    solana_core::banking_stage::consumer::Consumer,
     solana_entry::entry::Entry,
     solana_keypair::Keypair,
     solana_ledger::{
@@ -80,12 +78,6 @@ fn create_transactions(bank: &Bank, num: usize) -> Vec<RuntimeTransaction<Saniti
         .collect()
 }
 
-fn create_consumer(transaction_recorder: TransactionRecorder) -> Consumer {
-    let (replay_vote_sender, _replay_vote_receiver) = unbounded();
-    let committer = Committer::new(None, replay_vote_sender, Arc::default());
-    Consumer::new(committer, transaction_recorder, QosService::new(0), None)
-}
-
 struct BenchFrame {
     bank: Arc<Bank>,
     _bank_forks: Arc<RwLock<BankForks>>,
@@ -154,7 +146,7 @@ fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usiz
         poh_service,
         signal_receiver: _signal_receiver,
     } = setup();
-    let consumer = create_consumer(transaction_recorder);
+    let consumer = Consumer::from(&transaction_recorder);
     let transactions = create_transactions(&bank, 2_usize.pow(20));
     let mut transaction_iter = transactions.chunks(batch_size);
 

+ 2 - 1
core/benches/receive_and_buffer_utils.rs

@@ -30,7 +30,7 @@ use {
     solana_signer::Signer,
     solana_transaction::versioned::VersionedTransaction,
     std::{
-        sync::{Arc, RwLock},
+        sync::{atomic::AtomicBool, Arc, RwLock},
         time::Instant,
     },
 };
@@ -194,6 +194,7 @@ pub fn setup_receive_and_buffer<T: ReceiveAndBuffer + ReceiveAndBufferCreator>(
     let bank_start = BankStart {
         working_bank: bank.clone(),
         bank_creation_time: Arc::new(Instant::now()),
+        contains_valid_certificate: Arc::new(AtomicBool::new(false)),
     };
 
     let (sender, receiver) = unbounded();

+ 1 - 1
core/benches/sigverify_stage.rs

@@ -13,7 +13,7 @@ use {
     },
     solana_core::{
         banking_trace::BankingTracer,
-        sigverify::TransactionSigVerifier,
+        sigverifier::ed25519_sigverifier::TransactionSigVerifier,
         sigverify_stage::{SigVerifier, SigVerifyStage},
     },
     solana_hash::Hash,

+ 2 - 0
core/src/admin_rpc_post_init.rs

@@ -8,6 +8,7 @@ use {
     solana_quic_definitions::NotifyKeyUpdate,
     solana_runtime::bank_forks::BankForks,
     solana_streamer::atomic_udp_socket::AtomicUdpSocket,
+    solana_votor::event::VotorEventSender,
     std::{
         collections::{HashMap, HashSet},
         net::UdpSocket,
@@ -80,4 +81,5 @@ pub struct AdminRpcRequestMetadataPostInit {
     pub outstanding_repair_requests: Arc<RwLock<OutstandingRequests<ShredRepairType>>>,
     pub cluster_slots: Arc<ClusterSlots>,
     pub gossip_socket: Option<AtomicUdpSocket>,
+    pub votor_event_sender: VotorEventSender,
 }

+ 56 - 0
core/src/alpenglow_consensus/bls_vote_transaction.rs

@@ -0,0 +1,56 @@
+use {
+    super::transaction::AlpenglowVoteTransaction,
+    solana_bls::{keypair::Keypair, Pubkey, PubkeyProjective, Signature, SignatureProjective},
+    solana_message::VersionedMessage,
+};
+
+impl AlpenglowVoteTransaction for BlsVoteTransaction {
+    fn new_for_test(bls_keypair: Keypair) -> Self {
+        let message = VersionedMessage::default();
+        let pubkey: Pubkey = bls_keypair.public.into();
+        let signature = bls_keypair.sign(&message.serialize()).into();
+        Self {
+            pubkey,
+            signature,
+            message,
+        }
+    }
+}
+
+/// A vote instruction signed using BLS signatures. This format will be used
+/// for vote communication between validators. It is not intended to carry
+/// real Solana program instructions to be processed on-chain.
+#[derive(Clone, Debug, Default, Eq, PartialEq)]
+pub struct BlsVoteTransaction {
+    /// Bls pubkey associated with the transaction
+    pub pubkey: Pubkey,
+    /// BLS signature certifying the message
+    pub signature: Signature,
+    /// Message signed
+    pub message: VersionedMessage,
+}
+
+impl BlsVoteTransaction {
+    /// Signs a versioned message
+    pub fn new(message: VersionedMessage, keypair: &Keypair) -> Self {
+        let message_data = message.serialize();
+        let signature = keypair.sign(&message_data).into();
+        let pubkey: Pubkey = keypair.public.into();
+        Self {
+            pubkey,
+            signature,
+            message,
+        }
+    }
+
+    /// Verifies a signed versioned message
+    pub fn verify(&self, pubkey: &Pubkey) -> bool {
+        let pubkey: Result<PubkeyProjective, _> = pubkey.try_into();
+        let signature: Result<SignatureProjective, _> = self.signature.try_into();
+        if let (Ok(pubkey), Ok(signature)) = (pubkey, signature) {
+            pubkey.verify(&signature, &self.message.serialize())
+        } else {
+            false
+        }
+    }
+}

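A hedged round-trip sketch for the `new`/`verify` pair above. Only `new` and `verify` come from the diff; `Keypair::new()` as a fresh-BLS-keypair constructor is an assumption:

    use solana_bls::keypair::Keypair; // crate path as used in the diff
    use solana_message::VersionedMessage;

    let keypair = Keypair::new(); // assumed constructor for a fresh BLS keypair
    let tx = BlsVoteTransaction::new(VersionedMessage::default(), &keypair);
    // Verification re-derives the projective forms and checks the signature
    // against the serialized message.
    assert!(tx.verify(&keypair.public.into()));
    // A different pubkey must fail verification.
    assert!(!tx.verify(&Keypair::new().public.into()));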
+ 604 - 0
core/src/alpenglow_consensus/skip_pool.rs

@@ -0,0 +1,604 @@
+use {
+    super::{utils::super_majority_threshold, Stake},
+    solana_clock::Slot,
+    solana_pubkey::Pubkey,
+    std::{
+        collections::{BTreeMap, BTreeSet, HashMap},
+        fmt::Debug,
+        ops::RangeInclusive,
+    },
+    thiserror::Error,
+};
+
+#[derive(Debug, Error, PartialEq)]
+pub enum AddVoteError {
+    #[error("Skip vote {0:?} already exists")]
+    AlreadyExists(RangeInclusive<Slot>),
+
+    #[error("Newer skip vote {0:?} than {1:?} already exists for this pubkey")]
+    TooOld(RangeInclusive<Slot>, RangeInclusive<Slot>),
+
+    #[error("Overlapping skip vote old {0:?} and new {1:?}")]
+    Overlapping(RangeInclusive<Slot>, RangeInclusive<Slot>),
+
+    #[error("Zero stake")]
+    ZeroStake,
+}
+
+/// A trait for objects that provide a stake value.
+pub trait HasStake {
+    fn stake_value(&self) -> Stake;
+    fn pubkey(&self) -> Pubkey;
+}
+
+/// Implement `HasStake` for `(Pubkey, Stake)`
+impl HasStake for (Pubkey, Stake) {
+    fn stake_value(&self) -> Stake {
+        self.1
+    }
+
+    fn pubkey(&self) -> Pubkey {
+        self.0
+    }
+}
+
+/// Dynamic Segment Tree that works with any type implementing `HasStake`
+struct DynamicSegmentTree<T: Ord + Clone + HasStake> {
+    /// (starts, ends) per `slot`, indicating the items that start and end at `slot`
+    tree: BTreeMap<Slot, (Vec<T>, Vec<T>)>,
+}
+
+impl<T: Ord + Clone + Debug + HasStake> DynamicSegmentTree<T> {
+    /// Initializes an empty dynamic segment tree
+    fn new() -> Self {
+        Self {
+            tree: BTreeMap::new(),
+        }
+    }
+
+    /// Inserts a given range `[start, end]` with an item `value`
+    fn insert(&mut self, start: Slot, end: Slot, new_value: T) {
+        self.tree
+            .entry(start)
+            .or_default()
+            .0
+            .push(new_value.clone());
+        self.tree.entry(end).or_default().1.push(new_value);
+    }
+
+    /// Removes a given range `[start, end]` with an item `value`
+    fn remove(&mut self, start: Slot, end: Slot, new_value: T) {
+        if let Some((starts, _)) = self.tree.get_mut(&start) {
+            starts.retain(|v| v.pubkey() != new_value.pubkey());
+        }
+        if let Some((_, ends)) = self.tree.get_mut(&end) {
+            ends.retain(|v| v.pubkey() != new_value.pubkey());
+        }
+    }
+
+    fn scan_certificates(&self, threshold_stake: f64) -> Vec<((Slot, Slot), BTreeSet<Pubkey>)> {
+        let mut accumulated = 0f64;
+        let mut current_contributors = BTreeSet::new();
+        let mut current_cert: Option<(Slot, BTreeSet<Pubkey>)> = None;
+        let mut certs: Vec<((Slot, Slot), BTreeSet<Pubkey>)> = vec![];
+
+        for (slot, (starts, ends)) in self.tree.iter() {
+            let mut new_contributors = vec![];
+
+            // Add new stakes
+            for item in starts {
+                current_contributors.insert(item.pubkey());
+                new_contributors.push(item.pubkey());
+                accumulated += item.stake_value() as f64;
+            }
+
+            // Start or increment current cert
+            if accumulated > threshold_stake {
+                match &mut current_cert {
+                    None => {
+                        // Start a cert
+                        current_cert = Some((*slot, current_contributors.clone()));
+                        // Check if the previous cert is consecutive
+                        if let Some(((_, prev_end), _)) = certs.last() {
+                            if prev_end + 1 == *slot {
+                                // Overwrite the newly started cert with an extension of the previous
+                                let ((prev_start, _), mut prev_contributors) = certs
+                                    .pop()
+                                    .expect("`certs` has at least one element checked above");
+                                prev_contributors.extend(current_contributors.clone());
+                                current_cert = Some((prev_start, prev_contributors));
+                            }
+                        }
+                    }
+                    Some((_, ref mut contributors)) => {
+                        // Active cert, still above threshold, add any new contributors as
+                        // we want to build the maximal certificate
+                        contributors.extend(new_contributors)
+                    }
+                }
+            }
+
+            // Subtract stakes that end on this slot
+            for item in ends {
+                current_contributors.remove(&item.pubkey());
+                accumulated -= item.stake_value() as f64
+            }
+
+            // Return cert if it has ended
+            if accumulated <= threshold_stake {
+                if let Some((start_slot, contributors)) = &current_cert {
+                    // Skip certificate has ended, reset and publish
+                    certs.push(((*start_slot, *slot), contributors.clone()));
+                    current_cert = None;
+                }
+            }
+        }
+
+        debug_assert_eq!(accumulated, 0f64);
+        debug_assert!(current_cert.is_none());
+        certs
+    }
+}
+
+/// Stores a single validator's skip vote: the skip range plus its associated vote data
+pub struct SkipVote<T> {
+    skip_range: RangeInclusive<Slot>,
+    data: T,
+}
+
+/// `SkipPool` tracks validator skip votes and aggregates stake using a dynamic segment tree.
+pub struct SkipPool<T: Clone> {
+    skips: HashMap<Pubkey, SkipVote<T>>, // Stores latest skip range for each validator
+    segment_tree: DynamicSegmentTree<(Pubkey, Stake)>, // Generic tree tracking validators' stake
+    /// The current ranges of slots that are skip certified
+    certificate_ranges: Vec<RangeInclusive<Slot>>,
+    /// Whether `certificate_ranges` is up to date
+    up_to_date: bool,
+}
+
+impl<T: Clone> Default for SkipPool<T> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<T: Clone> SkipPool<T> {
+    /// Initializes the `SkipPool`
+    pub fn new() -> Self {
+        Self {
+            skips: HashMap::new(),
+            segment_tree: DynamicSegmentTree::new(),
+            certificate_ranges: Vec::default(),
+            up_to_date: true,
+        }
+    }
+
+    /// Adds a skip vote for a validator and updates the segment tree
+    pub fn add_vote(
+        &mut self,
+        pubkey: &Pubkey,
+        skip_range: RangeInclusive<Slot>,
+        data: T,
+        stake: Stake,
+    ) -> Result<(), AddVoteError> {
+        if stake == 0 {
+            return Err(AddVoteError::ZeroStake);
+        }
+        // Remove previous skip vote if it exists
+        if let Some(prev_skip_vote) = self.skips.get(pubkey) {
+            if prev_skip_vote.skip_range == skip_range {
+                return Err(AddVoteError::AlreadyExists(
+                    prev_skip_vote.skip_range.clone(),
+                ));
+            }
+            if prev_skip_vote.skip_range.end() >= skip_range.end() {
+                return Err(AddVoteError::TooOld(
+                    prev_skip_vote.skip_range.clone(),
+                    skip_range,
+                ));
+            }
+
+            // Extensions are allowed, e.g. (1..=3) to (1..=5)
+            if skip_range.start() == prev_skip_vote.skip_range.start() {
+                // Guaranteed by above TooOld check
+                assert!(skip_range.end() > prev_skip_vote.skip_range.end());
+            } else if skip_range.start() <= prev_skip_vote.skip_range.end() {
+                return Err(AddVoteError::Overlapping(
+                    prev_skip_vote.skip_range.clone(),
+                    skip_range,
+                ));
+            }
+
+            self.segment_tree.remove(
+                *prev_skip_vote.skip_range.start(),
+                *prev_skip_vote.skip_range.end(),
+                (*pubkey, stake), // stake doesn't actually matter here
+            );
+        }
+
+        // Add new skip range
+        self.segment_tree.insert(
+            *skip_range.start(),
+            *skip_range.end(),
+            (*pubkey, stake), // Add stake
+        );
+
+        // Store the validator's updated skip vote
+        self.skips.insert(
+            *pubkey,
+            SkipVote {
+                skip_range: skip_range.clone(),
+                data,
+            },
+        );
+
+        self.up_to_date = false;
+        if self.skip_range_certified(skip_range.start(), skip_range.end()) {
+            // The vote is already contained in a cert, not necessary to update
+            self.up_to_date = true;
+        }
+
+        Ok(())
+    }
+
+    pub fn max_skip_certificate_range(&self) -> &RangeInclusive<Slot> {
+        self.certificate_ranges.last().unwrap_or(&(0..=0))
+    }
+
+    /// Get all skip certificates
+    pub fn get_skip_certificates(&self, total_stake: Stake) -> Vec<(RangeInclusive<Slot>, Vec<T>)> {
+        let threshold = super_majority_threshold(total_stake);
+        self.segment_tree
+            .scan_certificates(threshold)
+            .into_iter()
+            .map(|((start, end), contributors)| {
+                (
+                    start..=end,
+                    contributors
+                        .iter()
+                        .filter_map(|pk| self.skips.get(pk))
+                        .map(|sv| sv.data.clone())
+                        .collect(),
+                )
+            })
+            .collect()
+    }
+
+    pub fn update(&mut self, total_stake: Stake) {
+        let threshold = super_majority_threshold(total_stake);
+        self.certificate_ranges = self
+            .segment_tree
+            .scan_certificates(threshold)
+            .into_iter()
+            .map(|((start, end), _)| start..=end)
+            .collect();
+        self.up_to_date = true;
+    }
+
+    /// Is `slot` contained in any skip certificates
+    pub fn skip_certified(&mut self, slot: Slot, total_stake: Stake) -> bool {
+        if self
+            .certificate_ranges
+            .iter()
+            .any(|range| range.contains(&slot))
+        {
+            // If we already have a certificate there is no reason to rescan (potentially costly)
+            return true;
+        }
+
+        if !self.up_to_date {
+            // No certificate is found and ranges are out of date, rescan and retry
+            self.update(total_stake);
+            return self.skip_certified(slot, total_stake);
+        }
+
+        false
+    }
+
+    /// Is the given slot range contained in any skip certificates
+    pub fn skip_range_certified(&self, start_slot: &Slot, end_slot: &Slot) -> bool {
+        self.certificate_ranges
+            .iter()
+            .any(|range| range.contains(start_slot) && range.contains(end_slot))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {super::*, solana_transaction::versioned::VersionedTransaction};
+
+    fn dummy_transaction() -> VersionedTransaction {
+        VersionedTransaction::default() // Creates a dummy transaction for testing
+    }
+
+    fn assert_single_certificate_range<T: Clone>(
+        pool: &SkipPool<T>,
+        total_stake: Stake,
+        exp_range: RangeInclusive<Slot>,
+    ) {
+        let [(ref range, _)] = pool.get_skip_certificates(total_stake)[..] else {
+            panic!("skip cert failure");
+        };
+        assert_eq!(*range, exp_range);
+    }
+
+    #[test]
+    fn test_add_single_vote() {
+        let mut pool = SkipPool::new();
+        let validator = Pubkey::new_unique();
+        let skip_range = 10..=20;
+        let skip_tx = dummy_transaction();
+        let stake = 70;
+        let total_stake = 100;
+
+        pool.add_vote(&validator, skip_range.clone(), skip_tx.clone(), stake)
+            .unwrap();
+
+        let stored_vote = pool.skips.get(&validator).unwrap();
+        assert_eq!(stored_vote.skip_range, skip_range);
+        assert_eq!(stored_vote.data, skip_tx);
+        assert_single_certificate_range(&pool, total_stake, 10..=20);
+    }
+
+    #[test]
+    fn test_add_vote_zero_stake() {
+        let mut pool = SkipPool::new();
+        let validator = Pubkey::new_unique();
+        let skip_range = 1..=1;
+        let skip_tx = dummy_transaction();
+        let stake = 0;
+
+        assert_eq!(
+            pool.add_vote(&validator, skip_range.clone(), skip_tx.clone(), stake,),
+            Err(AddVoteError::ZeroStake)
+        );
+    }
+
+    #[test]
+    fn test_add_singleton_range() {
+        let mut pool = SkipPool::new();
+        let validator = Pubkey::new_unique();
+        let skip_range = 1..=1;
+        let skip_tx = dummy_transaction();
+        let stake = 70;
+        let total_stake = 100;
+
+        pool.add_vote(&validator, skip_range.clone(), skip_tx.clone(), stake)
+            .unwrap();
+
+        let stored_vote = pool.skips.get(&validator).unwrap();
+        assert_eq!(stored_vote.skip_range, skip_range);
+        assert_eq!(stored_vote.data, skip_tx);
+        assert_single_certificate_range(&pool, total_stake, 1..=1);
+    }
+
+    #[test]
+    fn test_consecutive_slots() -> Result<(), AddVoteError> {
+        let mut pool = SkipPool::new();
+        let total_stake = 100;
+        let validator1 = Pubkey::new_unique();
+        let single_slot_skippers: Vec<Pubkey> = (0..10).map(|_| Pubkey::new_unique()).collect();
+
+        pool.add_vote(&validator1, 5..=15, dummy_transaction(), 75)?;
+
+        for (i, validator) in single_slot_skippers.into_iter().enumerate() {
+            let slot = i as u64 + 16;
+            // These should not extend the skip range
+            pool.add_vote(&validator, slot..=slot, dummy_transaction(), 1)?;
+        }
+
+        assert_single_certificate_range(&pool, total_stake, 5..=15);
+        Ok(())
+    }
+
+    #[test]
+    fn test_contributor_removed() -> Result<(), AddVoteError> {
+        let mut pool = SkipPool::new();
+        let total_stake = 100;
+        let small_non_contributor = Pubkey::new_unique();
+        let validator = Pubkey::new_unique();
+
+        pool.add_vote(&small_non_contributor, 5..=5, dummy_transaction(), 1)?;
+        pool.add_vote(&validator, 6..=10, dummy_transaction(), 75)?;
+
+        let [(ref range, ref contributors)] = pool.get_skip_certificates(total_stake)[..] else {
+            panic!("skip cert failure");
+        };
+        assert_eq!(*range, RangeInclusive::new(6, 10));
+        assert_eq!(contributors.len(), 1);
+        Ok(())
+    }
+
+    #[test]
+    fn test_multi_cert() -> Result<(), AddVoteError> {
+        let mut pool = SkipPool::new();
+        let total_stake = 100;
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let validator3 = Pubkey::new_unique();
+
+        pool.add_vote(&validator1, 5..=15, dummy_transaction(), 66)?;
+        pool.add_vote(&validator2, 5..=8, dummy_transaction(), 1)?;
+        pool.add_vote(&validator3, 11..=15, dummy_transaction(), 1)?;
+
+        let certificates = pool.get_skip_certificates(total_stake);
+        assert_eq!(certificates.len(), 2);
+        assert_eq!(certificates[0].0, RangeInclusive::new(5, 8));
+        assert_eq!(certificates[1].0, RangeInclusive::new(11, 15));
+        assert!(pool.skip_certified(6, total_stake));
+        assert!(pool.skip_certified(12, total_stake));
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_add_multiple_votes() {
+        let mut pool = SkipPool::new();
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let total_stake = 100;
+
+        pool.add_vote(&validator1, 5..=15, dummy_transaction(), 50)
+            .unwrap();
+        pool.add_vote(&validator2, 20..=30, dummy_transaction(), 50)
+            .unwrap();
+        assert!(pool.get_skip_certificates(total_stake).is_empty());
+
+        pool.add_vote(&validator1, 5..=30, dummy_transaction(), 50)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 20..=30);
+    }
+
+    #[test]
+    fn test_add_multiple_disjoint_votes() {
+        let mut pool = SkipPool::new();
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let validator3 = Pubkey::new_unique();
+        let validator4 = Pubkey::new_unique();
+        let total_stake = 100;
+
+        pool.add_vote(&validator1, 1..=10, dummy_transaction(), 66)
+            .unwrap();
+
+        pool.add_vote(&validator2, 2..=2, dummy_transaction(), 1)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 2..=2);
+
+        pool.add_vote(&validator3, 4..=4, dummy_transaction(), 1)
+            .unwrap();
+        let certificates = pool.get_skip_certificates(total_stake);
+        assert_eq!(certificates.len(), 2);
+        assert_eq!(certificates[0].0, 2..=2);
+        assert_eq!(certificates[1].0, 4..=4);
+
+        pool.add_vote(&validator4, 3..=3, dummy_transaction(), 1)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 2..=4);
+        assert!(pool.skip_certified(3, total_stake));
+
+        pool.add_vote(&validator4, 3..=10, dummy_transaction(), 1)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 2..=10);
+        assert!(pool.skip_certified(7, total_stake));
+    }
+
+    #[test]
+    fn test_two_validators_overlapping_votes() {
+        let mut pool = SkipPool::new();
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let total_stake = 100;
+
+        let tx1 = dummy_transaction();
+        let tx2 = dummy_transaction();
+
+        pool.add_vote(&validator1, 10..=20, tx1.clone(), 50)
+            .unwrap();
+        pool.add_vote(&validator2, 15..=25, tx2.clone(), 50)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 15..=20);
+
+        // Test certificate is correct
+        let [(ref range, ref transactions)] = pool.get_skip_certificates(total_stake)[..] else {
+            panic!("skip cert failure");
+        };
+        assert_eq!(*range, 15..=20);
+        assert_eq!(transactions.len(), 2);
+        assert!(transactions.contains(&tx1));
+        assert!(transactions.contains(&tx2));
+    }
+
+    #[test]
+    fn test_update_existing_singleton_vote() {
+        let mut pool = SkipPool::new();
+        let validator = Pubkey::new_unique();
+        let total_stake = 100;
+        // Range expansion on a singleton vote should be ok
+        assert!(pool
+            .add_vote(&validator, 1..=1, dummy_transaction(), 70)
+            .is_ok());
+        pool.add_vote(&validator, 1..=6, dummy_transaction(), 70)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 1..=6);
+    }
+
+    #[test]
+    fn test_update_existing_vote() {
+        let mut pool = SkipPool::new();
+        let validator = Pubkey::new_unique();
+        let total_stake = 100;
+
+        pool.add_vote(&validator, 10..=20, dummy_transaction(), 70)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 10..=20);
+
+        // AlreadyExists failure
+        assert_eq!(
+            pool.add_vote(&validator, 10..=20, dummy_transaction(), 70),
+            Err(AddVoteError::AlreadyExists(10..=20))
+        );
+
+        // TooOld failure (trying to add 15..=17 when 10..=20 already exists)
+        assert_eq!(
+            pool.add_vote(&validator, 15..=17, dummy_transaction(), 70),
+            Err(AddVoteError::TooOld(10..=20, 15..=17))
+        );
+
+        // TooOld failure with same range start but smaller range end
+        assert_eq!(
+            pool.add_vote(&validator, 10..=19, dummy_transaction(), 70),
+            Err(AddVoteError::TooOld(10..=20, 10..=19))
+        );
+
+        // Overlapping failures
+        assert_eq!(
+            pool.add_vote(&validator, 15..=25, dummy_transaction(), 70),
+            Err(AddVoteError::Overlapping(10..=20, 15..=25))
+        );
+
+        assert_eq!(
+            pool.add_vote(&validator, 20..=25, dummy_transaction(), 70),
+            Err(AddVoteError::Overlapping(10..=20, 20..=25))
+        );
+
+        // Adding a new, non-overlapping range
+        pool.add_vote(&validator, 21..=22, dummy_transaction(), 70)
+            .unwrap();
+
+        // Range extension is allowed
+        pool.add_vote(&validator, 21..=23, dummy_transaction(), 70)
+            .unwrap();
+        assert_single_certificate_range(&pool, total_stake, 21..=23);
+    }
+
+    #[test]
+    fn test_threshold_not_reached() {
+        let mut pool = SkipPool::new();
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let total_stake = 100;
+
+        pool.add_vote(&validator1, 5..=15, dummy_transaction(), 30)
+            .unwrap();
+        pool.add_vote(&validator2, 20..=30, dummy_transaction(), 30)
+            .unwrap();
+        // Neither vote crosses the super-majority threshold, so no certificate forms.
+        assert!(pool.get_skip_certificates(total_stake).is_empty());
+    }
+
+    #[test]
+    fn test_update_and_skip_range_certify() {
+        let mut pool = SkipPool::new();
+        let validator1 = Pubkey::new_unique();
+        let validator2 = Pubkey::new_unique();
+        let total_stake = 100;
+
+        pool.add_vote(&validator1, 5..=15, dummy_transaction(), 50)
+            .unwrap();
+        pool.add_vote(&validator2, 10..=30, dummy_transaction(), 50)
+            .unwrap();
+        pool.update(total_stake);
+        assert!(!pool.skip_range_certified(&5, &10));
+        assert!(pool.skip_range_certified(&10, &15));
+        assert!(pool.skip_range_certified(&11, &12));
+        assert!(!pool.skip_range_certified(&15, &30));
+    }
+}

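To make the aggregation concrete, here is a minimal sketch in the spirit of the unit tests above (the `u8` payload stands in for whatever vote data type `T` is used): two validators holding 40 of 100 total stake each vote overlapping skip ranges, and only the intersection clears the ~2/3 super-majority threshold.

    let mut pool: SkipPool<u8> = SkipPool::new();
    let (a, b) = (Pubkey::new_unique(), Pubkey::new_unique());
    pool.add_vote(&a, 10..=20, 0u8, 40).unwrap();
    pool.add_vote(&b, 15..=25, 1u8, 40).unwrap();
    // 80 of 100 stake covers slots 15..=20, so exactly that range certifies.
    assert!(pool.skip_certified(18, 100));  // inside the certified intersection
    assert!(!pool.skip_certified(12, 100)); // only 40 stake covers slot 12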
+ 25 - 0
core/src/alpenglow_consensus/transaction.rs

@@ -0,0 +1,25 @@
+use {
+    alpenglow_vote::{bls_message::VoteMessage, vote::Vote},
+    solana_bls_signatures::Signature as BLSSignature,
+    solana_transaction::versioned::VersionedTransaction,
+};
+
+pub trait AlpenglowVoteTransaction: Clone + std::fmt::Debug {
+    fn new_for_test(signature: BLSSignature, vote: Vote, rank: usize) -> Self;
+}
+
+impl AlpenglowVoteTransaction for VersionedTransaction {
+    fn new_for_test(_signature: BLSSignature, _vote: Vote, _rank: usize) -> Self {
+        Self::default()
+    }
+}
+
+impl AlpenglowVoteTransaction for VoteMessage {
+    fn new_for_test(signature: BLSSignature, vote: Vote, rank: usize) -> Self {
+        VoteMessage {
+            vote,
+            signature,
+            rank: rank as u16,
+        }
+    }
+}

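Given the `VoteMessage` impl above, `new_for_test` is plain field assembly with a `usize` to `u16` narrowing on `rank`. A hedged fragment, assuming some `vote: Vote` value in scope and that `BLSSignature` implements `Default` (neither constructor is shown in the diff):

    let msg = VoteMessage::new_for_test(BLSSignature::default(), vote, 7);
    assert_eq!(msg.rank, 7u16);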
+ 7 - 0
core/src/banking_simulation.rs

@@ -44,6 +44,7 @@ use {
     solana_signer::Signer,
     solana_streamer::socket::SocketAddrSpace,
     solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType},
+    solana_votor::event::VotorEventReceiver,
     std::{
         collections::BTreeMap,
         fmt::Display,
@@ -419,6 +420,7 @@ struct SimulatorLoop {
     leader_schedule_cache: Arc<LeaderScheduleCache>,
     retransmit_slots_sender: Sender<Slot>,
     retracer: Arc<BankingTracer>,
+    _completed_block_receiver: VotorEventReceiver,
 }
 
 impl SimulatorLoop {
@@ -741,6 +743,7 @@ impl BankingSimulator {
             &leader_schedule_cache,
             &genesis_config.poh_config,
             exit.clone(),
+            false,
         );
         let poh_recorder = Arc::new(RwLock::new(poh_recorder));
         let (record_sender, record_receiver) = unbounded();
@@ -753,6 +756,7 @@ impl BankingSimulator {
             DEFAULT_PINNED_CPU_CORE,
             DEFAULT_HASHES_PER_BATCH,
             record_receiver,
+            || {},
         );
 
         // Enable BankingTracer to approximate the real environment as close as possible because
@@ -785,6 +789,7 @@ impl BankingSimulator {
 
         let (replay_vote_sender, _replay_vote_receiver) = unbounded();
         let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
+        let (completed_block_sender, completed_block_receiver) = unbounded();
         let shred_version = compute_shred_version(
             &genesis_config.hash(),
             Some(&bank_forks.read().unwrap().root_bank().hard_forks()),
@@ -819,6 +824,7 @@ impl BankingSimulator {
             shred_version,
             sender,
             None,
+            completed_block_sender,
         );
 
         info!("Start banking stage!...");
@@ -891,6 +897,7 @@ impl BankingSimulator {
             leader_schedule_cache,
             retransmit_slots_sender,
             retracer,
+            _completed_block_receiver: completed_block_receiver,
         };
 
         let simulator_threads = SimulatorThreads {

+ 79 - 4
core/src/banking_stage.rs

@@ -1,7 +1,6 @@
 //! The `banking_stage` processes Transaction messages. It is intended to be used
 //! to construct a software pipeline. The stage uses all available CPU cores and
 //! can do its processing in parallel with signature verification on the GPU.
-
 #[cfg(feature = "dev-context-only-utils")]
 use qualifier_attr::qualifiers;
 use {
@@ -33,7 +32,12 @@ use {
         bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache,
         vote_sender_types::ReplayVoteSender,
     },
+    solana_runtime_transaction::runtime_transaction::RuntimeTransaction,
     solana_time_utils::AtomicInterval,
+    solana_transaction::{
+        sanitized::{MessageHash, SanitizedTransaction},
+        versioned::VersionedTransaction,
+    },
     std::{
         cmp, env,
         num::Saturating,
@@ -43,7 +47,7 @@ use {
             Arc, RwLock,
         },
         thread::{self, Builder, JoinHandle},
-        time::Duration,
+        time::{Duration, Instant},
     },
     transaction_scheduler::{
         greedy_scheduler::{GreedyScheduler, GreedySchedulerConfig},
@@ -663,8 +667,7 @@ impl BankingStage {
     }
 }
 
-#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
-pub(crate) fn update_bank_forks_and_poh_recorder_for_new_tpu_bank(
+pub fn update_bank_forks_and_poh_recorder_for_new_tpu_bank(
     bank_forks: &RwLock<BankForks>,
     poh_recorder: &RwLock<PohRecorder>,
     tpu_bank: Bank,
@@ -673,6 +676,78 @@ pub(crate) fn update_bank_forks_and_poh_recorder_for_new_tpu_bank(
     poh_recorder.write().unwrap().set_bank(tpu_bank);
 }
 
+#[allow(dead_code)]
+pub fn commit_certificate(
+    bank: &Arc<Bank>,
+    transaction_recorder: &TransactionRecorder,
+    certificate: Vec<VersionedTransaction>,
+) -> bool {
+    if certificate.is_empty() {
+        return true;
+    }
+    let consumer = Consumer::from(transaction_recorder);
+    let runtime_transactions: Result<Vec<RuntimeTransaction<SanitizedTransaction>>, _> =
+        certificate
+            .into_iter()
+            .map(|versioned_tx| {
+                // Short circuits on first error because
+                // transactions in the certificate need to
+                // be guaranteed to not fail
+                RuntimeTransaction::try_create(
+                    versioned_tx,
+                    MessageHash::Compute,
+                    None,
+                    &**bank,
+                    bank.get_reserved_account_keys(),
+                )
+            })
+            .collect();
+
+    // TODO: guarantee these transactions don't fail
+    if let Err(e) = runtime_transactions {
+        error!(
+            "Error in bank {} creating runtime transaction in certificate {:?}",
+            bank.slot(),
+            e
+        );
+        return false;
+    }
+
+    let runtime_transactions = runtime_transactions.unwrap();
+    let summary = consumer.process_transactions(bank, &Instant::now(), &runtime_transactions);
+
+    if summary.reached_max_poh_height {
+        error!("Slot took too long to ingest votes {}", bank.slot());
+        datapoint_error!(
+            "vote_certificate_commit_failure",
+            ("error", "slot took too long to ingest votes", String),
+            ("slot", bank.slot(), i64)
+        );
+        // TODO: check if 2/3 of the stake landed, otherwise return false
+        return false;
+    }
+
+    if summary.error_counters.total.0 != 0 {
+        error!(
+            "Vote certificate commit failure: {} errors occurred",
+            summary.error_counters.total.0
+        );
+        datapoint_error!(
+            "vote_certificate_commit_failure",
+            (
+                "error",
+                format!("{} errors occurred", summary.error_counters.total.0),
+                String
+            ),
+            ("slot", bank.slot(), i64)
+        );
+        // TODO: check if 2/3 of the stake landed, otherwise return false
+        return false;
+    }
+
+    true
+}
+
 #[cfg(test)]
 mod tests {
     use {

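`commit_certificate` is all-or-nothing from the caller's point of view: an empty certificate trivially succeeds, while any sanitization error, PoH-height timeout, or execution error reports failure. A hedged usage sketch, with `bank`, `transaction_recorder`, and `certificate_txs` assumed in scope:

    if !commit_certificate(&bank, &transaction_recorder, certificate_txs) {
        // The TODOs above note that landing 2/3 of stake is not yet verified,
        // so on failure a caller would abandon the bank rather than build on it.
        error!("could not ingest vote certificate for slot {}", bank.slot());
    }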
+ 110 - 1
core/src/banking_stage/consumer.rs

@@ -5,6 +5,10 @@ use {
         qos_service::QosService,
         scheduler_messages::MaxAge,
     },
+    crate::banking_stage::leader_slot_metrics::{
+        CommittedTransactionsCounts, ProcessTransactionsSummary,
+    },
+    crossbeam_channel::unbounded,
     itertools::Itertools,
     solana_clock::MAX_PROCESSING_AGE,
     solana_fee::FeeFeatures,
@@ -28,7 +32,7 @@ use {
         transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig},
     },
     solana_transaction_error::TransactionError,
-    std::{num::Saturating, sync::Arc},
+    std::{num::Saturating, sync::Arc, time::Instant},
 };
 
 /// Consumer will create chunks of transactions from buffer with up to this size.
@@ -77,6 +81,19 @@ pub struct Consumer {
     log_messages_bytes_limit: Option<usize>,
 }
 
+impl From<&TransactionRecorder> for Consumer {
+    fn from(transaction_recorder: &TransactionRecorder) -> Self {
+        let (replay_vote_sender, _replay_vote_receiver) = unbounded();
+        let committer = Committer::new(None, replay_vote_sender, Arc::default());
+        Self::new(
+            committer,
+            transaction_recorder.clone(),
+            QosService::new(u32::MAX),
+            None,
+        )
+    }
+}
+
 impl Consumer {
     pub fn new(
         committer: Committer,
@@ -92,6 +109,98 @@ impl Consumer {
         }
     }
 
+    /// Sends transactions to the bank in batches.
+    ///
+    /// Returns a `ProcessTransactionsSummary`; fewer transactions than supplied
+    /// may be committed if max PoH height is reached and the bank halts, in
+    /// which case the remainder is reported as retryable.
+    pub(crate) fn process_transactions(
+        &self,
+        bank: &Arc<Bank>,
+        bank_creation_time: &Instant,
+        transactions: &[impl TransactionWithMeta],
+    ) -> ProcessTransactionsSummary {
+        let mut chunk_start = 0;
+        let mut all_retryable_tx_indexes = vec![];
+        let mut total_transaction_counts = CommittedTransactionsCounts::default();
+        let mut total_cost_model_throttled_transactions_count: u64 = 0;
+        let mut total_cost_model_us: u64 = 0;
+        let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
+        let mut total_error_counters = TransactionErrorMetrics::default();
+        let mut reached_max_poh_height = false;
+        while chunk_start != transactions.len() {
+            let chunk_end = std::cmp::min(
+                transactions.len(),
+                chunk_start + TARGET_NUM_TRANSACTIONS_PER_BATCH,
+            );
+            let process_transaction_batch_output =
+                self.process_and_record_transactions(bank, &transactions[chunk_start..chunk_end]);
+
+            let ProcessTransactionBatchOutput {
+                cost_model_throttled_transactions_count: new_cost_model_throttled_transactions_count,
+                cost_model_us: new_cost_model_us,
+                execute_and_commit_transactions_output,
+            } = process_transaction_batch_output;
+            total_cost_model_throttled_transactions_count =
+                total_cost_model_throttled_transactions_count
+                    .saturating_add(new_cost_model_throttled_transactions_count);
+            total_cost_model_us = total_cost_model_us.saturating_add(new_cost_model_us);
+
+            let ExecuteAndCommitTransactionsOutput {
+                transaction_counts: new_transaction_counts,
+                retryable_transaction_indexes: new_retryable_transaction_indexes,
+                commit_transactions_result: new_commit_transactions_result,
+                execute_and_commit_timings: new_execute_and_commit_timings,
+                error_counters: new_error_counters,
+                ..
+            } = execute_and_commit_transactions_output;
+
+            total_execute_and_commit_timings.accumulate(&new_execute_and_commit_timings);
+            total_error_counters.accumulate(&new_error_counters);
+            total_transaction_counts.accumulate(
+                &new_transaction_counts,
+                new_commit_transactions_result.is_ok(),
+            );
+
+            // Add the retryable txs (transactions that errored in a way that warrants a retry)
+            // to the list of unprocessed txs.
+            all_retryable_tx_indexes.extend_from_slice(&new_retryable_transaction_indexes);
+
+            let should_bank_still_be_processing_txs =
+                Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot);
+            match (
+                new_commit_transactions_result,
+                should_bank_still_be_processing_txs,
+            ) {
+                (Err(PohRecorderError::MaxHeightReached), _) | (_, false) => {
+                    info!(
+                        "process transactions: max height reached slot: {} height: {}",
+                        bank.slot(),
+                        bank.tick_height()
+                    );
+                    // process_and_record_transactions has returned all retryable errors in
+                    // transactions[chunk_start..chunk_end], so we just need to push the remaining
+                    // transactions into the unprocessed queue.
+                    all_retryable_tx_indexes.extend(chunk_end..transactions.len());
+                    reached_max_poh_height = true;
+                    break;
+                }
+                _ => (),
+            }
+            // Don't exit early on any other type of error, continue processing...
+            chunk_start = chunk_end;
+        }
+
+        ProcessTransactionsSummary {
+            reached_max_poh_height,
+            transaction_counts: total_transaction_counts,
+            retryable_transaction_indexes: all_retryable_tx_indexes,
+            cost_model_throttled_transactions_count: total_cost_model_throttled_transactions_count,
+            cost_model_us: total_cost_model_us,
+            execute_and_commit_timings: total_execute_and_commit_timings,
+            error_counters: total_error_counters,
+        }
+    }
+
     pub fn process_and_record_transactions(
         &self,
         bank: &Arc<Bank>,

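The chunking in `process_transactions` is worth seeing in isolation: transactions are consumed in `TARGET_NUM_TRANSACTIONS_PER_BATCH`-sized windows, and on `MaxHeightReached` every not-yet-attempted index is appended to the retryable set. A standalone sketch with an assumed target of 64 over 150 transactions:

    let (total, target) = (150usize, 64usize);
    let mut chunks = vec![];
    let mut chunk_start = 0;
    while chunk_start != total {
        let chunk_end = std::cmp::min(total, chunk_start + target);
        chunks.push(chunk_start..chunk_end);
        chunk_start = chunk_end;
    }
    assert_eq!(chunks, vec![0..64, 64..128, 128..150]);
    // If PoH max height were hit while processing 64..128, indexes 128..150
    // would all be marked retryable and the loop would break.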
+ 119 - 9
core/src/banking_stage/decision_maker.rs

@@ -6,7 +6,7 @@ use {
     solana_poh::poh_recorder::{BankStart, PohRecorder},
     solana_unified_scheduler_pool::{BankingStageMonitor, BankingStageStatus},
     std::{
-        sync::{atomic::{AtomicBool, Ordering::Relaxed}, Arc, RwLock},
+        sync::{atomic::{AtomicBool, Ordering::{self, Relaxed}}, Arc, RwLock},
         time::{Duration, Instant},
     },
 };
@@ -101,9 +101,22 @@ impl DecisionMaker {
     }
 
     fn bank_start(poh_recorder: &PohRecorder) -> Option<BankStart> {
-        poh_recorder
-            .bank_start()
-            .filter(|bank_start| bank_start.should_working_bank_still_be_processing_txs())
+        poh_recorder.bank_start().filter(|bank_start| {
+            let first_alpenglow_slot = bank_start
+                .working_bank
+                .feature_set
+                .activated_slot(&agave_feature_set::secp256k1_program_enabled::id())
+                .unwrap_or(u64::MAX);
+            let contains_valid_certificate =
+                if bank_start.working_bank.slot() >= first_alpenglow_slot {
+                    bank_start
+                        .contains_valid_certificate
+                        .load(Ordering::Relaxed)
+                } else {
+                    true
+                };
+            contains_valid_certificate && bank_start.should_working_bank_still_be_processing_txs()
+        })
     }
 
     fn would_be_leader_shortly(poh_recorder: &PohRecorder) -> bool {
@@ -156,13 +169,15 @@ mod tests {
         super::*,
         core::panic,
         solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS,
-        solana_ledger::{blockstore::Blockstore, genesis_utils::create_genesis_config},
+        solana_ledger::{blockstore::Blockstore, genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete},
         solana_poh::poh_recorder::create_test_recorder,
         solana_pubkey::Pubkey,
         solana_runtime::bank::Bank,
         std::{
-            env::temp_dir,
-            sync::{atomic::Ordering, Arc},
+            sync::{
+                atomic::{AtomicBool, Ordering},
+                Arc,
+            },
             time::Instant,
         },
     };
@@ -171,6 +186,7 @@ mod tests {
     fn test_buffered_packet_decision_bank_start() {
         let bank = Arc::new(Bank::default_for_tests());
         let bank_start = BankStart {
+            contains_valid_certificate: Arc::new(AtomicBool::new(true)),
             working_bank: bank,
             bank_creation_time: Arc::new(Instant::now()),
         };
@@ -188,8 +204,8 @@ mod tests {
     fn test_make_consume_or_forward_decision() {
         let genesis_config = create_genesis_config(2).genesis_config;
         let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
-        let ledger_path = temp_dir();
-        let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap());
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
         let (exit, poh_recorder, _transaction_recorder, poh_service, _entry_receiver) =
             create_test_recorder(bank.clone(), blockstore, None, None);
         // Drop the poh service immediately to avoid potential ticking
@@ -254,13 +270,107 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_make_consume_or_forward_decision_alpenglow() {
+        let genesis_config = create_genesis_config(2).genesis_config;
+        let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
+        let (exit, poh_recorder, _transaction_recorder, poh_service, _entry_receiver) =
+            create_test_recorder(bank.clone(), blockstore, None, None);
+        // Drop the poh service immediately to avoid potential ticking
+        exit.store(true, Ordering::Relaxed);
+        poh_service.join().unwrap();
+
+        let my_pubkey = Pubkey::new_unique();
+        let decision_maker = DecisionMaker::new(poh_recorder.clone());
+        poh_recorder.write().unwrap().reset(bank.clone(), None);
+        let slot = bank.slot() + 1;
+        let mut bank = Bank::new_from_parent(bank, &my_pubkey, slot);
+        bank.activate_feature(&agave_feature_set::secp256k1_program_enabled::id());
+        let bank = Arc::new(bank);
+
+        // Currently Leader, with alpenglow enabled, no certificate - Hold
+        {
+            poh_recorder
+                .write()
+                .unwrap()
+                .set_bank_for_test(bank.clone());
+            assert!(!poh_recorder
+                .write()
+                .unwrap()
+                .bank_start()
+                .unwrap()
+                .contains_valid_certificate
+                .load(Ordering::Relaxed));
+            let decision = decision_maker.make_consume_or_forward_decision_no_cache();
+            assert_matches!(decision, BufferedPacketsDecision::Hold);
+        }
+
+        // Currently Leader, with alpenglow enabled, certificate valid - Consume
+        {
+            poh_recorder
+                .write()
+                .unwrap()
+                .bank_start()
+                .unwrap()
+                .contains_valid_certificate
+                .store(true, Ordering::Relaxed);
+            let decision = decision_maker.make_consume_or_forward_decision_no_cache();
+            assert_matches!(decision, BufferedPacketsDecision::Consume(_));
+        }
+
+        // Will be leader shortly - Hold
+        for next_leader_slot_offset in [0, 1].into_iter() {
+            let next_leader_slot = bank.slot() + next_leader_slot_offset;
+            poh_recorder.write().unwrap().reset(
+                bank.clone(),
+                Some((
+                    next_leader_slot,
+                    next_leader_slot + NUM_CONSECUTIVE_LEADER_SLOTS,
+                )),
+            );
+            let decision = decision_maker.make_consume_or_forward_decision_no_cache();
+            assert!(
+                matches!(decision, BufferedPacketsDecision::Hold),
+                "next_leader_slot_offset: {next_leader_slot_offset}",
+            );
+        }
+
+        // Will be leader - ForwardAndHold
+        for next_leader_slot_offset in [2, 19].into_iter() {
+            let next_leader_slot = bank.slot() + next_leader_slot_offset;
+            poh_recorder.write().unwrap().reset(
+                bank.clone(),
+                Some((
+                    next_leader_slot,
+                    next_leader_slot + NUM_CONSECUTIVE_LEADER_SLOTS + 1,
+                )),
+            );
+            let decision = decision_maker.make_consume_or_forward_decision_no_cache();
+            assert!(
+                matches!(decision, BufferedPacketsDecision::ForwardAndHold),
+                "next_leader_slot_offset: {next_leader_slot_offset}",
+            );
+        }
+
+        // Known leader, not me - Forward
+        {
+            poh_recorder.write().unwrap().reset(bank, None);
+            let decision = decision_maker.make_consume_or_forward_decision_no_cache();
+            assert_matches!(decision, BufferedPacketsDecision::Forward);
+        }
+    }
+
     #[test]
     fn test_should_process_or_forward_packets() {
         let bank = Arc::new(Bank::default_for_tests());
         let bank_start = Some(BankStart {
+            contains_valid_certificate: Arc::new(AtomicBool::new(true)),
             working_bank: bank,
             bank_creation_time: Arc::new(Instant::now()),
         });
+
         // having active bank allows to consume immediately
         assert_matches!(
             DecisionMaker::consume_or_forward_packets(

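The hunks above make leader-side consumption conditional on a certificate flag that BankStart now carries: Hold while we are leader without a valid certificate, Consume once one is observed, and Hold/Forward around upcoming leader slots as before. A minimal, self-contained sketch of that decision shape, using hypothetical stand-in types rather than the crate's own:

    use std::sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    };

    #[derive(Debug, PartialEq)]
    enum Decision {
        Consume,
        Hold,
        Forward,
    }

    // Hypothetical stand-in for the bank start handle that now carries the flag.
    struct BankStartHandle {
        contains_valid_certificate: Arc<AtomicBool>,
    }

    fn decide(bank_start: Option<&BankStartHandle>, leader_soon: bool) -> Decision {
        match bank_start {
            // Leader with a valid certificate: safe to consume buffered packets.
            Some(start) if start.contains_valid_certificate.load(Ordering::Relaxed) => {
                Decision::Consume
            }
            // Leader, but no certificate yet: hold.
            Some(_) => Decision::Hold,
            // Not leader right now: hold if our slot is imminent, otherwise forward.
            None if leader_soon => Decision::Hold,
            None => Decision::Forward,
        }
    }

    fn main() {
        let start = BankStartHandle {
            contains_valid_certificate: Arc::new(AtomicBool::new(false)),
        };
        assert_eq!(decide(Some(&start), false), Decision::Hold);
        start.contains_valid_certificate.store(true, Ordering::Relaxed);
        assert_eq!(decide(Some(&start), false), Decision::Consume);
        assert_eq!(decide(None, true), Decision::Hold);
        assert_eq!(decide(None, false), Decision::Forward);
    }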
+ 7 - 2
core/src/banking_stage/leader_slot_metrics.rs

@@ -452,7 +452,7 @@ impl VotePacketCountMetrics {
             "banking_stage-vote_packet_counts",
             ("slot", slot, i64),
             ("dropped_gossip_votes", self.dropped_gossip_votes, i64),
-            ("dropped_tpu_votes", self.dropped_tpu_votes, i64)
+            ("dropped_tpu_votes", self.dropped_tpu_votes, i64),
         );
     }
 }
@@ -805,7 +805,10 @@ mod tests {
         super::*,
         solana_pubkey::Pubkey,
         solana_runtime::{bank::Bank, genesis_utils::create_genesis_config},
-        std::{mem, sync::Arc},
+        std::{
+            mem,
+            sync::{atomic::AtomicBool, Arc},
+        },
     };
 
     struct TestSlotBoundaryComponents {
@@ -820,6 +823,7 @@ mod tests {
         let genesis = create_genesis_config(10);
         let first_bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config));
         let first_poh_recorder_bank = BankStart {
+            contains_valid_certificate: Arc::new(AtomicBool::new(true)),
             working_bank: first_bank.clone(),
             bank_creation_time: Arc::new(Instant::now()),
         };
@@ -831,6 +835,7 @@ mod tests {
             first_bank.slot() + 1,
         ));
         let next_poh_recorder_bank = BankStart {
+            contains_valid_certificate: Arc::new(AtomicBool::new(true)),
             working_bank: next_bank.clone(),
             bank_creation_time: Arc::new(Instant::now()),
         };

+ 720 - 0
core/src/block_creation_loop.rs

@@ -0,0 +1,720 @@
+//! The Alpenglow block creation loop.
+//! When our leader window is reached, this loop attempts to create our leader
+//! blocks within the block timeouts. It is responsible for inserting empty banks
+//! for banking stage to fill, and for clearing banks once the timeout has been
+//! reached.
+use {
+    crate::{
+        banking_trace::BankingTracer,
+        replay_stage::{Finalizer, ReplayStage},
+    },
+    crossbeam_channel::{Receiver, RecvTimeoutError},
+    solana_clock::Slot,
+    solana_gossip::cluster_info::ClusterInfo,
+    solana_ledger::{
+        blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache,
+        leader_schedule_utils::leader_slot_index,
+    },
+    solana_measure::measure::Measure,
+    solana_metrics::datapoint_info,
+    solana_poh::poh_recorder::{PohRecorder, Record, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
+    solana_pubkey::Pubkey,
+    solana_rpc::{rpc_subscriptions::RpcSubscriptions, slot_status_notifier::SlotStatusNotifier},
+    solana_runtime::{
+        bank::{Bank, NewBankOptions},
+        bank_forks::BankForks,
+    },
+    solana_time_utils::timestamp,
+    solana_votor::{block_timeout, event::LeaderWindowInfo, votor::LeaderWindowNotifier},
+    std::{
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, Condvar, Mutex, RwLock,
+        },
+        thread,
+        time::{Duration, Instant},
+    },
+    thiserror::Error,
+};
+
+pub struct BlockCreationLoopConfig {
+    pub exit: Arc<AtomicBool>,
+
+    // Shared state
+    pub bank_forks: Arc<RwLock<BankForks>>,
+    pub blockstore: Arc<Blockstore>,
+    pub cluster_info: Arc<ClusterInfo>,
+    pub poh_recorder: Arc<RwLock<PohRecorder>>,
+    pub leader_schedule_cache: Arc<LeaderScheduleCache>,
+    pub rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
+
+    // Notifiers
+    pub banking_tracer: Arc<BankingTracer>,
+    pub slot_status_notifier: Option<SlotStatusNotifier>,
+
+    // Receivers / notifications from banking stage / replay / voting loop
+    pub record_receiver: Receiver<Record>,
+    pub leader_window_notifier: Arc<LeaderWindowNotifier>,
+    pub replay_highest_frozen: Arc<ReplayHighestFrozen>,
+}
+
+struct LeaderContext {
+    my_pubkey: Pubkey,
+    blockstore: Arc<Blockstore>,
+    poh_recorder: Arc<RwLock<PohRecorder>>,
+    leader_schedule_cache: Arc<LeaderScheduleCache>,
+    bank_forks: Arc<RwLock<BankForks>>,
+    rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
+    slot_status_notifier: Option<SlotStatusNotifier>,
+    banking_tracer: Arc<BankingTracer>,
+    replay_highest_frozen: Arc<ReplayHighestFrozen>,
+}
+
+#[derive(Default)]
+pub struct ReplayHighestFrozen {
+    pub highest_frozen_slot: Mutex<Slot>,
+    pub freeze_notification: Condvar,
+}
+
+#[derive(Default)]
+struct BlockCreationLoopMetrics {
+    last_report: u64,
+    loop_count: u64,
+    bank_timeout_completion_count: u64,
+    bank_filled_completion_count: u64,
+
+    window_production_elapsed: u64,
+    bank_filled_completion_elapsed_hist: histogram::Histogram,
+    bank_timeout_completion_elapsed_hist: histogram::Histogram,
+}
+
+impl BlockCreationLoopMetrics {
+    fn is_empty(&self) -> bool {
+        0 == self.loop_count
+            + self.bank_timeout_completion_count
+            + self.bank_filled_completion_count
+            + self.window_production_elapsed
+            + self.bank_filled_completion_elapsed_hist.entries()
+            + self.bank_timeout_completion_elapsed_hist.entries()
+    }
+
+    fn report(&mut self, report_interval_ms: u64) {
+        // skip reporting if the metrics are empty
+        if self.is_empty() {
+            return;
+        }
+
+        let now = timestamp();
+        let elapsed_ms = now - self.last_report;
+
+        if elapsed_ms > report_interval_ms {
+            datapoint_info!(
+                "block-creation-loop-metrics",
+                ("loop_count", self.loop_count, i64),
+                (
+                    "bank_timeout_completion_count",
+                    self.bank_timeout_completion_count,
+                    i64
+                ),
+                (
+                    "bank_filled_completion_count",
+                    self.bank_filled_completion_count,
+                    i64
+                ),
+                (
+                    "window_production_elapsed",
+                    self.window_production_elapsed,
+                    i64
+                ),
+                (
+                    "bank_filled_completion_elapsed_90pct",
+                    self.bank_filled_completion_elapsed_hist
+                        .percentile(90.0)
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_filled_completion_elapsed_mean",
+                    self.bank_filled_completion_elapsed_hist.mean().unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_filled_completion_elapsed_min",
+                    self.bank_filled_completion_elapsed_hist
+                        .minimum()
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_filled_completion_elapsed_max",
+                    self.bank_filled_completion_elapsed_hist
+                        .maximum()
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_timeout_completion_elapsed_90pct",
+                    self.bank_timeout_completion_elapsed_hist
+                        .percentile(90.0)
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_timeout_completion_elapsed_mean",
+                    self.bank_timeout_completion_elapsed_hist
+                        .mean()
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_timeout_completion_elapsed_min",
+                    self.bank_timeout_completion_elapsed_hist
+                        .minimum()
+                        .unwrap_or(0),
+                    i64
+                ),
+                (
+                    "bank_timeout_completion_elapsed_max",
+                    self.bank_timeout_completion_elapsed_hist
+                        .maximum()
+                        .unwrap_or(0),
+                    i64
+                ),
+            );
+
+            // reset metrics
+            self.bank_timeout_completion_count = 0;
+            self.bank_filled_completion_count = 0;
+            self.window_production_elapsed = 0;
+            self.bank_filled_completion_elapsed_hist.clear();
+            self.bank_timeout_completion_elapsed_hist.clear();
+            self.last_report = now;
+        }
+    }
+}
+
+// Metrics on slots that we attempt to start a leader block for
+#[derive(Default)]
+struct SlotMetrics {
+    slot: Slot,
+    attempt_count: u64,
+    replay_is_behind_count: u64,
+    startup_verification_incomplete_count: u64,
+    already_have_bank_count: u64,
+
+    slot_delay_hist: histogram::Histogram,
+    replay_is_behind_cumulative_wait_elapsed: u64,
+    replay_is_behind_wait_elapsed_hist: histogram::Histogram,
+}
+
+impl SlotMetrics {
+    fn report(&mut self) {
+        datapoint_info!(
+            "slot-metrics",
+            ("slot", self.slot, i64),
+            ("attempt_count", self.attempt_count, i64),
+            ("replay_is_behind_count", self.replay_is_behind_count, i64),
+            (
+                "startup_verification_incomplete_count",
+                self.startup_verification_incomplete_count,
+                i64
+            ),
+            ("already_have_bank_count", self.already_have_bank_count, i64),
+            (
+                "slot_delay_90pct",
+                self.slot_delay_hist.percentile(90.0).unwrap_or(0),
+                i64
+            ),
+            (
+                "slot_delay_mean",
+                self.slot_delay_hist.mean().unwrap_or(0),
+                i64
+            ),
+            (
+                "slot_delay_min",
+                self.slot_delay_hist.minimum().unwrap_or(0),
+                i64
+            ),
+            (
+                "slot_delay_max",
+                self.slot_delay_hist.maximum().unwrap_or(0),
+                i64
+            ),
+            (
+                "replay_is_behind_cumulative_wait_elapsed",
+                self.replay_is_behind_cumulative_wait_elapsed,
+                i64
+            ),
+            (
+                "replay_is_behind_wait_elapsed_90pct",
+                self.replay_is_behind_wait_elapsed_hist
+                    .percentile(90.0)
+                    .unwrap_or(0),
+                i64
+            ),
+            (
+                "replay_is_behind_wait_elapsed_mean",
+                self.replay_is_behind_wait_elapsed_hist.mean().unwrap_or(0),
+                i64
+            ),
+            (
+                "replay_is_behind_wait_elapsed_min",
+                self.replay_is_behind_wait_elapsed_hist
+                    .minimum()
+                    .unwrap_or(0),
+                i64
+            ),
+            (
+                "replay_is_behind_wait_elapsed_max",
+                self.replay_is_behind_wait_elapsed_hist
+                    .maximum()
+                    .unwrap_or(0),
+                i64
+            ),
+        );
+
+        // reset metrics
+        self.attempt_count = 0;
+        self.replay_is_behind_count = 0;
+        self.startup_verification_incomplete_count = 0;
+        self.already_have_bank_count = 0;
+        self.slot_delay_hist.clear();
+        self.replay_is_behind_cumulative_wait_elapsed = 0;
+        self.replay_is_behind_wait_elapsed_hist.clear();
+    }
+}
+
+#[derive(Debug, Error)]
+enum StartLeaderError {
+    /// Replay has not yet frozen the parent slot
+    #[error("Replay is behind for parent slot {0}")]
+    ReplayIsBehind(/* parent slot */ Slot),
+
+    /// Startup verification is not yet complete
+    #[error("Startup verification is incomplete on parent bank {0}")]
+    StartupVerificationIncomplete(/* parent slot */ Slot),
+
+    /// Bank forks already contains bank
+    #[error("Already contain bank for leader slot {0}")]
+    AlreadyHaveBank(/* leader slot */ Slot),
+
+    /// Haven't landed a vote
+    #[error("Have not rooted a block with our vote")]
+    VoteNotRooted,
+}
+
+fn start_receive_and_record_loop(
+    exit: Arc<AtomicBool>,
+    poh_recorder: Arc<RwLock<PohRecorder>>,
+    record_receiver: Receiver<Record>,
+) {
+    while !exit.load(Ordering::Relaxed) {
+        // We need a timeout here to check the exit flag; 400ms was chosen
+        // for now but it can be lengthened if needed.
+        match record_receiver.recv_timeout(Duration::from_millis(400)) {
+            Ok(record) => {
+                if record
+                    .sender
+                    .send(poh_recorder.write().unwrap().record(
+                        record.slot,
+                        record.mixins,
+                        record.transaction_batches,
+                    ))
+                    .is_err()
+                {
+                    panic!("Error returning mixin hashes");
+                }
+            }
+            Err(RecvTimeoutError::Disconnected) => {
+                info!("Record receiver disconnected");
+                return;
+            }
+            Err(RecvTimeoutError::Timeout) => (),
+        }
+    }
+}
+
+/// The block creation loop.
+///
+/// The `votor::certificate_pool_service` tracks when it is our leader window, and
+/// communicates the skip timer and parent slot for our window. This loop takes
+/// responsibility for creating our `NUM_CONSECUTIVE_LEADER_SLOTS` blocks and
+/// finishing them within the required timeout.
+pub fn start_loop(config: BlockCreationLoopConfig) {
+    let BlockCreationLoopConfig {
+        exit,
+        bank_forks,
+        blockstore,
+        cluster_info,
+        poh_recorder,
+        leader_schedule_cache,
+        rpc_subscriptions,
+        banking_tracer,
+        slot_status_notifier,
+        leader_window_notifier,
+        replay_highest_frozen,
+        record_receiver,
+    } = config;
+
+    // Similar to the voting loop: if this loop dies, kill the validator
+    let _exit = Finalizer::new(exit.clone());
+
+    // get latest identity pubkey during startup
+    let mut my_pubkey = cluster_info.id();
+    let leader_bank_notifier = poh_recorder.read().unwrap().new_leader_bank_notifier();
+
+    let mut ctx = LeaderContext {
+        my_pubkey,
+        blockstore,
+        poh_recorder: poh_recorder.clone(),
+        leader_schedule_cache,
+        bank_forks,
+        rpc_subscriptions,
+        slot_status_notifier,
+        banking_tracer,
+        replay_highest_frozen,
+    };
+
+    let mut metrics = BlockCreationLoopMetrics::default();
+    let mut slot_metrics = SlotMetrics::default();
+
+    // Setup poh
+    reset_poh_recorder(&ctx.bank_forks.read().unwrap().working_bank(), &ctx);
+
+    // Start receive and record loop
+    let exit_c = exit.clone();
+    let p_rec = poh_recorder.clone();
+    let receive_record_loop = thread::spawn(move || {
+        start_receive_and_record_loop(exit_c, p_rec, record_receiver);
+    });
+
+    while !exit.load(Ordering::Relaxed) {
+        // Check if set-identity was called at each leader window start
+        if my_pubkey != cluster_info.id() {
+            // set-identity cli has been called during runtime
+            let my_old_pubkey = my_pubkey;
+            my_pubkey = cluster_info.id();
+            ctx.my_pubkey = my_pubkey;
+
+            warn!(
+                "Identity changed from {} to {} during block creation loop",
+                my_old_pubkey, my_pubkey
+            );
+        }
+
+        // Wait for the voting loop to notify us
+        let LeaderWindowInfo {
+            start_slot,
+            end_slot,
+            // TODO: handle duplicate blocks by using the hash here
+            parent_block: (parent_slot, _),
+            skip_timer,
+        } = {
+            let window_info = leader_window_notifier.window_info.lock().unwrap();
+            let (mut guard, timeout_res) = leader_window_notifier
+                .window_notification
+                .wait_timeout_while(window_info, Duration::from_secs(1), |wi| wi.is_none())
+                .unwrap();
+            if timeout_res.timed_out() {
+                continue;
+            }
+            guard.take().unwrap()
+        };
+
+        trace!(
+            "Received window notification for {start_slot} to {end_slot} \
+            parent: {parent_slot}"
+        );
+
+        if let Err(e) =
+            start_leader_retry_replay(start_slot, parent_slot, skip_timer, &ctx, &mut slot_metrics)
+        {
+            // Give up on this leader window
+            error!(
+                "{my_pubkey}: Unable to produce first slot {start_slot}, skipping production of our entire leader window \
+                {start_slot}-{end_slot}: {e:?}"
+            );
+            continue;
+        }
+
+        // Produce our window
+        let mut window_production_start = Measure::start("window_production");
+        let mut slot = start_slot;
+        // TODO(ashwin): Handle preemption of leader window during this loop
+        while !exit.load(Ordering::Relaxed) {
+            let leader_index = leader_slot_index(slot);
+            let timeout = block_timeout(leader_index);
+
+            // Wait for either the block timeout or for the bank to be completed
+            // The receive and record loop will fill the bank
+            let remaining_slot_time = timeout.saturating_sub(skip_timer.elapsed());
+            trace!(
+                "{my_pubkey}: waiting for leader bank {slot} to finish, remaining time: {}",
+                remaining_slot_time.as_millis(),
+            );
+
+            // Start measuring bank completion time
+            let mut bank_completion_measure = Measure::start("bank_completion");
+
+            leader_bank_notifier.wait_for_completed(remaining_slot_time);
+
+            bank_completion_measure.stop();
+
+            // Time to complete the bank; there are two possibilities:
+            // (1) We hit the block timeout: the bank is still present and we must clear it
+            // (2) The bank has filled up and been cleared by banking stage
+            {
+                let mut w_poh_recorder = poh_recorder.write().unwrap();
+                if let Some(bank) = w_poh_recorder.bank() {
+                    assert_eq!(bank.slot(), slot);
+                    trace!(
+                        "{}: bank {} has reached block timeout, ticking",
+                        bank.collector_id(),
+                        bank.slot()
+                    );
+
+                    // Record timeout completion metric
+                    metrics.bank_timeout_completion_count += 1;
+
+                    // Record bank timeout completion time
+                    let _ = metrics
+                        .bank_timeout_completion_elapsed_hist
+                        .increment(bank_completion_measure.as_us());
+
+                    let max_tick_height = bank.max_tick_height();
+                    // Set the tick height for the bank to max_tick_height - 1, so that PohRecorder::flush_cache()
+                    // will properly increment the tick_height to max_tick_height.
+                    bank.set_tick_height(max_tick_height - 1);
+                    // Write the single tick for this slot
+                    // TODO: handle migration slot because we need to provide the PoH
+                    // for slots from the previous epoch, but `tick_alpenglow()` will
+                    // delete those ticks from the cache
+                    drop(bank);
+                    w_poh_recorder.tick_alpenglow(max_tick_height);
+                } else {
+                    trace!("{my_pubkey}: {slot} reached max tick height, moving to next block");
+
+                    // Record filled completion metric
+                    metrics.bank_filled_completion_count += 1;
+
+                    // Record bank filled completion time
+                    let _ = metrics
+                        .bank_filled_completion_elapsed_hist
+                        .increment(bank_completion_measure.as_us());
+                }
+            }
+
+            // Assert that the bank has been cleared
+            assert!(!poh_recorder.read().unwrap().has_bank());
+
+            // Produce our next slot
+            slot += 1;
+            if slot > end_slot {
+                trace!("{my_pubkey}: finished leader window {start_slot}-{end_slot}");
+                break;
+            }
+
+            // Although `slot - 1` has been cleared from `poh_recorder`, it might not have finished processing in
+            // `replay_stage`, which is why we use `start_leader_retry_replay`
+            if let Err(e) =
+                start_leader_retry_replay(slot, slot - 1, skip_timer, &ctx, &mut slot_metrics)
+            {
+                error!("{my_pubkey}: Unable to produce {slot}, skipping rest of leader window {slot} - {end_slot}: {e:?}");
+                break;
+            }
+        }
+        window_production_start.stop();
+        metrics.window_production_elapsed += window_production_start.as_us();
+        metrics.loop_count += 1;
+        metrics.report(1000);
+    }
+
+    receive_record_loop.join().unwrap();
+}
+
+/// Resets poh recorder
+fn reset_poh_recorder(bank: &Arc<Bank>, ctx: &LeaderContext) {
+    trace!("{}: resetting poh to {}", ctx.my_pubkey, bank.slot());
+    let next_leader_slot = ctx.leader_schedule_cache.next_leader_slot(
+        &ctx.my_pubkey,
+        bank.slot(),
+        bank,
+        Some(ctx.blockstore.as_ref()),
+        GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
+    );
+
+    ctx.poh_recorder
+        .write()
+        .unwrap()
+        .reset(bank.clone(), next_leader_slot);
+}
+
+/// Similar to `maybe_start_leader`, but if replay is lagging we retry
+/// until either replay finishes or we hit the block timeout.
+fn start_leader_retry_replay(
+    slot: Slot,
+    parent_slot: Slot,
+    skip_timer: Instant,
+    ctx: &LeaderContext,
+    slot_metrics: &mut SlotMetrics,
+) -> Result<(), StartLeaderError> {
+    let my_pubkey = ctx.my_pubkey;
+    let timeout = block_timeout(leader_slot_index(slot));
+    let mut slot_delay_start = Measure::start("slot_delay");
+    while !timeout.saturating_sub(skip_timer.elapsed()).is_zero() {
+        // Count attempts to start a leader block
+        slot_metrics.attempt_count += 1;
+
+        match maybe_start_leader(slot, parent_slot, ctx, slot_metrics) {
+            Ok(()) => {
+                // Record delay for successful slot
+                slot_delay_start.stop();
+                let _ = slot_metrics
+                    .slot_delay_hist
+                    .increment(slot_delay_start.as_us());
+
+                // slot was successful, report slot's metrics
+                slot_metrics.report();
+
+                return Ok(());
+            }
+            Err(StartLeaderError::ReplayIsBehind(_)) => {
+                // slot_metrics.replay_is_behind_count already gets incremented in maybe_start_leader
+
+                trace!(
+                    "{my_pubkey}: Attempting to produce slot {slot}, however replay of \
+                    the parent {parent_slot} is not yet finished, waiting. Skip timer {}",
+                    skip_timer.elapsed().as_millis()
+                );
+                let highest_frozen_slot = ctx
+                    .replay_highest_frozen
+                    .highest_frozen_slot
+                    .lock()
+                    .unwrap();
+
+                // We wait until either we finish replay of the parent or the skip timer finishes
+                let mut wait_start = Measure::start("replay_is_behind");
+                let _unused = ctx
+                    .replay_highest_frozen
+                    .freeze_notification
+                    .wait_timeout_while(
+                        highest_frozen_slot,
+                        timeout.saturating_sub(skip_timer.elapsed()),
+                        |hfs| *hfs < parent_slot,
+                    )
+                    .unwrap();
+                wait_start.stop();
+                slot_metrics.replay_is_behind_cumulative_wait_elapsed += wait_start.as_us();
+                let _ = slot_metrics
+                    .replay_is_behind_wait_elapsed_hist
+                    .increment(wait_start.as_us());
+            }
+            Err(e) => return Err(e),
+        }
+    }
+
+    error!(
+        "{my_pubkey}: Skipping production of {slot}: \
+        Unable to replay parent {parent_slot} in time"
+    );
+    Err(StartLeaderError::ReplayIsBehind(parent_slot))
+}
+
+/// Checks if we are set to produce a leader block for `slot`:
+/// - The highest notarized/finalized slot from `cert_pool` is frozen
+/// - Startup verification is complete
+/// - Bank forks does not already contain a bank for `slot`
+///
+/// If checks pass we return `Ok(())` and:
+/// - Reset poh to the `parent_slot`
+/// - Create a new bank for `slot` with parent `parent_slot`
+/// - Insert into bank_forks and poh recorder
+fn maybe_start_leader(
+    slot: Slot,
+    parent_slot: Slot,
+    ctx: &LeaderContext,
+    slot_metrics: &mut SlotMetrics,
+) -> Result<(), StartLeaderError> {
+    if ctx.bank_forks.read().unwrap().get(slot).is_some() {
+        slot_metrics.already_have_bank_count += 1;
+        return Err(StartLeaderError::AlreadyHaveBank(slot));
+    }
+
+    let Some(parent_bank) = ctx.bank_forks.read().unwrap().get(parent_slot) else {
+        slot_metrics.replay_is_behind_count += 1;
+        return Err(StartLeaderError::ReplayIsBehind(parent_slot));
+    };
+
+    if !parent_bank.is_frozen() {
+        slot_metrics.replay_is_behind_count += 1;
+        return Err(StartLeaderError::ReplayIsBehind(parent_slot));
+    }
+
+    if !parent_bank.has_initial_accounts_hash_verification_completed() {
+        slot_metrics.startup_verification_incomplete_count += 1;
+        return Err(StartLeaderError::StartupVerificationIncomplete(parent_slot));
+    }
+
+    // TODO(ashwin): plug this in from replay
+    let has_new_vote_been_rooted = true;
+    if !has_new_vote_been_rooted {
+        return Err(StartLeaderError::VoteNotRooted);
+    }
+
+    // Create and insert the bank
+    create_and_insert_leader_bank(slot, parent_bank, ctx);
+    Ok(())
+}
+
+/// Creates and inserts the leader bank `slot` of this window with
+/// parent `parent_bank`
+fn create_and_insert_leader_bank(slot: Slot, parent_bank: Arc<Bank>, ctx: &LeaderContext) {
+    let parent_slot = parent_bank.slot();
+    let root_slot = ctx.bank_forks.read().unwrap().root();
+
+    if let Some(bank) = ctx.poh_recorder.read().unwrap().bank() {
+        panic!(
+            "{}: Attempting to produce a block for {slot}, however we are still in production of \
+            {}. Something has gone wrong with the block creation loop, exiting",
+            ctx.my_pubkey,
+            bank.slot(),
+        );
+    }
+
+    if ctx.poh_recorder.read().unwrap().start_slot() != parent_slot {
+        // Important to keep Poh somewhat accurate for
+        // parts of the system relying on PohRecorder::would_be_leader()
+        //
+        // TODO: On migration need to keep the ticks around for parent slots in previous epoch
+        // because reset below will delete those ticks
+        reset_poh_recorder(&parent_bank, ctx);
+    }
+
+    let tpu_bank = ReplayStage::new_bank_from_parent_with_notify(
+        parent_bank.clone(),
+        slot,
+        root_slot,
+        &ctx.my_pubkey,
+        ctx.rpc_subscriptions.as_deref(),
+        &ctx.slot_status_notifier,
+        NewBankOptions::default(),
+    );
+    // make sure the parent is frozen so its hashes are finalized, via the
+    // above new()-ing of its child bank
+    ctx.banking_tracer.hash_event(
+        parent_slot,
+        &parent_bank.last_blockhash(),
+        &parent_bank.hash(),
+    );
+
+    // Insert the bank
+    let tpu_bank = ctx.bank_forks.write().unwrap().insert(tpu_bank);
+    let poh_bank_start = ctx.poh_recorder.write().unwrap().set_bank(tpu_bank);
+    // TODO: cleanup, this is no longer needed
+    poh_bank_start
+        .contains_valid_certificate
+        .store(true, Ordering::Relaxed);
+
+    info!(
+        "{}: new fork:{} parent:{} (leader) root:{}",
+        ctx.my_pubkey, slot, parent_slot, root_slot
+    );
+}
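The loop above hands off leader windows through a Mutex<Option<LeaderWindowInfo>> guarded by a Condvar: the voting loop publishes the window info, and the creation loop waits with a timeout so it can keep polling the exit flag, then take()s the info so each window is consumed exactly once. A minimal sketch of that handoff pattern, assuming simplified stand-in types:

    use std::{
        sync::{Arc, Condvar, Mutex},
        thread,
        time::Duration,
    };

    #[derive(Debug)]
    struct WindowInfo {
        start_slot: u64,
        end_slot: u64,
    }

    #[derive(Default)]
    struct Notifier {
        window_info: Mutex<Option<WindowInfo>>,
        window_notification: Condvar,
    }

    fn main() {
        let notifier = Arc::new(Notifier::default());

        // Producer: publish a window and wake the consumer.
        let producer = {
            let notifier = Arc::clone(&notifier);
            thread::spawn(move || {
                *notifier.window_info.lock().unwrap() = Some(WindowInfo {
                    start_slot: 4,
                    end_slot: 7,
                });
                notifier.window_notification.notify_one();
            })
        };

        // Consumer: wait up to 1s, re-checking the predicate to guard against
        // spurious wakeups, then take() the info so it is consumed exactly once.
        let guard = notifier.window_info.lock().unwrap();
        let (mut guard, timeout) = notifier
            .window_notification
            .wait_timeout_while(guard, Duration::from_secs(1), |wi| wi.is_none())
            .unwrap();
        if !timeout.timed_out() {
            let info = guard.take().unwrap();
            println!("producing window {}..={}", info.start_slot, info.end_slot);
        }
        drop(guard);
        producer.join().unwrap();
    }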

+ 75 - 65
core/src/cluster_info_vote_listener.rs

@@ -5,7 +5,7 @@ use {
         optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
         replay_stage::DUPLICATE_THRESHOLD,
         result::{Error, Result},
-        sigverify,
+        sigverifier::ed25519_sigverifier::ed25519_verify_cpu,
     },
     agave_banking_stage_ingress_types::BankingPacketBatch,
     crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Select, Sender},
@@ -30,7 +30,6 @@ use {
         bank_forks::BankForks,
         bank_hash_cache::{BankHashCache, DumpedSlotSubscription},
         commitment::VOTE_THRESHOLD_SIZE,
-        epoch_stakes::VersionedEpochStakes,
         root_bank_cache::RootBankCache,
         vote_sender_types::ReplayVoteReceiver,
     },
@@ -38,7 +37,7 @@ use {
     solana_time_utils::AtomicInterval,
     solana_transaction::Transaction,
     solana_vote::{
-        vote_parser::{self, ParsedVote},
+        vote_parser::{self, ParsedVote, ParsedVoteTransaction},
         vote_transaction::VoteTransaction,
     },
     std::{
@@ -279,12 +278,16 @@ impl ClusterInfoVoteListener {
         let mut packet_batches = packet::to_packet_batches(&votes, 1);
 
         // Votes should already be filtered by this point.
-        sigverify::ed25519_verify_cpu(
+        ed25519_verify_cpu(
             &mut packet_batches,
             /*reject_non_vote=*/ false,
             votes.len(),
         );
         let root_bank = root_bank_cache.root_bank();
+        let first_alpenglow_slot = root_bank
+            .feature_set
+            .activated_slot(&agave_feature_set::secp256k1_program_enabled::id())
+            .unwrap_or(Slot::MAX);
         let epoch_schedule = root_bank.epoch_schedule();
         votes
             .into_iter()
@@ -297,6 +300,9 @@ impl ClusterInfoVoteListener {
             .filter_map(|(tx, packet_batch)| {
                 let (vote_account_key, vote, ..) = vote_parser::parse_vote_transaction(&tx)?;
                 let slot = vote.last_voted_slot()?;
+                if (slot >= first_alpenglow_slot) ^ vote.is_alpenglow_vote() {
+                    return None;
+                }
                 let epoch = epoch_schedule.get_epoch(slot);
                 let authorized_voter = root_bank
                     .epoch_stakes(epoch)?
@@ -497,11 +503,9 @@ impl ClusterInfoVoteListener {
 
             // if we don't have stake information, ignore it
             let epoch = root_bank.epoch_schedule().get_epoch(slot);
-            let epoch_stakes = root_bank.epoch_stakes(epoch);
-            if epoch_stakes.is_none() {
+            let Some(epoch_stakes) = root_bank.epoch_stakes(epoch) else {
                 continue;
-            }
-            let epoch_stakes = epoch_stakes.unwrap();
+            };
 
             // We always track the last vote slot for optimistic confirmation. If we have replayed
             // the same version of last vote slot that is being voted on, then we also track the
@@ -553,7 +557,7 @@ impl ClusterInfoVoteListener {
                                 dependency_work,
                             ))
                             .unwrap_or_else(|err| {
-                                warn!("bank_notification_sender failed: {err:?}")
+                                warn!("bank_notification_sender failed: {:?}", err)
                             });
                     }
                 }
@@ -623,29 +627,45 @@ impl ClusterInfoVoteListener {
         // Process votes from gossip and ReplayStage
         let mut gossip_vote_txn_processing_time = Measure::start("gossip_vote_processing_time");
         let votes = gossip_vote_txs
-            .iter()
-            .filter_map(vote_parser::parse_vote_transaction)
-            .zip(repeat(/*is_gossip:*/ true))
-            .chain(replayed_votes.into_iter().zip(repeat(/*is_gossip:*/ false)));
-        for ((vote_pubkey, vote, _switch_proof, signature), is_gossip) in votes {
-            Self::track_new_votes_and_notify_confirmations(
-                vote,
-                &vote_pubkey,
-                signature,
-                vote_tracker,
-                root_bank,
-                subscriptions,
-                verified_vote_sender,
-                gossip_verified_vote_hash_sender,
-                &mut diff,
-                &mut new_optimistic_confirmed_slots,
-                is_gossip,
-                bank_notification_sender,
-                duplicate_confirmed_slot_sender,
-                latest_vote_slot_per_validator,
-                bank_hash_cache,
-                dumped_slot_subscription,
-            );
+            .into_iter()
+            .filter_map(|tx| {
+                let parsed_vote = vote_parser::parse_vote_transaction(&tx)?;
+                Some((parsed_vote, Some(tx)))
+            })
+            .chain(replayed_votes.into_iter().zip(repeat(/*transaction:*/ None)));
+        for ((vote_pubkey, vote, _switch_proof, signature), transaction) in votes {
+            match vote {
+                ParsedVoteTransaction::Alpenglow(_) => {
+                    panic!("Will be removed soon");
+                }
+                ParsedVoteTransaction::Tower(vote) => {
+                    if root_bank
+                        .feature_set
+                        .is_active(&agave_feature_set::secp256k1_program_enabled::id())
+                    {
+                        continue;
+                    }
+                    let is_gossip_vote = transaction.is_some();
+                    Self::track_new_votes_and_notify_confirmations(
+                        vote,
+                        &vote_pubkey,
+                        signature,
+                        vote_tracker,
+                        root_bank,
+                        subscriptions,
+                        verified_vote_sender,
+                        gossip_verified_vote_hash_sender,
+                        &mut diff,
+                        &mut new_optimistic_confirmed_slots,
+                        is_gossip_vote,
+                        bank_notification_sender,
+                        duplicate_confirmed_slot_sender,
+                        latest_vote_slot_per_validator,
+                        bank_hash_cache,
+                        dumped_slot_subscription,
+                    )
+                }
+            }
         }
         gossip_vote_txn_processing_time.stop();
         let gossip_vote_txn_processing_time_us = gossip_vote_txn_processing_time.as_us();
@@ -682,7 +702,12 @@ impl ClusterInfoVoteListener {
                     // in gossip in the past, `is_new` would be false and it would have
                     // been filtered out above), so it's safe to increment the gossip-only
                     // stake
-                    Self::sum_stake(&mut gossip_only_stake, epoch_stakes, &pubkey);
+                    if let Some(epoch_stakes) = epoch_stakes {
+                        gossip_only_stake += epoch_stakes
+                            .stakes()
+                            .vote_accounts()
+                            .get_delegated_stake(&pubkey);
+                    }
                 }
 
                 // From the `slot_diff.retain` earlier, we know because there are
@@ -729,12 +754,6 @@ impl ClusterInfoVoteListener {
             .get_or_insert_optimistic_votes_tracker(hash)
             .add_vote_pubkey(pubkey, stake, total_epoch_stake, &THRESHOLDS_TO_CHECK)
     }
-
-    fn sum_stake(sum: &mut u64, epoch_stakes: Option<&VersionedEpochStakes>, pubkey: &Pubkey) {
-        if let Some(stakes) = epoch_stakes {
-            *sum += stakes.stakes().vote_accounts().get_delegated_stake(pubkey)
-        }
-    }
 }
 
 #[cfg(test)]
@@ -757,7 +776,7 @@ mod tests {
         },
         solana_signature::Signature,
         solana_signer::Signer,
-        solana_vote::vote_transaction,
+        solana_vote::vote_transaction::{self, VoteTransaction},
         solana_vote_program::vote_state::{TowerSync, Vote, MAX_LOCKOUT_HISTORY},
         std::{
             collections::BTreeSet,
@@ -970,7 +989,7 @@ mod tests {
                 replay_votes_sender
                     .send((
                         vote_keypair.pubkey(),
-                        VoteTransaction::from(replay_vote.clone()),
+                        ParsedVoteTransaction::Tower(VoteTransaction::from(replay_vote.clone())),
                         switch_proof_hash,
                         Signature::default(),
                     ))
@@ -1293,7 +1312,10 @@ mod tests {
                     replay_votes_sender
                         .send((
                             vote_keypair.pubkey(),
-                            VoteTransaction::from(Vote::new(vec![vote_slot], Hash::default())),
+                            ParsedVoteTransaction::Tower(VoteTransaction::from(Vote::new(
+                                vec![vote_slot],
+                                Hash::default(),
+                            ))),
                             switch_proof_hash,
                             Signature::default(),
                         ))
@@ -1342,6 +1364,7 @@ mod tests {
         run_test_process_votes3(Some(Hash::default()));
     }
 
+    // TODO: Add Alpenglow equivalent tests
     #[test]
     fn test_vote_tracker_references() {
         // Create some voters at genesis
@@ -1394,7 +1417,10 @@ mod tests {
             // Add gossip vote for same slot, should not affect outcome
             vec![(
                 validator0_keypairs.vote_keypair.pubkey(),
-                VoteTransaction::from(Vote::new(vec![voted_slot], Hash::default())),
+                ParsedVoteTransaction::Tower(VoteTransaction::from(Vote::new(
+                    vec![voted_slot],
+                    Hash::default(),
+                ))),
                 None,
                 Signature::default(),
             )],
@@ -1443,7 +1469,10 @@ mod tests {
             vote_txs,
             vec![(
                 validator_keypairs[1].vote_keypair.pubkey(),
-                VoteTransaction::from(Vote::new(vec![first_slot_in_new_epoch], Hash::default())),
+                ParsedVoteTransaction::Tower(VoteTransaction::from(Vote::new(
+                    vec![first_slot_in_new_epoch],
+                    Hash::default(),
+                ))),
                 None,
                 Signature::default(),
             )],
@@ -1589,25 +1618,6 @@ mod tests {
         verify_packets_len(&packets, 2);
     }
 
-    #[test]
-    fn test_sum_stake() {
-        let SetupComponents {
-            bank,
-            validator_voting_keypairs,
-            ..
-        } = setup();
-        let vote_keypair = &validator_voting_keypairs[0].vote_keypair;
-        let epoch_stakes = bank.epoch_stakes(bank.epoch()).unwrap();
-        let mut gossip_only_stake = 0;
-
-        ClusterInfoVoteListener::sum_stake(
-            &mut gossip_only_stake,
-            Some(epoch_stakes),
-            &vote_keypair.pubkey(),
-        );
-        assert_eq!(gossip_only_stake, 100);
-    }
-
     #[test]
     fn test_bad_vote() {
         run_test_bad_vote(None);
@@ -1661,7 +1671,7 @@ mod tests {
             .unwrap();
 
         ClusterInfoVoteListener::track_new_votes_and_notify_confirmations(
-            vote,
+            vote.as_tower_transaction().unwrap(),
             &vote_pubkey,
             signature,
             &vote_tracker,
@@ -1694,7 +1704,7 @@ mod tests {
             .unwrap();
 
         ClusterInfoVoteListener::track_new_votes_and_notify_confirmations(
-            vote,
+            vote.as_tower_transaction().unwrap(),
             &vote_pubkey,
             signature,
             &vote_tracker,

+ 75 - 26
core/src/commitment_service.rs

@@ -1,6 +1,6 @@
 use {
     crate::consensus::{tower_vote_state::TowerVoteState, Stake},
-    crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender},
+    crossbeam_channel::{bounded, select, unbounded, Receiver, RecvTimeoutError, Sender},
     solana_clock::Slot,
     solana_measure::measure::Measure,
     solana_metrics::datapoint_info,
@@ -10,6 +10,7 @@ use {
         bank::Bank,
         commitment::{BlockCommitment, BlockCommitmentCache, CommitmentSlots, VOTE_THRESHOLD_SIZE},
     },
+    solana_votor::commitment::{AlpenglowCommitmentAggregationData, AlpenglowCommitmentType},
     std::{
         cmp::max,
         collections::HashMap,
@@ -22,7 +23,7 @@ use {
     },
 };
 
-pub struct CommitmentAggregationData {
+pub struct TowerCommitmentAggregationData {
     bank: Arc<Bank>,
     root: Slot,
     total_stake: Stake,
@@ -31,7 +32,7 @@ pub struct CommitmentAggregationData {
     node_vote_state: (Pubkey, TowerVoteState),
 }
 
-impl CommitmentAggregationData {
+impl TowerCommitmentAggregationData {
     pub fn new(
         bank: Arc<Bank>,
         root: Slot,
@@ -68,13 +69,24 @@ impl AggregateCommitmentService {
         exit: Arc<AtomicBool>,
         block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
         subscriptions: Option<Arc<RpcSubscriptions>>,
-    ) -> (Sender<CommitmentAggregationData>, Self) {
+    ) -> (
+        Sender<TowerCommitmentAggregationData>,
+        Sender<AlpenglowCommitmentAggregationData>,
+        Self,
+    ) {
         let (sender, receiver): (
-            Sender<CommitmentAggregationData>,
-            Receiver<CommitmentAggregationData>,
+            Sender<TowerCommitmentAggregationData>,
+            Receiver<TowerCommitmentAggregationData>,
         ) = unbounded();
+        // This channel should not grow unbounded; cap it at 1000 messages for now
+        let (ag_sender, ag_receiver): (
+            Sender<AlpenglowCommitmentAggregationData>,
+            Receiver<AlpenglowCommitmentAggregationData>,
+        ) = bounded(1000);
+
         (
             sender,
+            ag_sender,
             Self {
                 t_commitment: Builder::new()
                     .name("solAggCommitSvc".to_string())
@@ -85,6 +97,7 @@ impl AggregateCommitmentService {
 
                         if let Err(RecvTimeoutError::Disconnected) = Self::run(
                             &receiver,
+                            &ag_receiver,
                             &block_commitment_cache,
                             subscriptions.as_deref(),
                             &exit,
@@ -98,7 +111,8 @@ impl AggregateCommitmentService {
     }
 
     fn run(
-        receiver: &Receiver<CommitmentAggregationData>,
+        receiver: &Receiver<TowerCommitmentAggregationData>,
+        ag_receiver: &Receiver<AlpenglowCommitmentAggregationData>,
         block_commitment_cache: &RwLock<BlockCommitmentCache>,
         rpc_subscriptions: Option<&RpcSubscriptions>,
         exit: &AtomicBool,
@@ -108,18 +122,30 @@ impl AggregateCommitmentService {
                 return Ok(());
             }
 
-            let aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
-            let aggregation_data = receiver.try_iter().last().unwrap_or(aggregation_data);
-
-            let ancestors = aggregation_data.bank.status_cache_ancestors();
-            if ancestors.is_empty() {
-                continue;
-            }
-
             let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
-            let update_commitment_slots =
-                Self::update_commitment_cache(block_commitment_cache, aggregation_data, ancestors);
+            let commitment_slots = select! {
+                recv(receiver) -> msg => {
+                    let data = msg?;
+                    let data = receiver.try_iter().last().unwrap_or(data);
+                    let ancestors = data.bank.status_cache_ancestors();
+                    if ancestors.is_empty() {
+                        continue;
+                    }
+                    Self::update_commitment_cache(block_commitment_cache, data, ancestors)
+                }
+                recv(ag_receiver) -> msg => {
+                    let data = msg?;
+                    let data = ag_receiver.try_iter().last().unwrap_or(data);
+                    Self::alpenglow_update_commitment_cache(
+                        block_commitment_cache,
+                        data.commitment_type,
+                        data.slot,
+                    )
+                }
+                default(Duration::from_secs(1)) => continue
+            };
             aggregate_commitment_time.stop();
+
             datapoint_info!(
                 "block-commitment-cache",
                 (
@@ -129,12 +155,12 @@ impl AggregateCommitmentService {
                 ),
                 (
                     "highest-super-majority-root",
-                    update_commitment_slots.highest_super_majority_root as i64,
+                    commitment_slots.highest_super_majority_root as i64,
                     i64
                 ),
                 (
                     "highest-confirmed-slot",
-                    update_commitment_slots.highest_confirmed_slot as i64,
+                    commitment_slots.highest_confirmed_slot as i64,
                     i64
                 ),
             );
@@ -143,14 +169,34 @@ impl AggregateCommitmentService {
                 // Triggers rpc_subscription notifications as soon as new commitment data is
                 // available, sending just the commitment cache slot information that the
                 // notifications thread needs
-                rpc_subscriptions.notify_subscribers(update_commitment_slots);
+                rpc_subscriptions.notify_subscribers(commitment_slots);
+            }
+        }
+    }
+
+    fn alpenglow_update_commitment_cache(
+        block_commitment_cache: &RwLock<BlockCommitmentCache>,
+        update_type: AlpenglowCommitmentType,
+        slot: Slot,
+    ) -> CommitmentSlots {
+        let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
+
+        match update_type {
+            AlpenglowCommitmentType::Notarize => {
+                w_block_commitment_cache.set_slot(slot);
+            }
+            AlpenglowCommitmentType::Finalized => {
+                w_block_commitment_cache.set_highest_confirmed_slot(slot);
+                w_block_commitment_cache.set_root(slot);
+                w_block_commitment_cache.set_highest_super_majority_root(slot);
             }
         }
+        w_block_commitment_cache.commitment_slots()
     }
 
     fn update_commitment_cache(
         block_commitment_cache: &RwLock<BlockCommitmentCache>,
-        aggregation_data: CommitmentAggregationData,
+        aggregation_data: TowerCommitmentAggregationData,
         ancestors: Vec<u64>,
     ) -> CommitmentSlots {
         let (block_commitment, rooted_stake) = Self::aggregate_commitment(
@@ -208,8 +254,11 @@ impl AggregateCommitmentService {
             let vote_state = if pubkey == node_vote_pubkey {
                 // Override old vote_state in bank with latest one for my own vote pubkey
                 node_vote_state.clone()
+            } else if let Some(vote_state_view) = account.vote_state_view() {
+                TowerVoteState::from(vote_state_view)
             } else {
-                TowerVoteState::from(account.vote_state_view())
+                // Alpenglow doesn't need to aggregate commitment.
+                continue;
             };
             Self::aggregate_commitment_for_vote_account(
                 &mut commitment,
@@ -544,7 +593,7 @@ mod tests {
     fn test_highest_super_majority_root_advance() {
         fn get_vote_state(vote_pubkey: Pubkey, bank: &Bank) -> TowerVoteState {
             let vote_account = bank.get_vote_account(&vote_pubkey).unwrap();
-            TowerVoteState::from(vote_account.vote_state_view())
+            TowerVoteState::from(vote_account.vote_state_view().unwrap())
         }
 
         let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
@@ -615,7 +664,7 @@ mod tests {
         let ancestors = working_bank.status_cache_ancestors();
         let _ = AggregateCommitmentService::update_commitment_cache(
             &block_commitment_cache,
-            CommitmentAggregationData {
+            TowerCommitmentAggregationData {
                 bank: working_bank,
                 root: 0,
                 total_stake: 100,
@@ -650,7 +699,7 @@ mod tests {
         let ancestors = working_bank.status_cache_ancestors();
         let _ = AggregateCommitmentService::update_commitment_cache(
             &block_commitment_cache,
-            CommitmentAggregationData {
+            TowerCommitmentAggregationData {
                 bank: working_bank,
                 root: 1,
                 total_stake: 100,
@@ -699,7 +748,7 @@ mod tests {
         let ancestors = working_bank.status_cache_ancestors();
         let _ = AggregateCommitmentService::update_commitment_cache(
             &block_commitment_cache,
-            CommitmentAggregationData {
+            TowerCommitmentAggregationData {
                 bank: working_bank,
                 root: 0,
                 total_stake: 100,

+ 47 - 33
core/src/consensus.rs

@@ -407,8 +407,11 @@ impl Tower {
             if voted_stake == 0 {
                 continue;
             }
-            trace!("{vote_account_pubkey} {key} with stake {voted_stake}");
-            let mut vote_state = TowerVoteState::from(account.vote_state_view());
+            trace!("{} {} with stake {}", vote_account_pubkey, key, voted_stake);
+            let Some(vote_state_view) = account.vote_state_view() else {
+                continue; // not relevant to Alpenglow.
+            };
+            let mut vote_state = TowerVoteState::from(vote_state_view);
             for vote in &vote_state.votes {
                 lockout_intervals
                     .entry(vote.last_locked_out_slot())
@@ -610,7 +613,9 @@ impl Tower {
 
     pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
         let vote_account = bank.get_vote_account(vote_account_pubkey)?;
-        vote_account.vote_state_view().last_voted_slot()
+        // TODO(wen): make this work for Alpenglow.
+        let vote_state_view = vote_account.vote_state_view()?;
+        vote_state_view.last_voted_slot()
     }
 
     pub fn record_bank_vote(&mut self, bank: &Bank) -> Option<Slot> {
@@ -1615,7 +1620,11 @@ impl Tower {
         bank: &Bank,
     ) {
         if let Some(vote_account) = bank.get_vote_account(vote_account_pubkey) {
-            self.vote_state = TowerVoteState::from(vote_account.vote_state_view());
+            self.vote_state = TowerVoteState::from(
+                vote_account
+                    .vote_state_view()
+                    .expect("must be TowerBFT account"),
+            );
             self.initialize_root(root);
             self.initialize_lockouts(|v| v.slot() > root);
         } else {
@@ -1693,6 +1702,7 @@ impl TowerError {
 pub enum ExternalRootSource {
     Tower(Slot),
     HardFork(Slot),
+    VoteHistory(Slot),
 }
 
 impl ExternalRootSource {
@@ -1700,15 +1710,16 @@ impl ExternalRootSource {
         match self {
             ExternalRootSource::Tower(slot) => *slot,
             ExternalRootSource::HardFork(slot) => *slot,
+            ExternalRootSource::VoteHistory(slot) => *slot,
         }
     }
 }
 
-// Given an untimely crash, tower may have roots that are not reflected in blockstore,
+// Given an untimely crash, tower/vote history may have roots that are not reflected in blockstore,
 // or the reverse of this.
 // That's because we don't impose any ordering guarantee or any kind of write barriers
-// between tower (plain old POSIX fs calls) and blockstore (through RocksDB), when
-// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots.
+// between tower/vote history (plain old POSIX fs calls) and blockstore (through RocksDB), when
+// the replay or voting loop saves the tower/vote history before setting blockstore roots.
 pub fn reconcile_blockstore_roots_with_external_source(
     external_source: ExternalRootSource,
     blockstore: &Blockstore,
@@ -1979,6 +1990,7 @@ pub mod test {
         let duplicate_ancestor1 = 44;
         let duplicate_ancestor2 = 45;
         vote_simulator
+            .tbft_structs
             .heaviest_subtree_fork_choice
             .mark_fork_invalid_candidate(&(
                 duplicate_ancestor1,
@@ -1991,6 +2003,7 @@ pub mod test {
                     .hash(),
             ));
         vote_simulator
+            .tbft_structs
             .heaviest_subtree_fork_choice
             .mark_fork_invalid_candidate(&(
                 duplicate_ancestor2,
@@ -2011,7 +2024,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
         );
@@ -2025,6 +2038,7 @@ pub mod test {
         }
         for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
             vote_simulator
+                .tbft_structs
                 .heaviest_subtree_fork_choice
                 .mark_fork_valid_candidate(&(
                     duplicate_ancestor,
@@ -2044,7 +2058,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             );
             if i == 0 {
                 assert_eq!(
@@ -2076,7 +2090,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SameFork
         );
@@ -2091,7 +2105,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2108,7 +2122,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2125,7 +2139,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2142,7 +2156,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2161,7 +2175,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2178,7 +2192,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SwitchProof(Hash::default())
         );
@@ -2196,7 +2210,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SwitchProof(Hash::default())
         );
@@ -2218,7 +2232,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2246,7 +2260,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
         );
@@ -2262,7 +2276,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2295,7 +2309,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SwitchProof(Hash::default())
         );
@@ -2315,7 +2329,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -2439,7 +2453,7 @@ pub mod test {
             .unwrap()
             .get_vote_account(&vote_pubkey)
             .unwrap();
-        let state = observed.vote_state_view();
+        let state = observed.vote_state_view().unwrap();
         info!("observed tower: {:#?}", state.votes_iter().collect_vec());
 
         let num_slots_to_try = 200;
@@ -3035,7 +3049,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SameFork
         );
@@ -3050,7 +3064,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -3066,7 +3080,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SwitchProof(Hash::default())
         );
@@ -3126,7 +3140,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -3142,7 +3156,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20000)
         );
@@ -3158,7 +3172,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::SwitchProof(Hash::default())
         );
@@ -3763,7 +3777,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
         );
@@ -3781,7 +3795,7 @@ pub mod test {
                     total_stake,
                     bank0.epoch_vote_accounts(0).unwrap(),
                     &vote_simulator.latest_validator_votes_for_frozen_banks,
-                    &vote_simulator.heaviest_subtree_fork_choice,
+                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
                 ),
                 SwitchForkDecision::SwitchProof(Hash::default())
             );
@@ -3819,7 +3833,7 @@ pub mod test {
                 total_stake,
                 bank0.epoch_vote_accounts(0).unwrap(),
                 &vote_simulator.latest_validator_votes_for_frozen_banks,
-                &vote_simulator.heaviest_subtree_fork_choice,
+                &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             ),
             SwitchForkDecision::FailedSwitchThreshold(0, 20_000)
         );
@@ -3839,7 +3853,7 @@ pub mod test {
                     total_stake,
                     bank0.epoch_vote_accounts(0).unwrap(),
                     &vote_simulator.latest_validator_votes_for_frozen_banks,
-                    &vote_simulator.heaviest_subtree_fork_choice,
+                    &vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
                 ),
                 SwitchForkDecision::SwitchProof(Hash::default())
             );

+ 38 - 3
core/src/fetch_stage.rs

@@ -1,8 +1,11 @@
 //! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
 
 use {
-    crate::result::{Error, Result},
-    crossbeam_channel::{unbounded, RecvTimeoutError},
+    crate::{
+        result::{Error, Result},
+        tpu::MAX_ALPENGLOW_PACKET_NUM,
+    },
+    crossbeam_channel::{bounded, unbounded, RecvTimeoutError},
     solana_clock::{DEFAULT_TICKS_PER_SLOT, HOLD_TRANSACTIONS_SLOT_OFFSET},
     solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
     solana_packet::PacketFlags,
@@ -35,21 +38,30 @@ impl FetchStage {
         sockets: Vec<UdpSocket>,
         tpu_forwards_sockets: Vec<UdpSocket>,
         tpu_vote_sockets: Vec<UdpSocket>,
+        alpenglow_socket: UdpSocket,
         exit: Arc<AtomicBool>,
         poh_recorder: &Arc<RwLock<PohRecorder>>,
         coalesce: Option<Duration>,
-    ) -> (Self, PacketBatchReceiver, PacketBatchReceiver) {
+    ) -> (
+        Self,
+        PacketBatchReceiver,
+        PacketBatchReceiver,
+        PacketBatchReceiver,
+    ) {
         let (sender, receiver) = unbounded();
         let (vote_sender, vote_receiver) = unbounded();
+        let (alpenglow_sender, alpenglow_receiver) = bounded(MAX_ALPENGLOW_PACKET_NUM);
         let (forward_sender, forward_receiver) = unbounded();
         (
             Self::new_with_sender(
                 sockets,
                 tpu_forwards_sockets,
                 tpu_vote_sockets,
+                alpenglow_socket,
                 exit,
                 &sender,
                 &vote_sender,
+                &alpenglow_sender,
                 &forward_sender,
                 forward_receiver,
                 poh_recorder,
@@ -59,6 +71,7 @@ impl FetchStage {
             ),
             receiver,
             vote_receiver,
+            alpenglow_receiver,
         )
     }
 
@@ -67,9 +80,11 @@ impl FetchStage {
         sockets: Vec<UdpSocket>,
         tpu_forwards_sockets: Vec<UdpSocket>,
         tpu_vote_sockets: Vec<UdpSocket>,
+        alpenglow_socket: UdpSocket,
         exit: Arc<AtomicBool>,
         sender: &PacketBatchSender,
         vote_sender: &PacketBatchSender,
+        bls_packet_sender: &PacketBatchSender,
         forward_sender: &PacketBatchSender,
         forward_receiver: PacketBatchReceiver,
         poh_recorder: &Arc<RwLock<PohRecorder>>,
@@ -84,9 +99,11 @@ impl FetchStage {
             tx_sockets,
             tpu_forwards_sockets,
             tpu_vote_sockets,
+            alpenglow_socket,
             exit,
             sender,
             vote_sender,
+            bls_packet_sender,
             forward_sender,
             forward_receiver,
             poh_recorder,
@@ -143,9 +160,11 @@ impl FetchStage {
         tpu_sockets: Vec<Arc<UdpSocket>>,
         tpu_forwards_sockets: Vec<Arc<UdpSocket>>,
         tpu_vote_sockets: Vec<Arc<UdpSocket>>,
+        alpenglow_socket: UdpSocket,
         exit: Arc<AtomicBool>,
         sender: &PacketBatchSender,
         vote_sender: &PacketBatchSender,
+        bls_packet_sender: &PacketBatchSender,
         forward_sender: &PacketBatchSender,
         forward_receiver: PacketBatchReceiver,
         poh_recorder: &Arc<RwLock<PohRecorder>>,
@@ -224,6 +243,20 @@ impl FetchStage {
             })
             .collect();
 
+        let bls_message_stats = Arc::new(StreamerReceiveStats::new("bls_message_receiver"));
+        let bls_message_threads: Vec<_> = vec![streamer::receiver(
+            "solRcvrAlpMsg".to_string(),
+            Arc::new(alpenglow_socket),
+            exit.clone(),
+            bls_packet_sender.clone(),
+            recycler.clone(),
+            bls_message_stats.clone(),
+            coalesce,
+            true,
+            None,
+            true, // only staked connections can send BLS messages
+        )];
+
         let sender = sender.clone();
         let poh_recorder = poh_recorder.clone();
 
@@ -252,6 +285,7 @@ impl FetchStage {
                 tpu_stats.report();
                 tpu_vote_stats.report();
                 tpu_forward_stats.report();
+                bls_message_stats.report();
 
                 if exit.load(Ordering::Relaxed) {
                     return;
@@ -264,6 +298,7 @@ impl FetchStage {
                 tpu_threads,
                 tpu_forwards_threads,
                 tpu_vote_threads,
+                bls_message_threads,
                 vec![fwd_thread_hdl, metrics_thread_hdl],
             ]
             .into_iter()

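Note that the alpenglow path above deliberately uses `bounded(MAX_ALPENGLOW_PACKET_NUM)` where the tx and vote paths stay unbounded, so BLS packet batches get a capped queue. Below is a minimal, self-contained sketch of that bounded-channel behavior; the capacity value here is a placeholder, since the real `MAX_ALPENGLOW_PACKET_NUM` is defined in `core/src/tpu.rs` and not shown in this hunk:

```rust
use crossbeam_channel::{bounded, TrySendError};

// Hypothetical stand-in for core/src/tpu.rs's MAX_ALPENGLOW_PACKET_NUM.
const MAX_ALPENGLOW_PACKET_NUM: usize = 4;

fn main() {
    // A bounded channel caps how many batches can queue up, unlike the
    // unbounded tx/vote channels created in the same constructor.
    let (sender, receiver) = bounded::<u64>(MAX_ALPENGLOW_PACKET_NUM);

    for batch in 0..8u64 {
        match sender.try_send(batch) {
            Ok(()) => println!("queued batch {batch}"),
            // Once the channel is full, the producer sees backpressure
            // instead of growing the queue without limit.
            Err(TrySendError::Full(b)) => println!("dropped batch {b}: channel full"),
            Err(TrySendError::Disconnected(_)) => break,
        }
    }

    drop(sender);
    while let Ok(batch) = receiver.recv() {
        println!("drained batch {batch}");
    }
}
```

Whether a full queue drops packets or blocks the producer depends on how the streamer thread sends into the channel; the sketch only illustrates the capped-capacity semantics.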
+ 3 - 1
core/src/lib.rs

@@ -12,6 +12,7 @@ pub mod admin_rpc_post_init;
 pub mod banking_simulation;
 pub mod banking_stage;
 pub mod banking_trace;
+mod block_creation_loop;
 pub mod cluster_info_vote_listener;
 pub mod cluster_slots_service;
 pub mod commitment_service;
@@ -29,10 +30,11 @@ pub mod replay_stage;
 mod result;
 pub mod sample_performance_service;
 mod shred_fetch_stage;
-pub mod sigverify;
+pub mod sigverifier;
 pub mod sigverify_stage;
 pub mod snapshot_packager_service;
 pub mod staked_nodes_updater_service;
+pub mod staked_validators_cache;
 pub mod stats_reporter_service;
 pub mod system_monitor_service;
 pub mod tpu;

+ 10 - 0
core/src/repair/ancestor_hashes_service.rs

@@ -636,6 +636,16 @@ impl AncestorHashesService {
                 if exit.load(Ordering::Relaxed) {
                     return;
                 }
+                if repair_info
+                    .bank_forks
+                    .read()
+                    .unwrap()
+                    .root_bank()
+                    .feature_set
+                    .is_active(&agave_feature_set::secp256k1_program_enabled::id())
+                {
+                    return;
+                }
                 Self::manage_ancestor_requests(
                     &ancestor_hashes_request_statuses,
                     &ancestor_hashes_request_socket,

+ 124 - 0
core/src/repair/certificate_service.rs

@@ -0,0 +1,124 @@
+//! The `certificate_service` handles critical certificate-related activities:
+//! - Storage of critical certificates in blockstore
+//! - TODO: Broadcast of new critical certificates to the cluster
+//! - TODO: Repair of missing critical certificates to enable progress
+
+use {
+    crate::result::{Error, Result},
+    crossbeam_channel::{Receiver, RecvTimeoutError},
+    solana_ledger::blockstore::Blockstore,
+    solana_votor_messages::bls_message::{Certificate, CertificateMessage},
+    std::{
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc,
+        },
+        thread::{self, Builder, JoinHandle},
+        time::Duration,
+    },
+};
+
+pub(crate) type CertificateReceiver = Receiver<(Certificate, CertificateMessage)>;
+pub struct CertificateService {
+    t_cert_insert: JoinHandle<()>,
+}
+
+impl CertificateService {
+    pub(crate) fn new(
+        exit: Arc<AtomicBool>,
+        blockstore: Arc<Blockstore>,
+        certificate_receiver: CertificateReceiver,
+    ) -> Self {
+        let t_cert_insert =
+            Self::start_certificate_insert_broadcast(exit, blockstore, certificate_receiver);
+        Self { t_cert_insert }
+    }
+
+    fn start_certificate_insert_broadcast(
+        exit: Arc<AtomicBool>,
+        blockstore: Arc<Blockstore>,
+        certificate_receiver: CertificateReceiver,
+    ) -> JoinHandle<()> {
+        let handle_error = || {
+            inc_new_counter_error!("solana-certificate-service-error", 1, 1);
+        };
+
+        Builder::new()
+            .name("solCertInsertBCast".to_string())
+            .spawn(move || {
+                while !exit.load(Ordering::Relaxed) {
+                    let certs = match Self::receive_new_certificates(&certificate_receiver) {
+                        Ok(certs) => certs,
+                        Err(e) if Self::should_exit_on_error(&e, &handle_error) => break,
+                        Err(_e) => continue,
+                    };
+
+                    // TODO: broadcast to gossip / all-2-all
+
+                    // TODO: update highest cert local-state and potentially ask repair for missing certificates,
+                    // e.g., if our previous highest cert was 5 and we now see certs for 7 & 8, notify repair to fetch the cert for 6
+
+                    // Insert into blockstore
+                    if let Err(e) = certs.into_iter().try_for_each(|(cert_id, cert)| {
+                        Self::insert_certificate(blockstore.as_ref(), cert_id, cert)
+                    }) {
+                        if Self::should_exit_on_error(&e, &handle_error) {
+                            break;
+                        }
+                    }
+                }
+            })
+            .unwrap()
+    }
+
+    fn receive_new_certificates(
+        certificate_receiver: &Receiver<(Certificate, CertificateMessage)>,
+    ) -> Result<Vec<(Certificate, CertificateMessage)>> {
+        const RECV_TIMEOUT: Duration = Duration::from_millis(200);
+        Ok(
+            std::iter::once(certificate_receiver.recv_timeout(RECV_TIMEOUT)?)
+                .chain(certificate_receiver.try_iter())
+                .collect(),
+        )
+    }
+
+    fn insert_certificate(
+        blockstore: &Blockstore,
+        cert_id: Certificate,
+        vote_certificate: CertificateMessage,
+    ) -> Result<()> {
+        match cert_id {
+            Certificate::NotarizeFallback(slot, block_id) => blockstore
+                .insert_new_notarization_fallback_certificate(slot, block_id, vote_certificate)?,
+            Certificate::Skip(slot) => {
+                blockstore.insert_new_skip_certificate(slot, vote_certificate)?
+            }
+            Certificate::Finalize(_)
+            | Certificate::FinalizeFast(_, _)
+            | Certificate::Notarize(_, _) => {
+                panic!("Programmer error, certificate pool should not notify for {cert_id:?}")
+            }
+        }
+        Ok(())
+    }
+
+    fn should_exit_on_error<H>(e: &Error, handle_error: &H) -> bool
+    where
+        H: Fn(),
+    {
+        match e {
+            Error::RecvTimeout(RecvTimeoutError::Disconnected) => true,
+            Error::RecvTimeout(RecvTimeoutError::Timeout) => false,
+            Error::Send => true,
+            _ => {
+                handle_error();
+                error!("thread {:?} error {:?}", thread::current().name(), e);
+                false
+            }
+        }
+    }
+
+    pub(crate) fn join(self) -> thread::Result<()> {
+        self.t_cert_insert.join()
+    }
+}

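`receive_new_certificates` above uses a common crossbeam idiom: block briefly for the first item, then opportunistically drain whatever else is already queued, so blockstore inserts happen in batches rather than one certificate per wakeup. A minimal sketch of the same idiom in isolation, with `u64` standing in for the `(Certificate, CertificateMessage)` pairs:

```rust
use {
    crossbeam_channel::{unbounded, Receiver, RecvTimeoutError},
    std::time::Duration,
};

// Block up to RECV_TIMEOUT for the first item, then drain the rest of the
// queue without blocking, mirroring receive_new_certificates.
fn drain_batch(rx: &Receiver<u64>) -> Result<Vec<u64>, RecvTimeoutError> {
    const RECV_TIMEOUT: Duration = Duration::from_millis(200);
    Ok(std::iter::once(rx.recv_timeout(RECV_TIMEOUT)?)
        .chain(rx.try_iter())
        .collect())
}

fn main() {
    let (tx, rx) = unbounded();
    for v in [7, 8, 9] {
        tx.send(v).unwrap();
    }
    // All three queued items come back in a single batch.
    assert_eq!(drain_batch(&rx).unwrap(), vec![7, 8, 9]);
}
```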
+ 1 - 1
core/src/repair/cluster_slot_state_verifier.rs

@@ -1584,7 +1584,7 @@ mod test {
         let (vote_simulator, blockstore) = setup_forks_from_tree(forks, 1, None);
         let descendants = vote_simulator.bank_forks.read().unwrap().descendants();
         InitialState {
-            heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice,
+            heaviest_subtree_fork_choice: vote_simulator.tbft_structs.heaviest_subtree_fork_choice,
             progress: vote_simulator.progress,
             descendants,
             bank_forks: vote_simulator.bank_forks,

+ 1 - 0
core/src/repair/mod.rs

@@ -1,4 +1,5 @@
 pub mod ancestor_hashes_service;
+pub mod certificate_service;
 pub mod cluster_slot_state_verifier;
 pub mod duplicate_repair_status;
 pub(crate) mod malicious_repair_handler;

+ 12 - 7
core/src/repair/repair_service.rs

@@ -719,13 +719,18 @@ impl RepairService {
             repair_metrics,
         );
 
-        Self::handle_popular_pruned_forks(
-            root_bank.clone(),
-            repair_weight,
-            popular_pruned_forks_requests,
-            popular_pruned_forks_sender,
-            repair_metrics,
-        );
+        if !root_bank
+            .feature_set
+            .is_active(&agave_feature_set::secp256k1_program_enabled::id())
+        {
+            Self::handle_popular_pruned_forks(
+                root_bank.clone(),
+                repair_weight,
+                popular_pruned_forks_requests,
+                popular_pruned_forks_sender,
+                repair_metrics,
+            );
+        }
 
         Self::build_and_send_repair_batch(
             serve_repair,

The file diff has been suppressed because it is too large
+ 606 - 453
core/src/replay_stage.rs


+ 427 - 0
core/src/sigverifier/bls_sigverifier.rs

@@ -0,0 +1,427 @@
+//! The BLS signature verifier.
+//! This is just a placeholder for now, until we have a real implementation.
+
+mod stats;
+
+use {
+    crate::{
+        cluster_info_vote_listener::VerifiedVoteSender,
+        sigverify_stage::{SigVerifier, SigVerifyServiceError},
+    },
+    crossbeam_channel::{Sender, TrySendError},
+    solana_clock::Slot,
+    solana_pubkey::Pubkey,
+    solana_runtime::{
+        bank::Bank, epoch_stakes::BLSPubkeyToRankMap, root_bank_cache::RootBankCache,
+    },
+    solana_streamer::packet::PacketBatch,
+    solana_votor_messages::bls_message::BLSMessage,
+    stats::{BLSSigVerifierStats, StatsUpdater},
+    std::{collections::HashMap, sync::Arc},
+};
+
+fn get_key_to_rank_map(bank: &Bank, slot: Slot) -> Option<&Arc<BLSPubkeyToRankMap>> {
+    let stakes = bank.epoch_stakes_map();
+    let epoch = bank.epoch_schedule().get_epoch(slot);
+    stakes
+        .get(&epoch)
+        .map(|stake| stake.bls_pubkey_to_rank_map())
+}
+
+pub struct BLSSigVerifier {
+    verified_votes_sender: VerifiedVoteSender,
+    message_sender: Sender<BLSMessage>,
+    root_bank_cache: RootBankCache,
+    stats: BLSSigVerifierStats,
+}
+
+impl SigVerifier for BLSSigVerifier {
+    type SendType = BLSMessage;
+
+    // TODO(wen): just a placeholder without any verification.
+    fn verify_batches(&self, batches: Vec<PacketBatch>, _valid_packets: usize) -> Vec<PacketBatch> {
+        batches
+    }
+
+    fn send_packets(
+        &mut self,
+        packet_batches: Vec<PacketBatch>,
+    ) -> Result<(), SigVerifyServiceError<Self::SendType>> {
+        // TODO(wen): just a placeholder without any batching.
+        let mut verified_votes = HashMap::new();
+        let mut stats_updater = StatsUpdater::default();
+
+        for packet in packet_batches.iter().flatten() {
+            stats_updater.received += 1;
+
+            if packet.meta().discard() {
+                stats_updater.received_discarded += 1;
+                continue;
+            }
+
+            let message = match packet.deserialize_slice(..) {
+                Ok(msg) => msg,
+                Err(e) => {
+                    trace!("Failed to deserialize BLS message: {}", e);
+                    stats_updater.received_malformed += 1;
+                    continue;
+                }
+            };
+
+            let slot = match &message {
+                BLSMessage::Vote(vote_message) => vote_message.vote.slot(),
+                BLSMessage::Certificate(certificate_message) => {
+                    certificate_message.certificate.slot()
+                }
+            };
+
+            let bank = self.root_bank_cache.root_bank();
+            let Some(rank_to_pubkey_map) = get_key_to_rank_map(&bank, slot) else {
+                stats_updater.received_no_epoch_stakes += 1;
+                continue;
+            };
+
+            if let BLSMessage::Vote(vote_message) = &message {
+                let vote = &vote_message.vote;
+                stats_updater.received_votes += 1;
+                if vote.is_notarization_or_finalization() || vote.is_notarize_fallback() {
+                    let Some((pubkey, _)) = rank_to_pubkey_map.get_pubkey(vote_message.rank.into())
+                    else {
+                        stats_updater.received_malformed += 1;
+                        continue;
+                    };
+                    let cur_slots: &mut Vec<Slot> = verified_votes.entry(*pubkey).or_default();
+                    if !cur_slots.contains(&slot) {
+                        cur_slots.push(slot);
+                    }
+                }
+            }
+
+            // Now send the BLS message to certificate pool.
+            match self.message_sender.try_send(message) {
+                Ok(()) => stats_updater.sent += 1,
+                Err(TrySendError::Full(_)) => {
+                    stats_updater.sent_failed += 1;
+                }
+                Err(e @ TrySendError::Disconnected(_)) => {
+                    return Err(e.into());
+                }
+            }
+        }
+        self.send_verified_votes(verified_votes);
+        self.stats.update(stats_updater);
+        self.stats.maybe_report_stats();
+        Ok(())
+    }
+}
+
+impl BLSSigVerifier {
+    pub fn new(
+        root_bank_cache: RootBankCache,
+        verified_votes_sender: VerifiedVoteSender,
+        message_sender: Sender<BLSMessage>,
+    ) -> Self {
+        Self {
+            root_bank_cache,
+            verified_votes_sender,
+            message_sender,
+            stats: BLSSigVerifierStats::new(),
+        }
+    }
+
+    fn send_verified_votes(&mut self, verified_votes: HashMap<Pubkey, Vec<Slot>>) {
+        let mut stats_updater = StatsUpdater::default();
+        for (pubkey, slots) in verified_votes {
+            match self.verified_votes_sender.try_send((pubkey, slots)) {
+                Ok(()) => {
+                    stats_updater.verified_votes_sent += 1;
+                }
+                Err(e) => {
+                    trace!("Failed to send verified vote: {}", e);
+                    stats_updater.verified_votes_sent_failed += 1;
+                }
+            }
+        }
+        self.stats.update(stats_updater);
+    }
+}
+
+// Add tests for the BLS signature verifier
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        bitvec::prelude::*,
+        crossbeam_channel::Receiver,
+        solana_bls_signatures::Signature,
+        solana_hash::Hash,
+        solana_perf::packet::{Packet, PinnedPacketBatch},
+        solana_runtime::{
+            bank::Bank,
+            bank_forks::BankForks,
+            genesis_utils::{
+                create_genesis_config_with_alpenglow_vote_accounts_no_program,
+                ValidatorVoteKeypairs,
+            },
+        },
+        solana_signer::Signer,
+        solana_votor_messages::{
+            bls_message::{
+                BLSMessage, Certificate, CertificateMessage, CertificateType, VoteMessage,
+            },
+            vote::Vote,
+        },
+        stats::STATS_INTERVAL_DURATION,
+        std::time::{Duration, Instant},
+    };
+
+    fn create_keypairs_and_bls_sig_verifier(
+        verified_vote_sender: VerifiedVoteSender,
+        message_sender: Sender<BLSMessage>,
+    ) -> (Vec<ValidatorVoteKeypairs>, BLSSigVerifier) {
+        // Create a vec of ValidatorVoteKeypairs for 10 nodes
+        let validator_keypairs = (0..10)
+            .map(|_| ValidatorVoteKeypairs::new_rand())
+            .collect::<Vec<_>>();
+        let stakes_vec = (0..validator_keypairs.len())
+            .map(|i| 1_000 - i as u64)
+            .collect::<Vec<_>>();
+        let genesis = create_genesis_config_with_alpenglow_vote_accounts_no_program(
+            1_000_000_000,
+            &validator_keypairs,
+            stakes_vec,
+        );
+        let bank0 = Bank::new_for_tests(&genesis.genesis_config);
+        let bank_forks = BankForks::new_rw_arc(bank0);
+        let root_bank_cache = RootBankCache::new(bank_forks);
+        (
+            validator_keypairs,
+            BLSSigVerifier::new(root_bank_cache, verified_vote_sender, message_sender),
+        )
+    }
+
+    fn test_bls_message_transmission(
+        verifier: &mut BLSSigVerifier,
+        receiver: Option<&Receiver<BLSMessage>>,
+        messages: &[BLSMessage],
+        expect_send_packets_ok: bool,
+    ) {
+        let packets = messages
+            .iter()
+            .map(|msg| {
+                let mut packet = Packet::default();
+                packet
+                    .populate_packet(None, msg)
+                    .expect("Failed to populate packet");
+                packet
+            })
+            .collect::<Vec<Packet>>();
+        let packet_batches = vec![PinnedPacketBatch::new(packets).into()];
+        if expect_send_packets_ok {
+            assert!(verifier.send_packets(packet_batches).is_ok());
+            if let Some(receiver) = receiver {
+                for msg in messages {
+                    match receiver.recv_timeout(Duration::from_secs(1)) {
+                        Ok(received_msg) => assert_eq!(received_msg, *msg),
+                        Err(e) => warn!("Failed to receive BLS message: {}", e),
+                    }
+                }
+            }
+        } else {
+            assert!(verifier.send_packets(packet_batches).is_err());
+        }
+    }
+
+    #[test]
+    fn test_blssigverifier_send_packets() {
+        let (sender, receiver) = crossbeam_channel::unbounded();
+        let (verified_vote_sender, verified_vote_receiver) = crossbeam_channel::unbounded();
+        // Create bank forks and epoch stakes
+
+        let (validator_keypairs, mut verifier) =
+            create_keypairs_and_bls_sig_verifier(verified_vote_sender, sender);
+
+        let mut bitmap = BitVec::<u8, Lsb0>::repeat(false, 8);
+        bitmap.set(3, true);
+        bitmap.set(5, true);
+        let vote_rank: usize = 2;
+
+        let certificate = Certificate::new(CertificateType::Finalize, 4, None);
+
+        let messages = vec![
+            BLSMessage::Vote(VoteMessage {
+                vote: Vote::new_finalization_vote(5),
+                signature: Signature::default(),
+                rank: vote_rank as u16,
+            }),
+            BLSMessage::Certificate(CertificateMessage {
+                certificate,
+                signature: Signature::default(),
+                bitmap,
+            }),
+        ];
+        test_bls_message_transmission(&mut verifier, Some(&receiver), &messages, true);
+        assert_eq!(verifier.stats.sent, 2);
+        assert_eq!(verifier.stats.received, 2);
+        assert_eq!(verifier.stats.received_malformed, 0);
+        let received_verified_votes = verified_vote_receiver.try_recv().unwrap();
+        assert_eq!(
+            received_verified_votes,
+            (validator_keypairs[vote_rank].vote_keypair.pubkey(), vec![5])
+        );
+
+        let vote_rank: usize = 3;
+        let messages = vec![BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_notarization_vote(6, Hash::new_unique()),
+            signature: Signature::default(),
+            rank: vote_rank as u16,
+        })];
+        test_bls_message_transmission(&mut verifier, Some(&receiver), &messages, true);
+        assert_eq!(verifier.stats.sent, 3);
+        assert_eq!(verifier.stats.received, 3);
+        assert_eq!(verifier.stats.received_malformed, 0);
+        let received_verified_votes = verified_vote_receiver.try_recv().unwrap();
+        assert_eq!(
+            received_verified_votes,
+            (validator_keypairs[vote_rank].vote_keypair.pubkey(), vec![6])
+        );
+
+        // Pretend the stats interval has elapsed, then make sure stats are reset
+        verifier.stats.last_stats_logged = Instant::now() - STATS_INTERVAL_DURATION;
+        let vote_rank: usize = 9;
+        let messages = vec![BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_notarization_fallback_vote(7, Hash::new_unique()),
+            signature: Signature::default(),
+            rank: vote_rank as u16,
+        })];
+        test_bls_message_transmission(&mut verifier, Some(&receiver), &messages, true);
+        // Since we just logged all stats (including the packet just sent), stats should be reset
+        assert_eq!(verifier.stats.sent, 0);
+        assert_eq!(verifier.stats.received, 0);
+        assert_eq!(verifier.stats.received_malformed, 0);
+        let received_verified_votes = verified_vote_receiver.try_recv().unwrap();
+        assert_eq!(
+            received_verified_votes,
+            (validator_keypairs[vote_rank].vote_keypair.pubkey(), vec![7])
+        );
+    }
+
+    #[test]
+    fn test_blssigverifier_send_packets_malformed() {
+        let (sender, receiver) = crossbeam_channel::unbounded();
+        let (verified_vote_sender, _) = crossbeam_channel::unbounded();
+        let (_, mut verifier) = create_keypairs_and_bls_sig_verifier(verified_vote_sender, sender);
+
+        let packets = vec![Packet::default()];
+        let packet_batches = vec![PinnedPacketBatch::new(packets).into()];
+        assert!(verifier.send_packets(packet_batches).is_ok());
+        assert_eq!(verifier.stats.sent, 0);
+        assert_eq!(verifier.stats.received, 1);
+        assert_eq!(verifier.stats.received_malformed, 1);
+        assert_eq!(verifier.stats.received_no_epoch_stakes, 0);
+
+        // Expect no messages since the packet was malformed
+        assert!(receiver.is_empty());
+
+        // Send a packet with no epoch stakes
+        let messages = vec![BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_finalization_vote(5_000_000_000),
+            signature: Signature::default(),
+            rank: 0,
+        })];
+        test_bls_message_transmission(&mut verifier, None, &messages, true);
+        assert_eq!(verifier.stats.sent, 0);
+        assert_eq!(verifier.stats.received, 2);
+        assert_eq!(verifier.stats.received_malformed, 1);
+        assert_eq!(verifier.stats.received_no_epoch_stakes, 1);
+
+        // Expect no messages since there were no epoch stakes for the vote's slot
+        assert!(receiver.is_empty());
+
+        // Send a packet with invalid rank
+        let messages = vec![BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_finalization_vote(5),
+            signature: Signature::default(),
+            rank: 1000, // Invalid rank
+        })];
+        test_bls_message_transmission(&mut verifier, None, &messages, true);
+        assert_eq!(verifier.stats.sent, 0);
+        assert_eq!(verifier.stats.received, 3);
+        assert_eq!(verifier.stats.received_malformed, 2);
+        assert_eq!(verifier.stats.received_no_epoch_stakes, 1);
+
+        // Expect no messages since the rank was out of range (counted as malformed)
+        assert!(receiver.is_empty());
+    }
+
+    #[test]
+    fn test_blssigverifier_send_packets_channel_full() {
+        solana_logger::setup();
+        let (sender, receiver) = crossbeam_channel::bounded(1);
+        let (verified_vote_sender, _) = crossbeam_channel::unbounded();
+        let (_, mut verifier) = create_keypairs_and_bls_sig_verifier(verified_vote_sender, sender);
+        let messages = vec![
+            BLSMessage::Vote(VoteMessage {
+                vote: Vote::new_finalization_vote(5),
+                signature: Signature::default(),
+                rank: 0,
+            }),
+            BLSMessage::Vote(VoteMessage {
+                vote: Vote::new_notarization_fallback_vote(6, Hash::new_unique()),
+                signature: Signature::default(),
+                rank: 2,
+            }),
+        ];
+        test_bls_message_transmission(&mut verifier, Some(&receiver), &messages, true);
+
+        // We failed to send the second message because the channel is full.
+        assert_eq!(verifier.stats.sent, 1);
+        assert_eq!(verifier.stats.received, 2);
+        assert_eq!(verifier.stats.received_malformed, 0);
+    }
+
+    #[test]
+    fn test_blssigverifier_send_packets_receiver_closed() {
+        let (sender, receiver) = crossbeam_channel::bounded(1);
+        let (verified_vote_sender, _) = crossbeam_channel::unbounded();
+        let (_, mut verifier) = create_keypairs_and_bls_sig_verifier(verified_vote_sender, sender);
+        // Close the receiver; the next send should return an error
+        drop(receiver);
+        let messages = vec![BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_finalization_vote(5),
+            signature: Signature::default(),
+            rank: 0,
+        })];
+        test_bls_message_transmission(&mut verifier, None, &messages, false);
+    }
+
+    #[test]
+    fn test_blssigverifier_send_discarded_packets() {
+        let (sender, receiver) = crossbeam_channel::unbounded();
+        let (verified_vote_sender, _) = crossbeam_channel::unbounded();
+        let (_, mut verifier) = create_keypairs_and_bls_sig_verifier(verified_vote_sender, sender);
+        let message = BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_finalization_vote(5),
+            signature: Signature::default(),
+            rank: 0,
+        });
+        let mut packet = Packet::default();
+        packet
+            .populate_packet(None, &message)
+            .expect("Failed to populate packet");
+        packet.meta_mut().set_discard(true);
+        let packets = vec![packet];
+        let packet_batches = vec![PinnedPacketBatch::new(packets).into()];
+        assert!(verifier.send_packets(packet_batches).is_ok());
+        assert_eq!(verifier.stats.sent, 0);
+        assert_eq!(verifier.stats.sent_failed, 0);
+        assert_eq!(verifier.stats.verified_votes_sent, 0);
+        assert_eq!(verifier.stats.verified_votes_sent_failed, 0);
+        assert_eq!(verifier.stats.received, 1);
+        assert_eq!(verifier.stats.received_discarded, 1);
+        assert_eq!(verifier.stats.received_malformed, 0);
+        assert_eq!(verifier.stats.received_no_epoch_stakes, 0);
+        assert_eq!(verifier.stats.received_votes, 0);
+        assert!(receiver.is_empty());
+    }
+}

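`send_packets` above treats the two `try_send` failure modes differently: a `Full` channel is counted in `sent_failed` and the loop moves on, while `Disconnected` is propagated as a fatal service error (matching the new `TrySend` arm added to `sigverify_stage.rs` below). A minimal sketch of that distinction, independent of the verifier types:

```rust
use crossbeam_channel::{bounded, TrySendError};

fn main() {
    let (tx, rx) = bounded::<&str>(1);

    // First message fits; the channel is now full.
    assert!(tx.try_send("vote-1").is_ok());

    // Full: the verifier counts this as `sent_failed` and keeps processing
    // the remaining packets in the batch.
    assert!(matches!(tx.try_send("vote-2"), Err(TrySendError::Full(_))));

    // Disconnected: the verifier returns an error, which the stage treats
    // as a signal to shut down.
    drop(rx);
    assert!(matches!(
        tx.try_send("vote-3"),
        Err(TrySendError::Disconnected(_))
    ));
}
```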
+ 107 - 0
core/src/sigverifier/bls_sigverifier/stats.rs

@@ -0,0 +1,107 @@
+use std::time::{Duration, Instant};
+
+pub(super) const STATS_INTERVAL_DURATION: Duration = Duration::from_secs(1);
+
+#[derive(Debug, Default)]
+pub(super) struct StatsUpdater {
+    pub(super) sent: u64,
+    pub(super) sent_failed: u64,
+    pub(super) verified_votes_sent: u64,
+    pub(super) verified_votes_sent_failed: u64,
+    pub(super) received: u64,
+    pub(super) received_discarded: u64,
+    pub(super) received_malformed: u64,
+    pub(super) received_no_epoch_stakes: u64,
+    pub(super) received_votes: u64,
+}
+
+// We keep our own stats here because BLS decoding happens during batch
+// verification while BLS messages are forwarded one at a time, so it makes
+// sense to track finer-grained stats.
+//
+// The fields are visible to support testing and should not be accessed
+// directly in production code. Use `StatsUpdater` instead.
+#[derive(Debug)]
+pub(super) struct BLSSigVerifierStats {
+    pub(super) sent: u64,
+    pub(super) sent_failed: u64,
+    pub(super) verified_votes_sent: u64,
+    pub(super) verified_votes_sent_failed: u64,
+    pub(super) received: u64,
+    pub(super) received_discarded: u64,
+    pub(super) received_malformed: u64,
+    pub(super) received_no_epoch_stakes: u64,
+    pub(super) received_votes: u64,
+    pub(super) last_stats_logged: Instant,
+}
+
+impl BLSSigVerifierStats {
+    pub(super) fn new() -> Self {
+        Self {
+            sent: 0,
+            sent_failed: 0,
+            verified_votes_sent: 0,
+            verified_votes_sent_failed: 0,
+            received: 0,
+            received_discarded: 0,
+            received_malformed: 0,
+            received_no_epoch_stakes: 0,
+            received_votes: 0,
+            last_stats_logged: Instant::now(),
+        }
+    }
+
+    /// If sufficient time has passed since last report, report stats.
+    pub(super) fn maybe_report_stats(&mut self) {
+        let now = Instant::now();
+        let time_since_last_log = now.duration_since(self.last_stats_logged);
+        if time_since_last_log < STATS_INTERVAL_DURATION {
+            return;
+        }
+        datapoint_info!(
+            "bls_sig_verifier_stats",
+            ("sent", self.sent as i64, i64),
+            ("sent_failed", self.sent_failed as i64, i64),
+            ("verified_votes_sent", self.verified_votes_sent as i64, i64),
+            (
+                "verified_votes_sent_failed",
+                self.verified_votes_sent_failed as i64,
+                i64
+            ),
+            ("received", self.received as i64, i64),
+            ("received_discarded", self.received_discarded as i64, i64),
+            ("received_votes", self.received_votes as i64, i64),
+            (
+                "received_no_epoch_stakes",
+                self.received_no_epoch_stakes as i64,
+                i64
+            ),
+            ("received_malformed", self.received_malformed as i64, i64),
+        );
+        *self = BLSSigVerifierStats::new();
+    }
+
+    pub(super) fn update(
+        &mut self,
+        StatsUpdater {
+            sent,
+            sent_failed,
+            verified_votes_sent,
+            verified_votes_sent_failed,
+            received,
+            received_discarded,
+            received_malformed,
+            received_no_epoch_stakes,
+            received_votes,
+        }: StatsUpdater,
+    ) {
+        self.sent += sent;
+        self.sent_failed += sent_failed;
+        self.verified_votes_sent += verified_votes_sent;
+        self.verified_votes_sent_failed += verified_votes_sent_failed;
+        self.received += received;
+        self.received_discarded += received_discarded;
+        self.received_malformed += received_malformed;
+        self.received_no_epoch_stakes += received_no_epoch_stakes;
+        self.received_votes += received_votes;
+    }
+}

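`maybe_report_stats` above follows a report-and-reset pattern: counters accumulate between reports, and resetting after each `datapoint_info!` makes every datapoint a per-interval delta rather than a lifetime total (which is also why the verifier tests can assert counters drop back to zero after the interval elapses). A minimal sketch of the pattern, with `println!` standing in for the metrics macro:

```rust
use std::time::{Duration, Instant};

const STATS_INTERVAL: Duration = Duration::from_secs(1);

struct Stats {
    received: u64,
    last_logged: Instant,
}

impl Stats {
    fn maybe_report(&mut self) {
        // Skip reporting until a full interval has elapsed.
        if self.last_logged.elapsed() < STATS_INTERVAL {
            return;
        }
        // The real code emits a datapoint_info! here.
        println!("received={}", self.received);
        // Resetting after reporting means each report carries per-interval
        // deltas rather than lifetime totals.
        *self = Stats {
            received: 0,
            last_logged: Instant::now(),
        };
    }
}

fn main() {
    // Backdate last_logged so the interval has already elapsed,
    // the same trick the verifier tests use.
    let mut stats = Stats {
        received: 3,
        last_logged: Instant::now() - STATS_INTERVAL,
    };
    stats.maybe_report(); // logs and resets
    assert_eq!(stats.received, 0);
}
```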
+ 0 - 0
core/src/sigverify.rs → core/src/sigverifier/ed25519_sigverifier.rs


+ 2 - 0
core/src/sigverifier/mod.rs

@@ -0,0 +1,2 @@
+pub mod bls_sigverifier;
+pub mod ed25519_sigverifier;

+ 10 - 5
core/src/sigverify_stage.rs

@@ -6,9 +6,9 @@
 //! if perf-libs are available
 
 use {
-    crate::sigverify,
+    crate::sigverifier::ed25519_sigverifier::ed25519_verify_disabled,
     core::time::Duration,
-    crossbeam_channel::{Receiver, RecvTimeoutError, SendError},
+    crossbeam_channel::{Receiver, RecvTimeoutError, SendError, TrySendError},
     itertools::Itertools,
     solana_measure::measure::Measure,
     solana_perf::{
@@ -44,6 +44,9 @@ pub enum SigVerifyServiceError<SendType> {
     #[error("send packets batch error")]
     Send(#[from] SendError<SendType>),
 
+    #[error("try_send packet errror")]
+    TrySend(#[from] TrySendError<SendType>),
+
     #[error("streamer error")]
     Streamer(#[from] StreamerError),
 }
@@ -221,7 +224,7 @@ impl SigVerifier for DisabledSigVerifier {
         mut batches: Vec<PacketBatch>,
         _valid_packets: usize,
     ) -> Vec<PacketBatch> {
-        sigverify::ed25519_verify_disabled(&mut batches);
+        ed25519_verify_disabled(&mut batches);
         batches
     }
 
@@ -412,7 +415,7 @@ impl SigVerifyStage {
                             SigVerifyServiceError::Streamer(StreamerError::RecvTimeout(
                                 RecvTimeoutError::Timeout,
                             )) => (),
-                            SigVerifyServiceError::Send(_) => {
+                            SigVerifyServiceError::Send(_) | SigVerifyServiceError::TrySend(_) => {
                                 break;
                             }
                             _ => error!("{e:?}"),
@@ -437,7 +440,9 @@ impl SigVerifyStage {
 mod tests {
     use {
         super::*,
-        crate::{banking_trace::BankingTracer, sigverify::TransactionSigVerifier},
+        crate::{
+            banking_trace::BankingTracer, sigverifier::ed25519_sigverifier::TransactionSigVerifier,
+        },
         crossbeam_channel::unbounded,
         solana_perf::{
             packet::{to_packet_batches, Packet, PinnedPacketBatch},

+ 887 - 0
core/src/staked_validators_cache.rs

@@ -0,0 +1,887 @@
+use {
+    crate::voting_service::AlpenglowPortOverride,
+    lru::LruCache,
+    solana_clock::{Epoch, Slot},
+    solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol},
+    solana_pubkey::Pubkey,
+    solana_runtime::bank_forks::BankForks,
+    std::{
+        collections::HashMap,
+        net::SocketAddr,
+        sync::{Arc, RwLock},
+        time::{Duration, Instant},
+    },
+};
+
+struct StakedValidatorsCacheEntry {
+    /// TPU Vote Sockets associated with the staked validators
+    validator_sockets: Vec<SocketAddr>,
+
+    /// Alpenglow Sockets associated with the staked validators
+    alpenglow_sockets: Vec<SocketAddr>,
+
+    /// The time at which this entry was created
+    creation_time: Instant,
+}
+
+/// Maintain `SocketAddr`s associated with all staked validators for a particular protocol (e.g.,
+/// UDP, QUIC) over a number of epochs.
+///
+/// We employ an LRU cache with capped size, mapping Epoch to cache entries that store the socket
+/// information. We also track cache entry times, forcing recalculations of cache entries that are
+/// accessed after a specified TTL.
+pub struct StakedValidatorsCache {
+    /// key: the epoch for which we have cached our staked validators list
+    /// value: the cache entry
+    cache: LruCache<Epoch, StakedValidatorsCacheEntry>,
+
+    /// Time to live for cache entries
+    ttl: Duration,
+
+    /// Bank forks
+    bank_forks: Arc<RwLock<BankForks>>,
+
+    /// Protocol
+    protocol: Protocol,
+
+    /// Whether to include the running validator's socket address in cache entries
+    include_self: bool,
+
+    /// Optional override for Alpenglow port, used for testing purposes
+    alpenglow_port_override: Option<AlpenglowPortOverride>,
+
+    /// timestamp of the last alpenglow port override we read
+    alpenglow_port_override_last_modified: Instant,
+}
+
+enum PortsToUse {
+    TpuVote,
+    Alpenglow,
+}
+
+impl StakedValidatorsCache {
+    pub fn new(
+        bank_forks: Arc<RwLock<BankForks>>,
+        protocol: Protocol,
+        ttl: Duration,
+        max_cache_size: usize,
+        include_self: bool,
+        alpenglow_port_override: Option<AlpenglowPortOverride>,
+    ) -> Self {
+        Self {
+            cache: LruCache::new(max_cache_size),
+            ttl,
+            bank_forks,
+            protocol,
+            include_self,
+            alpenglow_port_override,
+            alpenglow_port_override_last_modified: Instant::now(),
+        }
+    }
+
+    #[inline]
+    fn cur_epoch(&self, slot: Slot) -> Epoch {
+        self.bank_forks
+            .read()
+            .unwrap()
+            .working_bank()
+            .epoch_schedule()
+            .get_epoch(slot)
+    }
+
+    fn refresh_cache_entry(
+        &mut self,
+        epoch: Epoch,
+        cluster_info: &ClusterInfo,
+        update_time: Instant,
+    ) {
+        let banks = {
+            let bank_forks = self.bank_forks.read().unwrap();
+            [bank_forks.root_bank(), bank_forks.working_bank()]
+        };
+
+        let epoch_staked_nodes = banks.iter().find_map(|bank| bank.epoch_staked_nodes(epoch)).unwrap_or_else(|| {
+            error!("StakedValidatorsCache::get: unknown Bank::epoch_staked_nodes for epoch: {epoch}");
+            Arc::<HashMap<Pubkey, u64>>::default()
+        });
+
+        struct Node {
+            pubkey: Pubkey,
+            stake: u64,
+            tpu_socket: SocketAddr,
+            // TODO(wen): this should not be an Option after BLS all-to-all is submitted.
+            alpenglow_socket: Option<SocketAddr>,
+        }
+
+        let mut nodes: Vec<_> = epoch_staked_nodes
+            .iter()
+            .filter(|(pubkey, stake)| {
+                let positive_stake = **stake > 0;
+                let not_self = pubkey != &&cluster_info.id();
+
+                positive_stake && (self.include_self || not_self)
+            })
+            .filter_map(|(pubkey, stake)| {
+                cluster_info.lookup_contact_info(pubkey, |node| {
+                    let tpu_socket = node.tpu_vote(self.protocol);
+                    let alpenglow_socket = node.alpenglow();
+                    // To not change current behavior, we only consider nodes that have a
+                    // TPU socket, and ignore nodes that only have an Alpenglow socket.
+                    // TODO(wen): tpu_socket is no longer needed after Alpenglow migration.
+                    tpu_socket.map(|tpu_socket| Node {
+                        pubkey: *pubkey,
+                        stake: *stake,
+                        tpu_socket,
+                        alpenglow_socket,
+                    })
+                })?
+            })
+            .collect();
+
+        // TODO(wen): After the Alpenglow vote is no longer a transaction, dedup by alpenglow socket instead.
+        nodes.dedup_by_key(|node| node.tpu_socket);
+        nodes.sort_unstable_by(|a, b| a.stake.cmp(&b.stake));
+
+        let mut validator_sockets = Vec::new();
+        let mut alpenglow_sockets = Vec::new();
+        let override_map = self
+            .alpenglow_port_override
+            .as_ref()
+            .map(|x| x.get_override_map());
+        for node in nodes {
+            validator_sockets.push(node.tpu_socket);
+
+            if let Some(alpenglow_socket) = node.alpenglow_socket {
+                let socket = if let Some(override_map) = &override_map {
+                    // If we have an override, use it.
+                    override_map
+                        .get(&node.pubkey)
+                        .cloned()
+                        .unwrap_or(alpenglow_socket)
+                } else {
+                    alpenglow_socket
+                };
+                alpenglow_sockets.push(socket);
+            }
+        }
+        self.cache.push(
+            epoch,
+            StakedValidatorsCacheEntry {
+                validator_sockets,
+                alpenglow_sockets,
+                creation_time: update_time,
+            },
+        );
+    }
+
+    pub fn get_staked_validators_by_slot_with_tpu_vote_ports(
+        &mut self,
+        slot: Slot,
+        cluster_info: &ClusterInfo,
+        access_time: Instant,
+    ) -> (&[SocketAddr], bool) {
+        self.get_staked_validators_by_epoch(
+            self.cur_epoch(slot),
+            cluster_info,
+            access_time,
+            PortsToUse::TpuVote,
+        )
+    }
+
+    pub fn get_staked_validators_by_slot_with_alpenglow_ports(
+        &mut self,
+        slot: Slot,
+        cluster_info: &ClusterInfo,
+        access_time: Instant,
+    ) -> (&[SocketAddr], bool) {
+        // Check if self.alpenglow_port_override has a different last_modified.
+        // Immediately refresh the cache if it does.
+        if let Some(alpenglow_port_override) = &self.alpenglow_port_override {
+            if alpenglow_port_override.has_new_override(self.alpenglow_port_override_last_modified)
+            {
+                self.alpenglow_port_override_last_modified =
+                    alpenglow_port_override.last_modified();
+                trace!(
+                        "refreshing cache entry for epoch {} due to alpenglow port override last_modified change",
+                        self.cur_epoch(slot)
+                    );
+                self.refresh_cache_entry(self.cur_epoch(slot), cluster_info, access_time);
+            }
+        }
+
+        self.get_staked_validators_by_epoch(
+            self.cur_epoch(slot),
+            cluster_info,
+            access_time,
+            PortsToUse::Alpenglow,
+        )
+    }
+
+    fn get_staked_validators_by_epoch(
+        &mut self,
+        epoch: Epoch,
+        cluster_info: &ClusterInfo,
+        access_time: Instant,
+        ports_to_use: PortsToUse,
+    ) -> (&[SocketAddr], bool) {
+        // For a given epoch, if we either:
+        //
+        // (1) have a cache entry that has expired
+        // (2) have no existing cache entry
+        //
+        // then update the cache.
+        let refresh_cache = self
+            .cache
+            .get(&epoch)
+            .map(|v| access_time > v.creation_time + self.ttl)
+            .unwrap_or(true);
+
+        if refresh_cache {
+            self.refresh_cache_entry(epoch, cluster_info, access_time);
+        }
+
+        (
+            // Unwrapping is fine here, since refresh_cache_entry guarantees that we push a
+            // cache entry to self.cache[epoch].
+            self.cache
+                .get(&epoch)
+                .map(|v| match ports_to_use {
+                    PortsToUse::TpuVote => &*v.validator_sockets,
+                    PortsToUse::Alpenglow => &*v.alpenglow_sockets,
+                })
+                .unwrap(),
+            refresh_cache,
+        )
+    }
+
+    pub fn len(&self) -> usize {
+        self.cache.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.cache.is_empty()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::StakedValidatorsCache,
+        crate::voting_service::AlpenglowPortOverride,
+        solana_account::AccountSharedData,
+        solana_clock::{Clock, Slot},
+        solana_genesis_config::GenesisConfig,
+        solana_gossip::{
+            cluster_info::{ClusterInfo, Node},
+            contact_info::{ContactInfo, Protocol},
+            crds::GossipRoute,
+            crds_data::CrdsData,
+            crds_value::CrdsValue,
+        },
+        solana_keypair::Keypair,
+        solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo},
+        solana_pubkey::Pubkey,
+        solana_runtime::{bank::Bank, bank_forks::BankForks, epoch_stakes::VersionedEpochStakes},
+        solana_signer::Signer,
+        solana_streamer::socket::SocketAddrSpace,
+        solana_time_utils::timestamp,
+        solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap},
+        solana_vote_program::vote_state::{VoteInit, VoteState, VoteStateVersions},
+        std::{
+            collections::HashMap,
+            iter::{repeat, repeat_with},
+            net::{Ipv4Addr, SocketAddr},
+            sync::{Arc, RwLock},
+            time::{Duration, Instant},
+        },
+        test_case::{test_case, test_matrix},
+    };
+
+    fn new_rand_vote_account<R: rand::Rng>(
+        rng: &mut R,
+        node_pubkey: Option<Pubkey>,
+    ) -> (AccountSharedData, VoteState) {
+        let vote_init = VoteInit {
+            node_pubkey: node_pubkey.unwrap_or_else(Pubkey::new_unique),
+            authorized_voter: Pubkey::new_unique(),
+            authorized_withdrawer: Pubkey::new_unique(),
+            commission: rng.gen(),
+        };
+        let clock = Clock {
+            slot: rng.gen(),
+            epoch_start_timestamp: rng.gen(),
+            epoch: rng.gen(),
+            leader_schedule_epoch: rng.gen(),
+            unix_timestamp: rng.gen(),
+        };
+        let vote_state = VoteState::new(&vote_init, &clock);
+        let account = AccountSharedData::new_data(
+            rng.gen(), // lamports
+            &VoteStateVersions::new_current(vote_state.clone()),
+            &solana_sdk_ids::vote::id(), // owner
+        )
+        .unwrap();
+        (account, vote_state)
+    }
+
+    fn new_rand_vote_accounts<R: rand::Rng>(
+        rng: &mut R,
+        num_nodes: usize,
+        num_zero_stake_nodes: usize,
+    ) -> impl Iterator<Item = (Keypair, Keypair, /*stake:*/ u64, VoteAccount)> + '_ {
+        let node_keypairs: Vec<_> = repeat_with(Keypair::new).take(num_nodes).collect();
+
+        repeat(0..num_nodes).flatten().map(move |node_ix| {
+            let node_keypair = node_keypairs[node_ix].insecure_clone();
+            let vote_account_keypair = Keypair::new();
+
+            let (account, _) = new_rand_vote_account(rng, Some(node_keypair.pubkey()));
+            let stake = if node_ix < num_zero_stake_nodes {
+                0
+            } else {
+                rng.gen_range(1..997)
+            };
+            let vote_account = VoteAccount::try_from(account).unwrap();
+            (vote_account_keypair, node_keypair, stake, vote_account)
+        })
+    }
+
+    struct StakedValidatorsCacheHarness {
+        bank: Bank,
+        cluster_info: ClusterInfo,
+    }
+
+    impl StakedValidatorsCacheHarness {
+        pub fn new(genesis_config: &GenesisConfig, keypair: Keypair) -> Self {
+            let bank = Bank::new_for_tests(genesis_config);
+
+            let cluster_info = ClusterInfo::new(
+                Node::new_localhost_with_pubkey(&keypair.pubkey()).info,
+                Arc::new(keypair),
+                SocketAddrSpace::Unspecified,
+            );
+
+            Self { bank, cluster_info }
+        }
+
+        pub fn with_vote_accounts(
+            mut self,
+            slot: Slot,
+            node_keypair_map: HashMap<Pubkey, Keypair>,
+            vote_accounts: VoteAccountsHashMap,
+            protocol: Protocol,
+        ) -> Self {
+            // Update cluster info
+            {
+                let node_contact_info =
+                    node_keypair_map
+                        .keys()
+                        .enumerate()
+                        .map(|(node_ix, pubkey)| {
+                            let mut contact_info = ContactInfo::new(*pubkey, 0_u64, 0_u16);
+
+                            assert!(contact_info
+                                .set_tpu_vote(
+                                    protocol,
+                                    (Ipv4Addr::LOCALHOST, 8005 + node_ix as u16),
+                                )
+                                .is_ok());
+
+                            assert!(contact_info
+                                .set_alpenglow((Ipv4Addr::LOCALHOST, 8080 + node_ix as u16))
+                                .is_ok());
+
+                            contact_info
+                        });
+
+                for contact_info in node_contact_info {
+                    let node_pubkey = *contact_info.pubkey();
+
+                    let entry = CrdsValue::new(
+                        CrdsData::ContactInfo(contact_info),
+                        &node_keypair_map[&node_pubkey],
+                    );
+
+                    assert_eq!(node_pubkey, entry.label().pubkey());
+
+                    {
+                        let mut gossip_crds = self.cluster_info.gossip.crds.write().unwrap();
+
+                        gossip_crds
+                            .insert(entry, timestamp(), GossipRoute::LocalMessage)
+                            .unwrap();
+                    }
+                }
+            }
+
+            // Update bank
+            let epoch_num = self.bank.epoch_schedule().get_epoch(slot);
+            let epoch_stakes = VersionedEpochStakes::new_for_tests(vote_accounts, epoch_num);
+
+            self.bank.set_epoch_stakes_for_test(epoch_num, epoch_stakes);
+
+            self
+        }
+
+        pub fn bank_forks(self) -> (Arc<RwLock<BankForks>>, ClusterInfo) {
+            let bank_forks = self.bank.wrap_with_bank_forks_for_tests().1;
+            (bank_forks, self.cluster_info)
+        }
+    }
+
+    /// Create a number of nodes; each node will have one or more vote accounts. Each vote
+    /// account has random stake in [1, 997), except that vote accounts belonging to the
+    /// first few (zero-stake) nodes have exactly 0 stake.
+    fn build_epoch_stakes(
+        num_nodes: usize,
+        num_zero_stake_vote_accounts: usize,
+        num_vote_accounts: usize,
+    ) -> (HashMap<Pubkey, Keypair>, VoteAccountsHashMap) {
+        let mut rng = rand::thread_rng();
+
+        let vote_accounts: Vec<_> =
+            new_rand_vote_accounts(&mut rng, num_nodes, num_zero_stake_vote_accounts)
+                .take(num_vote_accounts)
+                .collect();
+
+        let node_keypair_map: HashMap<Pubkey, Keypair> = vote_accounts
+            .iter()
+            .map(|(_, node_keypair, _, _)| (node_keypair.pubkey(), node_keypair.insecure_clone()))
+            .collect();
+
+        let vahm = vote_accounts
+            .into_iter()
+            .map(|(vote_keypair, _, stake, vote_account)| {
+                (vote_keypair.pubkey(), (stake, vote_account))
+            })
+            .collect();
+
+        (node_keypair_map, vahm)
+    }
+
+    #[test_case(1_usize, 0_usize, 10_usize, Protocol::UDP, false)]
+    #[test_case(1_usize, 0_usize, 10_usize, Protocol::UDP, true)]
+    #[test_case(3_usize, 0_usize, 10_usize, Protocol::QUIC, false)]
+    #[test_case(10_usize, 2_usize, 10_usize, Protocol::UDP, false)]
+    #[test_case(10_usize, 2_usize, 10_usize, Protocol::UDP, true)]
+    #[test_case(10_usize, 10_usize, 10_usize, Protocol::QUIC, false)]
+    #[test_case(50_usize, 7_usize, 60_usize, Protocol::UDP, false)]
+    #[test_case(50_usize, 7_usize, 60_usize, Protocol::UDP, true)]
+    fn test_detect_only_staked_nodes_and_refresh_after_ttl(
+        num_nodes: usize,
+        num_zero_stake_nodes: usize,
+        num_vote_accounts: usize,
+        protocol: Protocol,
+        use_alpenglow_socket: bool,
+    ) {
+        let slot_num = 325_000_000_u64;
+        let genesis_lamports = 123_u64;
+        // Create our harness
+        let (keypair_map, vahm) =
+            build_epoch_stakes(num_nodes, num_zero_stake_nodes, num_vote_accounts);
+
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(genesis_lamports);
+
+        let (bank_forks, cluster_info) =
+            StakedValidatorsCacheHarness::new(&genesis_config, Keypair::new())
+                .with_vote_accounts(slot_num, keypair_map, vahm, protocol)
+                .bank_forks();
+
+        // Create our staked validators cache
+        let mut svc =
+            StakedValidatorsCache::new(bank_forks, protocol, Duration::from_secs(5), 5, true, None);
+
+        let now = Instant::now();
+
+        let (sockets, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(slot_num, &cluster_info, now)
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(slot_num, &cluster_info, now)
+        };
+
+        assert!(refreshed);
+        assert_eq!(num_nodes - num_zero_stake_nodes, sockets.len());
+        assert_eq!(1, svc.len());
+
+        // Re-fetch from the cache right before the 5-second deadline
+        let (sockets, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs_f64(4.999),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs_f64(4.999),
+            )
+        };
+
+        assert!(!refreshed);
+        assert_eq!(num_nodes - num_zero_stake_nodes, sockets.len());
+        assert_eq!(1, svc.len());
+
+        // Re-fetch from the cache right at the 5-second deadline - we still shouldn't refresh.
+        let (sockets, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs(5),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs(5),
+            )
+        };
+
+        assert!(!refreshed);
+        assert_eq!(num_nodes - num_zero_stake_nodes, sockets.len());
+        assert_eq!(1, svc.len());
+
+        // Re-fetch from the cache right after the 5-second deadline - now we should refresh.
+        let (sockets, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs_f64(5.001),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs_f64(5.001),
+            )
+        };
+
+        assert!(refreshed);
+        assert_eq!(num_nodes - num_zero_stake_nodes, sockets.len());
+        assert_eq!(1, svc.len());
+
+        // Re-fetch from the cache well after the 5-second deadline - we should refresh.
+        let (sockets, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs(100),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                now + Duration::from_secs(100),
+            )
+        };
+
+        assert!(refreshed);
+        assert_eq!(num_nodes - num_zero_stake_nodes, sockets.len());
+        assert_eq!(1, svc.len());
+    }
+
+    #[test_case(true)]
+    #[test_case(false)]
+    fn test_cache_eviction(use_alpenglow_socket: bool) {
+        // Create our harness
+        let (keypair_map, vahm) = build_epoch_stakes(50, 7, 60);
+
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
+
+        let base_slot = 325_000_000_000;
+        let (bank_forks, cluster_info) =
+            StakedValidatorsCacheHarness::new(&genesis_config, Keypair::new())
+                .with_vote_accounts(base_slot, keypair_map, vahm, Protocol::UDP)
+                .bank_forks();
+
+        // Create our staked validators cache
+        let mut svc = StakedValidatorsCache::new(
+            bank_forks,
+            Protocol::UDP,
+            Duration::from_secs(5),
+            5,
+            true,
+            None,
+        );
+
+        assert_eq!(0, svc.len());
+        assert!(svc.is_empty());
+
+        let now = Instant::now();
+
+        // Populate the first five entries; accessing the cache once again shouldn't trigger any
+        // refreshes.
+        for entry_ix in 1..=5 {
+            let (_, refreshed) = if use_alpenglow_socket {
+                svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now,
+                )
+            } else {
+                svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now,
+                )
+            };
+            assert!(refreshed);
+            assert_eq!(entry_ix as usize, svc.len());
+
+            let (_, refreshed) = if use_alpenglow_socket {
+                svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now,
+                )
+            } else {
+                svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now,
+                )
+            };
+            assert!(!refreshed);
+            assert_eq!(entry_ix as usize, svc.len());
+        }
+
+        // Entry 6 - this shouldn't increase the cache length.
+        let (_, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                6 * base_slot,
+                &cluster_info,
+                now,
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(6 * base_slot, &cluster_info, now)
+        };
+        assert!(refreshed);
+        assert_eq!(5, svc.len());
+
+        // Epoch 1 should have been evicted
+        assert!(!svc.cache.contains(&svc.cur_epoch(base_slot)));
+
+        // Epochs 2 - 6 should have entries
+        for entry_ix in 2..=6 {
+            assert!(svc.cache.contains(&svc.cur_epoch(entry_ix * base_slot)));
+        }
+
+        // Accessing the cache after TTL should recalculate everything; the size remains 5, since
+        // we only ever lazily evict cache entries.
+        for entry_ix in 1..=5 {
+            let (_, refreshed) = if use_alpenglow_socket {
+                svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now + Duration::from_secs(10),
+                )
+            } else {
+                svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                    entry_ix * base_slot,
+                    &cluster_info,
+                    now + Duration::from_secs(10),
+                )
+            };
+            assert!(refreshed);
+            assert_eq!(5, svc.len());
+        }
+    }
+
+    #[test_case(true)]
+    #[test_case(false)]
+    fn test_only_update_once_per_epoch(use_alpenglow_socket: bool) {
+        let slot_num = 325_000_000_u64;
+        let num_nodes = 10_usize;
+        let num_zero_stake_nodes = 2_usize;
+        let num_vote_accounts = 10_usize;
+        let genesis_lamports = 123_u64;
+        let protocol = Protocol::UDP;
+
+        // Create our harness
+        let (keypair_map, vahm) =
+            build_epoch_stakes(num_nodes, num_zero_stake_nodes, num_vote_accounts);
+
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(genesis_lamports);
+
+        let (bank_forks, cluster_info) =
+            StakedValidatorsCacheHarness::new(&genesis_config, Keypair::new())
+                .with_vote_accounts(slot_num, keypair_map, vahm, protocol)
+                .bank_forks();
+
+        // Create our staked validators cache
+        let mut svc =
+            StakedValidatorsCache::new(bank_forks, protocol, Duration::from_secs(5), 5, true, None);
+
+        let now = Instant::now();
+
+        let (_, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(slot_num, &cluster_info, now)
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(slot_num, &cluster_info, now)
+        };
+        assert!(refreshed);
+
+        let (_, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(slot_num, &cluster_info, now)
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(slot_num, &cluster_info, now)
+        };
+        assert!(!refreshed);
+
+        let (_, refreshed) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(2 * slot_num, &cluster_info, now)
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(2 * slot_num, &cluster_info, now)
+        };
+        assert!(refreshed);
+    }
+
+    #[test_matrix(
+        [1_usize, 10_usize],
+        [Protocol::UDP, Protocol::QUIC],
+        [false, true]
+    )]
+    fn test_exclude_self_from_cache(
+        num_nodes: usize,
+        protocol: Protocol,
+        use_alpenglow_socket: bool,
+    ) {
+        let slot_num = 325_000_000_u64;
+        let num_vote_accounts = 10_usize;
+        let genesis_lamports = 123_u64;
+
+        // Create our harness
+        let (keypair_map, vahm) = build_epoch_stakes(num_nodes, 0, num_vote_accounts);
+
+        // Fetch some keypair from the keypair map
+        let keypair = keypair_map.values().next().unwrap().insecure_clone();
+
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(genesis_lamports);
+
+        let (bank_forks, cluster_info) =
+            StakedValidatorsCacheHarness::new(&genesis_config, keypair.insecure_clone())
+                .with_vote_accounts(slot_num, keypair_map, vahm, protocol)
+                .bank_forks();
+
+        let my_socket_addr = cluster_info
+            .lookup_contact_info(&keypair.pubkey(), |node| {
+                if use_alpenglow_socket {
+                    node.alpenglow().unwrap()
+                } else {
+                    node.tpu_vote(protocol).unwrap()
+                }
+            })
+            .unwrap();
+
+        // Create our staked validators cache - set include_self to true
+        let mut svc = StakedValidatorsCache::new(
+            bank_forks.clone(),
+            protocol,
+            Duration::from_secs(5),
+            5,
+            true,
+            None,
+        );
+
+        let (sockets, _) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                Instant::now(),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                Instant::now(),
+            )
+        };
+        assert_eq!(sockets.len(), num_nodes);
+        assert!(sockets.contains(&my_socket_addr));
+
+        // Create our staked validators cache - set include_self to false
+        let mut svc = StakedValidatorsCache::new(
+            bank_forks.clone(),
+            protocol,
+            Duration::from_secs(5),
+            5,
+            false,
+            None,
+        );
+
+        let (sockets, _) = if use_alpenglow_socket {
+            svc.get_staked_validators_by_slot_with_alpenglow_ports(
+                slot_num,
+                &cluster_info,
+                Instant::now(),
+            )
+        } else {
+            svc.get_staked_validators_by_slot_with_tpu_vote_ports(
+                slot_num,
+                &cluster_info,
+                Instant::now(),
+            )
+        };
+        // We should have num_nodes - 1 sockets, since we exclude our own socket address.
+        assert_eq!(sockets.len(), num_nodes - 1);
+        assert!(!sockets.contains(&my_socket_addr));
+    }
+
+    #[test]
+    fn test_alpenglow_port_override() {
+        let (keypair_map, vahm) = build_epoch_stakes(3, 0, 3);
+        let pubkey_b = *keypair_map.keys().nth(1).unwrap();
+        let keypair = keypair_map.values().next().unwrap().insecure_clone();
+
+        let alpenglow_port_override = AlpenglowPortOverride::default();
+        let blackhole_addr: SocketAddr = "0.0.0.0:0".parse().unwrap();
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100);
+
+        let (bank_forks, cluster_info) =
+            StakedValidatorsCacheHarness::new(&genesis_config, keypair.insecure_clone())
+                .with_vote_accounts(0, keypair_map, vahm, Protocol::UDP)
+                .bank_forks();
+
+        // Create our staked validators cache - set include_self to false
+        let mut svc = StakedValidatorsCache::new(
+            bank_forks.clone(),
+            Protocol::UDP,
+            Duration::from_secs(5),
+            5,
+            false,
+            Some(alpenglow_port_override.clone()),
+        );
+        // Nothing in the override, so we should get the original socket addresses.
+        let (sockets, _) = svc.get_staked_validators_by_slot_with_alpenglow_ports(
+            0,
+            &cluster_info,
+            Instant::now(),
+        );
+        assert_eq!(sockets.len(), 2);
+        assert!(!sockets.contains(&blackhole_addr));
+
+        // Add an override for pubkey_b, and check that we get the overridden socket address.
+        alpenglow_port_override.update_override(HashMap::from([(pubkey_b, blackhole_addr)]));
+        let (sockets, _) = svc.get_staked_validators_by_slot_with_alpenglow_ports(
+            0,
+            &cluster_info,
+            Instant::now(),
+        );
+        assert_eq!(sockets.len(), 2);
+        // Sort sockets to ensure the blackhole address is at index 0.
+        let mut sockets: Vec<_> = sockets.to_vec();
+        sockets.sort();
+        assert_eq!(sockets[0], blackhole_addr);
+        assert_ne!(sockets[1], blackhole_addr);
+
+        // Now clear the override, and check that we get the original socket addresses.
+        alpenglow_port_override.clear();
+        let (sockets, _) = svc.get_staked_validators_by_slot_with_alpenglow_ports(
+            0,
+            &cluster_info,
+            Instant::now(),
+        );
+        assert_eq!(sockets.len(), 2);
+        assert!(!sockets.contains(&blackhole_addr));
+    }
+}
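
A note on the behavior pinned down by these tests: entries are keyed by epoch, lookups return `(sockets, refreshed)`, an entry refreshes strictly after the TTL elapses (5.000s does not refresh, 5.001s does), capacity is bounded with lazy eviction, and the caller's own socket can be excluded. Below is a minimal sketch of that TTL-plus-capacity contract; `TtlEpochCache` and its fields are illustrative stand-ins, not the actual `StakedValidatorsCache` internals.

```rust
use std::{
    collections::HashMap,
    net::SocketAddr,
    time::{Duration, Instant},
};

type Epoch = u64;

/// Hypothetical cache mirroring the contract asserted in the tests above.
struct TtlEpochCache {
    entries: HashMap<Epoch, (Vec<SocketAddr>, Instant)>,
    ttl: Duration,
    max_entries: usize,
}

impl TtlEpochCache {
    fn new(ttl: Duration, max_entries: usize) -> Self {
        Self {
            entries: HashMap::new(),
            ttl,
            max_entries,
        }
    }

    /// Returns `(sockets, refreshed)`; refreshes strictly *after* `ttl` elapses.
    fn get(
        &mut self,
        epoch: Epoch,
        now: Instant,
        recompute: impl FnOnce() -> Vec<SocketAddr>,
    ) -> (Vec<SocketAddr>, bool) {
        let stale = match self.entries.get(&epoch) {
            Some((_, inserted)) => now.duration_since(*inserted) > self.ttl,
            None => true,
        };
        if stale {
            if !self.entries.contains_key(&epoch) && self.entries.len() >= self.max_entries {
                // Lazily evict the oldest entry so the cache never exceeds capacity.
                let oldest = self
                    .entries
                    .iter()
                    .min_by_key(|(_, (_, inserted))| *inserted)
                    .map(|(e, _)| *e);
                if let Some(oldest) = oldest {
                    self.entries.remove(&oldest);
                }
            }
            self.entries.insert(epoch, (recompute(), now));
        }
        let (sockets, _) = &self.entries[&epoch];
        (sockets.clone(), stale)
    }
}
```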

+ 34 - 2
core/src/tpu.rs

@@ -23,7 +23,9 @@ use {
         forwarding_stage::{
             spawn_forwarding_stage, ForwardAddressGetter, SpawnForwardingStageResult,
         },
-        sigverify::TransactionSigVerifier,
+        sigverifier::{
+            bls_sigverifier::BLSSigVerifier, ed25519_sigverifier::TransactionSigVerifier,
+        },
         sigverify_stage::SigVerifyStage,
         staked_nodes_updater_service::StakedNodesUpdaterService,
         tpu_entry_notifier::TpuEntryNotifier,
@@ -53,7 +55,7 @@ use {
         bank_forks::BankForks,
         prioritization_fee_cache::PrioritizationFeeCache,
         root_bank_cache::RootBankCache,
-        vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender},
+        vote_sender_types::{BLSVerifiedMessageSender, ReplayVoteReceiver, ReplayVoteSender},
     },
     solana_streamer::{
         quic::{spawn_server_multi, QuicServerParams, SpawnServerResult},
@@ -63,6 +65,7 @@ use {
         broadcast_stage::{BroadcastStage, BroadcastStageType},
         xdp::XdpSender,
     },
+    solana_votor::event::VotorEventSender,
     std::{
         collections::HashMap,
         net::{SocketAddr, UdpSocket},
@@ -73,6 +76,9 @@ use {
     tokio::sync::mpsc::Sender as AsyncSender,
 };
 
+// The maximum number of alpenglow packets that can be queued for verification at
+// once; used to bound the alpenglow packet channels.
+pub const MAX_ALPENGLOW_PACKET_NUM: usize = 10000;
+
 pub struct TpuSockets {
     pub transactions: Vec<UdpSocket>,
     pub transaction_forwards: Vec<UdpSocket>,
@@ -84,6 +90,7 @@ pub struct TpuSockets {
     /// Client-side socket for the forwarding votes.
     pub vote_forwarding_client: UdpSocket,
     pub vortexor_receivers: Option<Vec<UdpSocket>>,
+    pub alpenglow: UdpSocket,
 }
 
 /// The `SigVerifier` enum is used to determine whether to use a local or remote signature verifier.
@@ -105,6 +112,7 @@ pub struct Tpu {
     fetch_stage: FetchStage,
     sig_verifier: SigVerifier,
     vote_sigverify_stage: SigVerifyStage,
+    alpenglow_sigverify_stage: SigVerifyStage,
     banking_stage: BankingStage,
     forwarding_stage: JoinHandle<()>,
     cluster_info_vote_listener: ClusterInfoVoteListener,
@@ -144,7 +152,9 @@ impl Tpu {
         tpu_coalesce: Duration,
         duplicate_confirmed_slot_sender: DuplicateConfirmedSlotsSender,
         client: ForwardingClientOption,
+        bls_verified_message_sender: BLSVerifiedMessageSender,
         turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
+        votor_event_sender: VotorEventSender,
         keypair: &Keypair,
         log_messages_bytes_limit: Option<usize>,
         staked_nodes: &Arc<RwLock<StakedNodes>>,
@@ -172,18 +182,22 @@ impl Tpu {
             vote_quic: tpu_vote_quic_sockets,
             vote_forwarding_client: vote_forwarding_client_socket,
             vortexor_receivers,
+            alpenglow: alpenglow_socket,
         } = sockets;
 
         let (packet_sender, packet_receiver) = unbounded();
         let (vote_packet_sender, vote_packet_receiver) = unbounded();
         let (forwarded_packet_sender, forwarded_packet_receiver) = unbounded();
+        let (bls_packet_sender, bls_packet_receiver) = bounded(MAX_ALPENGLOW_PACKET_NUM);
         let fetch_stage = FetchStage::new_with_sender(
             transactions_sockets,
             tpu_forwards_sockets,
             tpu_vote_sockets,
+            alpenglow_socket,
             exit.clone(),
             &packet_sender,
             &vote_packet_sender,
+            &bls_packet_sender,
             &forwarded_packet_sender,
             forwarded_packet_receiver,
             poh_recorder,
@@ -309,6 +323,21 @@ impl Tpu {
             )
         };
 
+        let alpenglow_sigverify_stage = {
+            let root_bank_cache = RootBankCache::new(bank_forks.clone());
+            let verifier = BLSSigVerifier::new(
+                root_bank_cache,
+                verified_vote_sender.clone(),
+                bls_verified_message_sender,
+            );
+            SigVerifyStage::new(
+                bls_packet_receiver,
+                verifier,
+                "solSigVerAlpenglow",
+                "tpu-alpenglow-verifier",
+            )
+        };
+
         let cluster_info_vote_listener = ClusterInfoVoteListener::new(
             exit.clone(),
             cluster_info.clone(),
@@ -376,6 +405,7 @@ impl Tpu {
             shred_version,
             turbine_quic_endpoint_sender,
             xdp_sender,
+            votor_event_sender,
         );
 
         let mut key_notifiers = key_notifiers.write().unwrap();
@@ -393,6 +423,7 @@ impl Tpu {
             fetch_stage,
             sig_verifier,
             vote_sigverify_stage,
+            alpenglow_sigverify_stage,
             banking_stage,
             forwarding_stage,
             cluster_info_vote_listener,
@@ -411,6 +442,7 @@ impl Tpu {
             self.fetch_stage.join(),
             self.sig_verifier.join(),
             self.vote_sigverify_stage.join(),
+            self.alpenglow_sigverify_stage.join(),
             self.cluster_info_vote_listener.join(),
             self.banking_stage.join(),
             self.forwarding_stage.join(),
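
Unlike the transaction and vote packet channels, which are unbounded, the alpenglow packet channel is bounded at MAX_ALPENGLOW_PACKET_NUM, so a stalled verifier sheds load at the sender rather than buffering without limit. A minimal, self-contained sketch of that backpressure with crossbeam_channel (illustrative only; it assumes try_send-style semantics at the producer, which is not necessarily how FetchStage sends):

```rust
use crossbeam_channel::{bounded, TrySendError};

fn main() {
    // A tiny capacity stands in for MAX_ALPENGLOW_PACKET_NUM.
    let (tx, rx) = bounded::<u64>(2);
    assert!(tx.try_send(1).is_ok());
    assert!(tx.try_send(2).is_ok());
    // The channel is at capacity: the send is rejected instead of queueing forever.
    assert!(matches!(tx.try_send(3), Err(TrySendError::Full(3))));
    // Draining one message frees a slot for the sender again.
    assert_eq!(rx.recv().unwrap(), 1);
    assert!(tx.try_send(3).is_ok());
}
```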

+ 58 - 5
core/src/tvu.rs

@@ -4,6 +4,7 @@
 use {
     crate::{
         banking_trace::BankingTracer,
+        block_creation_loop::ReplayHighestFrozen,
         cluster_info_vote_listener::{
             DuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VerifiedVoteReceiver,
             VoteTracker,
@@ -16,12 +17,12 @@ use {
         repair::repair_service::{OutstandingShredRepairs, RepairInfo, RepairServiceChannels},
         replay_stage::{ReplayReceivers, ReplaySenders, ReplayStage, ReplayStageConfig},
         shred_fetch_stage::{ShredFetchStage, SHRED_FETCH_CHANNEL_SIZE},
-        voting_service::VotingService,
+        voting_service::{VotingService, VotingServiceOverride},
         warm_quic_cache_service::WarmQuicCacheService,
         window_service::{WindowService, WindowServiceChannels},
     },
     bytes::Bytes,
-    crossbeam_channel::{unbounded, Receiver, Sender},
+    crossbeam_channel::{bounded, unbounded, Receiver, Sender},
     solana_client::connection_cache::ConnectionCache,
     solana_clock::Slot,
     solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierArc,
@@ -42,12 +43,22 @@ use {
         rpc_subscriptions::RpcSubscriptions, slot_status_notifier::SlotStatusNotifier,
     },
     solana_runtime::{
-        bank_forks::BankForks, commitment::BlockCommitmentCache,
-        prioritization_fee_cache::PrioritizationFeeCache, snapshot_controller::SnapshotController,
-        vote_sender_types::ReplayVoteSender,
+        bank_forks::BankForks,
+        commitment::BlockCommitmentCache,
+        prioritization_fee_cache::PrioritizationFeeCache,
+        snapshot_controller::SnapshotController,
+        vote_sender_types::{
+            BLSVerifiedMessageReceiver, BLSVerifiedMessageSender, ReplayVoteSender,
+        },
     },
     solana_streamer::evicting_sender::EvictingSender,
     solana_turbine::{retransmit_stage::RetransmitStage, xdp::XdpSender},
+    solana_votor::{
+        event::{VotorEventReceiver, VotorEventSender},
+        vote_history::VoteHistory,
+        vote_history_storage::VoteHistoryStorage,
+        votor::LeaderWindowNotifier,
+    },
     std::{
         collections::HashSet,
         net::{SocketAddr, UdpSocket},
@@ -138,6 +149,8 @@ impl Tvu {
         poh_recorder: &Arc<RwLock<PohRecorder>>,
         tower: Tower,
         tower_storage: Arc<dyn TowerStorage>,
+        vote_history: VoteHistory,
+        vote_history_storage: Arc<dyn VoteHistoryStorage>,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
         exit: Arc<AtomicBool>,
         block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
@@ -152,6 +165,8 @@ impl Tvu {
         completed_data_sets_sender: Option<CompletedDataSetsSender>,
         bank_notification_sender: Option<BankNotificationSenderConfig>,
         duplicate_confirmed_slots_receiver: DuplicateConfirmedSlotsReceiver,
+        own_vote_sender: BLSVerifiedMessageSender,
+        bls_verified_message_receiver: BLSVerifiedMessageReceiver,
         tvu_config: TvuConfig,
         max_slots: &Arc<MaxSlots>,
         block_metadata_notifier: Option<BlockMetadataNotifierArc>,
@@ -172,6 +187,11 @@ impl Tvu {
         wen_restart_repair_slots: Option<Arc<RwLock<Vec<Slot>>>>,
         slot_status_notifier: Option<SlotStatusNotifier>,
         vote_connection_cache: Arc<ConnectionCache>,
+        replay_highest_frozen: Arc<ReplayHighestFrozen>,
+        leader_window_notifier: Arc<LeaderWindowNotifier>,
+        voting_service_test_override: Option<VotingServiceOverride>,
+        votor_event_sender: VotorEventSender,
+        votor_event_receiver: VotorEventReceiver,
     ) -> Result<Self, String> {
         let in_wen_restart = wen_restart_repair_slots.is_some();
 
@@ -227,6 +247,7 @@ impl Tvu {
             rpc_subscriptions.clone(),
             slot_status_notifier.clone(),
             tvu_config.xdp_sender,
+            votor_event_sender.clone(),
         );
 
         let (ancestor_duplicate_slots_sender, ancestor_duplicate_slots_receiver) = unbounded();
@@ -235,6 +256,7 @@ impl Tvu {
             unbounded();
         let (dumped_slots_sender, dumped_slots_receiver) = unbounded();
         let (popular_pruned_forks_sender, popular_pruned_forks_receiver) = unbounded();
+        let (certificate_sender, certificate_receiver) = unbounded();
         let window_service = {
             let epoch_schedule = bank_forks
                 .read()
@@ -277,6 +299,7 @@ impl Tvu {
                 window_service_channels,
                 leader_schedule_cache.clone(),
                 outstanding_repair_requests,
+                certificate_receiver,
             )
         };
 
@@ -293,6 +316,10 @@ impl Tvu {
         let (cost_update_sender, cost_update_receiver) = unbounded();
         let (drop_bank_sender, drop_bank_receiver) = unbounded();
         let (voting_sender, voting_receiver) = unbounded();
+        // The BLS sender channel is mostly exercised during standstill handling:
+        // 10s/400ms = 25 slots, with <=5 votes and <=5 certificates per slot, i.e.
+        // at most 25 * 10 = 250 messages; we cap the channel at 512 for ~2x headroom.
+        let (bls_sender, bls_receiver) = bounded(512);
 
         let replay_senders = ReplaySenders {
             rpc_subscriptions,
@@ -306,9 +333,13 @@ impl Tvu {
             cluster_slots_update_sender,
             cost_update_sender,
             voting_sender,
+            bls_sender,
             drop_bank_sender,
             block_metadata_notifier,
             dumped_slots_sender,
+            certificate_sender,
+            votor_event_sender,
+            own_vote_sender,
         };
 
         let replay_receivers = ReplayReceivers {
@@ -318,6 +349,8 @@ impl Tvu {
             duplicate_confirmed_slots_receiver,
             gossip_verified_vote_hash_receiver,
             popular_pruned_forks_receiver,
+            bls_verified_message_receiver,
+            votor_event_receiver,
         };
 
         let replay_stage_config = ReplayStageConfig {
@@ -336,20 +369,28 @@ impl Tvu {
             cluster_info: cluster_info.clone(),
             poh_recorder: poh_recorder.clone(),
             tower,
+            vote_history,
+            vote_history_storage: vote_history_storage.clone(),
             vote_tracker,
             cluster_slots,
             log_messages_bytes_limit,
             prioritization_fee_cache: prioritization_fee_cache.clone(),
             banking_tracer,
             snapshot_controller,
+            replay_highest_frozen,
+            leader_window_notifier,
         };
 
         let voting_service = VotingService::new(
             voting_receiver,
+            bls_receiver,
             cluster_info.clone(),
             poh_recorder.clone(),
             tower_storage,
+            vote_history_storage.clone(),
             vote_connection_cache.clone(),
+            bank_forks.clone(),
+            voting_service_test_override,
         );
 
         let warm_quic_cache_service = create_cache_warmer_if_needed(
@@ -473,6 +514,7 @@ pub mod tests {
         solana_signer::Signer,
         solana_streamer::socket::SocketAddrSpace,
         solana_tpu_client::tpu_client::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_VOTE_USE_QUIC},
+        solana_votor::vote_history_storage::FileVoteHistoryStorage,
         std::sync::atomic::{AtomicU64, Ordering},
     };
 
@@ -521,6 +563,7 @@ pub mod tests {
         let (_verified_vote_sender, verified_vote_receiver) = unbounded();
         let (replay_vote_sender, _replay_vote_receiver) = unbounded();
         let (_, gossip_confirmed_slots_receiver) = unbounded();
+        let (bls_verified_message_sender, bls_verified_message_receiver) = unbounded();
         let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
         let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
         let outstanding_repair_requests = Arc::<RwLock<OutstandingShredRepairs>>::default();
@@ -541,6 +584,7 @@ pub mod tests {
                 DEFAULT_TPU_CONNECTION_POOL_SIZE,
             )
         };
+        let (votor_event_sender, votor_event_receiver) = unbounded();
 
         let tvu = Tvu::new(
             &vote_keypair.pubkey(),
@@ -567,6 +611,8 @@ pub mod tests {
             &poh_recorder,
             Tower::default(),
             Arc::new(FileTowerStorage::default()),
+            VoteHistory::default(),
+            Arc::new(FileVoteHistoryStorage::default()),
             &leader_schedule_cache,
             exit.clone(),
             block_commitment_cache,
@@ -581,6 +627,8 @@ pub mod tests {
             /*completed_data_sets_sender:*/ None,
             None,
             gossip_confirmed_slots_receiver,
+            bls_verified_message_sender,
+            bls_verified_message_receiver,
             TvuConfig::default(),
             &Arc::new(MaxSlots::default()),
             None,
@@ -601,6 +649,11 @@ pub mod tests {
             wen_restart_repair_slots,
             None,
             Arc::new(connection_cache),
+            Arc::new(ReplayHighestFrozen::default()),
+            Arc::new(LeaderWindowNotifier::default()),
+            None,
+            votor_event_sender,
+            votor_event_receiver,
         )
         .expect("assume success");
         if enable_wen_restart {
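
The bounded(512) sizing in the hunk above follows from worst-case arithmetic: a 10s standstill at 400ms per slot spans 25 slots, each contributing at most 5 votes and 5 certificates. A quick sketch of that bound (constants taken from the code comment, not imported from validator code):

```rust
fn main() {
    const STANDSTILL_MS: u64 = 10_000; // 10s of standstill handling
    const SLOT_MS: u64 = 400; // one slot every 400ms
    const MSGS_PER_SLOT: u64 = 5 + 5; // <=5 votes and <=5 certificates per slot

    let slots = STANDSTILL_MS / SLOT_MS; // 25 slots
    let worst_case = slots * MSGS_PER_SLOT; // 250 messages
    assert_eq!(worst_case, 250);
    // Capping the channel at 512 leaves roughly 2x headroom over the worst case.
    assert!(512 >= 2 * worst_case);
}
```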

+ 274 - 65
core/src/validator.rs

@@ -5,6 +5,7 @@ use {
     crate::{
         admin_rpc_post_init::{AdminRpcRequestMetadataPostInit, KeyUpdaterType, KeyUpdaters},
         banking_trace::{self, BankingTracer, TraceError},
+        block_creation_loop::{self, BlockCreationLoopConfig, ReplayHighestFrozen},
         cluster_info_vote_listener::VoteTracker,
         completed_data_sets_service::CompletedDataSetsService,
         consensus::{
@@ -19,14 +20,17 @@ use {
             serve_repair_service::ServeRepairService,
         },
         sample_performance_service::SamplePerformanceService,
-        sigverify,
+        sigverifier::ed25519_sigverifier,
         snapshot_packager_service::SnapshotPackagerService,
         stats_reporter_service::StatsReporterService,
         system_monitor_service::{
             verify_net_stats_access, SystemMonitorService, SystemMonitorStatsReportConfig,
         },
-        tpu::{ForwardingClientOption, Tpu, TpuSockets, DEFAULT_TPU_COALESCE},
+        tpu::{
+            ForwardingClientOption, Tpu, TpuSockets, DEFAULT_TPU_COALESCE, MAX_ALPENGLOW_PACKET_NUM,
+        },
         tvu::{Tvu, TvuConfig, TvuSockets},
+        voting_service::VotingServiceOverride,
     },
     anyhow::{anyhow, Context, Result},
     crossbeam_channel::{bounded, unbounded, Receiver},
@@ -131,6 +135,11 @@ use {
     solana_unified_scheduler_pool::DefaultSchedulerPool,
     solana_validator_exit::Exit,
     solana_vote_program::vote_state,
+    solana_votor::{
+        vote_history::{VoteHistory, VoteHistoryError},
+        vote_history_storage::{NullVoteHistoryStorage, VoteHistoryStorage},
+        votor::LeaderWindowNotifier,
+    },
     solana_wen_restart::wen_restart::{wait_for_wen_restart, WenRestartConfig},
     std::{
         borrow::Cow,
@@ -255,6 +264,7 @@ pub struct ValidatorConfig {
     pub run_verification: bool,
     pub require_tower: bool,
     pub tower_storage: Arc<dyn TowerStorage>,
+    pub vote_history_storage: Arc<dyn VoteHistoryStorage>,
     pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
     pub contact_debug_interval: u64,
     pub contact_save_interval: u64,
@@ -296,6 +306,7 @@ pub struct ValidatorConfig {
     pub delay_leader_block_for_pending_fork: bool,
     pub use_tpu_client_next: bool,
     pub retransmit_xdp: Option<XdpConfig>,
+    pub voting_service_test_override: Option<VotingServiceOverride>,
     pub repair_handler_type: RepairHandlerType,
 }
 
@@ -333,6 +344,7 @@ impl ValidatorConfig {
             run_verification: true,
             require_tower: false,
             tower_storage: Arc::new(NullTowerStorage::default()),
+            vote_history_storage: Arc::new(NullVoteHistoryStorage::default()),
             debug_keys: None,
             contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
             contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
@@ -377,6 +389,7 @@ impl ValidatorConfig {
             use_tpu_client_next: true,
             retransmit_xdp: None,
             repair_handler_type: RepairHandlerType::default(),
+            voting_service_test_override: None,
         }
     }
 
@@ -663,7 +676,7 @@ impl Validator {
         } else {
             info!("Initializing sigverify...");
         }
-        sigverify::init();
+        ed25519_sigverifier::init();
         info!("Initializing sigverify done.");
 
         if !ledger_path.is_dir() {
@@ -893,6 +906,8 @@ impl Validator {
         );
 
         let (replay_vote_sender, replay_vote_receiver) = unbounded();
+        let (bls_verified_message_sender, bls_verified_message_receiver) =
+            bounded(MAX_ALPENGLOW_PACKET_NUM);
 
         // block min prioritization fee cache should be readable by RPC, and writable by validator
         // (by both replay stage and banking stage)
@@ -902,7 +917,15 @@ impl Validator {
         let startup_verification_complete;
         let (mut poh_recorder, entry_receiver) = {
             let bank = &bank_forks.read().unwrap().working_bank();
+            let highest_frozen_bank = bank_forks.read().unwrap().highest_frozen_bank();
             startup_verification_complete = Arc::clone(bank.get_startup_verification_complete());
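+            // Alpenglow is considered enabled once the highest frozen slot has reached
+            // the activation slot of the gating feature checked below.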
+            let first_alpenglow_slot = highest_frozen_bank.as_ref().and_then(|hfb| {
+                hfb.feature_set
+                    .activated_slot(&agave_feature_set::secp256k1_program_enabled::id())
+            });
+            let is_alpenglow_enabled = highest_frozen_bank
+                .zip(first_alpenglow_slot)
+                .is_some_and(|(hfs, fas)| hfs.slot() >= fas);
             PohRecorder::new_with_clear_signal(
                 bank.tick_height(),
                 bank.last_blockhash(),
@@ -915,6 +938,7 @@ impl Validator {
                 &leader_schedule_cache,
                 &genesis_config.poh_config,
                 exit.clone(),
+                is_alpenglow_enabled,
             )
         };
         if transaction_status_sender.is_some() {
@@ -969,6 +993,11 @@ impl Validator {
         let entry_notification_sender = entry_notifier_service
             .as_ref()
             .map(|service| service.sender());
+
+        let is_alpenglow = genesis_config
+            .accounts
+            .contains_key(&agave_feature_set::secp256k1_program_enabled::id());
+
         let mut process_blockstore = ProcessBlockStore::new(
             &id,
             vote_account,
@@ -983,6 +1012,7 @@ impl Validator {
             blockstore_root_scan,
             &snapshot_controller,
             config,
+            is_alpenglow,
         );
 
         maybe_warp_slot(
@@ -1323,6 +1353,26 @@ impl Validator {
         let wait_for_vote_to_start_leader =
             !waited_for_supermajority && !config.no_wait_for_vote_to_start_leader;
 
+        let replay_highest_frozen = Arc::new(ReplayHighestFrozen::default());
+        let leader_window_notifier = Arc::new(LeaderWindowNotifier::default());
+        let block_creation_loop_config = BlockCreationLoopConfig {
+            exit: exit.clone(),
+            bank_forks: bank_forks.clone(),
+            blockstore: blockstore.clone(),
+            cluster_info: cluster_info.clone(),
+            poh_recorder: poh_recorder.clone(),
+            leader_schedule_cache: leader_schedule_cache.clone(),
+            rpc_subscriptions: rpc_subscriptions.clone(),
+            banking_tracer: banking_tracer.clone(),
+            slot_status_notifier: slot_status_notifier.clone(),
+            record_receiver: record_receiver.clone(),
+            leader_window_notifier: leader_window_notifier.clone(),
+            replay_highest_frozen: replay_highest_frozen.clone(),
+        };
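+        // Passed to PohService::new below, which takes care of running the loop.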
+        let block_creation_loop = || {
+            block_creation_loop::start_loop(block_creation_loop_config);
+        };
+
         let poh_service = PohService::new(
             poh_recorder.clone(),
             &genesis_config.poh_config,
@@ -1331,6 +1381,7 @@ impl Validator {
             config.poh_pinned_cpu_core,
             config.poh_hashes_per_batch,
             record_receiver,
+            block_creation_loop,
         );
         assert_eq!(
             blockstore.get_new_shred_signals_len(),
@@ -1344,7 +1395,6 @@ impl Validator {
         let (verified_vote_sender, verified_vote_receiver) = unbounded();
         let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded();
         let (duplicate_confirmed_slot_sender, duplicate_confirmed_slots_receiver) = unbounded();
-
         let entry_notification_sender = entry_notifier_service
             .as_ref()
             .map(|service| service.sender_cloned());
@@ -1439,22 +1489,50 @@ impl Validator {
         } else {
             None
         };
-        let tower = match process_blockstore.process_to_create_tower() {
-            Ok(tower) => {
-                info!("Tower state: {tower:?}");
-                tower
-            }
-            Err(e) => {
-                warn!("Unable to retrieve tower: {e:?} creating default tower....");
-                Tower::default()
-            }
+        let (tower, vote_history) = if genesis_config
+            .accounts
+            .contains_key(&agave_feature_set::secp256k1_program_enabled::id())
+        {
+            let vote_history = match process_blockstore.process_to_create_vote_history() {
+                Ok(vote_history) => {
+                    info!("Vote history: {:?}", vote_history);
+                    vote_history
+                }
+                Err(e) => {
+                    warn!("Unable to retrieve vote history: {e:?} creating default vote history....");
+                    VoteHistory::default()
+                }
+            };
+            (Tower::default(), vote_history)
+        } else {
+            let tower = match process_blockstore.process_to_create_tower() {
+                Ok(tower) => {
+                    info!("Tower state: {:?}", tower);
+                    tower
+                }
+                Err(e) => {
+                    warn!("Unable to retrieve tower: {e:?} creating default tower....");
+                    Tower::default()
+                }
+            };
+            (tower, VoteHistory::default())
         };
+
         let last_vote = tower.last_vote();
 
         let outstanding_repair_requests =
             Arc::<RwLock<repair::repair_service::OutstandingShredRepairs>>::default();
         let cluster_slots =
             Arc::new(crate::cluster_slots_service::cluster_slots::ClusterSlots::default());
+        // This channel backing up indicates a serious problem in the voting loop.
+        // Cap it at 1000 for now. TODO: add metrics for channel length.
+        let (votor_event_sender, votor_event_receiver) = bounded(1000);
 
         // If RPC is supported and ConnectionCache is used, pass ConnectionCache for being warmup inside Tvu.
         let connection_cache_for_warmup =
@@ -1493,6 +1571,8 @@ impl Validator {
             &poh_recorder,
             tower,
             config.tower_storage.clone(),
+            vote_history,
+            config.vote_history_storage.clone(),
             &leader_schedule_cache,
             exit.clone(),
             block_commitment_cache,
@@ -1507,6 +1587,8 @@ impl Validator {
             completed_data_sets_sender,
             bank_notification_sender.clone(),
             duplicate_confirmed_slots_receiver,
+            bls_verified_message_sender.clone(),
+            bls_verified_message_receiver,
             TvuConfig {
                 max_ledger_shreds: config.max_ledger_shreds,
                 shred_version: node.info.shred_version(),
@@ -1537,6 +1619,11 @@ impl Validator {
             wen_restart_repair_slots.clone(),
             slot_status_notifier,
             vote_connection_cache,
+            replay_highest_frozen,
+            leader_window_notifier,
+            config.voting_service_test_override.clone(),
+            votor_event_sender.clone(),
+            votor_event_receiver,
         )
         .map_err(ValidatorError::Other)?;
 
@@ -1593,6 +1680,7 @@ impl Validator {
                 vote_quic: node.sockets.tpu_vote_quic,
                 vote_forwarding_client: node.sockets.tpu_vote_forwarding_client,
                 vortexor_receivers: node.sockets.vortexor_receivers,
+                alpenglow: node.sockets.alpenglow,
             },
             rpc_subscriptions.clone(),
             transaction_status_sender,
@@ -1612,7 +1700,9 @@ impl Validator {
             config.tpu_coalesce,
             duplicate_confirmed_slot_sender,
             forwarding_tpu_client,
+            bls_verified_message_sender,
             turbine_quic_endpoint_sender,
+            votor_event_sender.clone(),
             &identity_keypair,
             config.runtime_config.log_messages_bytes_limit,
             &staked_nodes,
@@ -1663,6 +1753,7 @@ impl Validator {
             outstanding_repair_requests,
             cluster_slots,
             gossip_socket: Some(node.sockets.gossip.clone()),
+            votor_event_sender,
         });
 
         Ok(Self {
@@ -1738,6 +1829,10 @@ impl Validator {
             "local retransmit address: {}",
             node.sockets.retransmit_sockets[0].local_addr().unwrap()
         );
+        info!(
+            "local alpenglow address: {}",
+            node.sockets.alpenglow.local_addr().unwrap()
+        );
     }
 
     pub fn join(self) {
@@ -1982,6 +2077,80 @@ fn post_process_restored_tower(
     Ok(restored_tower)
 }
 
+fn post_process_restored_vote_history(
+    restored_vote_history: solana_votor::vote_history_storage::Result<VoteHistory>,
+    validator_identity: &Pubkey,
+    config: &ValidatorConfig,
+    bank_forks: &BankForks,
+) -> Result<VoteHistory, String> {
+    let mut should_require_vote_history = config.require_tower;
+
+    let restored_vote_history = restored_vote_history.and_then(|mut vote_history| {
+        let root_bank = bank_forks.root_bank();
+
+        if vote_history.root() < root_bank.slot() {
+            // Vote history is old, update
+            vote_history.set_root(root_bank.slot());
+        }
+
+        if let Some(hard_fork_restart_slot) =
+            maybe_cluster_restart_with_hard_fork(config, root_bank.slot())
+        {
+            // Intentionally fail to restore vote_history: we are supposedly in a new hard
+            // fork, so past out-of-chain votor state no longer makes sense.
+            // TODO: what if --wait-for-supermajority is passed again when the validator restarts?
+            let message =
+                format!("Hard fork is detected; discarding vote_history restoration result: {vote_history:?}");
+            datapoint_error!("vote_history_error", ("error", message, String),);
+            error!("{message}");
+
+            // unconditionally relax the vote_history requirement
+            should_require_vote_history = false;
+            return Err(VoteHistoryError::HardFork(hard_fork_restart_slot));
+        }
+
+        if let Some(warp_slot) = config.warp_slot {
+            // unconditionally relax the vote_history requirement
+            should_require_vote_history = false;
+            return Err(VoteHistoryError::HardFork(warp_slot));
+        }
+
+        Ok(vote_history)
+    });
+
+    let restored_vote_history = match restored_vote_history {
+        Ok(vote_history) => vote_history,
+        Err(err) => {
+            if !err.is_file_missing() {
+                datapoint_error!(
+                    "vote_history_error",
+                    (
+                        "error",
+                        format!("Unable to restore vote_history: {err}"),
+                        String
+                    ),
+                );
+            }
+            if should_require_vote_history {
+                return Err(format!(
+                    "Requested mandatory vote_history restore failed: {err}. Ensure that the vote history \
+                    storage file has been copied to the correct directory. Aborting"
+                ));
+            }
+            error!("Rebuilding an empty vote_history from root slot due to failed restore: {err}");
+
+            VoteHistory::new(*validator_identity, bank_forks.root())
+        }
+    };
+
+    Ok(restored_vote_history)
+}
+
 fn load_genesis(
     config: &ValidatorConfig,
     ledger_path: &Path,
@@ -2148,6 +2317,8 @@ pub struct ProcessBlockStore<'a> {
     snapshot_controller: &'a SnapshotController,
     config: &'a ValidatorConfig,
     tower: Option<Tower>,
+    vote_history: Option<VoteHistory>,
+    is_alpenglow: bool,
 }
 
 impl<'a> ProcessBlockStore<'a> {
@@ -2166,6 +2337,7 @@ impl<'a> ProcessBlockStore<'a> {
         blockstore_root_scan: BlockstoreRootScan,
         snapshot_controller: &'a SnapshotController,
         config: &'a ValidatorConfig,
+        is_alpenglow: bool,
     ) -> Self {
         Self {
             id,
@@ -2182,51 +2354,59 @@ impl<'a> ProcessBlockStore<'a> {
             snapshot_controller,
             config,
             tower: None,
+            vote_history: None,
+            is_alpenglow,
         }
     }
 
     pub(crate) fn process(&mut self) -> Result<(), String> {
-        if self.tower.is_none() {
-            let previous_start_process = *self.start_progress.read().unwrap();
-            *self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
-
-            let exit = Arc::new(AtomicBool::new(false));
-            if let Ok(Some(max_slot)) = self.blockstore.highest_slot() {
-                let bank_forks = self.bank_forks.clone();
-                let exit = exit.clone();
-                let start_progress = self.start_progress.clone();
-
-                let _ = Builder::new()
-                    .name("solRptLdgrStat".to_string())
-                    .spawn(move || {
-                        while !exit.load(Ordering::Relaxed) {
-                            let slot = bank_forks.read().unwrap().working_bank().slot();
-                            *start_progress.write().unwrap() =
-                                ValidatorStartProgress::ProcessingLedger { slot, max_slot };
-                            sleep(Duration::from_secs(2));
-                        }
-                    })
-                    .unwrap();
-            }
-            blockstore_processor::process_blockstore_from_root(
-                self.blockstore,
-                self.bank_forks,
-                self.leader_schedule_cache,
-                self.process_options,
-                self.transaction_status_sender,
-                self.entry_notification_sender,
-                Some(self.snapshot_controller),
-            )
-            .map_err(|err| {
-                exit.store(true, Ordering::Relaxed);
-                format!("Failed to load ledger: {err:?}")
-            })?;
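+        // Nothing to do if the relevant consensus state (vote history for alpenglow,
+        // tower otherwise) was already restored by an earlier call.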
+        if self.is_alpenglow && self.vote_history.is_some()
+            || !self.is_alpenglow && self.tower.is_some()
+        {
+            return Ok(());
+        }
+        let previous_start_process = *self.start_progress.read().unwrap();
+        *self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger;
+
+        let exit = Arc::new(AtomicBool::new(false));
+        if let Ok(Some(max_slot)) = self.blockstore.highest_slot() {
+            let bank_forks = self.bank_forks.clone();
+            let exit = exit.clone();
+            let start_progress = self.start_progress.clone();
+
+            let _ = Builder::new()
+                .name("solRptLdgrStat".to_string())
+                .spawn(move || {
+                    while !exit.load(Ordering::Relaxed) {
+                        let slot = bank_forks.read().unwrap().working_bank().slot();
+                        *start_progress.write().unwrap() =
+                            ValidatorStartProgress::ProcessingLedger { slot, max_slot };
+                        sleep(Duration::from_secs(2));
+                    }
+                })
+                .unwrap();
+        }
+        blockstore_processor::process_blockstore_from_root(
+            self.blockstore,
+            self.bank_forks,
+            self.leader_schedule_cache,
+            self.process_options,
+            self.transaction_status_sender,
+            self.entry_notification_sender,
+            Some(self.snapshot_controller),
+        )
+        .map_err(|err| {
             exit.store(true, Ordering::Relaxed);
+            format!("Failed to load ledger: {err:?}")
+        })?;
+        exit.store(true, Ordering::Relaxed);
 
-            if let Some(blockstore_root_scan) = self.blockstore_root_scan.take() {
-                blockstore_root_scan.join();
-            }
+        if let Some(blockstore_root_scan) = self.blockstore_root_scan.take() {
+            blockstore_root_scan.join();
+        }
 
+        if !self.is_alpenglow {
+            // Load and post process tower
             self.tower = Some({
                 let restored_tower = Tower::restore(self.config.tower_storage.as_ref(), self.id);
                 if let Ok(tower) = &restored_tower {
@@ -2247,23 +2427,47 @@ impl<'a> ProcessBlockStore<'a> {
                     &self.bank_forks.read().unwrap(),
                 )?
             });
+        } else {
+            // Load and post process vote history
+            self.vote_history = Some({
+                let restored_vote_history =
+                    VoteHistory::restore(self.config.vote_history_storage.as_ref(), self.id);
+                if let Ok(vote_history) = &restored_vote_history {
+                    // reconciliation attempt 1 of 2 with vote history
+                    reconcile_blockstore_roots_with_external_source(
+                        ExternalRootSource::VoteHistory(vote_history.root()),
+                        self.blockstore,
+                        &mut self.original_blockstore_root,
+                    )
+                    .map_err(|err| {
+                        format!("Failed to reconcile blockstore with vote history: {err:?}")
+                    })?;
+                }
 
-            if let Some(hard_fork_restart_slot) = maybe_cluster_restart_with_hard_fork(
-                self.config,
-                self.bank_forks.read().unwrap().root(),
-            ) {
-                // reconciliation attempt 2 of 2 with hard fork
-                // this should be #2 because hard fork root > tower root in almost all cases
-                reconcile_blockstore_roots_with_external_source(
-                    ExternalRootSource::HardFork(hard_fork_restart_slot),
-                    self.blockstore,
-                    &mut self.original_blockstore_root,
-                )
-                .map_err(|err| format!("Failed to reconcile blockstore with hard fork: {err:?}"))?;
-            }
+                post_process_restored_vote_history(
+                    restored_vote_history,
+                    self.id,
+                    self.config,
+                    &self.bank_forks.read().unwrap(),
+                )?
+            });
+        }
 
-            *self.start_progress.write().unwrap() = previous_start_process;
+        if let Some(hard_fork_restart_slot) = maybe_cluster_restart_with_hard_fork(
+            self.config,
+            self.bank_forks.read().unwrap().root(),
+        ) {
+            // reconciliation attempt 2 of 2 with hard fork
+            // this should be #2 because hard fork root > tower root in almost all cases
+            reconcile_blockstore_roots_with_external_source(
+                ExternalRootSource::HardFork(hard_fork_restart_slot),
+                self.blockstore,
+                &mut self.original_blockstore_root,
+            )
+            .map_err(|err| format!("Failed to reconcile blockstore with hard fork: {err:?}"))?;
         }
+
+        *self.start_progress.write().unwrap() = previous_start_process;
         Ok(())
     }
 
@@ -2271,6 +2475,11 @@ impl<'a> ProcessBlockStore<'a> {
         self.process()?;
         Ok(self.tower.unwrap())
     }
+
+    pub(crate) fn process_to_create_vote_history(mut self) -> Result<VoteHistory, String> {
+        self.process()?;
+        Ok(self.vote_history.unwrap())
+    }
 }
 
 fn maybe_warp_slot(

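A quick sketch of how the two `ProcessBlockStore` exit paths fit together: the hunk above adds `process_to_create_vote_history` alongside the pre-existing tower variant (`process_to_create_tower` in agave's validator.rs). The wrapper below is hypothetical and only illustrates the branch; in the real code `is_alpenglow` is a field of `ProcessBlockStore` itself.

```rust
// Hypothetical caller sketch; `restore_consensus_state` is not in the diff.
fn restore_consensus_state(
    process_block_store: ProcessBlockStore<'_>,
    is_alpenglow: bool,
) -> Result<(), String> {
    if is_alpenglow {
        // Alpenglow: restores VoteHistory; blockstore-root reconciliation
        // ("attempt 1 of 2") happens inside process().
        let _vote_history = process_block_store.process_to_create_vote_history()?;
    } else {
        // TowerBFT: restores the Tower, as before.
        let _tower = process_block_store.process_to_create_tower()?;
    }
    Ok(())
}
```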
+ 23 - 18
core/src/vote_simulator.rs

@@ -14,7 +14,7 @@ use {
         repair::cluster_slot_state_verifier::{
             DuplicateConfirmedSlots, DuplicateSlotsTracker, EpochSlotsFrozenSlots,
         },
-        replay_stage::{HeaviestForkFailures, ReplayStage},
+        replay_stage::{HeaviestForkFailures, ReplayStage, TowerBFTStructures},
         unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
     },
     crossbeam_channel::unbounded,
@@ -44,8 +44,8 @@ pub struct VoteSimulator {
     pub vote_pubkeys: Vec<Pubkey>,
     pub bank_forks: Arc<RwLock<BankForks>>,
     pub progress: ProgressMap,
-    pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
     pub latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks,
+    pub tbft_structs: TowerBFTStructures,
 }
 
 impl VoteSimulator {
@@ -64,8 +64,14 @@ impl VoteSimulator {
             vote_pubkeys,
             bank_forks,
             progress,
-            heaviest_subtree_fork_choice,
             latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks::default(),
+            tbft_structs: TowerBFTStructures {
+                heaviest_subtree_fork_choice,
+                duplicate_slots_tracker: DuplicateSlotsTracker::default(),
+                duplicate_confirmed_slots: DuplicateConfirmedSlots::default(),
+                unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes::default(),
+                epoch_slots_frozen_slots: EpochSlotsFrozenSlots::default(),
+            },
         }
     }
 
@@ -105,7 +111,9 @@ impl VoteSimulator {
                     let tower_sync = if let Some(vote_account) =
                         parent_bank.get_vote_account(&keypairs.vote_keypair.pubkey())
                     {
-                        let mut vote_state = TowerVoteState::from(vote_account.vote_state_view());
+                        let mut vote_state = TowerVoteState::from(
+                            vote_account.vote_state_view().expect("must be TowerBFT"),
+                        );
                         vote_state.process_next_vote_slot(parent);
                         TowerSync::new(
                             vote_state.votes,
@@ -136,7 +144,7 @@ impl VoteSimulator {
                     let vote_account = new_bank
                         .get_vote_account(&keypairs.vote_keypair.pubkey())
                         .unwrap();
-                    let vote_state_view = vote_account.vote_state_view();
+                    let vote_state_view = vote_account.vote_state_view().unwrap();
                     assert!(vote_state_view
                         .votes_iter()
                         .any(|lockout| lockout.slot() == parent));
@@ -151,10 +159,12 @@ impl VoteSimulator {
                     .get_fork_stats_mut(new_bank.slot())
                     .expect("All frozen banks must exist in the Progress map")
                     .bank_hash = Some(new_bank.hash());
-                self.heaviest_subtree_fork_choice.add_new_leaf_slot(
-                    (new_bank.slot(), new_bank.hash()),
-                    Some((new_bank.parent_slot(), new_bank.parent_hash())),
-                );
+                self.tbft_structs
+                    .heaviest_subtree_fork_choice
+                    .add_new_leaf_slot(
+                        (new_bank.slot(), new_bank.hash()),
+                        Some((new_bank.parent_slot(), new_bank.parent_hash())),
+                    );
             }
 
             walk.forward();
@@ -186,7 +196,7 @@ impl VoteSimulator {
             &VoteTracker::default(),
             &ClusterSlots::default(),
             &self.bank_forks,
-            &mut self.heaviest_subtree_fork_choice,
+            &mut self.tbft_structs.heaviest_subtree_fork_choice,
             &mut self.latest_validator_votes_for_frozen_banks,
         );
 
@@ -210,7 +220,7 @@ impl VoteSimulator {
             &self.progress,
             tower,
             &self.latest_validator_votes_for_frozen_banks,
-            &self.heaviest_subtree_fork_choice,
+            &self.tbft_structs.heaviest_subtree_fork_choice,
         );
 
         // Make sure this slot isn't locked out or failing threshold
@@ -235,16 +245,11 @@ impl VoteSimulator {
             &mut self.progress,
             None, // snapshot_controller
             None,
-            &mut self.heaviest_subtree_fork_choice,
-            &mut DuplicateSlotsTracker::default(),
-            &mut DuplicateConfirmedSlots::default(),
-            &mut UnfrozenGossipVerifiedVoteHashes::default(),
             &mut true,
-            &mut Vec::new(),
-            &mut EpochSlotsFrozenSlots::default(),
             &drop_bank_sender,
+            &mut self.tbft_structs,
         )
-        .unwrap()
+        .unwrap();
     }
 
     pub fn create_and_vote_new_branch(

+ 409 - 37
core/src/voting_service.rs

@@ -2,25 +2,35 @@ use {
     crate::{
         consensus::tower_storage::{SavedTowerVersions, TowerStorage},
         next_leader::upcoming_leader_tpu_vote_sockets,
+        staked_validators_cache::StakedValidatorsCache,
     },
     bincode::serialize,
-    crossbeam_channel::Receiver,
+    crossbeam_channel::{select, Receiver},
     solana_client::connection_cache::ConnectionCache,
     solana_clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET},
     solana_connection_cache::client_connection::ClientConnection,
     solana_gossip::cluster_info::ClusterInfo,
     solana_measure::measure::Measure,
     solana_poh::poh_recorder::PohRecorder,
+    solana_pubkey::Pubkey,
+    solana_runtime::bank_forks::BankForks,
     solana_transaction::Transaction,
     solana_transaction_error::TransportError,
+    solana_votor::{vote_history_storage::VoteHistoryStorage, voting_utils::BLSOp},
+    solana_votor_messages::bls_message::BLSMessage,
     std::{
+        collections::HashMap,
         net::SocketAddr,
         sync::{Arc, RwLock},
         thread::{self, Builder, JoinHandle},
+        time::{Duration, Instant},
     },
     thiserror::Error,
 };
 
+const STAKED_VALIDATORS_CACHE_TTL_S: u64 = 5;
+const STAKED_VALIDATORS_CACHE_NUM_EPOCH_CAP: usize = 5;
+
 pub enum VoteOp {
     PushVote {
         tx: Transaction,
@@ -33,15 +43,6 @@ pub enum VoteOp {
     },
 }
 
-impl VoteOp {
-    fn tx(&self) -> &Transaction {
-        match self {
-            VoteOp::PushVote { tx, .. } => tx,
-            VoteOp::RefreshVote { tx, .. } => tx,
-        }
-    }
-}
-
 #[derive(Debug, Error)]
 enum SendVoteError {
     #[error(transparent)]
@@ -52,6 +53,16 @@ enum SendVoteError {
     TransportError(#[from] TransportError),
 }
 
+fn send_message(
+    buf: Vec<u8>,
+    socket: &SocketAddr,
+    connection_cache: &Arc<ConnectionCache>,
+) -> Result<(), TransportError> {
+    let client = connection_cache.get_connection(socket);
+
+    client.send_data_async(buf)
+}
+
 fn send_vote_transaction(
     cluster_info: &ClusterInfo,
     transaction: &Transaction,
@@ -78,48 +89,147 @@ pub struct VotingService {
     thread_hdl: JoinHandle<()>,
 }
 
+/// Override for Alpenglow ports to allow testing with different ports.
+/// The last_modified timestamp is used to determine whether the override has
+/// changed, so StakedValidatorsCache knows when to refresh its cache.
+/// Inside the map, the key is the validator's vote pubkey and the value
+/// is the overridden socket address.
+/// For example, if you want validator A to send messages destined for
+/// validator B's Alpenglow port to a new_address instead, insert an entry
+/// into A's map like this (as a result, B will not receive the message):
+/// `override_map.insert(validator_b_pubkey, new_address);`
+#[derive(Clone, Default)]
+pub struct AlpenglowPortOverride {
+    inner: Arc<RwLock<AlpenglowPortOverrideInner>>,
+}
+
+#[derive(Clone)]
+struct AlpenglowPortOverrideInner {
+    override_map: HashMap<Pubkey, SocketAddr>,
+    last_modified: Instant,
+}
+
+impl Default for AlpenglowPortOverrideInner {
+    fn default() -> Self {
+        Self {
+            override_map: HashMap::new(),
+            last_modified: Instant::now(),
+        }
+    }
+}
+
+impl AlpenglowPortOverride {
+    pub fn update_override(&self, new_override: HashMap<Pubkey, SocketAddr>) {
+        let mut inner = self.inner.write().unwrap();
+        inner.override_map = new_override;
+        inner.last_modified = Instant::now();
+    }
+
+    pub fn has_new_override(&self, previous: Instant) -> bool {
+        self.inner.read().unwrap().last_modified != previous
+    }
+
+    pub fn last_modified(&self) -> Instant {
+        self.inner.read().unwrap().last_modified
+    }
+
+    pub fn clear(&self) {
+        let mut inner = self.inner.write().unwrap();
+        inner.override_map.clear();
+        inner.last_modified = Instant::now();
+    }
+
+    pub fn get_override_map(&self) -> HashMap<Pubkey, SocketAddr> {
+        self.inner.read().unwrap().override_map.clone()
+    }
+}
+
+#[derive(Clone)]
+pub struct VotingServiceOverride {
+    pub additional_listeners: Vec<SocketAddr>,
+    pub alpenglow_port_override: AlpenglowPortOverride,
+}
+
 impl VotingService {
     pub fn new(
         vote_receiver: Receiver<VoteOp>,
+        bls_receiver: Receiver<BLSOp>,
         cluster_info: Arc<ClusterInfo>,
         poh_recorder: Arc<RwLock<PohRecorder>>,
         tower_storage: Arc<dyn TowerStorage>,
+        vote_history_storage: Arc<dyn VoteHistoryStorage>,
         connection_cache: Arc<ConnectionCache>,
+        bank_forks: Arc<RwLock<BankForks>>,
+        test_override: Option<VotingServiceOverride>,
     ) -> Self {
+        let (additional_listeners, alpenglow_port_override) = test_override
+            .map(|test_override| {
+                (
+                    Some(test_override.additional_listeners),
+                    Some(test_override.alpenglow_port_override),
+                )
+            })
+            .unwrap_or((None, None));
         let thread_hdl = Builder::new()
             .name("solVoteService".to_string())
             .spawn(move || {
-                for vote_op in vote_receiver.iter() {
-                    Self::handle_vote(
-                        &cluster_info,
-                        &poh_recorder,
-                        tower_storage.as_ref(),
-                        vote_op,
-                        connection_cache.clone(),
-                    );
+                let mut staked_validators_cache = StakedValidatorsCache::new(
+                    bank_forks.clone(),
+                    connection_cache.protocol(),
+                    Duration::from_secs(STAKED_VALIDATORS_CACHE_TTL_S),
+                    STAKED_VALIDATORS_CACHE_NUM_EPOCH_CAP,
+                    false,
+                    alpenglow_port_override,
+                );
+
+                loop {
+                    select! {
+                        recv(vote_receiver) -> vote_op => {
+                            match vote_op {
+                                Ok(vote_op) => {
+                                    Self::handle_vote(
+                                        &cluster_info,
+                                        &poh_recorder,
+                                        tower_storage.as_ref(),
+                                        vote_op,
+                                        connection_cache.clone(),
+                                    );
+                                }
+                                Err(_) => {
+                                    break;
+                                }
+                            }
+                        }
+                        recv(bls_receiver) -> bls_op => {
+                            match bls_op {
+                                Ok(bls_op) => {
+                                    Self::handle_bls_vote(
+                                        &cluster_info,
+                                        vote_history_storage.as_ref(),
+                                        bls_op,
+                                        connection_cache.clone(),
+                                        additional_listeners.as_ref(),
+                                        &mut staked_validators_cache,
+                                    );
+                                }
+                                Err(_) => {
+                                    break;
+                                }
+                            }
+                        }
+                    }
                 }
             })
             .unwrap();
         Self { thread_hdl }
     }
 
-    pub fn handle_vote(
+    fn broadcast_tower_vote(
         cluster_info: &ClusterInfo,
         poh_recorder: &RwLock<PohRecorder>,
-        tower_storage: &dyn TowerStorage,
-        vote_op: VoteOp,
-        connection_cache: Arc<ConnectionCache>,
+        tx: &Transaction,
+        connection_cache: &Arc<ConnectionCache>,
     ) {
-        if let VoteOp::PushVote { saved_tower, .. } = &vote_op {
-            let mut measure = Measure::start("tower storage save");
-            if let Err(err) = tower_storage.store(saved_tower) {
-                error!("Unable to save tower to storage: {err:?}");
-                std::process::exit(1);
-            }
-            measure.stop();
-            trace!("{measure}");
-        }
-
         // Attempt to send our vote transaction to the leaders for the next few
         // slots. From the current slot to the forwarding slot offset
         // (inclusive).
@@ -127,10 +237,13 @@ impl VotingService {
             FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET.saturating_add(1);
         #[cfg(test)]
         static_assertions::const_assert_eq!(UPCOMING_LEADER_FANOUT_SLOTS, 3);
+
+        let leader_fanout = UPCOMING_LEADER_FANOUT_SLOTS;
+
         let upcoming_leader_sockets = upcoming_leader_tpu_vote_sockets(
             cluster_info,
             poh_recorder,
-            UPCOMING_LEADER_FANOUT_SLOTS,
+            leader_fanout,
             connection_cache.protocol(),
         );
 
@@ -138,20 +251,123 @@ impl VotingService {
             for tpu_vote_socket in upcoming_leader_sockets {
                 let _ = send_vote_transaction(
                     cluster_info,
-                    vote_op.tx(),
+                    tx,
                     Some(tpu_vote_socket),
-                    &connection_cache,
+                    connection_cache,
                 );
             }
         } else {
             // Send to our own tpu vote socket if we cannot find a leader to send to
-            let _ = send_vote_transaction(cluster_info, vote_op.tx(), None, &connection_cache);
+            let _ = send_vote_transaction(cluster_info, tx, None, connection_cache);
         }
+    }
+
+    fn broadcast_alpenglow_message(
+        slot: Slot,
+        cluster_info: &ClusterInfo,
+        bls_message: &BLSMessage,
+        connection_cache: Arc<ConnectionCache>,
+        additional_listeners: Option<&Vec<SocketAddr>>,
+        staked_validators_cache: &mut StakedValidatorsCache,
+    ) {
+        let (staked_validator_alpenglow_sockets, _) = staked_validators_cache
+            .get_staked_validators_by_slot_with_alpenglow_ports(slot, cluster_info, Instant::now());
 
+        let sockets = additional_listeners
+            .map(|v| v.as_slice())
+            .unwrap_or(&[])
+            .iter()
+            .chain(staked_validator_alpenglow_sockets.iter());
+        let buf = match serialize(bls_message) {
+            Ok(buf) => buf,
+            Err(err) => {
+                error!("Failed to serialize alpenglow message: {:?}", err);
+                return;
+            }
+        };
+
+        // We use send_message in a loop right now because we worry that sending packets too fast
+        // will cause a packet spike and overwhelm the network. If we later find out that this is
+        // not an issue, we can optimize this by using multi_target_send or similar methods.
+        for alpenglow_socket in sockets {
+            if let Err(e) = send_message(buf.clone(), alpenglow_socket, &connection_cache) {
+                warn!(
+                    "Failed to send alpenglow message to {}: {:?}",
+                    alpenglow_socket, e
+                );
+            }
+        }
+    }
+
+    pub fn handle_bls_vote(
+        cluster_info: &ClusterInfo,
+        vote_history_storage: &dyn VoteHistoryStorage,
+        bls_op: BLSOp,
+        connection_cache: Arc<ConnectionCache>,
+        additional_listeners: Option<&Vec<SocketAddr>>,
+        staked_validators_cache: &mut StakedValidatorsCache,
+    ) {
+        match bls_op {
+            BLSOp::PushVote {
+                bls_message,
+                slot,
+                saved_vote_history,
+            } => {
+                let mut measure = Measure::start("alpenglow vote history save");
+                if let Err(err) = vote_history_storage.store(&saved_vote_history) {
+                    error!("Unable to save vote history to storage: {:?}", err);
+                    std::process::exit(1);
+                }
+                measure.stop();
+                trace!("{measure}");
+
+                Self::broadcast_alpenglow_message(
+                    slot,
+                    cluster_info,
+                    &bls_message,
+                    connection_cache,
+                    additional_listeners,
+                    staked_validators_cache,
+                );
+            }
+            BLSOp::PushCertificate { certificate } => {
+                let vote_slot = certificate.certificate.slot();
+                let bls_message = BLSMessage::Certificate((*certificate).clone());
+                Self::broadcast_alpenglow_message(
+                    vote_slot,
+                    cluster_info,
+                    &bls_message,
+                    connection_cache,
+                    additional_listeners,
+                    staked_validators_cache,
+                );
+            }
+        }
+    }
+
+    pub fn handle_vote(
+        cluster_info: &ClusterInfo,
+        poh_recorder: &RwLock<PohRecorder>,
+        tower_storage: &dyn TowerStorage,
+        vote_op: VoteOp,
+        connection_cache: Arc<ConnectionCache>,
+    ) {
         match vote_op {
             VoteOp::PushVote {
-                tx, tower_slots, ..
+                tx,
+                tower_slots,
+                saved_tower,
             } => {
+                let mut measure = Measure::start("tower storage save");
+                if let Err(err) = tower_storage.store(&saved_tower) {
+                    error!("Unable to save tower to storage: {:?}", err);
+                    std::process::exit(1);
+                }
+                measure.stop();
+                trace!("{measure}");
+
+                Self::broadcast_tower_vote(cluster_info, poh_recorder, &tx, &connection_cache);
+
                 cluster_info.push_vote(&tower_slots, tx);
             }
             VoteOp::RefreshVote {
@@ -167,3 +383,159 @@ impl VotingService {
         self.thread_hdl.join()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::consensus::tower_storage::NullTowerStorage,
+        bitvec::prelude::*,
+        solana_bls_signatures::Signature as BLSSignature,
+        solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
+        solana_keypair::Keypair,
+        solana_ledger::{
+            blockstore::Blockstore, get_tmp_ledger_path_auto_delete,
+            leader_schedule_cache::LeaderScheduleCache,
+        },
+        solana_poh_config::PohConfig,
+        solana_runtime::{
+            bank::Bank,
+            bank_forks::BankForks,
+            genesis_utils::{
+                create_genesis_config_with_alpenglow_vote_accounts_no_program,
+                ValidatorVoteKeypairs,
+            },
+        },
+        solana_signer::Signer,
+        solana_streamer::{packet::Packet, recvmmsg::recv_mmsg, socket::SocketAddrSpace},
+        solana_votor::vote_history_storage::{
+            NullVoteHistoryStorage, SavedVoteHistory, SavedVoteHistoryVersions,
+        },
+        solana_votor_messages::{
+            bls_message::{
+                BLSMessage, Certificate, CertificateMessage, CertificateType, VoteMessage,
+            },
+            vote::Vote,
+        },
+        std::{
+            net::SocketAddr,
+            sync::{atomic::AtomicBool, Arc, RwLock},
+        },
+        test_case::test_case,
+    };
+
+    fn create_voting_service(
+        vote_receiver: Receiver<VoteOp>,
+        bls_receiver: Receiver<BLSOp>,
+        listener: SocketAddr,
+    ) -> VotingService {
+        // Create a vec of ValidatorVoteKeypairs for 10 nodes
+        let validator_keypairs = (0..10)
+            .map(|_| ValidatorVoteKeypairs::new_rand())
+            .collect::<Vec<_>>();
+        let genesis = create_genesis_config_with_alpenglow_vote_accounts_no_program(
+            1_000_000_000,
+            &validator_keypairs,
+            vec![100; validator_keypairs.len()],
+        );
+        let bank0 = Bank::new_for_tests(&genesis.genesis_config);
+        let bank_forks = BankForks::new_rw_arc(bank0);
+        let keypair = Keypair::new();
+        let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
+        let cluster_info = ClusterInfo::new(
+            contact_info,
+            Arc::new(keypair),
+            SocketAddrSpace::Unspecified,
+        );
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path())
+            .expect("Expected to be able to open database ledger");
+        let working_bank = bank_forks.read().unwrap().working_bank();
+        let poh_recorder = PohRecorder::new(
+            working_bank.tick_height(),
+            working_bank.last_blockhash(),
+            working_bank.clone(),
+            None,
+            working_bank.ticks_per_slot(),
+            Arc::new(blockstore),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&working_bank)),
+            &PohConfig::default(),
+            Arc::new(AtomicBool::new(false)),
+        )
+        .0;
+
+        VotingService::new(
+            vote_receiver,
+            bls_receiver,
+            Arc::new(cluster_info),
+            Arc::new(RwLock::new(poh_recorder)),
+            Arc::new(NullTowerStorage::default()),
+            Arc::new(NullVoteHistoryStorage::default()),
+            Arc::new(ConnectionCache::with_udp("TestConnectionCache", 10)),
+            bank_forks,
+            Some(VotingServiceOverride {
+                additional_listeners: vec![listener],
+                alpenglow_port_override: AlpenglowPortOverride::default(),
+            }),
+        )
+    }
+
+    #[test_case(BLSOp::PushVote {
+        bls_message: Arc::new(BLSMessage::Vote(VoteMessage {
+            vote: Vote::new_skip_vote(5),
+            signature: BLSSignature::default(),
+            rank: 1,
+        })),
+        slot: 5,
+        saved_vote_history: SavedVoteHistoryVersions::Current(SavedVoteHistory::default()),
+    }, BLSMessage::Vote(VoteMessage {
+        vote: Vote::new_skip_vote(5),
+        signature: BLSSignature::default(),
+        rank: 1,
+    }))]
+    #[test_case(BLSOp::PushCertificate {
+        certificate: Arc::new(CertificateMessage {
+            certificate: Certificate::new(CertificateType::Skip, 5, None),
+            signature: BLSSignature::default(),
+            bitmap: BitVec::new(),
+        }),
+    }, BLSMessage::Certificate(CertificateMessage {
+        certificate: Certificate::new(CertificateType::Skip, 5, None),
+        signature: BLSSignature::default(),
+        bitmap: BitVec::new(),
+    }))]
+    fn test_send_bls_message(bls_op: BLSOp, expected_bls_message: BLSMessage) {
+        solana_logger::setup();
+        let (_vote_sender, vote_receiver) = crossbeam_channel::unbounded();
+        let (bls_sender, bls_receiver) = crossbeam_channel::unbounded();
+        // Allocate a local UDP socket to act as the listener and pass its SocketAddr to the VotingService
+
+        // Bind to a random UDP port
+        let socket = solana_net_utils::bind_to_localhost().unwrap();
+        let listener_addr = socket.local_addr().unwrap();
+
+        // Create VotingService with the listener address
+        let _ = create_voting_service(vote_receiver, bls_receiver, listener_addr);
+
+        // Send a BLS message via the VotingService
+        assert!(bls_sender.send(bls_op).is_ok());
+
+        // Wait for the listener to receive the message
+        let mut packets = vec![Packet::default(); 1];
+        socket
+            .set_read_timeout(Some(Duration::from_secs(2)))
+            .unwrap();
+        assert!(recv_mmsg(&socket, &mut packets[..]).is_ok());
+        let packet = packets.first().expect("No packets received");
+        let received_bls_message = packet
+            .deserialize_slice::<BLSMessage, _>(..)
+            .unwrap_or_else(|err| {
+                panic!(
+                    "Failed to deserialize BLSMessage: {:?} {:?}",
+                    size_of::<BLSMessage>(),
+                    err
+                )
+            });
+        assert_eq!(received_bls_message, expected_bls_message);
+    }
+}

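For reference, here is how the test-only override types added above compose; a minimal sketch meant to run inside this module's tests (the pubkey and address values are illustrative):

```rust
use {
    solana_pubkey::Pubkey,
    std::{collections::HashMap, net::SocketAddr},
};

// Redirect traffic destined for validator B's Alpenglow port to a test
// listener address.
let alpenglow_port_override = AlpenglowPortOverride::default();
let validator_b = Pubkey::new_unique();
let listener_addr: SocketAddr = "127.0.0.1:9001".parse().unwrap();

let stamp = alpenglow_port_override.last_modified();
alpenglow_port_override.update_override(HashMap::from([(validator_b, listener_addr)]));

// StakedValidatorsCache compares timestamps to decide when to rebuild.
assert!(alpenglow_port_override.has_new_override(stamp));
assert_eq!(alpenglow_port_override.get_override_map().len(), 1);

// Bundle it up for VotingService::new, as create_voting_service does above.
let test_override = VotingServiceOverride {
    additional_listeners: vec![listener_addr],
    alpenglow_port_override,
};
```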
+ 20 - 3
core/src/window_service.rs

@@ -5,8 +5,11 @@
 use {
     crate::{
         completed_data_sets_service::CompletedDataSetsSender,
-        repair::repair_service::{
-            OutstandingShredRepairs, RepairInfo, RepairService, RepairServiceChannels,
+        repair::{
+            certificate_service::{CertificateReceiver, CertificateService},
+            repair_service::{
+                OutstandingShredRepairs, RepairInfo, RepairService, RepairServiceChannels,
+            },
         },
         result::{Error, Result},
     },
@@ -165,6 +168,13 @@ fn run_check_duplicate(
             }
         };
 
+        if root_bank
+            .feature_set
+            .is_active(&agave_feature_set::alpenglow::id())
+        {
+            return Ok(());
+        }
+
         // Propagate duplicate proof through gossip
         cluster_info.push_duplicate_shred(&shred1, &shred2)?;
         // Notify duplicate consensus state machine
@@ -272,6 +282,7 @@ pub(crate) struct WindowService {
     t_insert: JoinHandle<()>,
     t_check_duplicate: JoinHandle<()>,
     repair_service: RepairService,
+    certificate_service: CertificateService,
 }
 
 impl WindowService {
@@ -284,6 +295,7 @@ impl WindowService {
         window_service_channels: WindowServiceChannels,
         leader_schedule_cache: Arc<LeaderScheduleCache>,
         outstanding_repair_requests: Arc<RwLock<OutstandingShredRepairs>>,
+        certificate_receiver: CertificateReceiver,
     ) -> WindowService {
         let cluster_info = repair_info.cluster_info.clone();
         let bank_forks = repair_info.bank_forks.clone();
@@ -310,6 +322,9 @@ impl WindowService {
             repair_service_channels,
         );
 
+        let certificate_service =
+            CertificateService::new(exit.clone(), blockstore.clone(), certificate_receiver);
+
         let (duplicate_sender, duplicate_receiver) = unbounded();
 
         let t_check_duplicate = Self::start_check_duplicate_thread(
@@ -336,6 +351,7 @@ impl WindowService {
             t_insert,
             t_check_duplicate,
             repair_service,
+            certificate_service,
         }
     }
 
@@ -453,7 +469,8 @@ impl WindowService {
     pub(crate) fn join(self) -> thread::Result<()> {
         self.t_insert.join()?;
         self.t_check_duplicate.join()?;
-        self.repair_service.join()
+        self.repair_service.join()?;
+        self.certificate_service.join()
     }
 }
 

+ 19 - 22
core/tests/unified_scheduler.rs

@@ -12,10 +12,7 @@ use {
             progress_map::{ForkProgress, ProgressMap},
         },
         drop_bank_service::DropBankService,
-        repair::cluster_slot_state_verifier::{
-            DuplicateConfirmedSlots, DuplicateSlotsTracker, EpochSlotsFrozenSlots,
-        },
-        replay_stage::ReplayStage,
+        replay_stage::{ReplayStage, TowerBFTStructures},
         unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
     },
     solana_entry::entry::Entry,
@@ -135,44 +132,44 @@ fn test_scheduler_waited_by_drop_bank_service() {
     info!("calling handle_new_root()...");
     // Mostly copied from: test_handle_new_root()
     {
-        let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash));
+        let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash));
 
         let mut progress = ProgressMap::default();
         for i in genesis..=root {
             progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0));
         }
 
-        let mut duplicate_slots_tracker: DuplicateSlotsTracker =
-            vec![root - 1, root, root + 1].into_iter().collect();
-        let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1]
+        let duplicate_slots_tracker = vec![root - 1, root, root + 1].into_iter().collect();
+        let duplicate_confirmed_slots = vec![root - 1, root, root + 1]
             .into_iter()
             .map(|s| (s, Hash::default()))
             .collect();
-        let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes =
-            UnfrozenGossipVerifiedVoteHashes {
-                votes_per_slot: vec![root - 1, root, root + 1]
-                    .into_iter()
-                    .map(|s| (s, HashMap::new()))
-                    .collect(),
-            };
-        let mut epoch_slots_frozen_slots: EpochSlotsFrozenSlots = vec![root - 1, root, root + 1]
+        let unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes {
+            votes_per_slot: vec![root - 1, root, root + 1]
+                .into_iter()
+                .map(|s| (s, HashMap::new()))
+                .collect(),
+        };
+        let epoch_slots_frozen_slots = vec![root - 1, root, root + 1]
             .into_iter()
             .map(|slot| (slot, Hash::default()))
             .collect();
+        let mut tbft_structs = TowerBFTStructures {
+            heaviest_subtree_fork_choice,
+            duplicate_slots_tracker,
+            duplicate_confirmed_slots,
+            unfrozen_gossip_verified_vote_hashes,
+            epoch_slots_frozen_slots,
+        };
         ReplayStage::handle_new_root(
             root,
             &bank_forks,
             &mut progress,
             None, // snapshot_controller
             None,
-            &mut heaviest_subtree_fork_choice,
-            &mut duplicate_slots_tracker,
-            &mut duplicate_confirmed_slots,
-            &mut unfrozen_gossip_verified_vote_hashes,
             &mut true,
-            &mut Vec::new(),
-            &mut epoch_slots_frozen_slots,
             &drop_bank_sender1,
+            &mut tbft_structs,
         )
         .unwrap();
     }

+ 25 - 0
curves/bls12-381/Cargo.toml

@@ -0,0 +1,25 @@
+[package]
+name = "solana-bls12-381"
+description = "Solana BLS12-381"
+documentation = "https://docs.rs/solana-bls12-381"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+bytemuck = { workspace = true }
+bytemuck_derive = { workspace = true }
+solana-curve-traits = { workspace = true }
+thiserror = { workspace = true }
+
+[target.'cfg(not(target_os = "solana"))'.dependencies]
+blst = { workspace = true }
+
+[target.'cfg(target_os = "solana")'.dependencies]
+solana-define-syscall = { workspace = true }
+
+[lints]
+workspace = true

+ 13 - 0
curves/bls12-381/src/errors.rs

@@ -0,0 +1,13 @@
+use thiserror::Error;
+
+#[derive(Error, Clone, Debug, Eq, PartialEq)]
+pub enum BlsError {
+    #[error("encoding failed")]
+    BadEncoding,
+    #[error("point is not on curve")]
+    PointNotOnCurve,
+    #[error("point is not in group")]
+    PointNotInGroup,
+    #[error("scalar failed")]
+    BadScalar,
+}

+ 370 - 0
curves/bls12-381/src/g1.rs

@@ -0,0 +1,370 @@
+pub use target_arch::*;
+use {
+    crate::scalar::PodScalar,
+    bytemuck_derive::{Pod, Zeroable},
+};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Pod, Zeroable)]
+#[repr(transparent)]
+pub struct PodG1Compressed(pub [u8; 48]);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Pod, Zeroable)]
+#[repr(transparent)]
+pub struct PodG1Affine(pub [u8; 96]);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct PodG1Projective(pub [u8; 144]);
+
+unsafe impl bytemuck::Zeroable for PodG1Projective {}
+unsafe impl bytemuck::Pod for PodG1Projective {}
+
+#[cfg(not(target_os = "solana"))]
+mod target_arch {
+    use {
+        super::*,
+        blst::{
+            blst_fp, blst_fp_from_lendian, blst_lendian_from_fp, blst_p1, blst_p1_add,
+            blst_p1_cneg, blst_p1_mult,
+        },
+        solana_curve_traits::GroupOperations,
+    };
+
+    pub fn add(
+        left_point: &PodG1Projective,
+        right_point: &PodG1Projective,
+    ) -> Option<PodG1Projective> {
+        PodG1Projective::add(left_point, right_point)
+    }
+
+    pub fn subtract(
+        left_point: &PodG1Projective,
+        right_point: &PodG1Projective,
+    ) -> Option<PodG1Projective> {
+        PodG1Projective::subtract(left_point, right_point)
+    }
+
+    pub fn multiply(scalar: &PodScalar, point: &PodG1Projective) -> Option<PodG1Projective> {
+        PodG1Projective::multiply(scalar, point)
+    }
+
+    impl GroupOperations for PodG1Projective {
+        type Scalar = PodScalar;
+        type Point = Self;
+
+        fn add(left_point: &Self, right_point: &Self) -> Option<Self> {
+            let mut result = blst_p1::default();
+            // TODO: this conversion makes a copy of bytes
+            //   see if it is possible to make zero-copy conversion
+            let left_point: blst_p1 = left_point.into();
+            let right_point: blst_p1 = right_point.into();
+
+            unsafe {
+                blst_p1_add(
+                    &mut result as *mut blst_p1,
+                    &left_point as *const blst_p1,
+                    &right_point as *const blst_p1,
+                );
+            }
+            Some(result.into())
+        }
+
+        fn subtract(left_point: &Self, right_point: &Self) -> Option<Self> {
+            let mut result = blst_p1::default();
+            let left_point: blst_p1 = left_point.into();
+            let right_point: blst_p1 = right_point.into();
+            unsafe {
+                let mut right_point_negated = right_point;
+                blst_p1_cneg(&mut right_point_negated as *mut blst_p1, true);
+                blst_p1_add(
+                    &mut result as *mut blst_p1,
+                    &left_point as *const blst_p1,
+                    &right_point_negated as *const blst_p1,
+                );
+            }
+            Some(result.into())
+        }
+
+        fn multiply(scalar: &PodScalar, point: &Self) -> Option<Self> {
+            let mut result = blst_p1::default();
+            let point: blst_p1 = point.into();
+            unsafe {
+                blst_p1_mult(
+                    &mut result as *mut blst_p1,
+                    &point as *const blst_p1,
+                    scalar.0.as_ptr(),
+                    256,
+                );
+            }
+            Some(result.into())
+        }
+    }
+
+    impl From<blst_p1> for PodG1Projective {
+        fn from(point: blst_p1) -> Self {
+            let mut bytes = [0u8; 144];
+            // TODO: this is unchecked; check that the point is on the curve and in the correct subgroup
+            unsafe {
+                blst_lendian_from_fp(bytes[0..48].as_mut_ptr(), &point.x as *const blst_fp);
+                blst_lendian_from_fp(bytes[48..96].as_mut_ptr(), &point.y as *const blst_fp);
+                blst_lendian_from_fp(bytes[96..144].as_mut_ptr(), &point.z as *const blst_fp);
+            }
+            Self(bytes)
+        }
+    }
+
+    impl From<PodG1Projective> for blst_p1 {
+        fn from(point: PodG1Projective) -> Self {
+            let mut x = blst_fp::default();
+            let mut y = blst_fp::default();
+            let mut z = blst_fp::default();
+            unsafe {
+                blst_fp_from_lendian(&mut x as *mut blst_fp, point.0[0..48].as_ptr());
+                blst_fp_from_lendian(&mut y as *mut blst_fp, point.0[48..96].as_ptr());
+                blst_fp_from_lendian(&mut z as *mut blst_fp, point.0[96..144].as_ptr());
+            }
+            blst_p1 { x, y, z }
+        }
+    }
+
+    impl From<&PodG1Projective> for blst_p1 {
+        fn from(point: &PodG1Projective) -> Self {
+            let mut x = blst_fp::default();
+            let mut y = blst_fp::default();
+            let mut z = blst_fp::default();
+            unsafe {
+                blst_fp_from_lendian(&mut x as *mut blst_fp, point.0[0..48].as_ptr());
+                blst_fp_from_lendian(&mut y as *mut blst_fp, point.0[48..96].as_ptr());
+                blst_fp_from_lendian(&mut z as *mut blst_fp, point.0[96..144].as_ptr());
+            }
+            blst_p1 { x, y, z }
+        }
+    }
+}
+
+#[cfg(target_os = "solana")]
+mod target_arch {
+    use {
+        super::*,
+        bytemuck::Zeroable,
+        solana_curve_traits::{ADD, BLS12_381_G1_PROJECTIVE, MUL, SUB},
+    };
+
+    pub fn add(
+        left_point: &PodG1Projective,
+        right_point: &PodG1Projective,
+    ) -> Option<PodG1Projective> {
+        let mut result_point = PodG1Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G1_PROJECTIVE,
+                ADD,
+                &left_point.0 as *const u8,
+                &right_point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+
+    pub fn subtract(
+        left_point: &PodG1Projective,
+        right_point: &PodG1Projective,
+    ) -> Option<PodG1Projective> {
+        let mut result_point = PodG1Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G1_PROJECTIVE,
+                SUB,
+                &left_point.0 as *const u8,
+                &right_point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+
+    pub fn multiply(scalar: &PodScalar, point: &PodG1Projective) -> Option<PodG1Projective> {
+        let mut result_point = PodG1Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G1_PROJECTIVE,
+                MUL,
+                &scalar.0 as *const u8,
+                &point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::scalar::PodScalar,
+        blst::{blst_p1, blst_p1_affine},
+        solana_curve_traits::GroupOperations,
+    };
+
+    unsafe fn decompress(compressed: &PodG1Compressed) -> PodG1Projective {
+        let point_ptr = &compressed.0 as *const u8;
+
+        let mut point_affine = blst_p1_affine::default();
+        let point_affine_ptr = &mut point_affine as *mut blst_p1_affine;
+        blst::blst_p1_uncompress(point_affine_ptr, point_ptr);
+
+        let mut point_full = blst_p1::default();
+        let point_full_ptr = &mut point_full as *mut blst_p1;
+        blst::blst_p1_from_affine(point_full_ptr, point_affine_ptr);
+
+        point_full.into()
+    }
+
+    unsafe fn compress(projective: &PodG1Projective) -> PodG1Compressed {
+        let mut compressed = [0u8; 48];
+        let point: blst_p1 = projective.into();
+        blst::blst_p1_compress(compressed.as_mut_ptr(), &point as *const blst_p1);
+        PodG1Compressed(compressed)
+    }
+
+    #[test]
+    fn test_add_subtract_bls_12_381() {
+        let identity: PodG1Projective = blst_p1::default().into();
+
+        let point_a_compressed = PodG1Compressed([
+            140, 112, 74, 2, 254, 123, 212, 72, 73, 122, 106, 93, 64, 7, 172, 236, 36, 227, 96,
+            130, 121, 240, 41, 205, 62, 7, 207, 15, 94, 159, 7, 91, 99, 57, 241, 162, 136, 81, 90,
+            5, 179, 98, 6, 98, 41, 146, 195, 14,
+        ]);
+
+        let point_b_compressed = PodG1Compressed([
+            149, 247, 195, 10, 243, 121, 148, 92, 212, 118, 110, 34, 133, 35, 193, 161, 225, 85,
+            122, 150, 192, 175, 136, 69, 63, 0, 146, 159, 103, 117, 89, 145, 171, 184, 105, 135,
+            75, 231, 97, 247, 162, 101, 208, 175, 198, 222, 35, 102,
+        ]);
+
+        let point_c_compressed = PodG1Compressed([
+            137, 46, 171, 236, 48, 64, 85, 76, 96, 91, 201, 87, 53, 133, 184, 211, 4, 113, 227,
+            145, 17, 134, 71, 182, 72, 39, 55, 230, 145, 29, 216, 20, 52, 247, 57, 191, 255, 53,
+            57, 150, 221, 59, 52, 78, 171, 240, 129, 39,
+        ]);
+
+        let point_a = unsafe { decompress(&point_a_compressed) };
+        let point_b = unsafe { decompress(&point_b_compressed) };
+        let point_c = unsafe { decompress(&point_c_compressed) };
+
+        // identity
+        assert_eq!(PodG1Projective::add(&point_a, &identity).unwrap(), point_a);
+
+        // associativity
+        unsafe {
+            assert_eq!(
+                compress(
+                    &PodG1Projective::add(
+                        &PodG1Projective::add(&point_a, &point_b).unwrap(),
+                        &point_c
+                    )
+                    .unwrap()
+                ),
+                compress(
+                    &PodG1Projective::add(
+                        &point_a,
+                        &PodG1Projective::add(&point_b, &point_c).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+
+        unsafe {
+            assert_eq!(
+                compress(
+                    &PodG1Projective::subtract(
+                        &PodG1Projective::subtract(&point_a, &point_b).unwrap(),
+                        &point_c
+                    )
+                    .unwrap()
+                ),
+                compress(
+                    &PodG1Projective::subtract(
+                        &point_a,
+                        &PodG1Projective::add(&point_b, &point_c).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+
+        // commutativity
+        unsafe {
+            assert_eq!(
+                compress(&PodG1Projective::add(&point_a, &point_b).unwrap()),
+                compress(&PodG1Projective::add(&point_b, &point_a).unwrap())
+            )
+        };
+
+        // subtraction
+        unsafe {
+            assert_eq!(
+                compress(&PodG1Projective::subtract(&point_a, &point_a).unwrap()),
+                compress(&identity)
+            )
+        };
+    }
+
+    #[test]
+    fn test_multiply_bls12_381() {
+        let scalar = PodScalar([
+            107, 15, 13, 77, 216, 207, 117, 144, 252, 166, 162, 81, 107, 12, 249, 164, 242, 212,
+            76, 68, 144, 198, 72, 233, 76, 116, 60, 179, 0, 32, 86, 93,
+        ]);
+
+        let point_a_compressed = PodG1Compressed([
+            140, 112, 74, 2, 254, 123, 212, 72, 73, 122, 106, 93, 64, 7, 172, 236, 36, 227, 96,
+            130, 121, 240, 41, 205, 62, 7, 207, 15, 94, 159, 7, 91, 99, 57, 241, 162, 136, 81, 90,
+            5, 179, 98, 6, 98, 41, 146, 195, 14,
+        ]);
+
+        let point_b_compressed = PodG1Compressed([
+            149, 247, 195, 10, 243, 121, 148, 92, 212, 118, 110, 34, 133, 35, 193, 161, 225, 85,
+            122, 150, 192, 175, 136, 69, 63, 0, 146, 159, 103, 117, 89, 145, 171, 184, 105, 135,
+            75, 231, 97, 247, 162, 101, 208, 175, 198, 222, 35, 102,
+        ]);
+
+        let point_a = unsafe { decompress(&point_a_compressed) };
+        let point_b = unsafe { decompress(&point_b_compressed) };
+
+        let ax = PodG1Projective::multiply(&scalar, &point_a).unwrap();
+        let bx = PodG1Projective::multiply(&scalar, &point_b).unwrap();
+
+        unsafe {
+            assert_eq!(
+                compress(&PodG1Projective::add(&ax, &bx).unwrap()),
+                compress(
+                    &PodG1Projective::multiply(
+                        &scalar,
+                        &PodG1Projective::add(&point_a, &point_b).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+    }
+}

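On non-solana targets the free functions in g1.rs delegate to blst via the `GroupOperations` impl. A small sanity sketch, assuming the crate is imported as `solana_bls12_381` (per the package name above) and that, as in the tests, the all-zero `blst_p1` encodes the identity:

```rust
use {
    blst::blst_p1,
    solana_bls12_381::{
        g1::{add, multiply, subtract, PodG1Projective},
        scalar::PodScalar,
    },
};

fn main() {
    // The default (all-zero) blst point is the identity (point at infinity).
    let identity: PodG1Projective = blst_p1::default().into();

    // identity + identity == identity, and x - x == identity.
    assert_eq!(add(&identity, &identity).unwrap(), identity);
    assert_eq!(subtract(&identity, &identity).unwrap(), identity);

    // Scalar multiplication of the identity also succeeds (and stays at
    // the identity), returning Some on the happy path.
    let scalar = PodScalar([7u8; 32]);
    assert!(multiply(&scalar, &identity).is_some());
}
```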
+ 406 - 0
curves/bls12-381/src/g2.rs

@@ -0,0 +1,406 @@
+pub use target_arch::*;
+use {
+    crate::scalar::PodScalar,
+    bytemuck_derive::{Pod, Zeroable},
+};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Pod, Zeroable)]
+#[repr(transparent)]
+pub struct PodG2Compressed(pub [u8; 96]);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct PodG2Affine(pub [u8; 192]);
+
+unsafe impl bytemuck::Zeroable for PodG2Affine {}
+unsafe impl bytemuck::Pod for PodG2Affine {}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct PodG2Projective(pub [u8; 288]);
+
+unsafe impl bytemuck::Zeroable for PodG2Projective {}
+unsafe impl bytemuck::Pod for PodG2Projective {}
+
+#[cfg(not(target_os = "solana"))]
+mod target_arch {
+    use {
+        super::*,
+        blst::{
+            blst_fp, blst_fp2, blst_fp_from_lendian, blst_lendian_from_fp, blst_p2, blst_p2_add,
+            blst_p2_cneg, blst_p2_mult,
+        },
+        solana_curve_traits::GroupOperations,
+    };
+
+    pub fn add(
+        left_point: &PodG2Projective,
+        right_point: &PodG2Projective,
+    ) -> Option<PodG2Projective> {
+        PodG2Projective::add(left_point, right_point)
+    }
+
+    pub fn subtract(
+        left_point: &PodG2Projective,
+        right_point: &PodG2Projective,
+    ) -> Option<PodG2Projective> {
+        PodG2Projective::subtract(left_point, right_point)
+    }
+
+    pub fn multiply(scalar: &PodScalar, point: &PodG2Projective) -> Option<PodG2Projective> {
+        PodG2Projective::multiply(scalar, point)
+    }
+
+    impl GroupOperations for PodG2Projective {
+        type Scalar = PodScalar;
+        type Point = Self;
+
+        fn add(left_point: &Self, right_point: &Self) -> Option<Self> {
+            let mut result = blst_p2::default();
+            // TODO: this conversion makes a copy of bytes
+            //   see if it is possible to make zero-copy conversion
+            let left_point: blst_p2 = left_point.into();
+            let right_point: blst_p2 = right_point.into();
+
+            unsafe {
+                blst_p2_add(
+                    &mut result as *mut blst_p2,
+                    &left_point as *const blst_p2,
+                    &right_point as *const blst_p2,
+                );
+            }
+            Some(result.into())
+        }
+
+        fn subtract(left_point: &Self, right_point: &Self) -> Option<Self> {
+            let mut result = blst_p2::default();
+            let left_point: blst_p2 = left_point.into();
+            let right_point: blst_p2 = right_point.into();
+            unsafe {
+                let mut right_point_negated = right_point;
+                blst_p2_cneg(&mut right_point_negated as *mut blst_p2, true);
+                blst_p2_add(
+                    &mut result as *mut blst_p2,
+                    &left_point as *const blst_p2,
+                    &right_point_negated as *const blst_p2,
+                );
+            }
+            Some(result.into())
+        }
+
+        fn multiply(scalar: &PodScalar, point: &Self) -> Option<Self> {
+            let mut result = blst_p2::default();
+            let point: blst_p2 = point.into();
+            unsafe {
+                blst_p2_mult(
+                    &mut result as *mut blst_p2,
+                    &point as *const blst_p2,
+                    scalar.0.as_ptr(),
+                    256,
+                );
+            }
+            Some(result.into())
+        }
+    }
+
+    impl From<blst_p2> for PodG2Projective {
+        fn from(point: blst_p2) -> Self {
+            let mut bytes = [0u8; 288];
+            unsafe {
+                blst_lendian_from_fp(bytes[0..48].as_mut_ptr(), &point.x.fp[0] as *const blst_fp);
+                blst_lendian_from_fp(bytes[48..96].as_mut_ptr(), &point.x.fp[1] as *const blst_fp);
+                blst_lendian_from_fp(
+                    bytes[96..144].as_mut_ptr(),
+                    &point.y.fp[0] as *const blst_fp,
+                );
+                blst_lendian_from_fp(
+                    bytes[144..192].as_mut_ptr(),
+                    &point.y.fp[1] as *const blst_fp,
+                );
+                blst_lendian_from_fp(
+                    bytes[192..240].as_mut_ptr(),
+                    &point.z.fp[0] as *const blst_fp,
+                );
+                blst_lendian_from_fp(
+                    bytes[240..288].as_mut_ptr(),
+                    &point.z.fp[1] as *const blst_fp,
+                );
+            }
+            Self(bytes)
+        }
+    }
+
+    impl From<PodG2Projective> for blst_p2 {
+        fn from(point: PodG2Projective) -> Self {
+            let mut x = blst_fp2::default();
+            let mut y = blst_fp2::default();
+            let mut z = blst_fp2::default();
+            unsafe {
+                blst_fp_from_lendian(&mut x.fp[0] as *mut blst_fp, point.0[0..48].as_ptr());
+                blst_fp_from_lendian(&mut x.fp[1] as *mut blst_fp, point.0[48..96].as_ptr());
+                blst_fp_from_lendian(&mut y.fp[0] as *mut blst_fp, point.0[96..144].as_ptr());
+                blst_fp_from_lendian(&mut y.fp[1] as *mut blst_fp, point.0[144..192].as_ptr());
+                blst_fp_from_lendian(&mut z.fp[0] as *mut blst_fp, point.0[192..240].as_ptr());
+                blst_fp_from_lendian(&mut z.fp[1] as *mut blst_fp, point.0[240..288].as_ptr());
+            }
+            blst_p2 { x, y, z }
+        }
+    }
+
+    impl From<&PodG2Projective> for blst_p2 {
+        fn from(point: &PodG2Projective) -> Self {
+            let mut x = blst_fp2::default();
+            let mut y = blst_fp2::default();
+            let mut z = blst_fp2::default();
+            unsafe {
+                blst_fp_from_lendian(&mut x.fp[0] as *mut blst_fp, point.0[0..48].as_ptr());
+                blst_fp_from_lendian(&mut x.fp[1] as *mut blst_fp, point.0[48..96].as_ptr());
+                blst_fp_from_lendian(&mut y.fp[0] as *mut blst_fp, point.0[96..144].as_ptr());
+                blst_fp_from_lendian(&mut y.fp[1] as *mut blst_fp, point.0[144..192].as_ptr());
+                blst_fp_from_lendian(&mut z.fp[0] as *mut blst_fp, point.0[192..240].as_ptr());
+                blst_fp_from_lendian(&mut z.fp[1] as *mut blst_fp, point.0[240..288].as_ptr());
+            }
+            blst_p2 { x, y, z }
+        }
+    }
+}
+
+#[cfg(target_os = "solana")]
+mod target_arch {
+    use {
+        super::*,
+        bytemuck::Zeroable,
+        solana_curve_traits::{ADD, BLS12_381_G2_PROJECTIVE, MUL, SUB},
+    };
+
+    pub fn add(
+        left_point: &PodG2Projective,
+        right_point: &PodG2Projective,
+    ) -> Option<PodG2Projective> {
+        let mut result_point = PodG2Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G2_PROJECTIVE,
+                ADD,
+                &left_point.0 as *const u8,
+                &right_point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+
+    pub fn subtract(
+        left_point: &PodG2Projective,
+        right_point: &PodG2Projective,
+    ) -> Option<PodG2Projective> {
+        let mut result_point = PodG2Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G2_PROJECTIVE,
+                SUB,
+                &left_point.0 as *const u8,
+                &right_point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+
+    pub fn multiply(scalar: &PodScalar, point: &PodG2Projective) -> Option<PodG2Projective> {
+        let mut result_point = PodG2Projective::zeroed();
+        let result = unsafe {
+            solana_define_syscall::definitions::sol_curve_group_op(
+                BLS12_381_G2_PROJECTIVE,
+                MUL,
+                &scalar.0 as *const u8,
+                &point.0 as *const u8,
+                &mut result_point.0 as *mut u8,
+            )
+        };
+
+        if result == 0 {
+            Some(result_point)
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::scalar::PodScalar,
+        blst::{blst_p2, blst_p2_affine},
+        solana_curve_traits::GroupOperations,
+    };
+
+    unsafe fn decompress(compressed: &PodG2Compressed) -> PodG2Projective {
+        let point_ptr = &compressed.0 as *const u8;
+
+        let mut point_affine = blst_p2_affine::default();
+        let point_affine_ptr = &mut point_affine as *mut blst_p2_affine;
+        blst::blst_p2_uncompress(point_affine_ptr, point_ptr);
+
+        let mut point_full = blst_p2::default();
+        let point_full_ptr = &mut point_full as *mut blst_p2;
+        blst::blst_p2_from_affine(point_full_ptr, point_affine_ptr);
+
+        point_full.into()
+    }
+
+    unsafe fn compress(projective: &PodG2Projective) -> PodG2Compressed {
+        let mut compressed = [0u8; 96];
+        let point: blst_p2 = projective.into();
+        blst::blst_p2_compress(compressed.as_mut_ptr(), &point as *const blst_p2);
+        PodG2Compressed(compressed)
+    }
+
+    #[test]
+    fn test_add_subtract_bls_12_381() {
+        let identity: PodG2Projective = blst_p2::default().into();
+
+        let point_a_compressed = PodG2Compressed([
+            164, 206, 80, 113, 43, 158, 131, 37, 93, 106, 231, 75, 147, 161, 185, 106, 81, 151, 33,
+            215, 119, 212, 236, 144, 255, 79, 164, 84, 156, 164, 121, 86, 19, 207, 42, 161, 95, 32,
+            22, 141, 21, 250, 100, 154, 134, 50, 186, 209, 12, 208, 242, 49, 189, 146, 166, 202,
+            120, 136, 221, 182, 244, 18, 95, 15, 95, 85, 3, 216, 6, 37, 199, 101, 109, 31, 213, 20,
+            68, 69, 19, 79, 126, 19, 60, 71, 114, 17, 78, 220, 142, 37, 33, 157, 252, 2, 18, 182,
+        ]);
+
+        let point_b_compressed = PodG2Compressed([
+            183, 42, 8, 225, 237, 101, 184, 130, 73, 9, 104, 128, 181, 122, 114, 248, 38, 145, 28,
+            175, 76, 168, 219, 102, 168, 17, 1, 163, 145, 33, 127, 101, 159, 1, 108, 7, 56, 68,
+            142, 7, 151, 2, 220, 149, 227, 134, 194, 231, 9, 6, 86, 227, 163, 72, 228, 151, 235,
+            97, 51, 218, 156, 244, 234, 108, 157, 71, 90, 247, 143, 215, 224, 44, 68, 20, 155, 178,
+            155, 29, 183, 167, 10, 244, 56, 19, 49, 169, 90, 8, 100, 86, 172, 14, 119, 200, 205,
+            193,
+        ]);
+
+        let point_c_compressed = PodG2Compressed([
+            139, 35, 111, 111, 138, 15, 121, 99, 87, 180, 83, 67, 5, 100, 162, 78, 79, 114, 138,
+            150, 244, 249, 138, 213, 44, 122, 179, 155, 36, 156, 121, 98, 76, 57, 109, 116, 219,
+            227, 54, 177, 90, 19, 147, 215, 145, 4, 231, 175, 1, 144, 102, 168, 64, 217, 60, 234,
+            32, 38, 115, 250, 43, 47, 227, 138, 249, 195, 141, 231, 226, 207, 122, 246, 147, 50,
+            72, 230, 22, 215, 146, 161, 209, 111, 221, 185, 53, 103, 4, 224, 151, 54, 60, 94, 65,
+            34, 66, 247,
+        ]);
+
+        let point_a = unsafe { decompress(&point_a_compressed) };
+        let point_b = unsafe { decompress(&point_b_compressed) };
+        let point_c = unsafe { decompress(&point_c_compressed) };
+
+        // identity
+        assert_eq!(PodG2Projective::add(&point_a, &identity).unwrap(), point_a);
+
+        // associativity
+        unsafe {
+            assert_eq!(
+                compress(
+                    &PodG2Projective::add(
+                        &PodG2Projective::add(&point_a, &point_b).unwrap(),
+                        &point_c
+                    )
+                    .unwrap()
+                ),
+                compress(
+                    &PodG2Projective::add(
+                        &point_a,
+                        &PodG2Projective::add(&point_b, &point_c).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+
+        unsafe {
+            assert_eq!(
+                compress(
+                    &PodG2Projective::subtract(
+                        &PodG2Projective::subtract(&point_a, &point_b).unwrap(),
+                        &point_c
+                    )
+                    .unwrap()
+                ),
+                compress(
+                    &PodG2Projective::subtract(
+                        &point_a,
+                        &PodG2Projective::add(&point_b, &point_c).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+
+        // commutativity
+        unsafe {
+            assert_eq!(
+                compress(&PodG2Projective::add(&point_a, &point_b).unwrap()),
+                compress(&PodG2Projective::add(&point_b, &point_a).unwrap())
+            )
+        };
+
+        // subtraction
+        unsafe {
+            assert_eq!(
+                compress(&PodG2Projective::subtract(&point_a, &point_a).unwrap()),
+                compress(&identity)
+            )
+        };
+    }
+
+    #[test]
+    fn test_multiply_bls12_381() {
+        let scalar = PodScalar([
+            107, 15, 13, 77, 216, 207, 117, 144, 252, 166, 162, 81, 107, 12, 249, 164, 242, 212,
+            76, 68, 144, 198, 72, 233, 76, 116, 60, 179, 0, 32, 86, 93,
+        ]);
+
+        let point_a_compressed = PodG2Compressed([
+            164, 206, 80, 113, 43, 158, 131, 37, 93, 106, 231, 75, 147, 161, 185, 106, 81, 151, 33,
+            215, 119, 212, 236, 144, 255, 79, 164, 84, 156, 164, 121, 86, 19, 207, 42, 161, 95, 32,
+            22, 141, 21, 250, 100, 154, 134, 50, 186, 209, 12, 208, 242, 49, 189, 146, 166, 202,
+            120, 136, 221, 182, 244, 18, 95, 15, 95, 85, 3, 216, 6, 37, 199, 101, 109, 31, 213, 20,
+            68, 69, 19, 79, 126, 19, 60, 71, 114, 17, 78, 220, 142, 37, 33, 157, 252, 2, 18, 182,
+        ]);
+
+        let point_b_compressed = PodG2Compressed([
+            183, 42, 8, 225, 237, 101, 184, 130, 73, 9, 104, 128, 181, 122, 114, 248, 38, 145, 28,
+            175, 76, 168, 219, 102, 168, 17, 1, 163, 145, 33, 127, 101, 159, 1, 108, 7, 56, 68,
+            142, 7, 151, 2, 220, 149, 227, 134, 194, 231, 9, 6, 86, 227, 163, 72, 228, 151, 235,
+            97, 51, 218, 156, 244, 234, 108, 157, 71, 90, 247, 143, 215, 224, 44, 68, 20, 155, 178,
+            155, 29, 183, 167, 10, 244, 56, 19, 49, 169, 90, 8, 100, 86, 172, 14, 119, 200, 205,
+            193,
+        ]);
+
+        let point_a = unsafe { decompress(&point_a_compressed) };
+        let point_b = unsafe { decompress(&point_b_compressed) };
+
+        let ax = PodG2Projective::multiply(&scalar, &point_a).unwrap();
+        let bx = PodG2Projective::multiply(&scalar, &point_b).unwrap();
+
+        unsafe {
+            assert_eq!(
+                compress(&PodG2Projective::add(&ax, &bx).unwrap()),
+                compress(
+                    &PodG2Projective::multiply(
+                        &scalar,
+                        &PodG2Projective::add(&point_a, &point_b).unwrap()
+                    )
+                    .unwrap()
+                ),
+            )
+        };
+    }
+}

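Note: the group-op wrappers above return `Option` and fail closed on malformed input, so callers chain them. A minimal off-chain sketch (import paths assumed; `PodG2Projective` is `Copy` via its `Pod` derive) that aggregates a slice of G2 points by repeated addition:

    use {solana_bls12_381::g2::PodG2Projective, solana_curve_traits::GroupOperations};

    /// Aggregate a non-empty slice of G2 points; returns None on empty input
    /// or if any group-op syscall rejects its operands.
    fn aggregate_g2(points: &[PodG2Projective]) -> Option<PodG2Projective> {
        let (first, rest) = points.split_first()?;
        rest.iter()
            .try_fold(*first, |acc, point| PodG2Projective::add(&acc, point))
    }
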
+ 7 - 0
curves/bls12-381/src/lib.rs

@@ -0,0 +1,7 @@
+#![allow(clippy::arithmetic_side_effects, clippy::op_ref)]
+//! Syscall operations for bls12-381
+
+pub mod errors;
+pub mod g1;
+pub mod g2;
+pub mod scalar;

+ 5 - 0
curves/bls12-381/src/scalar.rs

@@ -0,0 +1,5 @@
+use bytemuck_derive::{Pod, Zeroable};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Pod, Zeroable)]
+#[repr(transparent)]
+pub struct PodScalar(pub [u8; 32]);

+ 15 - 0
curves/curve-traits/Cargo.toml

@@ -0,0 +1,15 @@
+[package]
+name = "solana-curve-traits"
+description = "Solana Curve Traits"
+documentation = "https://docs.rs/solana-curve-traits"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+
+[lints]
+workspace = true

+ 20 - 2
curves/curve25519/src/curve_syscall_traits.rs → curves/curve-traits/src/lib.rs

@@ -22,6 +22,13 @@ pub trait PointValidation {
     fn validate_point(&self) -> bool;
 }
 
+pub trait HashToCurve {
+    type Point;
+
+    /// Hash a sequence of bytes to a curve point; `dst` is the domain separation tag, `aug` an optional message augmentation.
+    fn hash_to_curve(bytes: &[u8], dst: &[u8], aug: &[u8]) -> Self::Point;
+}
+
 pub trait GroupOperations {
     type Point;
     type Scalar;
@@ -72,13 +79,24 @@ pub trait Pairing {
     /// Applies the bilinear pairing operation to two curve points P1, P2 -> e(P1, P2). This trait
     /// is only relevant for "pairing-friendly" curves such as BN254 and BLS12-381.
     fn pairing_map(
-        left_point: &Self::G1Point,
-        right_point: &Self::G2Point,
+        left_point: &[Self::G1Point],
+        right_point: &[Self::G2Point],
+        n: usize,
     ) -> Option<Self::GTPoint>;
 }
 
 pub const CURVE25519_EDWARDS: u64 = 0;
 pub const CURVE25519_RISTRETTO: u64 = 1;
+pub const BN254_G1: u64 = 2;
+pub const BN254_G2: u64 = 3;
+pub const BN254_GT: u64 = 4;
+pub const BLS12_381_G1_COMPRESSED: u64 = 5;
+pub const BLS12_381_G2_COMPRESSED: u64 = 6;
+pub const BLS12_381_G1_AFFINE: u64 = 7;
+pub const BLS12_381_G2_AFFINE: u64 = 8;
+pub const BLS12_381_G1_PROJECTIVE: u64 = 9;
+pub const BLS12_381_G2_PROJECTIVE: u64 = 10;
+pub const BLS12_381_GT: u64 = 11;
 
 pub const ADD: u64 = 0;
 pub const SUB: u64 = 1;

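The `pairing_map` change above turns the single pairing into a multi-pairing: it evaluates the product e(P1[0], P2[0]) * ... * e(P1[n-1], P2[n-1]) in one call, which is the shape batched BLS verification wants. A generic sketch against the new signature (works for any `Pairing` impl):

    use solana_curve_traits::Pairing;

    /// Evaluate the 2-term product e(g1[0], g2[0]) * e(g1[1], g2[1]); e.g. a
    /// min-sig BLS verifier checks e(H(m), pk) * e(sig, -G2) == identity.
    fn pairing_product_2<C: Pairing>(
        g1: &[C::G1Point; 2],
        g2: &[C::G2Point; 2],
    ) -> Option<C::GTPoint> {
        C::pairing_map(g1, g2, 2)
    }
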
+ 1 - 0
curves/curve25519/Cargo.toml

@@ -12,6 +12,7 @@ edition = { workspace = true }
 [dependencies]
 bytemuck = { workspace = true }
 bytemuck_derive = { workspace = true }
+solana-curve-traits = { workspace = true }
 # this crate uses `subtle::CtOption<Scalar>::into_option` via curve25519-dalek,
 # which requires subtle v2.6.1, but curve25519-dalek only requires v2.3.0
 # The line below help users of this crate obtain correct subtle version.

+ 2 - 5
curves/curve25519/src/edwards.rs

@@ -9,16 +9,13 @@ pub struct PodEdwardsPoint(pub [u8; 32]);
 mod target_arch {
     use {
         super::*,
-        crate::{
-            curve_syscall_traits::{GroupOperations, MultiScalarMultiplication, PointValidation},
-            errors::Curve25519Error,
-            scalar::PodScalar,
-        },
+        crate::{errors::Curve25519Error, scalar::PodScalar},
         curve25519_dalek::{
             edwards::{CompressedEdwardsY, EdwardsPoint},
             scalar::Scalar,
             traits::VartimeMultiscalarMul,
         },
+        solana_curve_traits::{GroupOperations, MultiScalarMultiplication, PointValidation},
     };
 
     pub fn validate_edwards(point: &PodEdwardsPoint) -> bool {

+ 0 - 1
curves/curve25519/src/lib.rs

@@ -1,7 +1,6 @@
 #![allow(clippy::arithmetic_side_effects, clippy::op_ref)]
 //! Syscall operations for curve25519
 
-pub mod curve_syscall_traits;
 pub mod edwards;
 pub mod errors;
 pub mod ristretto;

+ 2 - 5
curves/curve25519/src/ristretto.rs

@@ -9,16 +9,13 @@ pub struct PodRistrettoPoint(pub [u8; 32]);
 mod target_arch {
     use {
         super::*,
-        crate::{
-            curve_syscall_traits::{GroupOperations, MultiScalarMultiplication, PointValidation},
-            errors::Curve25519Error,
-            scalar::PodScalar,
-        },
+        crate::{errors::Curve25519Error, scalar::PodScalar},
         curve25519_dalek::{
             ristretto::{CompressedRistretto, RistrettoPoint},
             scalar::Scalar,
             traits::VartimeMultiscalarMul,
         },
+        solana_curve_traits::{GroupOperations, MultiScalarMultiplication, PointValidation},
     };
 
     pub fn validate_ristretto(point: &PodRistrettoPoint) -> bool {

+ 4 - 0
genesis/Cargo.toml

@@ -21,6 +21,7 @@ path = "src/main.rs"
 
 [dependencies]
 agave-feature-set = { workspace = true }
+alpenglow-vote = { workspace = true }
 base64 = { workspace = true }
 bincode = { workspace = true }
 clap = { workspace = true }
@@ -30,6 +31,7 @@ serde_json = { workspace = true }
 serde_yaml = { workspace = true }
 solana-account = "=2.2.1"
 solana-accounts-db = { workspace = true }
+solana-bls-signatures = { workspace = true }
 solana-clap-utils = { workspace = true }
 solana-cli-config = { workspace = true }
 solana-clock = "=2.2.2"
@@ -57,7 +59,9 @@ solana-stake-interface = "=1.2.1"
 solana-stake-program = { workspace = true }
 solana-time-utils = "2.2.1"
 solana-version = { workspace = true }
+solana-vote = { workspace = true }
 solana-vote-program = { workspace = true }
+solana-votor-messages = { workspace = true }
 tempfile = { workspace = true }
 
 [dev-dependencies]

+ 1 - 0
genesis/src/lib.rs

@@ -29,4 +29,5 @@ pub struct StakedValidatorAccountInfo {
     pub identity_account: String,
     pub vote_account: String,
     pub stake_account: String,
+    pub bls_pubkey: Option<String>,
 }

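For reference, a sketch of what one entry of a `--validator-accounts-file` now deserializes from (crate path, serde derives, and placeholder values assumed; `bls_pubkey` may be omitted or null for non-Alpenglow validators and is required when genesis runs with `--alpenglow`):

    use solana_genesis::StakedValidatorAccountInfo; // path assumed

    fn example_entry() -> serde_json::Result<StakedValidatorAccountInfo> {
        serde_json::from_value(serde_json::json!({
            "identity_account": solana_pubkey::new_rand().to_string(),
            "vote_account": solana_pubkey::new_rand().to_string(),
            "stake_account": solana_pubkey::new_rand().to_string(),
            "bls_pubkey": null, // required under --alpenglow
            "balance_lamports": 100_000_000_000u64,
            "stake_lamports": 10_000_000_000u64,
        }))
    }
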
+ 102 - 12
genesis/src/main.rs

@@ -8,9 +8,11 @@ use {
     itertools::Itertools,
     solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
     solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
+    solana_bls_signatures::Pubkey as BLSPubkey,
     solana_clap_utils::{
         input_parsers::{
-            cluster_type_of, pubkey_of, pubkeys_of, unix_timestamp_from_rfc3339_datetime,
+            bls_pubkeys_of, cluster_type_of, pubkey_of, pubkeys_of,
+            unix_timestamp_from_rfc3339_datetime,
         },
         input_validators::{
             is_pubkey, is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_url_or_moniker,
@@ -38,11 +40,13 @@ use {
     solana_rent::Rent,
     solana_rpc_client::rpc_client::RpcClient,
     solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS,
+    solana_runtime::genesis_utils::include_alpenglow_bpf_program,
     solana_sdk_ids::system_program,
     solana_signer::Signer,
     solana_stake_interface::state::StakeStateV2,
     solana_stake_program::stake_state,
     solana_vote_program::vote_state::{self, VoteStateV3},
+    solana_votor_messages::state::VoteState as AlpenglowVoteState,
     std::{
         collections::HashMap,
         error,
@@ -115,6 +119,7 @@ pub fn load_validator_accounts(
     commission: u8,
     rent: &Rent,
     genesis_config: &mut GenesisConfig,
+    is_alpenglow: bool,
 ) -> io::Result<()> {
     let accounts_file = File::open(file)?;
     let validator_genesis_accounts: Vec<StakedValidatorAccountInfo> =
@@ -143,15 +148,22 @@ pub fn load_validator_accounts(
                 ))
             })?,
         ];
+        let bls_pubkeys: Vec<BLSPubkey> = account_details.bls_pubkey.map_or(Ok(vec![]), |s| {
+            BLSPubkey::from_str(&s).map(|pk| vec![pk]).map_err(|err| {
+                io::Error::new(io::ErrorKind::Other, format!("Invalid BLS pubkey: {err}"))
+            })
+        })?;
 
         add_validator_accounts(
             genesis_config,
             &mut pubkeys.iter(),
+            bls_pubkeys,
             account_details.balance_lamports,
             account_details.stake_lamports,
             commission,
             rent,
             None,
+            is_alpenglow,
         )?;
     }
 
@@ -225,17 +237,20 @@ fn features_to_deactivate_for_cluster(
 fn add_validator_accounts(
     genesis_config: &mut GenesisConfig,
     pubkeys_iter: &mut Iter<Pubkey>,
+    bls_pubkeys: Vec<BLSPubkey>,
     lamports: u64,
     stake_lamports: u64,
     commission: u8,
     rent: &Rent,
     authorized_pubkey: Option<&Pubkey>,
+    is_alpenglow: bool,
 ) -> io::Result<()> {
     rent_exempt_check(
         stake_lamports,
         rent.minimum_balance(StakeStateV2::size_of()),
     )?;
 
+    let mut bls_pubkeys_iter = bls_pubkeys.iter();
     loop {
         let Some(identity_pubkey) = pubkeys_iter.next() else {
             break;
@@ -248,13 +263,27 @@ fn add_validator_accounts(
             AccountSharedData::new(lamports, 0, &system_program::id()),
         );
 
-        let vote_account = vote_state::create_account_with_authorized(
-            identity_pubkey,
-            identity_pubkey,
-            identity_pubkey,
-            commission,
-            VoteStateV3::get_rent_exempt_reserve(rent).max(1),
-        );
+        let vote_account = if is_alpenglow {
+            let bls_pubkey = bls_pubkeys_iter
+                .next()
+                .unwrap_or_else(|| panic!("Missing BLS pubkey for {identity_pubkey}"));
+            AlpenglowVoteState::create_account_with_authorized(
+                identity_pubkey,
+                identity_pubkey,
+                identity_pubkey,
+                commission,
+                AlpenglowVoteState::get_rent_exempt_reserve(rent).max(1),
+                *bls_pubkey,
+            )
+        } else {
+            vote_state::create_account_with_authorized(
+                identity_pubkey,
+                identity_pubkey,
+                identity_pubkey,
+                commission,
+                VoteStateV3::get_rent_exempt_reserve(rent).max(1),
+            )
+        };
 
         genesis_config.add_account(
             *stake_pubkey,
@@ -315,7 +344,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     // vote account
     let default_bootstrap_validator_lamports = &sol_to_lamports(500.0)
         .max(VoteStateV3::get_rent_exempt_reserve(&rent))
+        .max(AlpenglowVoteState::get_rent_exempt_reserve(&rent))
         .to_string();
+
     // stake account
     let default_bootstrap_validator_stake_lamports = &sol_to_lamports(0.5)
         .max(rent.minimum_balance(StakeStateV2::size_of()))
@@ -349,6 +380,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .required(true)
                 .help("The bootstrap validator's identity, vote and stake pubkeys"),
         )
+        .arg(
+            Arg::with_name("bootstrap_validator_bls_pubkey")
+                .long("bootstrap-validator-bls-pubkey")
+                .value_name("BLS_PUBKEY")
+                .multiple(true)
+                .takes_value(true)
+                .required(false)
+                .help("The bootstrap validator's bls pubkey"),
+        )
         .arg(
             Arg::with_name("ledger_path")
                 .short("l")
@@ -606,6 +646,12 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                     feature sets",
                 ),
         )
+        .arg(
+            Arg::with_name("alpenglow")
+                .long("alpenglow")
+                .takes_value(true)
+                .help("Path to spl-alpenglow_vote.so. When specified, we use Alpenglow consensus; when not specified, we use POH."),
+        )
         .get_matches();
 
     let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
@@ -619,6 +665,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let bootstrap_validator_pubkeys = pubkeys_of(&matches, "bootstrap_validator").unwrap();
     assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0);
 
+    let bootstrap_validator_bls_pubkeys =
+        bls_pubkeys_of(&matches, "bootstrap_validator_bls_pubkey");
+    if let Some(pubkeys) = &bootstrap_validator_bls_pubkeys {
+        assert_eq!(
+            pubkeys.len() * 3,
+            bootstrap_validator_pubkeys.len(),
+            "Number of BLS pubkeys must match the number of bootstrap validator identities"
+        );
+    }
+
     // Ensure there are no duplicated pubkeys in the --bootstrap-validator list
     {
         let mut v = bootstrap_validator_pubkeys.clone();
@@ -726,14 +782,18 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let commission = value_t_or_exit!(matches, "vote_commission_percentage", u8);
     let rent = genesis_config.rent.clone();
 
+    let alpenglow_so_path = matches.value_of("alpenglow");
+
     add_validator_accounts(
         &mut genesis_config,
         &mut bootstrap_validator_pubkeys.iter(),
+        bootstrap_validator_bls_pubkeys.unwrap_or_default(),
         bootstrap_validator_lamports,
         bootstrap_validator_stake_lamports,
         commission,
         &rent,
         bootstrap_stake_authorized_pubkey.as_ref(),
+        alpenglow_so_path.is_some(),
     )?;
 
     if let Some(creation_time) = unix_timestamp_from_rfc3339_datetime(&matches, "creation_time") {
@@ -748,7 +808,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     }
 
     solana_stake_program::add_genesis_accounts(&mut genesis_config);
-    solana_runtime::genesis_utils::activate_all_features(&mut genesis_config);
+
+    if alpenglow_so_path.is_some() {
+        solana_runtime::genesis_utils::activate_all_features_alpenglow(&mut genesis_config);
+    } else {
+        solana_runtime::genesis_utils::activate_all_features(&mut genesis_config);
+    }
+
     if !features_to_deactivate.is_empty() {
         solana_runtime::genesis_utils::deactivate_features(
             &mut genesis_config,
@@ -764,7 +830,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
 
     if let Some(files) = matches.values_of("validator_accounts_file") {
         for file in files {
-            load_validator_accounts(file, commission, &rent, &mut genesis_config)?;
+            load_validator_accounts(
+                file,
+                commission,
+                &rent,
+                &mut genesis_config,
+                alpenglow_so_path.is_some(),
+            )?;
         }
     }
 
@@ -815,6 +887,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
         }
     }
 
+    if let Some(alpenglow_so_path) = alpenglow_so_path {
+        include_alpenglow_bpf_program(&mut genesis_config, alpenglow_so_path);
+    }
+
     if let Some(values) = matches.values_of("upgradeable_program") {
         for (address, loader, program, upgrade_authority) in values.tuples() {
             let address = parse_address(address, "address");
@@ -887,6 +963,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
 mod tests {
     use {
         super::*,
+        solana_bls_signatures::keypair::Keypair as BLSKeypair,
         solana_borsh::v1 as borsh1,
         solana_genesis_config::GenesisConfig,
         solana_stake_interface as stake,
@@ -1237,17 +1314,20 @@ mod tests {
             "unknownfile",
             100,
             &Rent::default(),
-            &mut GenesisConfig::default()
+            &mut GenesisConfig::default(),
+            false,
         )
         .is_err());
 
         let mut genesis_config = GenesisConfig::default();
 
+        let bls_pubkey: BLSPubkey = BLSKeypair::new().public.into();
         let validator_accounts = vec![
             StakedValidatorAccountInfo {
                 identity_account: solana_pubkey::new_rand().to_string(),
                 vote_account: solana_pubkey::new_rand().to_string(),
                 stake_account: solana_pubkey::new_rand().to_string(),
+                bls_pubkey: None,
                 balance_lamports: 100000000000,
                 stake_lamports: 10000000000,
             },
@@ -1255,6 +1335,7 @@ mod tests {
                 identity_account: solana_pubkey::new_rand().to_string(),
                 vote_account: solana_pubkey::new_rand().to_string(),
                 stake_account: solana_pubkey::new_rand().to_string(),
+                bls_pubkey: Some(bls_pubkey.to_string()),
                 balance_lamports: 200000000000,
                 stake_lamports: 20000000000,
             },
@@ -1262,6 +1343,7 @@ mod tests {
                 identity_account: solana_pubkey::new_rand().to_string(),
                 vote_account: solana_pubkey::new_rand().to_string(),
                 stake_account: solana_pubkey::new_rand().to_string(),
+                bls_pubkey: Some(bls_pubkey.to_string()),
                 balance_lamports: 300000000000,
                 stake_lamports: 30000000000,
             },
@@ -1280,6 +1362,7 @@ mod tests {
             100,
             &Rent::default(),
             &mut genesis_config,
+            false,
         )
         .expect("Failed to load validator accounts");
 
@@ -1291,7 +1374,7 @@ mod tests {
             assert_eq!(genesis_config.accounts.len(), expected_accounts_len);
 
             // test account data matches
-            for b64_account in validator_accounts.iter() {
+            for (i, b64_account) in validator_accounts.iter().enumerate() {
                 // check identity
                 let identity_pk = b64_account.identity_account.parse().unwrap();
                 assert_eq!(
@@ -1319,6 +1402,13 @@ mod tests {
                     genesis_config.accounts[&stake_pk].lamports
                 );
 
+                // check BLS pubkey
+                if i == 0 {
+                    assert!(b64_account.bls_pubkey.is_none());
+                } else {
+                    assert_eq!(b64_account.bls_pubkey, Some(bls_pubkey.to_string()));
+                }
+
                 let stake_data = genesis_config.accounts[&stake_pk].data.clone();
                 let stake_state =
                     borsh1::try_from_slice_unchecked::<StakeStateV2>(&stake_data).unwrap();

+ 1 - 1
gossip/Cargo.toml

@@ -63,7 +63,7 @@ solana-clock = "=2.2.2"
 solana-connection-cache = { workspace = true }
 solana-entry = { workspace = true }
 solana-epoch-schedule = "=2.2.1"
-solana-frozen-abi = { version = "=2.3.0", optional = true, features = [
+solana-frozen-abi = { workspace = true, optional = true, features = [
     "frozen-abi",
 ] }
 solana-frozen-abi-macro = { version = "=2.2.1", optional = true, features = [

+ 26 - 18
gossip/src/cluster_info.rs

@@ -544,8 +544,7 @@ impl ClusterInfo {
                     }
                     let ip_addr = node.gossip().as_ref().map(SocketAddr::ip);
                     Some(format!(
-                        "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}|  {:5}| {:5}| {:5}| {:5}| {:5}| \
-                         {:5}| {}\n",
+                        "{:15} {:2}| {:5} | {:44} |{:^9}| {:5}|  {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
                         node.gossip()
                             .filter(|addr| self.socket_addr_space.check(addr))
                             .as_ref()
@@ -574,10 +573,8 @@ impl ClusterInfo {
                         ),
                         self.addr_to_string(&ip_addr, &node.tvu(contact_info::Protocol::UDP)),
                         self.addr_to_string(&ip_addr, &node.tvu(contact_info::Protocol::QUIC)),
-                        self.addr_to_string(
-                            &ip_addr,
-                            &node.serve_repair(contact_info::Protocol::UDP)
-                        ),
+                        self.addr_to_string(&ip_addr, &node.serve_repair(contact_info::Protocol::UDP)),
+                        self.addr_to_string(&ip_addr, &node.alpenglow()),
                         node.shred_version(),
                     ))
                 }
@@ -586,9 +583,9 @@ impl ClusterInfo {
 
         format!(
             "IP Address        |Age(ms)| Node identifier                              \
-             | Version |Gossip|TPUvote| TPU  |TPUfwd| TVU  |TVU Q |ServeR|ShredVer\n\
+             | Version |Gossip|TPUvote| TPU  |TPUfwd| TVU  |TVU Q |ServeR|Alpeng|ShredVer\n\
              ------------------+-------+----------------------------------------------\
-             +---------+------+-------+------+------+------+------+------+--------\n\
+             +---------+------+-------+------+------+------+------+------+------+--------\n\
              {}\
              Nodes: {}{}{}",
             nodes.join(""),
@@ -783,8 +780,8 @@ impl ClusterInfo {
         }
     }
 
-    /// If there are less than `MAX_LOCKOUT_HISTORY` votes present, returns the next index
-    /// without a vote. If there are `MAX_LOCKOUT_HISTORY` votes:
+    /// If there are fewer than `MAX_VOTES` votes present, returns the next index
+    /// without a vote. If there are `MAX_VOTES` votes:
     /// - Finds the oldest wallclock vote and returns its index
     /// - Otherwise returns the total amount of observed votes
     ///
@@ -2356,6 +2353,7 @@ pub struct Sockets {
     /// Client-side socket for RPC/SendTransactionService.
     pub rpc_sts_client: UdpSocket,
     pub vortexor_receivers: Option<Vec<UdpSocket>>,
+    pub alpenglow: UdpSocket,
 }
 
 pub struct NodeConfig {
@@ -2583,6 +2581,9 @@ impl Node {
         )
         .expect("retransmit multi_bind");
 
+        let (alpenglow_port, alpenglow) =
+            bind_in_range_with_config(bind_ip_addr, port_range, socket_config)
+                .expect("alpenglow bind");
         let (_, repair) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config)
             .expect("repair bind");
         let (_, repair_quic) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config)
@@ -2638,6 +2639,7 @@ impl Node {
             .unwrap();
         info.set_serve_repair(QUIC, (advertised_ip, serve_repair_quic_port))
             .unwrap();
+        info.set_alpenglow((advertised_ip, alpenglow_port)).unwrap();
 
         let vortexor_receivers = vortexor_receiver_addr.map(|vortexor_receiver_addr| {
             multi_bind_in_range_with_config(
@@ -2681,6 +2683,7 @@ impl Node {
             tpu_transaction_forwarding_client,
             rpc_sts_client,
             vortexor_receivers,
+            alpenglow,
         };
         info!("Bound all network sockets as follows: {:#?}", &sockets);
         Node { info, sockets }
@@ -3141,6 +3144,7 @@ mod tests {
         check_socket(&node.sockets.gossip.load(), ip, range);
         check_socket(&node.sockets.repair, ip, range);
         check_socket(&node.sockets.tvu_quic, ip, range);
+        check_socket(&node.sockets.alpenglow, ip, range);
 
         check_sockets(&node.sockets.tvu, ip, range);
         check_sockets(&node.sockets.tpu, ip, range);
@@ -4045,6 +4049,10 @@ mod tests {
     #[test]
     fn test_contact_trace() {
         solana_logger::setup();
+        // If you change the format of cluster_info_trace or rpc_info_trace, please make sure
+        // you check the actual output so the headers line up with the columns.
+        const CLUSTER_INFO_TRACE_LENGTH: usize = 452;
+        const RPC_INFO_TRACE_LENGTH: usize = 335;
         let keypair43 = Arc::new(
             Keypair::from_bytes(&[
                 198, 203, 8, 178, 196, 71, 119, 152, 31, 96, 221, 142, 115, 224, 45, 34, 173, 138,
@@ -4078,19 +4086,19 @@ mod tests {
         assert_eq!(keypair44.pubkey().to_string().len(), 44);
 
         let trace = cluster_info44.contact_info_trace();
-        info!("cluster:\n{trace}");
-        assert_eq!(trace.len(), 431);
+        info!("cluster:\n{}", trace);
+        assert_eq!(trace.len(), CLUSTER_INFO_TRACE_LENGTH);
 
         let trace = cluster_info44.rpc_info_trace();
-        info!("rpc:\n{trace}");
-        assert_eq!(trace.len(), 335);
+        info!("rpc:\n{}", trace);
+        assert_eq!(trace.len(), RPC_INFO_TRACE_LENGTH);
 
         let trace = cluster_info43.contact_info_trace();
-        info!("cluster:\n{trace}");
-        assert_eq!(trace.len(), 431);
+        info!("cluster:\n{}", trace);
+        assert_eq!(trace.len(), CLUSTER_INFO_TRACE_LENGTH);
 
         let trace = cluster_info43.rpc_info_trace();
-        info!("rpc:\n{trace}");
-        assert_eq!(trace.len(), 335);
+        info!("rpc:\n{}", trace);
+        assert_eq!(trace.len(), RPC_INFO_TRACE_LENGTH);
     }
 }

+ 18 - 0
gossip/src/contact_info.rs

@@ -314,6 +314,7 @@ impl ContactInfo {
         SOCKET_TAG_TPU_FORWARDS_QUIC
     );
     remove_socket!(remove_tvu, SOCKET_TAG_TVU, SOCKET_TAG_TVU_QUIC);
+    remove_socket!(remove_alpenglow, SOCKET_TAG_ALPENGLOW);
 
     #[cfg(test)]
     fn get_socket(&self, key: u8) -> Result<SocketAddr, Error> {
@@ -1104,6 +1105,23 @@ mod tests {
         assert_matches!(node.tpu_forwards(Protocol::QUIC), None);
     }
 
+    #[test]
+    fn test_set_and_remove_alpenglow() {
+        let mut rng = rand::thread_rng();
+        let mut node = ContactInfo::new(
+            Keypair::new().pubkey(),
+            rng.gen(), // wallclock
+            rng.gen(), // shred_version
+        );
+        let socket = repeat_with(|| new_rand_socket(&mut rng))
+            .find(|socket| matches!(sanitize_socket(socket), Ok(())))
+            .unwrap();
+        node.set_alpenglow(socket).unwrap();
+        assert_eq!(node.alpenglow().unwrap(), socket);
+        node.remove_alpenglow();
+        assert_matches!(node.alpenglow(), None);
+    }
+
     #[test]
     fn test_check_duplicate() {
         let mut rng = rand::thread_rng();

+ 1 - 0
gossip/tests/gossip.rs

@@ -153,6 +153,7 @@ fn retransmit_to(
 
 /// ring a -> b -> c -> d -> e -> a
 #[test]
+#[ignore]
 fn gossip_ring() {
     solana_logger::setup();
     run_gossip_topo(40, |listen| {

+ 4 - 0
keygen/Cargo.toml

@@ -17,11 +17,13 @@ name = "solana-keygen"
 path = "src/keygen.rs"
 
 [dependencies]
+alpenglow-vote = { workspace = true }
 bs58 = { workspace = true }
 clap = { version = "3.1.5", features = ["cargo"] }
 dirs-next = { workspace = true }
 num_cpus = { workspace = true }
 serde_json = { workspace = true }
+solana-bls-signatures = { workspace = true, features = ["solana-signer-derive"] }
 solana-clap-v3-utils = { workspace = true }
 solana-cli-config = { workspace = true }
 solana-derivation-path = "=2.2.1"
@@ -33,6 +35,8 @@ solana-remote-wallet = { workspace = true, features = ["default"] }
 solana-seed-derivable = "=2.2.1"
 solana-signer = "=2.2.1"
 solana-version = { workspace = true }
+solana-vote = { workspace = true }
+solana-votor-messages = { workspace = true }
 tiny-bip39 = { workspace = true }
 
 [dev-dependencies]

+ 67 - 0
keygen/src/keygen.rs

@@ -5,6 +5,7 @@ use {
         builder::ValueParser, crate_description, crate_name, value_parser, Arg, ArgAction,
         ArgMatches, Command,
     },
+    solana_bls_signatures::{keypair::Keypair as BLSKeypair, Pubkey as BLSPubkey},
     solana_clap_v3_utils::{
         input_parsers::{
             signer::{SignerSource, SignerSourceParserBuilder},
@@ -35,6 +36,7 @@ use {
     solana_pubkey::Pubkey,
     solana_remote_wallet::remote_wallet::RemoteWalletManager,
     solana_signer::Signer,
+    solana_votor_messages::bls_message::BLS_KEYPAIR_DERIVE_SEED,
     std::{
         collections::HashSet,
         error,
@@ -388,6 +390,40 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> {
                         .help("Overwrite the output file if it exists"),
                 )
         )
+        .subcommand(
+            Command::new("bls_pubkey")
+                .about("Display the BLS pubkey derived from given ed25519 keypair file")
+                .disable_version_flag(true)
+                .arg(
+                    Arg::new("keypair")
+                        .index(1)
+                        .value_name("KEYPAIR")
+                        .takes_value(true)
+                        .value_parser(
+                            SignerSourceParserBuilder::default().allow_all().build()
+                        )
+                        .help("Filepath or URL to a keypair"),
+                )
+                .arg(
+                    Arg::new(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
+                        .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
+                        .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
+                )
+                .arg(
+                    Arg::new("outfile")
+                        .short('o')
+                        .long("outfile")
+                        .value_name("FILEPATH")
+                        .takes_value(true)
+                        .help("Path to generated file"),
+                )
+                .arg(
+                    Arg::new("force")
+                        .short('f')
+                        .long("force")
+                        .help("Overwrite the output file if it exists"),
+                )
+        )
         .subcommand(
             Command::new("recover")
                 .about("Recover keypair from seed phrase and optional BIP39 passphrase")
@@ -438,6 +474,24 @@ fn write_pubkey_file(outfile: &str, pubkey: Pubkey) -> Result<(), Box<dyn std::e
     Ok(())
 }
 
+fn write_bls_pubkey_file(
+    outfile: &str,
+    bls_pubkey: BLSPubkey,
+) -> Result<(), Box<dyn std::error::Error>> {
+    use std::io::Write;
+
+    let printable = format!("{bls_pubkey}");
+    let serialized = serde_json::to_string(&printable)?;
+
+    if let Some(outdir) = std::path::Path::new(&outfile).parent() {
+        std::fs::create_dir_all(outdir)?;
+    }
+    let mut f = std::fs::File::create(outfile)?;
+    f.write_all(&serialized.into_bytes())?;
+
+    Ok(())
+}
+
 fn main() -> Result<(), Box<dyn error::Error>> {
     let default_num_threads = num_cpus::get().to_string();
     let matches = app(&default_num_threads, solana_version::version!())
@@ -470,6 +524,19 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box<dyn error::Error>> {
                 println!("{pubkey}");
             }
         }
+        ("bls_pubkey", matches) => {
+            let keypair = get_keypair_from_matches(matches, config, &mut wallet_manager)?;
+            let bls_keypair = BLSKeypair::derive_from_signer(&keypair, BLS_KEYPAIR_DERIVE_SEED)?;
+            let bls_pubkey: BLSPubkey = bls_keypair.public.into();
+
+            if matches.try_contains_id("outfile")? {
+                let outfile = matches.get_one::<String>("outfile").unwrap();
+                check_for_overwrite(outfile, matches)?;
+                write_bls_pubkey_file(outfile, bls_pubkey)?;
+            } else {
+                println!("{bls_pubkey}");
+            }
+        }
         ("new", matches) => {
             let mut path = dirs_next::home_dir().expect("home directory");
             let outfile = if matches.try_contains_id("outfile")? {

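A sketch of the derivation the `bls_pubkey` subcommand performs (signature of `derive_from_signer` assumed from the call above; the same identity and seed always yield the same BLS key, so it can be re-derived rather than stored):

    use {
        solana_bls_signatures::{keypair::Keypair as BLSKeypair, Pubkey as BLSPubkey},
        solana_keypair::Keypair,
        solana_votor_messages::bls_message::BLS_KEYPAIR_DERIVE_SEED,
    };

    fn bls_pubkey_for(identity: &Keypair) -> Result<BLSPubkey, Box<dyn std::error::Error>> {
        // Same fixed seed as the CLI, so CLI and validator agree on the key.
        let bls = BLSKeypair::derive_from_signer(identity, BLS_KEYPAIR_DERIVE_SEED)?;
        Ok(bls.public.into())
    }
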
+ 8 - 2
ledger-tool/src/main.rs

@@ -210,7 +210,10 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String {
             .map(|(_, (stake, _))| stake)
             .sum();
         for (stake, vote_account) in bank.vote_accounts().values() {
-            let vote_state_view = vote_account.vote_state_view();
+            // TODO(wen): make this work for Alpenglow
+            let Some(vote_state_view) = vote_account.vote_state_view() else {
+                continue;
+            };
             if let Some(last_vote) = vote_state_view.last_voted_slot() {
                 let entry = last_votes.entry(*vote_state_view.node_pubkey()).or_insert((
                     last_vote,
@@ -250,7 +253,10 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String {
         let mut first = true;
         loop {
             for (_, vote_account) in bank.vote_accounts().values() {
-                let vote_state_view = vote_account.vote_state_view();
+                // TODO(wen): make this work for Alpenglow
+                let Some(vote_state_view) = vote_account.vote_state_view() else {
+                    continue;
+                };
                 if let Some(last_vote) = vote_state_view.last_voted_slot() {
                     let validator_votes =
                         all_votes.entry(*vote_state_view.node_pubkey()).or_default();

+ 4 - 0
ledger/Cargo.toml

@@ -27,6 +27,7 @@ frozen-abi = [
 [dependencies]
 agave-feature-set = { workspace = true }
 agave-reserved-account-keys = { workspace = true }
+alpenglow-vote = { workspace = true }
 anyhow = { workspace = true }
 assert_matches = { workspace = true }
 bincode = { workspace = true }
@@ -110,6 +111,9 @@ solana-transaction-error = { workspace = true }
 solana-transaction-status = { workspace = true }
 solana-vote = { workspace = true }
 solana-vote-program = { workspace = true }
+solana-votor-messages = { workspace = true }
+spl-token = { workspace = true, features = ["no-entrypoint"] }
+spl-token-2022 = { workspace = true, features = ["no-entrypoint"] }
 static_assertions = { workspace = true }
 strum = { workspace = true, features = ["derive"] }
 strum_macros = { workspace = true }

+ 64 - 5
ledger/src/blockstore.rs

@@ -60,6 +60,7 @@ use {
         VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries,
         VersionedTransactionWithStatusMeta,
     },
+    solana_votor_messages::bls_message::CertificateMessage,
     std::{
         borrow::Cow,
         cell::RefCell,
@@ -267,6 +268,7 @@ pub struct Blockstore {
     perf_samples_cf: LedgerColumn<cf::PerfSamples>,
     rewards_cf: LedgerColumn<cf::Rewards>,
     roots_cf: LedgerColumn<cf::Root>,
+    slot_certificates_cf: LedgerColumn<cf::SlotCertificates>,
     transaction_memos_cf: LedgerColumn<cf::TransactionMemos>,
     transaction_status_cf: LedgerColumn<cf::TransactionStatus>,
     transaction_status_index_cf: LedgerColumn<cf::TransactionStatusIndex>,
@@ -414,6 +416,7 @@ impl Blockstore {
         let perf_samples_cf = db.column();
         let rewards_cf = db.column();
         let roots_cf = db.column();
+        let slot_certificates_cf = db.column();
         let transaction_memos_cf = db.column();
         let transaction_status_cf = db.column();
         let transaction_status_index_cf = db.column();
@@ -448,6 +451,7 @@ impl Blockstore {
             perf_samples_cf,
             rewards_cf,
             roots_cf,
+            slot_certificates_cf,
             transaction_memos_cf,
             transaction_status_cf,
             transaction_status_index_cf,
@@ -869,6 +873,7 @@ impl Blockstore {
         self.bank_hash_cf.submit_rocksdb_cf_metrics();
         self.optimistic_slots_cf.submit_rocksdb_cf_metrics();
         self.merkle_root_meta_cf.submit_rocksdb_cf_metrics();
+        self.slot_certificates_cf.submit_rocksdb_cf_metrics();
     }
 
     /// Report the accumulated RPC API metrics
@@ -3774,15 +3779,21 @@ impl Blockstore {
         &self,
         slot: Slot,
         bank_hash: Hash,
+        is_leader: bool,
         feature_set: &FeatureSet,
     ) -> std::result::Result<Option<Hash>, BlockstoreProcessorError> {
         let results = self.check_last_fec_set(slot);
         let Ok(results) = results else {
-            warn!(
-                "Unable to check the last fec set for slot {slot} {bank_hash}, marking as dead: \
-                 {results:?}",
-            );
-            return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
+            if !is_leader {
+                warn!(
+                    "Unable to check the last fec set for slot {slot} {bank_hash}, \
+                 marking as dead: {results:?}",
+                );
+            }
+            if feature_set.is_active(&agave_feature_set::vote_only_full_fec_sets::id()) {
+                return Err(BlockstoreProcessorError::IncompleteFinalFecSet);
+            }
+            return Ok(None);
         };
         // Update metrics
         if results.last_fec_set_merkle_root.is_none() {
@@ -3959,6 +3970,54 @@ impl Blockstore {
             .map(|meta| (meta.hash(), meta.timestamp())))
     }
 
+    /// Insert a newly completed notarization fallback certificate for `slot`.
+    /// If already present, this will overwrite the old certificate
+    pub fn insert_new_notarization_fallback_certificate(
+        &self,
+        slot: Slot,
+        block_id: Hash,
+        certificate: CertificateMessage,
+    ) -> Result<()> {
+        let mut certificates = self
+            .slot_certificates(slot)?
+            .unwrap_or_default();
+        certificates.add_notarization_fallback_certificate(block_id, certificate);
+        self.slot_certificates_cf.put(slot, &certificates)
+    }
+
+    /// Insert newly completed skip certificate for `slot`
+    /// If already present, this will overwrite the old certificate
+    pub fn insert_new_skip_certificate(
+        &self,
+        slot: Slot,
+        certificate: CertificateMessage,
+    ) -> Result<()> {
+        let mut certificates = self
+            .slot_certificates(slot)?
+            .unwrap_or_default();
+        certificates.set_skip_certificate(certificate);
+        self.slot_certificates_cf.put(slot, &certificates)
+    }
+
+    /// Returns all completed certificates for `slot`
+    pub fn slot_certificates(&self, slot: Slot) -> Result<Option<SlotCertificates>> {
+        self.slot_certificates_cf.get(slot)
+    }
+
+    /// Returns all certificates from `slot` onwards
+    pub fn slot_certificates_iterator(
+        &self,
+        slot: Slot,
+    ) -> Result<impl Iterator<Item = (Slot, SlotCertificates)> + '_> {
+        let iter = self
+            .slot_certificates_cf
+            .iter(IteratorMode::From(slot, IteratorDirection::Forward))?;
+        Ok(iter.map(|(slot, bytes)| {
+            let certs: SlotCertificates = deserialize(&bytes).unwrap();
+            (slot, certs)
+        }))
+    }
+
     /// Returns information about the `num` latest optimistically confirmed slot
     pub fn get_latest_optimistic_slots(
         &self,

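A usage sketch of the new certificate APIs (error type boxed for brevity; the `Blockstore` is assumed open with primary access): persist a skip certificate, then scan from that slot forward as the repair/catch-up path would.

    use {
        solana_clock::Slot,
        solana_ledger::blockstore::Blockstore,
        solana_votor_messages::bls_message::CertificateMessage,
    };

    fn store_and_scan(
        blockstore: &Blockstore,
        slot: Slot,
        cert: CertificateMessage,
    ) -> Result<(), Box<dyn std::error::Error>> {
        blockstore.insert_new_skip_certificate(slot, cert)?;
        for (s, certs) in blockstore.slot_certificates_iterator(slot)? {
            println!(
                "slot {s}: {} notarize-fallback cert(s), skip = {}",
                certs.notarize_fallback_certificates.len(),
                certs.skip_certificate.is_some(),
            );
        }
        Ok(())
    }
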
+ 8 - 0
ledger/src/blockstore/blockstore_purge.rs

@@ -304,6 +304,10 @@ impl Blockstore {
             & self
                 .merkle_root_meta_cf
                 .delete_range_in_batch(write_batch, from_slot, to_slot)
+                .is_ok()
+            & self
+                .slot_certificates_cf
+                .delete_range_in_batch(write_batch, from_slot, to_slot)
                 .is_ok();
 
         match purge_type {
@@ -385,6 +389,10 @@ impl Blockstore {
                 .merkle_root_meta_cf
                 .delete_file_in_range(from_slot, to_slot)
                 .is_ok()
+            & self
+                .slot_certificates_cf
+                .delete_file_in_range(from_slot, to_slot)
+                .is_ok()
     }
 
     /// Returns true if the special columns, TransactionStatus and

+ 18 - 0
ledger/src/blockstore/column.rs

@@ -208,6 +208,16 @@ pub mod columns {
     /// * index type: `crate::shred::ErasureSetId` `(Slot, fec_set_index: u32)`
     /// * value type: [`blockstore_meta::MerkleRootMeta`]`
     pub struct MerkleRootMeta;
+
+    #[derive(Debug)]
+    /// The vote certificate column
+    ///
+    /// Stores the `NotarizeFallback` and `Skip` certificates for each slot,
+    /// for use during catch-up and to serve repair
+    ///
+    /// * index type: `u64` (see [`SlotColumn`])
+    /// * value type: [`blockstore_meta::SlotCertificates`]
+    pub struct SlotCertificates;
 }
 
 macro_rules! convert_column_index_to_key_bytes {
@@ -837,3 +847,11 @@ impl ColumnName for columns::MerkleRootMeta {
 impl TypedColumn for columns::MerkleRootMeta {
     type Type = blockstore_meta::MerkleRootMeta;
 }
+
+impl SlotColumn for columns::SlotCertificates {}
+impl ColumnName for columns::SlotCertificates {
+    const NAME: &'static str = "slot_certificates";
+}
+impl TypedColumn for columns::SlotCertificates {
+    type Type = blockstore_meta::SlotCertificates;
+}

+ 3 - 1
ledger/src/blockstore_db.rs

@@ -194,6 +194,7 @@ impl Rocks {
             new_cf_descriptor::<columns::BlockHeight>(options, oldest_slot),
             new_cf_descriptor::<columns::OptimisticSlots>(options, oldest_slot),
             new_cf_descriptor::<columns::MerkleRootMeta>(options, oldest_slot),
+            new_cf_descriptor::<columns::SlotCertificates>(options, oldest_slot),
         ];
 
         // If the access type is Secondary, we don't need to open all of the
@@ -242,7 +243,7 @@ impl Rocks {
         cf_descriptors
     }
 
-    const fn columns() -> [&'static str; 20] {
+    const fn columns() -> [&'static str; 21] {
         [
             columns::ErasureMeta::NAME,
             columns::DeadSlots::NAME,
@@ -264,6 +265,7 @@ impl Rocks {
             columns::BlockHeight::NAME,
             columns::OptimisticSlots::NAME,
             columns::MerkleRootMeta::NAME,
+            columns::SlotCertificates::NAME,
         ]
     }
 

+ 38 - 1
ledger/src/blockstore_meta.rs

@@ -8,8 +8,9 @@ use {
     serde::{Deserialize, Deserializer, Serialize, Serializer},
     solana_clock::{Slot, UnixTimestamp},
     solana_hash::Hash,
+    solana_votor_messages::bls_message::CertificateMessage,
     std::{
-        collections::BTreeSet,
+        collections::{BTreeSet, HashMap},
         ops::{Range, RangeBounds},
     },
 };
@@ -879,6 +880,42 @@ impl OptimisticSlotMetaVersioned {
     }
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+/// Holds the certificates for this slot in blockstore
+/// Under normal operation there will only be *one* certificate,
+/// either `notarize_fallback` or `skip`
+/// In the worst case (duplicate blocks) there can be at most:
+/// - 3 `notarize_fallback` certificates
+/// - plus 1 `skip_certificate`
+///
+/// Note: Currently these are pre-BLS `CertificateMessage`s, but post-BLS
+/// the certificate will be a single transaction or similar, roughly 800 bytes in size
+///
+/// This will normally be written to once per slot, but in the worst case 4 times per slot
+/// It will be read to serve repair to other nodes.
+pub struct SlotCertificates {
+    /// The notarization fallback certificates keyed by block_id
+    pub notarize_fallback_certificates: HashMap<Hash, CertificateMessage>,
+    /// The skip certificate
+    pub skip_certificate: Option<CertificateMessage>,
+}
+
+impl SlotCertificates {
+    /// Insert a new notarization fallback certificate for this slot.
+    /// Overwrites an existing one if it exists
+    pub fn add_notarization_fallback_certificate(
+        &mut self,
+        block_id: Hash,
+        cert: CertificateMessage,
+    ) {
+        self.notarize_fallback_certificates.insert(block_id, cert);
+    }
+
+    pub fn set_skip_certificate(&mut self, cert: CertificateMessage) {
+        self.skip_certificate.replace(cert);
+    }
+}
+
 #[cfg(test)]
 mod test {
     use {

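To make the worst case in the doc comment concrete, a sketch (certificate construction elided; `CertificateMessage` is `Clone` since `SlotCertificates` derives it):

    use {
        solana_hash::Hash,
        solana_ledger::blockstore_meta::SlotCertificates,
        solana_votor_messages::bls_message::CertificateMessage,
    };

    /// Populate the documented worst case: three notarize-fallback
    /// certificates (one per duplicate block id) plus one skip certificate.
    fn worst_case(block_ids: [Hash; 3], cert: CertificateMessage) -> SlotCertificates {
        let mut certs = SlotCertificates::default();
        for block_id in block_ids {
            certs.add_notarization_fallback_certificate(block_id, cert.clone());
        }
        certs.set_skip_certificate(cert);
        certs
    }
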
+ 148 - 3
ledger/src/blockstore_processor.rs

@@ -824,6 +824,15 @@ pub enum BlockstoreProcessorError {
 
     #[error("invalid retransmitter signature final fec set")]
     InvalidRetransmitterSignatureFinalFecSet,
+
+    #[error("invalid notarization certificate in bank {0} for slot {1}")]
+    InvalidNotarizationCertificate(Slot, Slot),
+
+    #[error("invalid skip certificate in bank {0} for slot range {1} - {2}")]
+    InvalidSkipCertificate(Slot, Slot, Slot),
+
+    #[error("non consecutive leader slot for bank {0} parent {1}")]
+    NonConsecutiveLeaderSlot(Slot, Slot),
 }
 
 /// Callback for accessing bank state after each slot is confirmed while
@@ -1056,7 +1065,7 @@ pub fn process_blockstore_from_root(
 /// Verify that a segment of entries has the correct number of ticks and hashes
 fn verify_ticks(
     bank: &Bank,
-    entries: &[Entry],
+    mut entries: &[Entry],
     slot_full: bool,
     tick_hash_count: &mut u64,
 ) -> std::result::Result<(), BlockError> {
@@ -1086,6 +1095,28 @@ fn verify_ticks(
         }
     }
 
+    if let Some(first_alpenglow_slot) = bank
+        .feature_set
+        .activated_slot(&agave_feature_set::secp256k1_program_enabled::id())
+    {
+        if bank.parent_slot() >= first_alpenglow_slot {
+            return Ok(());
+        }
+
+        if bank.slot() >= first_alpenglow_slot && next_bank_tick_height == max_bank_tick_height {
+            if entries.is_empty() {
+                // This shouldn't happen, but good to double check
+                error!("Processing empty entries in verify_ticks()");
+                return Ok(());
+            }
+            // last entry must be a tick, as verified by the `has_trailing_entry`
+            // check above. Because in Alpenglow the last tick does not have any
+            // hashing guarantees, we pass everything but that last tick to the
+            // entry verification.
+            entries = &entries[..entries.len() - 1];
+        }
+    }
+
     let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
     if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
         warn!(
@@ -1786,6 +1817,7 @@ fn process_next_slots(
                     .unwrap(),
                 *next_slot,
             );
+            set_alpenglow_ticks(&next_bank);
             trace!(
                 "New bank for slot {}, parent slot is {}",
                 next_slot,
@@ -1800,6 +1832,59 @@ fn process_next_slots(
     Ok(())
 }
 
+/// Set the alpenglow bank tick height.
+/// For alpenglow banks this tick height is `max_tick_height` - 1.
+/// For a bank on the boundary of feature activation, we need `ticks_per_slot`
+/// ticks for each skipped TowerBFT slot, plus one extra tick for the alpenglow bank
+pub fn set_alpenglow_ticks(bank: &Bank) {
+    let Some(first_alpenglow_slot) = bank
+        .feature_set
+        .activated_slot(&agave_feature_set::secp256k1_program_enabled::id())
+    else {
+        return;
+    };
+
+    let Some(alpenglow_ticks) = calculate_alpenglow_ticks(
+        bank.slot(),
+        first_alpenglow_slot,
+        bank.parent_slot(),
+        bank.ticks_per_slot(),
+    ) else {
+        return;
+    };
+
+    info!(
+        "Setting tick height for slot {} to {}",
+        bank.slot(),
+        bank.max_tick_height() - alpenglow_ticks
+    );
+    bank.set_tick_height(bank.max_tick_height() - alpenglow_ticks);
+}
+
+fn calculate_alpenglow_ticks(
+    slot: Slot,
+    first_alpenglow_slot: Slot,
+    parent_slot: Slot,
+    ticks_per_slot: u64,
+) -> Option<u64> {
+    // Slots before alpenglow shouldn't have alpenglow ticks
+    if slot < first_alpenglow_slot {
+        return None;
+    }
+
+    let alpenglow_ticks = if parent_slot < first_alpenglow_slot && slot >= first_alpenglow_slot {
+        // 1. All slots between the parent and the first alpenglow slot need to
+        // have `ticks_per_slot` ticks
+        // 2. One extra tick for the actual alpenglow slot
+        // 3. There are no ticks for any skipped alpenglow slots
+        (first_alpenglow_slot - parent_slot - 1) * ticks_per_slot + 1
+    } else {
+        1
+    };
+
+    Some(alpenglow_ticks)
+}
+
 /// Starting with the root slot corresponding to `start_slot_meta`, iteratively
 /// find and process children slots from the blockstore.
 ///
@@ -2060,7 +2145,11 @@ fn supermajority_root_from_vote_accounts(
                 return None;
             }
 
-            Some((account.vote_state_view().root_slot()?, *stake))
+            if let Some(vote_state_view) = account.vote_state_view() {
+                Some((vote_state_view.root_slot()?, *stake))
+            } else {
+                None
+            }
         })
         .collect();
 
@@ -2127,7 +2216,7 @@ pub fn process_single_slot(
     }
 
     let block_id = blockstore
-        .check_last_fec_set_and_get_block_id(slot, bank.hash(), &bank.feature_set)
+        .check_last_fec_set_and_get_block_id(slot, bank.hash(), false, &bank.feature_set)
         .inspect_err(|err| {
             warn!("slot {slot} failed last fec set checks: {err}");
             if blockstore.is_primary_access() {
@@ -5352,4 +5441,60 @@ pub mod tests {
         // Adding another None will noop (even though the block is already full)
         assert!(check_block_cost_limits(&bank, &tx_costs[0..1]).is_ok());
     }
+
+    #[test]
+    fn test_calculate_alpenglow_ticks() {
+        let first_alpenglow_slot = 10;
+        let ticks_per_slot = 2;
+
+        // Slots before alpenglow don't have alpenglow ticks
+        let slot = 9;
+        let parent_slot = 8;
+        assert!(
+            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
+                .is_none()
+        );
+
+        // First alpenglow slot should only have 1 tick
+        let slot = first_alpenglow_slot;
+        let parent_slot = first_alpenglow_slot - 1;
+        assert_eq!(
+            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
+                .unwrap(),
+            1
+        );
+
+        // First alpenglow slot with skipped non-alpenglow slots
+        // need to have `ticks_per_slot` ticks per skipped slot and
+        // then one additional tick for the first alpenglow slot
+        let slot = first_alpenglow_slot;
+        let num_skipped_slots = 3;
+        let parent_slot = first_alpenglow_slot - num_skipped_slots - 1;
+        assert_eq!(
+            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
+                .unwrap(),
+            num_skipped_slots * ticks_per_slot + 1
+        );
+
+        // Skipped alpenglow slots don't need any additional ticks
+        let slot = first_alpenglow_slot + 2;
+        let parent_slot = first_alpenglow_slot;
+        assert_eq!(
+            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
+                .unwrap(),
+            1
+        );
+
+        // Skipped alpenglow slots along skipped non-alpenglow slots
+        // need to have `ticks_per_slot` ticks per skipped non-alpenglow
+        // slot only and then one additional tick for the alpenglow slot
+        let slot = first_alpenglow_slot + 2;
+        let num_skipped_non_alpenglow_slots = 4;
+        let parent_slot = first_alpenglow_slot - num_skipped_non_alpenglow_slots - 1;
+        assert_eq!(
+            calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot)
+                .unwrap(),
+            num_skipped_non_alpenglow_slots * ticks_per_slot + 1
+        );
+    }
 }

+ 1 - 0
ledger/src/genesis_utils.rs

@@ -25,5 +25,6 @@ pub fn create_genesis_config_with_mint_keypair(
         mint_lamports,
         &Pubkey::new_unique(),
         bootstrap_validator_stake_lamports(),
+        None,
     )
 }

+ 21 - 0
ledger/src/leader_schedule_utils.rs

@@ -70,6 +70,27 @@ pub fn first_of_consecutive_leader_slots(slot: Slot) -> Slot {
     (slot / NUM_CONSECUTIVE_LEADER_SLOTS) * NUM_CONSECUTIVE_LEADER_SLOTS
 }
 
+/// Returns the last slot in the leader window that contains `slot`
+#[inline]
+pub fn last_of_consecutive_leader_slots(slot: Slot) -> Slot {
+    first_of_consecutive_leader_slots(slot) + NUM_CONSECUTIVE_LEADER_SLOTS - 1
+}
+
+/// Returns the index within the leader slot range that contains `slot`
+#[inline]
+pub fn leader_slot_index(slot: Slot) -> usize {
+    (slot % NUM_CONSECUTIVE_LEADER_SLOTS) as usize
+}
+
+/// Returns the number of slots remaining in the leader window that
+/// contains `slot`, counting `slot` itself
+#[inline]
+pub fn remaining_slots_in_window(slot: Slot) -> u64 {
+    NUM_CONSECUTIVE_LEADER_SLOTS
+        .checked_sub(leader_slot_index(slot) as u64)
+        .unwrap()
+}
+
 #[cfg(test)]
 mod tests {
     use {

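Worked example of the new helpers, assuming `NUM_CONSECUTIVE_LEADER_SLOTS` = 4 (its current value): slot 10 falls in the leader window covering slots 8 through 11.

    use solana_ledger::leader_schedule_utils::{
        first_of_consecutive_leader_slots, last_of_consecutive_leader_slots,
        leader_slot_index, remaining_slots_in_window,
    };

    fn leader_window_example() {
        assert_eq!(first_of_consecutive_leader_slots(10), 8);
        assert_eq!(last_of_consecutive_leader_slots(10), 11);
        assert_eq!(leader_slot_index(10), 2);
        // Counts `slot` itself: slots 10 and 11 remain in the window.
        assert_eq!(remaining_slots_in_window(10), 2);
    }
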
+ 7 - 1
local-cluster/Cargo.toml

@@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 dev-context-only-utils = []
 
 [dependencies]
+bincode = { workspace = true }
 crossbeam-channel = { workspace = true }
 itertools = { workspace = true }
 log = { workspace = true }
@@ -23,10 +24,13 @@ rand = { workspace = true }
 rayon = { workspace = true }
 solana-account = { workspace = true }
 solana-accounts-db = { workspace = true }
+solana-bls-signatures = { workspace = true }
+solana-build-alpenglow-vote = { workspace = true }
 solana-client = { workspace = true }
 solana-client-traits = { workspace = true }
 solana-clock = { workspace = true }
 solana-commitment-config = { workspace = true }
+solana-connection-cache = { workspace = true }
 solana-core = { workspace = true }
 solana-entry = { workspace = true }
 solana-epoch-schedule = { workspace = true }
@@ -59,13 +63,15 @@ solana-system-transaction = { workspace = true }
 solana-thin-client = { workspace = true }
 solana-time-utils = { workspace = true }
 solana-tpu-client = { workspace = true }
-solana-transaction = { workspace = true }
+solana-transaction = { workspace = true, features = ["bincode"] }
 solana-transaction-error = { workspace = true }
 solana-turbine = { workspace = true }
 solana-validator-exit = { workspace = true }
 solana-vote = { workspace = true }
 solana-vote-interface = { workspace = true }
 solana-vote-program = { workspace = true }
+solana-votor = { workspace = true }
+solana-votor-messages = { workspace = true }
 static_assertions = { workspace = true }
 strum = { workspace = true, features = ["derive"] }
 tempfile = { workspace = true }

+ 144 - 17
local-cluster/src/cluster_tests.rs

@@ -35,11 +35,15 @@ use {
     solana_transaction::Transaction,
     solana_transaction_error::TransportError,
     solana_validator_exit::Exit,
-    solana_vote::vote_transaction::{self, VoteTransaction},
+    solana_vote::{
+        vote_parser::ParsedVoteTransaction,
+        vote_transaction::{self},
+    },
     solana_vote_program::vote_state::TowerSync,
+    solana_votor_messages::bls_message::BLSMessage,
     std::{
         collections::{HashMap, HashSet, VecDeque},
-        net::{SocketAddr, TcpListener},
+        net::{SocketAddr, TcpListener, UdpSocket},
         path::Path,
         sync::{
             atomic::{AtomicBool, Ordering},
@@ -384,12 +388,46 @@ pub fn check_for_new_roots(
     connection_cache: &Arc<ConnectionCache>,
     test_name: &str,
 ) {
-    let mut roots = vec![HashSet::new(); contact_infos.len()];
+    check_for_new_commitment_slots(
+        num_new_roots,
+        contact_infos,
+        connection_cache,
+        test_name,
+        CommitmentConfig::finalized(),
+    );
+}
+
+/// For alpenglow, CommitmentConfig::processed() refers to the current voting loop slot,
+/// so this is more accurate for determining that each node is voting when stake distribution is
+/// uneven
+pub fn check_for_new_processed(
+    num_new_processed: usize,
+    contact_infos: &[ContactInfo],
+    connection_cache: &Arc<ConnectionCache>,
+    test_name: &str,
+) {
+    check_for_new_commitment_slots(
+        num_new_processed,
+        contact_infos,
+        connection_cache,
+        test_name,
+        CommitmentConfig::processed(),
+    );
+}
+
+fn check_for_new_commitment_slots(
+    num_new_slots: usize,
+    contact_infos: &[ContactInfo],
+    connection_cache: &Arc<ConnectionCache>,
+    test_name: &str,
+    commitment: CommitmentConfig,
+) {
+    let mut slots = vec![HashSet::new(); contact_infos.len()];
     let mut done = false;
     let mut last_print = Instant::now();
     let loop_start = Instant::now();
     let loop_timeout = Duration::from_secs(180);
-    let mut num_roots_map = HashMap::new();
+    let mut num_slots_map = HashMap::new();
     while !done {
         assert!(loop_start.elapsed() < loop_timeout);
 
@@ -397,16 +435,16 @@ pub fn check_for_new_roots(
             let client = new_tpu_quic_client(ingress_node, connection_cache.clone()).unwrap();
             let root_slot = client
                 .rpc_client()
-                .get_slot_with_commitment(CommitmentConfig::finalized())
+                .get_slot_with_commitment(commitment)
                 .unwrap_or(0);
-            roots[i].insert(root_slot);
-            num_roots_map.insert(*ingress_node.pubkey(), roots[i].len());
-            let num_roots = roots.iter().map(|r| r.len()).min().unwrap();
-            done = num_roots >= num_new_roots;
+            slots[i].insert(root_slot);
+            num_slots_map.insert(*ingress_node.pubkey(), slots[i].len());
+            let num_slots = slots.iter().map(|r| r.len()).min().unwrap();
+            done = num_slots >= num_new_slots;
             if done || last_print.elapsed().as_secs() > 3 {
                 info!(
-                    "{test_name} waiting for {num_new_roots} new roots.. observed: \
-                     {num_roots_map:?}"
+                    "{} waiting for {} new {:?} slots.. observed: {:?}",
+                    test_name, num_new_slots, commitment.commitment, num_slots_map
                 );
                 last_print = Instant::now();
             }
@@ -484,6 +522,90 @@ pub fn check_no_new_roots(
     }
 }
 
+pub fn check_for_new_notarized_votes(
+    num_new_votes: usize,
+    contact_infos: &[ContactInfo],
+    connection_cache: &Arc<ConnectionCache>,
+    test_name: &str,
+    vote_listener: UdpSocket,
+) {
+    let loop_start = Instant::now();
+    let loop_timeout = Duration::from_secs(180);
+    // First get the current max processed slot to use as a baseline.
+    let Some(baseline_slot) = contact_infos
+        .iter()
+        .map(|ingress_node| {
+            let client = new_tpu_quic_client(ingress_node, connection_cache.clone()).unwrap();
+            client
+                .rpc_client()
+                .get_slot_with_commitment(CommitmentConfig::processed())
+                .unwrap_or(0)
+        })
+        .max()
+    else {
+        panic!("No nodes found to get baseline slot");
+    };
+
+    // Clone data for thread
+    let contact_infos_owned: Vec<ContactInfo> = contact_infos.to_vec();
+    let test_name_owned = test_name.to_string();
+
+    // Now start vote listener and wait for new notarized votes.
+    let vote_listener = std::thread::spawn({
+        let mut buf = [0_u8; 65_535];
+        let mut num_new_notarized_votes = vec![0; contact_infos_owned.len()];
+        let mut last_notarized = vec![baseline_slot; contact_infos_owned.len()];
+        let mut last_print = Instant::now();
+        let mut done = false;
+
+        move || {
+            while !done {
+                assert!(loop_start.elapsed() < loop_timeout);
+                let n_bytes = vote_listener
+                    .recv_from(&mut buf)
+                    .expect("Failed to receive vote message")
+                    .0;
+                let bls_message = bincode::deserialize::<BLSMessage>(&buf[0..n_bytes]).unwrap();
+                let BLSMessage::Vote(vote_message) = bls_message else {
+                    continue;
+                };
+                let vote = vote_message.vote;
+                if !vote.is_notarization() {
+                    continue;
+                }
+                let rank = vote_message.rank;
+                if rank >= contact_infos_owned.len() as u16 {
+                    warn!(
+                        "Received vote with rank {} out of range for {} nodes",
+                        rank,
+                        contact_infos_owned.len()
+                    );
+                    continue;
+                }
+                let slot = vote.slot();
+                if slot <= last_notarized[rank as usize] {
+                    continue;
+                }
+                last_notarized[rank as usize] = slot;
+                num_new_notarized_votes[rank as usize] += 1;
+                done = num_new_notarized_votes.iter().all(|&x| x >= num_new_votes);
+                if done || last_print.elapsed().as_secs() > 3 {
+                    info!(
+                        "{} waiting for {} new notarized votes.. observed: {:?}",
+                        test_name_owned, num_new_votes, num_new_notarized_votes
+                    );
+                    last_print = Instant::now();
+                }
+            }
+        }
+    });
+    vote_listener.join().expect("Vote listener thread panicked");
+}
+
 fn poll_all_nodes_for_signature(
     entry_point_info: &ContactInfo,
     cluster_nodes: &[ContactInfo],
@@ -501,6 +623,9 @@ fn poll_all_nodes_for_signature(
     Ok(())
 }
 
+/// Represents a service that monitors the gossip network for votes, processes them according to
+/// provided filters and callbacks, and maintains a connection to the gossip network. Often used as
+/// a "spy" representing a Byzantine node in a cluster.
 pub struct GossipVoter {
     pub gossip_service: GossipService,
     pub tcp_listener: Option<TcpListener>,
@@ -517,16 +642,22 @@ impl GossipVoter {
     }
 }
 
-/// Reads votes from gossip and runs them through `vote_filter` to filter votes that then
-/// get passed to `generate_vote_tx` to create votes that are then pushed into gossip as if
-/// sent by a node with identity `node_keypair`.
+/// Creates and starts a gossip voter service that monitors the gossip network for votes.
+/// This service:
+/// 1. Connects to the gossip network at the specified address using the node's keypair
+/// 2. Waits for a specified number of peers to join before becoming active
+/// 3. Continuously polls for new votes in the network
+/// 4. Filters incoming votes through the provided `vote_filter` function
+/// 5. Processes filtered votes using the `process_vote_tx` callback
+/// 6. Maintains a queue of recent votes and periodically refreshes them
+/// 7. Returns a GossipVoter struct that can be used to control and shut down the service
 pub fn start_gossip_voter(
     gossip_addr: &SocketAddr,
     node_keypair: &Keypair,
-    vote_filter: impl Fn((CrdsValueLabel, Transaction)) -> Option<(VoteTransaction, Transaction)>
+    vote_filter: impl Fn((CrdsValueLabel, Transaction)) -> Option<(ParsedVoteTransaction, Transaction)>
         + std::marker::Send
         + 'static,
-    mut process_vote_tx: impl FnMut(Slot, &Transaction, &VoteTransaction, &ClusterInfo)
+    mut process_vote_tx: impl FnMut(Slot, &Transaction, &ParsedVoteTransaction, &ClusterInfo)
         + std::marker::Send
         + 'static,
     sleep_ms: u64,
@@ -554,7 +685,7 @@ pub fn start_gossip_voter(
     }
 
     let mut latest_voted_slot = 0;
-    let mut refreshable_votes: VecDeque<(Transaction, VoteTransaction)> = VecDeque::new();
+    let mut refreshable_votes: VecDeque<(Transaction, ParsedVoteTransaction)> = VecDeque::new();
     let mut latest_push_attempt = Instant::now();
 
     let t_voter = {

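The UDP listener pattern in `check_for_new_notarized_votes` above is reused by several Alpenglow tests later in this diff: bind a localhost socket, register its address through `VotingServiceOverride::additional_listeners`, then decode each datagram as a `BLSMessage`. A condensed sketch of the receive loop (names as in the code above; a real test breaks out once its exit condition is met):

let mut buf = [0_u8; 65_535];
loop {
    let n_bytes = vote_listener.recv_from(&mut buf).unwrap().0;
    // Certificate messages are ignored here; only individual votes matter.
    let BLSMessage::Vote(vote_message) =
        bincode::deserialize::<BLSMessage>(&buf[0..n_bytes]).unwrap()
    else {
        continue;
    };
    let vote = vote_message.vote;
    if vote.is_notarization() {
        // `rank` identifies the voting validator; in these tests the
        // highest-staked node gets rank 0.
        info!("rank {} notarized slot {}", vote_message.rank, vote.slot());
    }
}
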
+ 148 - 20
local-cluster/src/integration_tests.rs

@@ -19,11 +19,15 @@ use {
     log::*,
     solana_account::AccountSharedData,
     solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs,
-    solana_clock::{self as clock, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
+    solana_clock::{
+        self as clock, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT,
+        NUM_CONSECUTIVE_LEADER_SLOTS,
+    },
     solana_core::{
         consensus::{tower_storage::FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD},
         snapshot_packager_service::SnapshotPackagerService,
         validator::{is_snapshot_config_valid, ValidatorConfig},
+        voting_service::{AlpenglowPortOverride, VotingServiceOverride},
     },
     solana_gossip::gossip_service::discover_validators,
     solana_hash::Hash,
@@ -44,8 +48,9 @@ use {
     solana_turbine::broadcast_stage::BroadcastStageType,
     static_assertions,
     std::{
-        collections::HashSet,
+        collections::{HashMap, HashSet},
         fs, iter,
+        net::SocketAddr,
         num::{NonZeroU64, NonZeroUsize},
         path::{Path, PathBuf},
         sync::{
@@ -61,6 +66,12 @@ use {
 pub const RUST_LOG_FILTER: &str =
     "error,solana_core::replay_stage=warn,solana_local_cluster=info,local_cluster=info";
 
+pub const AG_DEBUG_LOG_FILTER: &str = "error,solana_core::replay_stage=info,\
+        solana_local_cluster=info,local_cluster=info,\
+        solana_core::block_creation_loop=trace,\
+        solana_votor=trace,\
+        solana_votor::vote_history_storage=info,\
+        solana_core::validator=info";
 pub const DEFAULT_NODE_STAKE: u64 = 10 * LAMPORTS_PER_SOL;
 
 pub fn last_vote_in_tower(tower_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> {
@@ -190,7 +201,26 @@ pub fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
     (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
 }
 
-pub fn run_kill_partition_switch_threshold<C>(
+/// Implements a test scenario that creates a network partition by killing validator nodes.
+///
+/// # Arguments
+/// * `stakes_to_kill` - Validators to remove from the network, where each tuple contains:
+///   * First element (usize): The stake weight/size of the validator
+///   * Second element (usize): The number of slots assigned to the validator
+/// * `alive_stakes` - Validators to keep alive, where each tuple contains:
+///   * First element (usize): The stake weight/size of the validator
+///   * Second element (usize): The number of slots assigned to the validator
+/// * `ticks_per_slot` - Optional override for the default ticks per slot
+/// * `partition_context` - Test-specific context object that will be passed to callbacks
+/// * `on_partition_start` - Callback executed when the partition begins
+/// * `on_before_partition_resolved` - Callback executed right before the partition is resolved
+/// * `on_partition_resolved` - Callback executed after the partition is resolved
+///
+/// This function simulates a network partition by killing specified validator nodes,
+/// waiting for a period, resolving the partition, and then verifying the network
+/// can recover and reach consensus. The IS_ALPENGLOW parameter determines whether
+/// to use Alpenglow-specific cluster initialization.
+fn run_kill_partition_switch_threshold_impl<C, const IS_ALPENGLOW: bool>(
     stakes_to_kill: &[(usize, usize)],
     alive_stakes: &[(usize, usize)],
     ticks_per_slot: Option<u64>,
@@ -245,7 +275,7 @@ pub fn run_kill_partition_switch_threshold<C>(
             partition_context,
         );
     };
-    run_cluster_partition(
+    run_cluster_partition::<C>(
         &stake_partitions,
         Some((leader_schedule, validator_keys)),
         partition_context,
@@ -254,18 +284,65 @@ pub fn run_kill_partition_switch_threshold<C>(
         on_partition_resolved,
         ticks_per_slot,
         vec![],
+        IS_ALPENGLOW,
+    )
+}
+
+pub fn run_kill_partition_switch_threshold_alpenglow<C>(
+    stakes_to_kill: &[(usize, usize)],
+    alive_stakes: &[(usize, usize)],
+    ticks_per_slot: Option<u64>,
+    partition_context: C,
+    on_partition_start: impl Fn(&mut LocalCluster, &[Pubkey], Vec<ClusterValidatorInfo>, &mut C),
+    on_before_partition_resolved: impl Fn(&mut LocalCluster, &mut C),
+    on_partition_resolved: impl Fn(&mut LocalCluster, &mut C),
+) {
+    run_kill_partition_switch_threshold_impl::<C, true>(
+        stakes_to_kill,
+        alive_stakes,
+        ticks_per_slot,
+        partition_context,
+        on_partition_start,
+        on_before_partition_resolved,
+        on_partition_resolved,
+    )
+}
+
+pub fn run_kill_partition_switch_threshold<C>(
+    stakes_to_kill: &[(usize, usize)],
+    alive_stakes: &[(usize, usize)],
+    ticks_per_slot: Option<u64>,
+    partition_context: C,
+    on_partition_start: impl Fn(&mut LocalCluster, &[Pubkey], Vec<ClusterValidatorInfo>, &mut C),
+    on_before_partition_resolved: impl Fn(&mut LocalCluster, &mut C),
+    on_partition_resolved: impl Fn(&mut LocalCluster, &mut C),
+) {
+    run_kill_partition_switch_threshold_impl::<C, false>(
+        stakes_to_kill,
+        alive_stakes,
+        ticks_per_slot,
+        partition_context,
+        on_partition_start,
+        on_before_partition_resolved,
+        on_partition_resolved,
     )
 }
 
 pub fn create_custom_leader_schedule(
     validator_key_to_slots: impl Iterator<Item = (Pubkey, usize)>,
 ) -> LeaderSchedule {
-    let mut leader_schedule = vec![];
-    for (k, num_slots) in validator_key_to_slots {
-        for _ in 0..num_slots {
-            leader_schedule.push(k)
-        }
-    }
+    let leader_schedule: Vec<_> = validator_key_to_slots
+        .flat_map(|(pubkey, num_slots)| {
+            // Ensure that the number of slots is a multiple of NUM_CONSECUTIVE_LEADER_SLOTS,
+            // because we only check leadership every NUM_CONSECUTIVE_LEADER_SLOTS slots. For
+            // example, with [(pubkey_A, 70), (pubkey_B, 30)], A will happily produce blocks 70
+            // and 71 because it is the leader for block 68, but when B receives those shreds it
+            // checks leadership for blocks 70 and 71, sees that it is itself the leader, and
+            // ignores the shreds from A.
+            assert!(num_slots % (NUM_CONSECUTIVE_LEADER_SLOTS as usize) == 0);
+            std::iter::repeat_n(pubkey, num_slots)
+        })
+        .collect();
 
     info!("leader_schedule: {}", leader_schedule.len());
     Box::new(IdentityKeyedLeaderSchedule::new_from_schedule(
@@ -288,14 +365,35 @@ pub fn create_custom_leader_schedule_with_random_keys(
     (leader_schedule, validator_keys)
 }
 
-/// This function runs a network, initiates a partition based on a
-/// configuration, resolve the partition, then checks that the network
-/// continues to achieve consensus
+/// Simulates a network partition test scenario by creating a cluster, triggering a partition,
+/// allowing the partition to heal, and then verifying the network's ability to recover and
+/// achieve consensus after the partition is resolved.
+///
+/// This function:
+/// 1. Creates a local cluster with nodes configured according to the provided stakes
+/// 2. Induces a network partition by disabling communication between validators
+/// 3. Runs the partition for a predetermined duration
+/// 4. Resolves the partition by re-enabling communication
+/// 5. Verifies the network can recover and continue to make progress
+///
 /// # Arguments
-/// * `partitions` - A slice of partition configurations, where each partition
-///   configuration is a usize representing a node's stake
-/// * `leader_schedule` - An option that specifies whether the cluster should
-///   run with a fixed, predetermined leader schedule
+/// * `partitions` - A slice of partition configurations, where each usize represents a validator's
+///   stake weight. This determines the relative voting power of each node in the network.
+/// * `leader_schedule` - An option that specifies whether the cluster should run with a fixed,
+///   predetermined leader schedule. If provided, the partition will last for one complete
+///   iteration of the leader schedule.
+/// * `context` - A user-defined context object that is passed to the callback functions.
+/// * `on_partition_start` - Callback function that runs when the partition begins. Can be used
+///   to perform custom actions or checks at the start of the partition.
+/// * `on_before_partition_resolved` - Callback function that runs just before the partition
+///   is resolved. Can be used to verify partition state or prepare for resolution.
+/// * `on_partition_resolved` - Callback function that runs after the partition is resolved and
+///   the network has had time to recover. Can be used to verify recovery.
+/// * `ticks_per_slot` - Optional override for the default ticks per slot. Controls the
+///   rate at which slots advance in the cluster.
+/// * `additional_accounts` - Additional accounts to be added to the genesis configuration.
+/// * `is_alpenglow` - Boolean flag indicating whether to initialize the `LocalCluster` in Alpenglow
+///   mode.
 #[allow(clippy::cognitive_complexity)]
 pub fn run_cluster_partition<C>(
     partitions: &[usize],
@@ -306,6 +404,7 @@ pub fn run_cluster_partition<C>(
     on_partition_resolved: impl FnOnce(&mut LocalCluster, &mut C),
     ticks_per_slot: Option<u64>,
     additional_accounts: Vec<(Pubkey, AccountSharedData)>,
+    is_alpenglow: bool,
 ) {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     info!("PARTITION_TEST!");
@@ -346,10 +445,21 @@ pub fn run_cluster_partition<C>(
     };
 
     let slots_per_epoch = 2048;
+    let alpenglow_port_override = AlpenglowPortOverride::default();
+    let validator_configs = make_identical_validator_configs(&validator_config, num_nodes)
+        .into_iter()
+        .map(|mut config| {
+            config.voting_service_test_override = Some(VotingServiceOverride {
+                additional_listeners: vec![],
+                alpenglow_port_override: alpenglow_port_override.clone(),
+            });
+            config
+        })
+        .collect();
     let mut config = ClusterConfig {
         mint_lamports,
         node_stakes,
-        validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
+        validator_configs,
         validator_keys: Some(
             validator_keys
                 .into_iter()
@@ -369,7 +479,12 @@ pub fn run_cluster_partition<C>(
         "PARTITION_TEST starting cluster with {:?} partitions slots_per_epoch: {}",
         partitions, config.slots_per_epoch,
     );
-    let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
+
+    let mut cluster = if is_alpenglow {
+        LocalCluster::new_alpenglow(&mut config, SocketAddrSpace::Unspecified)
+    } else {
+        LocalCluster::new(&mut config, SocketAddrSpace::Unspecified)
+    };
 
     info!("PARTITION_TEST spend_and_verify_all_nodes(), ensure all nodes are caught up");
     cluster_tests::spend_and_verify_all_nodes(
@@ -400,12 +515,25 @@ pub fn run_cluster_partition<C>(
     info!("PARTITION_TEST start partition");
     on_partition_start(&mut cluster, &mut context);
     turbine_disabled.store(true, Ordering::Relaxed);
-
+    // Make the all-to-all votes/certs unable to reach each other by pointing the
+    // alpenglow port override at a SocketAddr that no one is listening on.
+    let blackhole_addr: SocketAddr = solana_net_utils::bind_to_localhost()
+        .unwrap()
+        .local_addr()
+        .unwrap();
+    let new_override = HashMap::from_iter(
+        cluster_nodes
+            .iter()
+            .map(|node| (*node.pubkey(), blackhole_addr)),
+    );
+    alpenglow_port_override.update_override(new_override);
     sleep(partition_duration);
 
     on_before_partition_resolved(&mut cluster, &mut context);
     info!("PARTITION_TEST remove partition");
     turbine_disabled.store(false, Ordering::Relaxed);
+    // Restore the alpenglow port override to the default, so that the nodes can communicate again.
+    alpenglow_port_override.clear();
 
     // Give partitions time to propagate their blocks from during the partition
     // after the partition resolves

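Note the two-layer isolation here: disabling turbine cuts off shreds, but under Alpenglow votes and certificates travel over their own all-to-all port, so the partition also has to blackhole that path. A minimal sketch of the override cycle used above (the bind-then-drop yields an address that is valid but unserved):

// One shared handle, cloned into every validator config before cluster start.
let alpenglow_port_override = AlpenglowPortOverride::default();

// Bind and immediately drop a localhost socket: the address is real, but
// nothing listens on it, so traffic sent there silently disappears.
let blackhole_addr: SocketAddr = solana_net_utils::bind_to_localhost()
    .unwrap()
    .local_addr()
    .unwrap();

// Partition: route every node's vote/cert traffic into the blackhole...
alpenglow_port_override.update_override(
    cluster_nodes
        .iter()
        .map(|node| (*node.pubkey(), blackhole_addr))
        .collect(),
);

// ...then heal: clearing the override restores normal delivery.
alpenglow_port_override.clear();
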
+ 62 - 6
local-cluster/src/local_cluster.rs

@@ -56,11 +56,12 @@ use {
         vote_instruction,
         vote_state::{self, VoteInit},
     },
+    solana_votor::vote_history_storage::FileVoteHistoryStorage,
     std::{
         collections::HashMap,
         io::{Error, Result},
         iter,
-        net::{IpAddr, Ipv4Addr, SocketAddr},
+        net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
         path::{Path, PathBuf},
         sync::{Arc, RwLock},
         time::Duration,
@@ -183,6 +184,8 @@ impl LocalCluster {
                 .0,
         ];
         config.tower_storage = Arc::new(FileTowerStorage::new(ledger_path.to_path_buf()));
+        config.vote_history_storage =
+            Arc::new(FileVoteHistoryStorage::new(ledger_path.to_path_buf()));
 
         let snapshot_config = &mut config.snapshot_config;
         let dummy: PathBuf = DUMMY_SNAPSHOT_CONFIG_PATH_MARKER.into();
@@ -195,6 +198,22 @@ impl LocalCluster {
     }
 
     pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self {
+        Self::init(config, socket_addr_space, None)
+    }
+
+    pub fn new_alpenglow(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self {
+        Self::init(
+            config,
+            socket_addr_space,
+            Some(build_alpenglow_vote::ALPENGLOW_VOTE_SO_PATH),
+        )
+    }
+
+    pub fn init(
+        config: &mut ClusterConfig,
+        socket_addr_space: SocketAddrSpace,
+        alpenglow_so_path: Option<&str>,
+    ) -> Self {
         assert_eq!(config.validator_configs.len(), config.node_stakes.len());
 
         let quic_connection_cache_config = config.tpu_use_quic.then(|| {
@@ -270,11 +289,11 @@ impl LocalCluster {
                     );
                     if *in_genesis {
                         Some((
-                            ValidatorVoteKeypairs {
-                                node_keypair: node_keypair.insecure_clone(),
-                                vote_keypair: vote_keypair.insecure_clone(),
-                                stake_keypair: Keypair::new(),
-                            },
+                            ValidatorVoteKeypairs::new(
+                                node_keypair.insecure_clone(),
+                                vote_keypair.insecure_clone(),
+                                Keypair::new(),
+                            ),
                             stake,
                         ))
                     } else {
@@ -304,6 +323,7 @@ impl LocalCluster {
             &keys_in_genesis,
             stakes_in_genesis,
             config.cluster_type,
+            alpenglow_so_path,
         );
         genesis_config.accounts.extend(
             config
@@ -665,6 +685,23 @@ impl LocalCluster {
         info!("{test_name} done waiting for roots");
     }
 
+    pub fn check_for_new_processed(
+        &self,
+        num_new_processed: usize,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+    ) {
+        let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
+        info!("{} looking for new processed slots on all nodes", test_name);
+        cluster_tests::check_for_new_processed(
+            num_new_processed,
+            &alive_node_contact_infos,
+            &self.connection_cache,
+            test_name,
+        );
+        info!("{} done waiting for processed slots", test_name);
+    }
+
     pub fn check_no_new_roots(
         &self,
         num_slots_to_wait: usize,
@@ -728,6 +765,25 @@ impl LocalCluster {
         }
     }
 
+    pub fn check_for_new_notarized_votes(
+        &self,
+        num_new_notarized_votes: usize,
+        test_name: &str,
+        socket_addr_space: SocketAddrSpace,
+        vote_listener_addr: UdpSocket,
+    ) {
+        let alive_node_contact_infos = self.discover_nodes(socket_addr_space, test_name);
+        info!("{} looking for new notarized votes on all nodes", test_name);
+        cluster_tests::check_for_new_notarized_votes(
+            num_new_notarized_votes,
+            &alive_node_contact_infos,
+            &self.connection_cache,
+            test_name,
+            vote_listener_addr,
+        );
+        info!("{} done waiting for notarized votes", test_name);
+    }
+
     /// Attempt to send and confirm tx "attempts" times
     /// Wait for signature confirmation before returning
     /// Return the transaction signature

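Together with `new_alpenglow`, the two new checks give Alpenglow tests a compact liveness recipe; a hedged sketch of the intended flow (config trimmed to the relevant fields, mirroring the tests later in this diff):

// Assumes each ValidatorConfig carries a VotingServiceOverride whose
// `additional_listeners` includes `vote_listener.local_addr()`, as in
// test_alpenglow_imbalanced_stakes_catchup below; otherwise no BLS votes
// ever reach the socket.
let vote_listener = solana_net_utils::bind_to_localhost().unwrap();
let mut config = ClusterConfig { /* stakes, keys, overridden configs */ ..ClusterConfig::default() };
let cluster = LocalCluster::new_alpenglow(&mut config, SocketAddrSpace::Unspecified);

// Processed slots track the voting loop, so this confirms every node votes...
cluster.check_for_new_processed(8, "demo", SocketAddrSpace::Unspecified);
// ...and this confirms every node's notarization votes are being observed.
cluster.check_for_new_notarized_votes(16, "demo", SocketAddrSpace::Unspecified, vote_listener);
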
+ 5 - 5
local-cluster/src/validator_configs.rs

@@ -34,6 +34,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
         run_verification: config.run_verification,
         require_tower: config.require_tower,
         tower_storage: config.tower_storage.clone(),
+        vote_history_storage: config.vote_history_storage.clone(),
         debug_keys: config.debug_keys.clone(),
         contact_debug_interval: config.contact_debug_interval,
         contact_save_interval: config.contact_save_interval,
@@ -79,6 +80,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
         delay_leader_block_for_pending_fork: config.delay_leader_block_for_pending_fork,
         use_tpu_client_next: config.use_tpu_client_next,
         retransmit_xdp: config.retransmit_xdp.clone(),
+        voting_service_test_override: config.voting_service_test_override.clone(),
         repair_handler_type: config.repair_handler_type.clone(),
     }
 }
@@ -87,9 +89,7 @@ pub fn make_identical_validator_configs(
     config: &ValidatorConfig,
     num: usize,
 ) -> Vec<ValidatorConfig> {
-    let mut configs = vec![];
-    for _ in 0..num {
-        configs.push(safe_clone_config(config));
-    }
-    configs
+    std::iter::repeat_with(|| safe_clone_config(config))
+        .take(num)
+        .collect()
 }

+ 1418 - 29
local-cluster/tests/local_cluster.rs

@@ -11,11 +11,15 @@ use {
     solana_accounts_db::{
         hardened_unpack::open_genesis_config, utils::create_accounts_run_and_snapshot_dirs,
     },
+    solana_bls_signatures::{keypair::Keypair as BLSKeypair, Signature as BLSSignature},
+    solana_client::connection_cache::ConnectionCache,
     solana_client_traits::AsyncClient,
     solana_clock::{
         self as clock, Slot, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE,
+        NUM_CONSECUTIVE_LEADER_SLOTS,
     },
     solana_commitment_config::CommitmentConfig,
+    solana_connection_cache::client_connection::ClientConnection,
     solana_core::{
         consensus::{
             tower_storage::FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH,
@@ -23,6 +27,7 @@ use {
         optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
         replay_stage::DUPLICATE_THRESHOLD,
         validator::{BlockVerificationMethod, ValidatorConfig},
+        voting_service::{AlpenglowPortOverride, VotingServiceOverride},
     },
     solana_download_utils::download_snapshot_archive,
     solana_entry::entry::create_ticks,
@@ -31,7 +36,7 @@ use {
     solana_gossip::{crds_data::MAX_VOTES, gossip_service::discover_validators},
     solana_hard_forks::HardForks,
     solana_hash::Hash,
-    solana_keypair::Keypair,
+    solana_keypair::{keypair_from_seed, Keypair},
     solana_ledger::{
         ancestor_iterator::AncestorIterator,
         bank_forks_utils,
@@ -52,7 +57,7 @@ use {
             run_cluster_partition, run_kill_partition_switch_threshold, save_tower,
             setup_snapshot_validator_config, test_faulty_node, wait_for_duplicate_proof,
             wait_for_last_vote_in_tower_to_land_in_ledger, SnapshotValidatorConfig,
-            ValidatorTestConfig, DEFAULT_NODE_STAKE, RUST_LOG_FILTER,
+            ValidatorTestConfig, AG_DEBUG_LOG_FILTER, DEFAULT_NODE_STAKE, RUST_LOG_FILTER,
         },
         local_cluster::{ClusterConfig, LocalCluster, DEFAULT_MINT_LAMPORTS},
         validator_configs::*,
@@ -85,9 +90,16 @@ use {
         broadcast_duplicates_run::{BroadcastDuplicatesConfig, ClusterPartition},
         BroadcastStageType,
     },
-    solana_vote::{vote_parser, vote_transaction},
+    solana_vote::{
+        vote_parser::{self},
+        vote_transaction,
+    },
     solana_vote_interface::state::TowerSync,
     solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
+    solana_votor_messages::{
+        bls_message::{BLSMessage, CertificateType, VoteMessage, BLS_KEYPAIR_DERIVE_SEED},
+        vote::Vote,
+    },
     std::{
         collections::{BTreeSet, HashMap, HashSet},
         fs,
@@ -139,6 +151,87 @@ fn test_local_cluster_start_and_exit_with_config() {
     assert_eq!(cluster.validators.len(), NUM_NODES);
 }
 
+fn test_alpenglow_nodes_basic(num_nodes: usize, num_offline_nodes: usize) {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+    let validator_keys = (0..num_nodes)
+        .map(|i| (Arc::new(keypair_from_seed(&[i as u8; 32]).unwrap()), true))
+        .collect::<Vec<_>>();
+
+    let mut config = ClusterConfig {
+        validator_configs: make_identical_validator_configs(
+            &ValidatorConfig::default_for_test(),
+            num_nodes,
+        ),
+        validator_keys: Some(validator_keys.clone()),
+        node_stakes: vec![DEFAULT_NODE_STAKE; num_nodes],
+        ticks_per_slot: 8,
+        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2,
+        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2,
+        poh_config: PohConfig {
+            target_tick_duration: PohConfig::default().target_tick_duration,
+            hashes_per_tick: Some(clock::DEFAULT_HASHES_PER_TICK),
+            target_tick_count: None,
+        },
+        ..ClusterConfig::default()
+    };
+    let mut cluster = LocalCluster::new_alpenglow(&mut config, SocketAddrSpace::Unspecified);
+    assert_eq!(cluster.validators.len(), num_nodes);
+
+    // Check transactions land
+    cluster_tests::spend_and_verify_all_nodes(
+        &cluster.entry_point_info,
+        &cluster.funding_keypair,
+        num_nodes,
+        HashSet::new(),
+        SocketAddrSpace::Unspecified,
+        &cluster.connection_cache,
+    );
+
+    if num_offline_nodes > 0 {
+        // Bring nodes offline
+        info!("Shutting down {num_offline_nodes} nodes");
+        for (key, _) in validator_keys.iter().take(num_offline_nodes) {
+            cluster.exit_node(&key.pubkey());
+        }
+    }
+
+    // Check for new roots
+    cluster.check_for_new_roots(
+        16,
+        &format!("test_{}_nodes_alpenglow", num_nodes),
+        SocketAddrSpace::Unspecified,
+    );
+}
+
+#[test]
+#[serial]
+fn test_1_node_alpenglow() {
+    const NUM_NODES: usize = 1;
+    test_alpenglow_nodes_basic(NUM_NODES, 0);
+}
+
+#[test]
+#[serial]
+fn test_2_nodes_alpenglow() {
+    const NUM_NODES: usize = 2;
+    test_alpenglow_nodes_basic(NUM_NODES, 0);
+}
+
+#[test]
+#[serial]
+fn test_4_nodes_alpenglow() {
+    const NUM_NODES: usize = 4;
+    test_alpenglow_nodes_basic(NUM_NODES, 0);
+}
+
+#[test]
+#[serial]
+fn test_4_nodes_with_1_offline_alpenglow() {
+    const NUM_NODES: usize = 4;
+    const NUM_OFFLINE: usize = 1;
+    test_alpenglow_nodes_basic(NUM_NODES, NUM_OFFLINE);
+}
+
 #[test]
 #[serial]
 fn test_spend_and_verify_all_nodes_1() {
@@ -1309,6 +1402,7 @@ fn test_snapshot_restart_tower() {
 
 #[test]
 #[serial]
+#[ignore]
 fn test_snapshots_blockstore_floor() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     // First set up the cluster with 1 snapshotting leader
@@ -1551,6 +1645,7 @@ fn test_fake_shreds_broadcast_leader() {
 
 #[test]
 #[serial]
+#[ignore]
 fn test_wait_for_max_stake() {
     solana_logger::setup_with_default(RUST_LOG_FILTER);
     let validator_config = ValidatorConfig::default_for_test();
@@ -2638,10 +2733,16 @@ fn test_restart_tower_rollback() {
 #[test]
 #[serial]
 fn test_run_test_load_program_accounts_partition_root() {
-    run_test_load_program_accounts_partition(CommitmentConfig::finalized());
+    run_test_load_program_accounts_partition(CommitmentConfig::finalized(), false);
+}
+
+#[test]
+#[serial]
+fn test_alpenglow_run_test_load_program_accounts_partition_root() {
+    run_test_load_program_accounts_partition(CommitmentConfig::finalized(), true);
 }
 
-fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
+fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig, is_alpenglow: bool) {
     let num_slots_per_validator = 8;
     let partitions: [usize; 2] = [1, 1];
     let (leader_schedule, validator_keys) = create_custom_leader_schedule_with_random_keys(&[
@@ -2676,7 +2777,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
 
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
         cluster.check_for_new_roots(
-            20,
+            16,
             "run_test_load_program_accounts_partition",
             SocketAddrSpace::Unspecified,
         );
@@ -2694,6 +2795,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
         on_partition_resolved,
         None,
         additional_accounts,
+        is_alpenglow,
     );
 }
 
@@ -2832,10 +2934,12 @@ fn test_oc_bad_signatures() {
         |(_label, leader_vote_tx)| {
             let vote = vote_parser::parse_vote_transaction(&leader_vote_tx)
                 .map(|(_, vote, ..)| vote)
+                .unwrap()
+                .as_tower_transaction()
                 .unwrap();
             // Filter out empty votes
             if !vote.is_empty() {
-                Some((vote, leader_vote_tx))
+                Some((vote.into(), leader_vote_tx))
             } else {
                 None
             }
@@ -2845,7 +2949,8 @@ fn test_oc_bad_signatures() {
             let vote_keypair = vote_keypair.insecure_clone();
             let num_votes_simulated = num_votes_simulated.clone();
             move |vote_slot, leader_vote_tx, parsed_vote, _cluster_info| {
-                info!("received vote for {vote_slot}");
+                info!("received vote for {}", vote_slot);
+                let parsed_vote = parsed_vote.as_tower_transaction_ref().unwrap();
                 let vote_hash = parsed_vote.hash();
                 info!("Simulating vote from our node on slot {vote_slot}, hash {vote_hash}");
 
@@ -3296,7 +3401,7 @@ fn do_test_lockout_violation_with_or_without_tower(with_tower: bool) {
     let validator_to_slots = vec![
         (
             validator_b_pubkey,
-            validator_b_last_leader_slot as usize + 1,
+            (validator_b_last_leader_slot + NUM_CONSECUTIVE_LEADER_SLOTS) as usize,
         ),
         (validator_c_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize),
     ];
@@ -3832,11 +3937,14 @@ fn test_kill_heaviest_partition() {
         on_partition_resolved,
         None,
         vec![],
+        // TODO: make Alpenglow equivalent when skips are available
+        false,
     )
 }
 
 #[test]
 #[serial]
+#[ignore]
 fn test_kill_partition_switch_threshold_no_progress() {
     let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD;
     let total_stake = 10_000 * DEFAULT_NODE_STAKE;
@@ -3871,6 +3979,7 @@ fn test_kill_partition_switch_threshold_no_progress() {
 
 #[test]
 #[serial]
+#[ignore]
 fn test_kill_partition_switch_threshold_progress() {
     let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD;
     let total_stake = 10_000 * DEFAULT_NODE_STAKE;
@@ -4023,10 +4132,12 @@ fn run_duplicate_shreds_broadcast_leader(vote_on_duplicate: bool) {
             if label.pubkey() == bad_leader_id {
                 let vote = vote_parser::parse_vote_transaction(&leader_vote_tx)
                     .map(|(_, vote, ..)| vote)
+                    .unwrap()
+                    .as_tower_transaction()
                     .unwrap();
                 // Filter out empty votes
                 if !vote.is_empty() {
-                    Some((vote, leader_vote_tx))
+                    Some((vote.into(), leader_vote_tx))
                 } else {
                     None
                 }
@@ -4049,6 +4160,7 @@ fn run_duplicate_shreds_broadcast_leader(vote_on_duplicate: bool) {
                 for slot in duplicate_slot_receiver.try_iter() {
                     duplicate_slots.push(slot);
                 }
+                let parsed_vote = parsed_vote.as_tower_transaction_ref().unwrap();
                 let vote_hash = parsed_vote.hash();
                 if vote_on_duplicate || !duplicate_slots.contains(&latest_vote_slot) {
                     info!(
@@ -4400,31 +4512,35 @@ fn find_latest_replayed_slot_from_ledger(
 #[test]
 #[serial]
 fn test_cluster_partition_1_1() {
-    let empty = |_: &mut LocalCluster, _: &mut ()| {};
-    let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
-        cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
-    };
-    run_cluster_partition(
-        &[1, 1],
-        None,
-        (),
-        empty,
-        empty,
-        on_partition_resolved,
-        None,
-        vec![],
-    )
+    run_test_cluster_partition(2, false);
+}
+
+#[test]
+#[serial]
+fn test_alpenglow_cluster_partition_1_1() {
+    run_test_cluster_partition(2, true);
 }
 
 #[test]
 #[serial]
 fn test_cluster_partition_1_1_1() {
+    run_test_cluster_partition(3, false);
+}
+
+#[test]
+#[serial]
+fn test_alpenglow_cluster_partition_1_1_1() {
+    run_test_cluster_partition(3, true);
+}
+
+fn run_test_cluster_partition(num_partitions: usize, is_alpenglow: bool) {
     let empty = |_: &mut LocalCluster, _: &mut ()| {};
     let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
         cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
     };
+    let partition_sizes = vec![1; num_partitions];
     run_cluster_partition(
-        &[1, 1, 1],
+        &partition_sizes,
         None,
         (),
         empty,
@@ -4432,6 +4548,7 @@ fn test_cluster_partition_1_1_1() {
         on_partition_resolved,
         None,
         vec![],
+        is_alpenglow,
     )
 }
 
@@ -4735,7 +4852,7 @@ fn test_duplicate_with_pruned_ancestor() {
     let observer_stake = DEFAULT_NODE_STAKE;
 
     let slots_per_epoch = 2048;
-    let fork_slot: u64 = 10;
+    let fork_slot: u64 = 12;
     let fork_length: u64 = 20;
     let majority_fork_buffer = 5;
 
@@ -5504,8 +5621,8 @@ fn test_duplicate_shreds_switch_failure() {
     );
 
     let validator_to_slots = vec![
-        (duplicate_leader_validator_pubkey, 50),
-        (target_switch_fork_validator_pubkey, 5),
+        (duplicate_leader_validator_pubkey, 52),
+        (target_switch_fork_validator_pubkey, 8),
         // The ideal sequence of events for the `duplicate_fork_validator1_pubkey` validator would go:
         // 1. Vote for duplicate block `D`
         // 2. See `D` is duplicate, remove from fork choice and reset to ancestor `A`, potentially generating a fork off that ancestor
@@ -5824,7 +5941,7 @@ fn test_invalid_forks_persisted_on_restart() {
     let (target_pubkey, majority_pubkey) = (validators[0], validators[1]);
     // Need majority validator to make the dup_slot
     let validator_to_slots = vec![
-        (majority_pubkey, dup_slot as usize + 5),
+        (majority_pubkey, dup_slot as usize + 6),
         (target_pubkey, DEFAULT_SLOTS_PER_EPOCH as usize),
     ];
     let leader_schedule = create_custom_leader_schedule(validator_to_slots.into_iter());
@@ -5951,3 +6068,1275 @@ fn test_invalid_forks_persisted_on_restart() {
         sleep(Duration::from_millis(100));
     }
 }
+
+#[test]
+#[serial]
+fn test_restart_node_alpenglow() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2;
+    let ticks_per_slot = 16;
+    let validator_config = ValidatorConfig::default_for_test();
+    let mut cluster = LocalCluster::new_alpenglow(
+        &mut ClusterConfig {
+            node_stakes: vec![DEFAULT_NODE_STAKE],
+            validator_configs: vec![safe_clone_config(&validator_config)],
+            ticks_per_slot,
+            slots_per_epoch,
+            stakers_slot_offset: slots_per_epoch,
+            skip_warmup_slots: true,
+            ..ClusterConfig::default()
+        },
+        SocketAddrSpace::Unspecified,
+    );
+    let nodes = cluster.get_node_pubkeys();
+    cluster_tests::sleep_n_epochs(
+        1.0,
+        &cluster.genesis_config.poh_config,
+        clock::DEFAULT_TICKS_PER_SLOT,
+        slots_per_epoch,
+    );
+    info!("Restarting node");
+    cluster.exit_restart_node(&nodes[0], validator_config, SocketAddrSpace::Unspecified);
+    cluster_tests::sleep_n_epochs(
+        0.5,
+        &cluster.genesis_config.poh_config,
+        clock::DEFAULT_TICKS_PER_SLOT,
+        slots_per_epoch,
+    );
+    cluster_tests::send_many_transactions(
+        &cluster.entry_point_info,
+        &cluster.funding_keypair,
+        &cluster.connection_cache,
+        10,
+        1,
+    );
+}
+
+/// We start 2 nodes, where the first node A holds 90% of the stake.
+///
+/// We let A run by itself, and ensure that B can join and rejoin the network
+/// by fast-forwarding its slot on receiving A's finalization certificate.
+#[test]
+#[serial]
+fn test_alpenglow_imbalanced_stakes_catchup() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+    // Create node stakes
+    let slots_per_epoch = 512;
+
+    let total_stake = 2 * DEFAULT_NODE_STAKE;
+    let tenth_stake = total_stake / 10;
+    let node_a_stake = 9 * tenth_stake;
+    let node_b_stake = total_stake - node_a_stake;
+
+    let node_stakes = vec![node_a_stake, node_b_stake];
+    let num_nodes = node_stakes.len();
+
+    // Create a leader schedule with A and B as leaders, with a 72/28 slot split
+    let (leader_schedule, validator_keys) =
+        create_custom_leader_schedule_with_random_keys(&[72, 28]);
+
+    let leader_schedule = FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    };
+
+    // Create our UDP socket to listen to votes
+    let vote_listener_addr = solana_net_utils::bind_to_localhost().unwrap();
+
+    let mut validator_config = ValidatorConfig::default_for_test();
+    validator_config.fixed_leader_schedule = Some(leader_schedule);
+    validator_config.voting_service_test_override = Some(VotingServiceOverride {
+        additional_listeners: vec![vote_listener_addr.local_addr().unwrap()],
+        alpenglow_port_override: AlpenglowPortOverride::default(),
+    });
+
+    // Collect node pubkeys
+    let node_pubkeys = validator_keys
+        .iter()
+        .map(|key| key.pubkey())
+        .collect::<Vec<_>>();
+
+    // Cluster config
+    let mut cluster_config = ClusterConfig {
+        mint_lamports: total_stake,
+        node_stakes,
+        validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
+        validator_keys: Some(
+            validator_keys
+                .iter()
+                .cloned()
+                .zip(iter::repeat_with(|| true))
+                .collect(),
+        ),
+        slots_per_epoch,
+        stakers_slot_offset: slots_per_epoch,
+        ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
+        skip_warmup_slots: true,
+        ..ClusterConfig::default()
+    };
+
+    // Create local cluster
+    let mut cluster =
+        LocalCluster::new_alpenglow(&mut cluster_config, SocketAddrSpace::Unspecified);
+
+    // Ensure all nodes are voting
+    cluster.check_for_new_processed(
+        8,
+        "test_alpenglow_imbalanced_stakes_catchup",
+        SocketAddrSpace::Unspecified,
+    );
+
+    info!("exiting node B");
+    let b_info = cluster.exit_node(&node_pubkeys[1]);
+
+    // Let A make roots by itself
+    cluster.check_for_new_roots(
+        8,
+        "test_alpenglow_imbalanced_stakes_catchup",
+        SocketAddrSpace::Unspecified,
+    );
+
+    info!("restarting node B");
+    cluster.restart_node(&node_pubkeys[1], b_info, SocketAddrSpace::Unspecified);
+
+    // Ensure all nodes are voting
+    cluster.check_for_new_notarized_votes(
+        16,
+        "test_alpenglow_imbalanced_stakes_catchup",
+        SocketAddrSpace::Unspecified,
+        vote_listener_addr,
+    );
+}
+
+fn broadcast_vote(
+    bls_message: BLSMessage,
+    tpu_socket_addrs: &[std::net::SocketAddr],
+    additional_listeners: Option<&Vec<std::net::SocketAddr>>,
+    connection_cache: Arc<ConnectionCache>,
+) {
+    for tpu_socket_addr in tpu_socket_addrs
+        .iter()
+        .chain(additional_listeners.unwrap_or(&vec![]).iter())
+    {
+        let buf = bincode::serialize(&bls_message).unwrap();
+        let client = connection_cache.get_connection(tpu_socket_addr);
+        client.send_data_async(buf).unwrap_or_else(|_| {
+            panic!("Failed to broadcast vote to {}", tpu_socket_addr);
+        });
+    }
+}
+
+fn _vote_to_tuple(vote: &Vote) -> (u64, u8) {
+    let discriminant = if vote.is_notarization() {
+        0
+    } else if vote.is_finalize() {
+        1
+    } else if vote.is_skip() {
+        2
+    } else if vote.is_notarize_fallback() {
+        3
+    } else if vote.is_skip_fallback() {
+        4
+    } else {
+        panic!("Invalid vote type: {:?}", vote)
+    };
+
+    let slot = vote.slot();
+
+    (slot, discriminant)
+}
+
+/// This test validates the Alpenglow consensus protocol's ability to maintain liveness when a node
+/// needs to issue a NotarizeFallback vote. The test sets up a two-node cluster with a specific
+/// stake distribution to create a scenario where:
+///
+/// - Node A has 60% of stake minus a small amount (epsilon)
+/// - Node B has 40% of stake plus a small amount (epsilon)
+///
+/// The test simulates the following sequence:
+/// 1. Node B (as leader) proposes a block for slot 32
+/// 2. Node A is unable to receive the block (simulated via turbine disconnection)
+/// 3. Node A sends Skip votes to both nodes for slot 32
+/// 4. Node B sends Notarize votes to both nodes for slot 32
+/// 5. Node A receives both votes and its certificate pool determines:
+///    - Skip has (60% - epsilon) votes
+///    - Notarize has (40% + epsilon) votes
+///    - Protocol determines it's "SafeToNotar" and issues a NotarizeFallback vote
+/// 6. Node B doesn't issue NotarizeFallback because it already submitted a Notarize
+/// 7. Node B receives Node A's NotarizeFallback vote
+/// 8. Network progresses and maintains liveness after this fallback scenario
+#[test]
+#[serial]
+fn test_alpenglow_ensure_liveness_after_single_notar_fallback() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+    // Configure total stake and stake distribution
+    let total_stake = 2 * DEFAULT_NODE_STAKE;
+    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH;
+
+    let node_a_stake = total_stake * 6 / 10 - 1;
+    let node_b_stake = total_stake * 4 / 10 + 1;
+
+    let node_stakes = vec![node_a_stake, node_b_stake];
+    let num_nodes = node_stakes.len();
+
+    assert_eq!(total_stake, node_a_stake + node_b_stake);
+
+    // Control components
+    let node_a_turbine_disabled = Arc::new(AtomicBool::new(false));
+
+    // Create leader schedule
+    let (leader_schedule, validator_keys) = create_custom_leader_schedule_with_random_keys(&[0, 4]);
+
+    let leader_schedule = FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    };
+
+    // Create our UDP socket to listen to votes
+    let vote_listener = solana_net_utils::bind_to_localhost().unwrap();
+
+    // Create validator configs
+    let mut validator_config = ValidatorConfig::default_for_test();
+    validator_config.fixed_leader_schedule = Some(leader_schedule);
+    validator_config.voting_service_test_override = Some(VotingServiceOverride {
+        additional_listeners: vec![vote_listener.local_addr().unwrap()],
+        alpenglow_port_override: AlpenglowPortOverride::default(),
+    });
+
+    let mut validator_configs = make_identical_validator_configs(&validator_config, num_nodes);
+    validator_configs[0].turbine_disabled = node_a_turbine_disabled.clone();
+
+    assert_eq!(num_nodes, validator_keys.len());
+
+    // Cluster config
+    let mut cluster_config = ClusterConfig {
+        mint_lamports: total_stake,
+        node_stakes,
+        validator_configs,
+        validator_keys: Some(
+            validator_keys
+                .iter()
+                .cloned()
+                .zip(iter::repeat_with(|| true))
+                .collect(),
+        ),
+        slots_per_epoch,
+        stakers_slot_offset: slots_per_epoch,
+        ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
+        ..ClusterConfig::default()
+    };
+
+    // Create local cluster
+    let cluster = LocalCluster::new_alpenglow(&mut cluster_config, SocketAddrSpace::Unspecified);
+
+    assert_eq!(cluster.validators.len(), num_nodes);
+
+    // Track Node A's votes and when the test can conclude
+    let mut post_experiment_votes = HashMap::new();
+    let mut post_experiment_roots = HashSet::new();
+
+    // Start vote listener thread to monitor and control the experiment
+    let vote_listener = std::thread::spawn({
+        let mut buf = [0_u8; 65_535];
+        let mut check_for_roots = false;
+        let mut slots_with_skip = HashSet::new();
+
+        move || loop {
+            let n_bytes = vote_listener.recv(&mut buf).unwrap();
+            let bls_message = bincode::deserialize::<BLSMessage>(&buf[0..n_bytes]).unwrap();
+            let BLSMessage::Vote(vote_message) = bls_message else {
+                continue;
+            };
+            let vote = vote_message.vote;
+
+            // Since A has 60% of the stake, it will be node 0, and B will be node 1
+            let node_index = vote_message.rank;
+
+            // Once we've received a vote from node B at slot 31, we can start the experiment.
+            if vote.slot() == 31 && node_index == 1 {
+                node_a_turbine_disabled.store(true, Ordering::Relaxed);
+            }
+
+            if vote.slot() >= 32 && node_index == 0 {
+                if vote.is_skip() {
+                    slots_with_skip.insert(vote.slot());
+                }
+
+                if !check_for_roots && vote.slot() == 32 && vote.is_notarize_fallback() {
+                    check_for_roots = true;
+                    assert!(slots_with_skip.contains(&32)); // skip on slot 32
+                }
+            }
+
+            // We should see a skip followed by a notar fallback. Once we do, the experiment is
+            // complete.
+            if check_for_roots {
+                node_a_turbine_disabled.store(false, Ordering::Relaxed);
+
+                if vote.is_finalize() {
+                    let value = post_experiment_votes.entry(vote.slot()).or_insert(vec![]);
+
+                    value.push(node_index);
+
+                    if value.len() == 2 {
+                        post_experiment_roots.insert(vote.slot());
+
+                        if post_experiment_roots.len() >= 10 {
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+    });
+
+    vote_listener.join().unwrap();
+}
+
+/// Test to validate the Alpenglow consensus protocol's ability to maintain liveness when a node
+/// needs to issue multiple NotarizeFallback votes due to Byzantine behavior and network partitioning.
+///
+/// This test simulates a complex Byzantine scenario with four nodes having the following stake distribution:
+/// - Node A (Leader): 20% - ε (small epsilon)
+/// - Node B: 40%
+/// - Node C: 20%
+/// - Node D: 20% + ε
+///
+/// The test validates the protocol's behavior through the following phases:
+///
+/// ## Phase 1: Initial Network Partition
+/// - Node C's turbine is disabled at slot 50, causing it to miss blocks and vote Skip
+/// - Node A (leader) proposes blocks normally
+/// - Node B initially copies Node A's votes
+/// - Node D copies Node A's votes
+/// - Node C accumulates 10 NotarizeFallback votes while in this steady state
+///
+/// ## Phase 2: Byzantine Equivocation
+/// After Node C has issued sufficient NotarizeFallback votes, Node A begins equivocating:
+/// - Node A votes for block b1 (original block)
+/// - Node B votes for block b2 (equivocated block with different block_id and bank_hash)
+/// - Node C continues voting Skip but observes conflicting votes
+/// - Node D votes for block b1 (same as Node A)
+///
+/// This creates a voting distribution where:
+/// - b1 has 40% stake (A: 20%-ε + D: 20%+ε)
+/// - b2 has 40% stake (B: 40%)
+/// - Skip has 20% stake (C: 20%)
+///
+/// ## Phase 3: Double NotarizeFallback
+/// Node C, observing the conflicting votes, triggers SafeToNotar for both blocks:
+/// - Issues NotarizeFallback for b1 (A's block)
+/// - Issues NotarizeFallback for b2 (B's equivocated block)
+/// - Verifies the block IDs are different due to equivocation
+/// - Continues this pattern until 3 slots have double NotarizeFallback votes
+///
+/// ## Phase 4: Recovery and Liveness
+/// After confirming the double NotarizeFallback behavior:
+/// - Node A stops equivocating
+/// - Node C's turbine is re-enabled
+/// - Network returns to normal operation
+/// - Test verifies 10+ new roots are created, ensuring liveness is maintained
+///
+/// ## Key Validation Points
+/// - SafeToNotar triggers correctly when conflicting blocks have sufficient stake
+/// - NotarizeFallback votes are issued for both equivocated blocks
+/// - Network maintains liveness despite Byzantine behavior and temporary partitions
+/// - Protocol correctly handles the edge case where multiple blocks have equal stake
+/// - Recovery is possible once Byzantine behavior stops
+///
+/// NOTE: we could get away with just three nodes in this test, assigning A a total of 40% stake,
+/// since node D *always* copy-votes node A. But doing so would technically make every node hold
+/// >= 20% of the stake, meaning that none of them would be allowed to be Byzantine. We opt to be
+/// a bit more explicit in this test.
+#[test]
+#[serial]
+#[ignore]
+fn test_alpenglow_ensure_liveness_after_double_notar_fallback() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+
+    // Configure total stake and stake distribution
+    const TOTAL_STAKE: u64 = 10 * DEFAULT_NODE_STAKE;
+    const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH;
+
+    // Node stakes with slight imbalance to trigger fallback behavior
+    let node_stakes = [
+        TOTAL_STAKE * 2 / 10 - 1, // Node A (Leader): 20% - ε
+        TOTAL_STAKE * 4 / 10,     // Node B: 40%
+        TOTAL_STAKE * 2 / 10,     // Node C: 20%
+        TOTAL_STAKE * 2 / 10 + 1, // Node D: 20% + ε
+    ];
+
+    assert_eq!(TOTAL_STAKE, node_stakes.iter().sum::<u64>());
+
+    // Control components
+    let node_c_turbine_disabled = Arc::new(AtomicBool::new(false));
+
+    // Create leader schedule with Node A as primary leader
+    let (leader_schedule, validator_keys) =
+        create_custom_leader_schedule_with_random_keys(&[4, 0, 0, 0]);
+
+    let leader_schedule = FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    };
+
+    // Create UDP socket to listen to votes
+    let vote_listener_socket = solana_net_utils::bind_to_localhost().unwrap();
+
+    // Create validator configs
+    let mut validator_config = ValidatorConfig::default_for_test();
+    validator_config.fixed_leader_schedule = Some(leader_schedule);
+    validator_config.voting_service_test_override = Some(VotingServiceOverride {
+        additional_listeners: vec![vote_listener_socket.local_addr().unwrap()],
+        alpenglow_port_override: AlpenglowPortOverride::default(),
+    });
+
+    let mut validator_configs =
+        make_identical_validator_configs(&validator_config, node_stakes.len());
+    validator_configs[2].turbine_disabled = node_c_turbine_disabled.clone();
+
+    // Cluster config
+    let mut cluster_config = ClusterConfig {
+        mint_lamports: TOTAL_STAKE,
+        node_stakes: node_stakes.to_vec(),
+        validator_configs,
+        validator_keys: Some(
+            validator_keys
+                .iter()
+                .cloned()
+                .zip(std::iter::repeat(true))
+                .collect(),
+        ),
+        slots_per_epoch: SLOTS_PER_EPOCH,
+        stakers_slot_offset: SLOTS_PER_EPOCH,
+        ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
+        ..ClusterConfig::default()
+    };
+
+    // Create local cluster
+    let mut cluster =
+        LocalCluster::new_alpenglow(&mut cluster_config, SocketAddrSpace::Unspecified);
+
+    // Create mapping from vote pubkeys to node indices
+    let vote_pubkeys: HashMap<_, _> = validator_keys
+        .iter()
+        .enumerate()
+        .filter_map(|(index, keypair)| {
+            cluster
+                .validators
+                .get(&keypair.pubkey())
+                .map(|validator| (validator.info.voting_keypair.pubkey(), index))
+        })
+        .collect();
+
+    assert_eq!(vote_pubkeys.len(), node_stakes.len());
+
+    // Collect node pubkeys and TPU addresses
+    let node_pubkeys: Vec<_> = validator_keys.iter().map(|key| key.pubkey()).collect();
+
+    let tpu_socket_addrs: Vec<_> = node_pubkeys
+        .iter()
+        .map(|pubkey| {
+            cluster
+                .get_contact_info(pubkey)
+                .unwrap()
+                .tpu_vote(cluster.connection_cache.protocol())
+                .unwrap_or_else(|| panic!("Failed to get TPU address for {}", pubkey))
+        })
+        .collect();
+
+    // Exit nodes B and D to control their voting behavior
+    let node_b_info = cluster.exit_node(&validator_keys[1].pubkey());
+    let node_b_vote_keypair = node_b_info.info.voting_keypair.clone();
+
+    let node_d_info = cluster.exit_node(&validator_keys[3].pubkey());
+    let node_d_vote_keypair = node_d_info.info.voting_keypair.clone();
+
+    // Vote listener state
+    #[derive(Debug)]
+    struct VoteListenerState {
+        num_notar_fallback_votes: u32,
+        a_equivocates: bool,
+        notar_fallback_map: HashMap<Slot, Vec<Hash>>,
+        double_notar_fallback_slots: Vec<Slot>,
+        check_for_roots: bool,
+        post_experiment_votes: HashMap<Slot, Vec<u16>>,
+        post_experiment_roots: HashSet<Slot>,
+    }
+
+    impl VoteListenerState {
+        fn new() -> Self {
+            Self {
+                num_notar_fallback_votes: 0,
+                a_equivocates: false,
+                notar_fallback_map: HashMap::new(),
+                double_notar_fallback_slots: Vec::new(),
+                check_for_roots: false,
+                post_experiment_votes: HashMap::new(),
+                post_experiment_roots: HashSet::new(),
+            }
+        }
+
+        fn sign_and_construct_vote_message(
+            &self,
+            vote: Vote,
+            keypair: &Keypair,
+            rank: u16,
+        ) -> BLSMessage {
+            let bls_keypair =
+                BLSKeypair::derive_from_signer(keypair, BLS_KEYPAIR_DERIVE_SEED).unwrap();
+            let signature: BLSSignature = bls_keypair
+                .sign(bincode::serialize(&vote).unwrap().as_slice())
+                .into();
+            BLSMessage::new_vote(vote, signature, rank)
+        }
+
+        fn handle_node_a_vote(
+            &self,
+            vote_message: &VoteMessage,
+            node_b_keypair: &Keypair,
+            node_d_keypair: &Keypair,
+            tpu_socket_addrs: &[std::net::SocketAddr],
+            connection_cache: Arc<ConnectionCache>,
+        ) {
+            // Create vote for Node B (potentially equivocated)
+            let vote = &vote_message.vote;
+            let vote_b = if self.a_equivocates && vote.is_notarization() {
+                let new_block_id = Hash::new_unique();
+                Vote::new_notarization_vote(vote.slot(), new_block_id)
+            } else {
+                *vote
+            };
+
+            broadcast_vote(
+                self.sign_and_construct_vote_message(
+                    vote_b,
+                    node_b_keypair,
+                    1, // Node B's rank is 1
+                ),
+                tpu_socket_addrs,
+                None,
+                connection_cache.clone(),
+            );
+
+            // Create vote for Node D (always copies Node A)
+            broadcast_vote(
+                self.sign_and_construct_vote_message(
+                    *vote,
+                    node_d_keypair,
+                    3, // Node D's rank is 3
+                ),
+                tpu_socket_addrs,
+                None,
+                connection_cache,
+            );
+        }
+
+        fn handle_node_c_vote(
+            &mut self,
+            vote: &Vote,
+            node_c_turbine_disabled: &Arc<AtomicBool>,
+        ) -> bool {
+            let turbine_disabled = node_c_turbine_disabled.load(Ordering::Acquire);
+
+            // Count NotarizeFallback votes while turbine is disabled
+            if turbine_disabled && vote.is_notarize_fallback() {
+                self.num_notar_fallback_votes += 1;
+            }
+
+            // Handle double NotarizeFallback during equivocation
+            if self.a_equivocates && vote.is_notarize_fallback() {
+                let block_id = vote.block_id().copied().unwrap();
+
+                let entry = self.notar_fallback_map.entry(vote.slot()).or_default();
+                entry.push(block_id);
+
+                assert!(
+                    entry.len() <= 2,
+                    "More than 2 NotarizeFallback votes for slot {}",
+                    vote.slot()
+                );
+
+                if entry.len() == 2 {
+                    // Verify equivocation: different block IDs
+                    assert_ne!(
+                        entry[0], entry[1],
+                        "Block IDs should differ due to equivocation"
+                    );
+
+                    self.double_notar_fallback_slots.push(vote.slot());
+
+                    // End experiment after 3 double NotarizeFallback slots
+                    if self.double_notar_fallback_slots.len() == 3 {
+                        info!("Phase 4, checking for 10 roots");
+                        self.a_equivocates = false;
+                        node_c_turbine_disabled.store(false, Ordering::Release);
+                        self.check_for_roots = true;
+                    }
+                }
+            }
+
+            // Start equivocation once NotarizeFallback behavior is stable. The
+            // !a_equivocates guard keeps this from re-triggering (and re-logging)
+            // on subsequent votes while the count sits at exactly 10.
+            if turbine_disabled && self.num_notar_fallback_votes == 10 && !self.a_equivocates {
+                info!("Phase 2, checking for 3 double notarize fallback votes from C");
+                self.a_equivocates = true;
+            }
+
+            // Disable turbine at slot 50 to start the experiment
+            if vote.slot() == 50 {
+                info!("Phase 1, checking for 10 notarize fallback votes from C");
+                node_c_turbine_disabled.store(true, Ordering::Release);
+            }
+
+            false
+        }
+
+        fn handle_finalize_vote(&mut self, vote_message: &VoteMessage) -> bool {
+            if !self.check_for_roots {
+                return false;
+            }
+
+            let slot = vote_message.vote.slot();
+            let slot_votes = self.post_experiment_votes.entry(slot).or_default();
+            slot_votes.push(vote_message.rank);
+
+            // We expect votes from 2 nodes (A and C) since B and D are copy-voting
+            if slot_votes.len() == 2 {
+                self.post_experiment_roots.insert(slot);
+
+                // End test after 10 new roots
+                if self.post_experiment_roots.len() >= 10 {
+                    return true;
+                }
+            }
+
+            false
+        }
+    }
+
+    // Start vote listener thread to monitor and control the experiment
+    let vote_listener_thread = std::thread::spawn({
+        let mut buf = [0u8; 65_535];
+        let mut state = VoteListenerState::new();
+
+        move || {
+            loop {
+                let n_bytes = vote_listener_socket.recv(&mut buf).unwrap();
+                let BLSMessage::Vote(vote_message) =
+                    bincode::deserialize::<BLSMessage>(&buf[0..n_bytes]).unwrap()
+                else {
+                    continue;
+                };
+
+                match vote_message.rank {
+                    0 => {
+                        // Node A: Handle vote broadcasting to B and D
+                        state.handle_node_a_vote(
+                            &vote_message,
+                            &node_b_vote_keypair,
+                            &node_d_vote_keypair,
+                            &tpu_socket_addrs,
+                            cluster.connection_cache.clone(),
+                        );
+                    }
+                    2 => {
+                        // Node C: Handle experiment state transitions
+                        state.handle_node_c_vote(&vote_message.vote, &node_c_turbine_disabled);
+                    }
+                    _ => {}
+                }
+
+                // Check for finalization votes to determine test completion
+                if vote_message.vote.is_finalize() && state.handle_finalize_vote(&vote_message) {
+                    break;
+                }
+            }
+        }
+    });
+
+    vote_listener_thread.join().unwrap();
+}
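The Phase 2 stake split above can be sanity-checked with a few lines of Rust. This is a standalone sketch; the TOTAL constant is an illustrative stand-in for 10 * DEFAULT_NODE_STAKE, not the test's actual value.

    fn main() {
        // Stake split from the test above, with epsilon = 1.
        const TOTAL: u64 = 10_000; // illustrative stand-in
        let a = TOTAL * 2 / 10 - 1; // Node A: 20% - epsilon
        let b = TOTAL * 4 / 10; // Node B: 40%
        let c = TOTAL * 2 / 10; // Node C: 20%
        let d = TOTAL * 2 / 10 + 1; // Node D: 20% + epsilon
        assert_eq!(a + b + c + d, TOTAL);
        // During equivocation, b1 carries A + D and b2 carries B, so both
        // candidate blocks hold exactly 40% of stake while C's Skip holds 20%.
        assert_eq!(a + d, b);
        assert_eq!(c, TOTAL * 2 / 10);
    }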
+
+/// Test to validate Alpenglow's ability to maintain liveness when nodes issue both NotarizeFallback
+/// and SkipFallback votes in an intertwined manner.
+///
+/// This test simulates a consensus scenario with four nodes having specific stake distributions:
+/// - Node A: 40% + epsilon stake
+/// - Node B: 40% - epsilon stake
+/// - Node C: 20% - epsilon stake
+/// - Node D: epsilon stake (minimal, acts as perpetual leader)
+///
+/// The test proceeds through two main stages:
+///
+/// ## Stage 1: Stable Network Operation
+/// All nodes are voting normally for leader D's proposals, with notarization votes going through
+/// successfully and the network maintaining consensus.
+///
+/// ## Stage 2: Network Partition and Fallback Scenario
+/// At slot 20, Node A's turbine is disabled, creating a network partition. This triggers the
+/// following sequence:
+/// 1. Node D (leader) proposes a block b1
+/// 2. Nodes B, C, and D can communicate and vote to notarize b1
+/// 3. Node A is partitioned and cannot receive b1, so it issues a skip vote
+/// 4. The vote distribution creates a complex fallback scenario:
+///    - Nodes B, C, D: Issue notarize votes initially, then skip fallback votes
+///    - Node A: Issues skip vote initially, then notarize fallback vote
+/// 5. This creates the specific vote pattern:
+///    - B, C, D: notarize + skip_fallback
+///    - A: skip + notarize_fallback
+///
+/// The test validates that:
+/// - The network can handle intertwined fallback scenarios
+/// - Consensus is maintained despite complex vote patterns
+/// - The network continues to make progress and create new roots after the partition is resolved
+/// - At least 8 new roots are created post-experiment to ensure sustained liveness
+#[test]
+#[serial]
+fn test_alpenglow_ensure_liveness_after_intertwined_notar_and_skip_fallbacks() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+
+    // Configure stake distribution for the four-node cluster
+    const TOTAL_STAKE: u64 = 10 * DEFAULT_NODE_STAKE;
+    const EPSILON: u64 = 1;
+    const NUM_NODES: usize = 4;
+
+    // Ensure that node stakes are in decreasing order, so node_index can directly be set as
+    // vote_message.rank.
+    let node_stakes = [
+        TOTAL_STAKE * 4 / 10 + EPSILON, // Node A: 40% + epsilon
+        TOTAL_STAKE * 4 / 10 - EPSILON, // Node B: 40% - epsilon
+        TOTAL_STAKE * 2 / 10 - EPSILON, // Node C: 20% - epsilon
+        EPSILON,                        // Node D: epsilon
+    ];
+
+    assert_eq!(NUM_NODES, node_stakes.len());
+
+    // Verify stake distribution adds up correctly
+    assert_eq!(TOTAL_STAKE, node_stakes.iter().sum::<u64>());
+
+    // Control mechanism for network partition
+    let node_a_turbine_disabled = Arc::new(AtomicBool::new(false));
+
+    // Create leader schedule with Node D as perpetual leader
+    let (leader_schedule, validator_keys) =
+        create_custom_leader_schedule_with_random_keys(&[0, 0, 0, 4]);
+
+    let leader_schedule = FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    };
+
+    // Set up vote monitoring
+    let vote_listener_socket =
+        solana_net_utils::bind_to_localhost().expect("Failed to bind vote listener socket");
+
+    // Configure validators
+    let mut validator_config = ValidatorConfig::default_for_test();
+    validator_config.fixed_leader_schedule = Some(leader_schedule);
+    validator_config.voting_service_test_override = Some(VotingServiceOverride {
+        additional_listeners: vec![vote_listener_socket.local_addr().unwrap()],
+        alpenglow_port_override: AlpenglowPortOverride::default(),
+    });
+
+    let mut validator_configs = make_identical_validator_configs(&validator_config, NUM_NODES);
+    // Node A (index 0) will have its turbine disabled during the experiment
+    validator_configs[0].turbine_disabled = node_a_turbine_disabled.clone();
+
+    assert_eq!(NUM_NODES, validator_keys.len());
+
+    // Set up cluster configuration
+    let mut cluster_config = ClusterConfig {
+        mint_lamports: TOTAL_STAKE,
+        node_stakes: node_stakes.to_vec(),
+        validator_configs,
+        validator_keys: Some(
+            validator_keys
+                .iter()
+                .cloned()
+                .zip(std::iter::repeat(true))
+                .collect(),
+        ),
+        ..ClusterConfig::default()
+    };
+
+    // Initialize the cluster
+    let cluster = LocalCluster::new_alpenglow(&mut cluster_config, SocketAddrSpace::Unspecified);
+    assert_eq!(NUM_NODES, cluster.validators.len());
+
+    /// Stages the experiment moves through, used for state transitions and timeouts
+    #[derive(Debug, PartialEq, Eq)]
+    enum Stage {
+        Stability,
+        ObserveSkipFallbacks,
+        ObserveLiveness,
+    }
+
+    impl Stage {
+        fn timeout(&self) -> Duration {
+            match self {
+                Stage::Stability => Duration::from_secs(60),
+                Stage::ObserveSkipFallbacks => Duration::from_secs(120),
+                Stage::ObserveLiveness => Duration::from_secs(180),
+            }
+        }
+
+        fn all() -> Vec<Stage> {
+            vec![
+                Stage::Stability,
+                Stage::ObserveSkipFallbacks,
+                Stage::ObserveLiveness,
+            ]
+        }
+    }
+
+    #[derive(Debug)]
+    struct ExperimentState {
+        stage: Stage,
+        vote_type_bitmap: HashMap<u64, [u8; 4]>, // slot -> [node_vote_pattern; 4]
+        consecutive_pattern_matches: usize,
+        post_experiment_roots: HashSet<u64>,
+    }
+
+    impl ExperimentState {
+        fn new() -> Self {
+            Self {
+                stage: Stage::Stability,
+                vote_type_bitmap: HashMap::new(),
+                consecutive_pattern_matches: 0,
+                post_experiment_roots: HashSet::new(),
+            }
+        }
+
+        fn record_vote_bitmap(&mut self, slot: u64, node_index: usize, vote: &Vote) {
+            let (_, vote_type) = _vote_to_tuple(vote);
+            let slot_pattern = self.vote_type_bitmap.entry(slot).or_insert([0u8; 4]);
+
+            assert!(node_index < NUM_NODES, "Invalid node index: {}", node_index);
+            slot_pattern[node_index] |= 1 << vote_type;
+        }
+
+        fn matches_expected_pattern(&mut self) -> bool {
+            // Expected patterns:
+            // Nodes 1, 2, 3: notarize + skip_fallback = (1 << 0) | (1 << 4) = 17
+            // Node 0: skip + notarize_fallback = (1 << 2) | (1 << 3) = 12
+            const EXPECTED_PATTERN_MAJORITY: u8 = 17; // notarize + skip_fallback
+            const EXPECTED_PATTERN_MINORITY: u8 = 12; // skip + notarize_fallback
+
+            // Recount matching slots from scratch on every call; incrementing a
+            // running counter here would recount slots that already matched on
+            // earlier calls.
+            self.consecutive_pattern_matches = self
+                .vote_type_bitmap
+                .values()
+                .filter(|pattern| {
+                    pattern[0] == EXPECTED_PATTERN_MINORITY
+                        && pattern[1..]
+                            .iter()
+                            .all(|&node| node == EXPECTED_PATTERN_MAJORITY)
+                })
+                .count();
+
+            self.consecutive_pattern_matches >= 3
+        }
+
+        fn record_certificate(&mut self, slot: u64) {
+            self.post_experiment_roots.insert(slot);
+        }
+
+        fn sufficient_roots_created(&self) -> bool {
+            self.post_experiment_roots.len() >= 8
+        }
+    }
+
+    // Start vote monitoring thread
+    let vote_listener_thread = std::thread::spawn({
+        let node_a_turbine_disabled = node_a_turbine_disabled.clone();
+
+        move || {
+            let mut buffer = [0u8; 65_535];
+            let mut experiment_state = ExperimentState::new();
+
+            let timer = std::time::Instant::now();
+
+            loop {
+                let bytes_received = vote_listener_socket
+                    .recv(&mut buffer)
+                    .expect("Failed to receive vote data");
+
+                let bls_message = bincode::deserialize::<BLSMessage>(&buffer[..bytes_received])
+                    .expect("Failed to deserialize BLS message");
+
+                match bls_message {
+                    BLSMessage::Vote(vote_message) => {
+                        let vote = &vote_message.vote;
+                        let node_index = vote_message.rank as usize;
+
+                        // Stage timeouts
+                        let elapsed_time = timer.elapsed();
+
+                        for stage in Stage::all() {
+                            if experiment_state.stage == stage && elapsed_time > stage.timeout() {
+                                panic!(
+                                    "Timeout during {:?}. node_a_turbine_disabled: {:#?}. Latest vote: {:#?}. Experiment state: {:#?}",
+                                    stage,
+                                    node_a_turbine_disabled.load(Ordering::Acquire),
+                                    vote,
+                                    experiment_state
+                                );
+                            }
+                        }
+
+                        // Stage 1: Wait for stability, then introduce partition at slot 20
+                        if vote.slot() == 20 && !node_a_turbine_disabled.load(Ordering::Acquire) {
+                            node_a_turbine_disabled.store(true, Ordering::Release);
+                            experiment_state.stage = Stage::ObserveSkipFallbacks;
+                        }
+
+                        // Stage 2: Monitor for expected fallback vote patterns
+                        if experiment_state.stage == Stage::ObserveSkipFallbacks {
+                            experiment_state.record_vote_bitmap(vote.slot(), node_index, vote);
+
+                            // Check if we've observed the expected pattern for 3 slots
+                            if experiment_state.matches_expected_pattern() {
+                                node_a_turbine_disabled.store(false, Ordering::Release);
+                                experiment_state.stage = Stage::ObserveLiveness;
+                            }
+                        }
+                    }
+                    BLSMessage::Certificate(cert_message) => {
+                        // Stage 3: Verify continued liveness after partition resolution
+                        if experiment_state.stage == Stage::ObserveLiveness
+                            && [CertificateType::Finalize, CertificateType::FinalizeFast]
+                                .contains(&cert_message.certificate.certificate_type())
+                        {
+                            experiment_state.record_certificate(cert_message.certificate.slot());
+
+                            if experiment_state.sufficient_roots_created() {
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    });
+
+    vote_listener_thread
+        .join()
+        .expect("Vote listener thread panicked");
+}
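The expected-pattern constants in the test above pack each node's observed vote types into a one-byte bitmap. A minimal sketch of that arithmetic; the bit indices are assumptions read off the comments in matches_expected_pattern (the authoritative mapping lives in _vote_to_tuple):

    fn main() {
        // Assumed vote-type bit indices: notarize = 0, skip = 2,
        // notarize_fallback = 3, skip_fallback = 4.
        const NOTARIZE: u8 = 0;
        const SKIP: u8 = 2;
        const NOTARIZE_FALLBACK: u8 = 3;
        const SKIP_FALLBACK: u8 = 4;

        // Nodes B, C, D: notarize first, then skip-fallback.
        let majority = (1u8 << NOTARIZE) | (1u8 << SKIP_FALLBACK);
        // Node A: skip first, then notarize-fallback.
        let minority = (1u8 << SKIP) | (1u8 << NOTARIZE_FALLBACK);

        assert_eq!(majority, 17);
        assert_eq!(minority, 12);
    }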
+
+/// Test to validate the Alpenglow consensus protocol's ability to maintain liveness when a node
+/// needs to issue NotarizeFallback votes due to the second fallback condition.
+///
+/// This test simulates a scenario with three nodes having the following stake distribution:
+/// - Node A: 40% - ε (small epsilon)
+/// - Node B (Leader): 30% + ε
+/// - Node C: 30%
+///
+/// The test validates the protocol's behavior through two main phases:
+///
+/// ## Phase 1: Node A Goes Offline (Byzantine + Offline Stake)
+/// - Node A (40% - ε stake) is taken offline, representing combined Byzantine and offline stake
+/// - This leaves Node B (30% + ε) and Node C (30%) as the active validators
+/// - Despite the significant offline stake, the remaining nodes can still achieve consensus
+/// - Network continues to slow-finalize blocks with the remaining 60% + ε stake
+///
+/// ## Phase 2: Network Partition Triggers NotarizeFallback
+/// - Node C's turbine is disabled at slot 20, causing it to miss incoming blocks
+/// - Node B (as leader) proposes blocks and votes Notarize for them
+/// - Node C, unable to receive blocks, votes Skip for the same slots
+/// - This creates a voting scenario where:
+///   - Notarize votes: 30% + ε (Node B only)
+///   - Skip votes: 30% (Node C only)
+///   - Offline: 40% - ε (Node A)
+///
+/// ## NotarizeFallback Condition 2 Trigger
+/// Node C observes that:
+/// - There are insufficient notarization votes for the current block (30% + ε < 40%)
+/// - But the combination of notarize + skip votes represents >= 60% participation while there is
+///   sufficient notarize stake (>= 20%).
+/// - Protocol determines it's "SafeToNotar" under condition 2 and issues NotarizeFallback
+///
+/// ## Phase 3: Recovery and Liveness Verification
+/// After observing 5 NotarizeFallback votes from Node C:
+/// - Node C's turbine is re-enabled to restore normal block reception
+/// - Network returns to normal operation with both active nodes
+/// - Test verifies 8+ new roots are created, ensuring liveness is maintained
+///
+/// ## Key Validation Points
+/// - Protocol handles significant offline stake (40%) gracefully
+/// - NotarizeFallback condition 2 triggers correctly with insufficient notarization
+/// - Network maintains liveness despite temporary partitioning
+/// - Recovery is seamless once partition is resolved
+#[test]
+#[serial]
+fn test_alpenglow_ensure_liveness_after_second_notar_fallback_condition() {
+    solana_logger::setup_with_default(AG_DEBUG_LOG_FILTER);
+
+    // Configure total stake and stake distribution
+    const TOTAL_STAKE: u64 = 10 * DEFAULT_NODE_STAKE;
+    const SLOTS_PER_EPOCH: u64 = MINIMUM_SLOTS_PER_EPOCH;
+
+    // Node stakes designed to trigger NotarizeFallback condition 2
+    let node_stakes = [
+        TOTAL_STAKE * 4 / 10 - 1, // Node A: 40% - ε (will go offline)
+        TOTAL_STAKE * 3 / 10 + 1, // Node B: 30% + ε (leader, stays online)
+        TOTAL_STAKE * 3 / 10,     // Node C: 30% (will be partitioned)
+    ];
+
+    assert_eq!(TOTAL_STAKE, node_stakes.iter().sum::<u64>());
+
+    // Control component for network partition simulation
+    let node_c_turbine_disabled = Arc::new(AtomicBool::new(false));
+
+    // Create leader schedule with Node B as primary leader (Node A will go offline)
+    let (leader_schedule, validator_keys) =
+        create_custom_leader_schedule_with_random_keys(&[0, 4, 0]);
+
+    let leader_schedule = FixedSchedule {
+        leader_schedule: Arc::new(leader_schedule),
+    };
+
+    // Create UDP socket to listen to votes for experiment control
+    let vote_listener_socket = solana_net_utils::bind_to_localhost().unwrap();
+
+    // Create validator configs
+    let mut validator_config = ValidatorConfig::default_for_test();
+    validator_config.fixed_leader_schedule = Some(leader_schedule);
+    validator_config.voting_service_test_override = Some(VotingServiceOverride {
+        additional_listeners: vec![vote_listener_socket.local_addr().unwrap()],
+        alpenglow_port_override: AlpenglowPortOverride::default(),
+    });
+
+    let mut validator_configs =
+        make_identical_validator_configs(&validator_config, node_stakes.len());
+
+    // Node C will have its turbine disabled during the experiment
+    validator_configs[2].turbine_disabled = node_c_turbine_disabled.clone();
+
+    // Cluster configuration
+    let mut cluster_config = ClusterConfig {
+        mint_lamports: TOTAL_STAKE,
+        node_stakes: node_stakes.to_vec(),
+        validator_configs,
+        validator_keys: Some(
+            validator_keys
+                .iter()
+                .cloned()
+                .zip(std::iter::repeat(true))
+                .collect(),
+        ),
+        slots_per_epoch: SLOTS_PER_EPOCH,
+        stakers_slot_offset: SLOTS_PER_EPOCH,
+        ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
+        ..ClusterConfig::default()
+    };
+
+    // Create local cluster
+    let mut cluster =
+        LocalCluster::new_alpenglow(&mut cluster_config, SocketAddrSpace::Unspecified);
+
+    // Create mapping from vote pubkeys to node indices for vote identification
+    let vote_pubkeys: HashMap<_, _> = validator_keys
+        .iter()
+        .enumerate()
+        .filter_map(|(index, keypair)| {
+            cluster
+                .validators
+                .get(&keypair.pubkey())
+                .map(|validator| (validator.info.voting_keypair.pubkey(), index))
+        })
+        .collect();
+
+    assert_eq!(vote_pubkeys.len(), node_stakes.len());
+
+    // Vote listener state management
+    #[derive(Debug, PartialEq, Eq)]
+    enum Stage {
+        WaitForReady,
+        Stability,
+        ObserveNotarFallbacks,
+        ObserveLiveness,
+    }
+
+    impl Stage {
+        fn timeout(&self) -> Duration {
+            match self {
+                Stage::WaitForReady => Duration::from_secs(60),
+                Stage::Stability => Duration::from_secs(60),
+                Stage::ObserveNotarFallbacks => Duration::from_secs(120),
+                Stage::ObserveLiveness => Duration::from_secs(180),
+            }
+        }
+
+        fn all() -> Vec<Stage> {
+            vec![
+                Stage::WaitForReady,
+                Stage::Stability,
+                Stage::ObserveNotarFallbacks,
+                Stage::ObserveLiveness,
+            ]
+        }
+    }
+
+    #[derive(Debug)]
+    struct ExperimentState {
+        stage: Stage,
+        number_of_nodes: usize,
+        initial_notar_votes: HashSet<usize>,
+        notar_fallbacks: HashSet<Slot>,
+        post_experiment_roots: HashSet<Slot>,
+    }
+
+    impl ExperimentState {
+        fn new(number_of_nodes: usize) -> Self {
+            Self {
+                stage: Stage::WaitForReady,
+                number_of_nodes,
+                initial_notar_votes: HashSet::new(),
+                notar_fallbacks: HashSet::new(),
+                post_experiment_roots: HashSet::new(),
+            }
+        }
+
+        fn wait_for_nodes_ready(
+            &mut self,
+            vote: &Vote,
+            node_name: usize,
+            cluster: &mut LocalCluster,
+            node_a_pubkey: &Pubkey,
+        ) {
+            if self.stage != Stage::WaitForReady || !vote.is_notarization() {
+                return;
+            }
+
+            self.initial_notar_votes.insert(node_name);
+
+            // Wait until we have observed a notarization vote from all nodes.
+            if self.initial_notar_votes.len() >= self.number_of_nodes {
+                // Phase 1: Take Node A offline to simulate Byzantine + offline stake
+                // This represents 40% - ε of total stake going offline
+                info!("Phase 1: Exiting Node A. Transitioning to stability phase.");
+                cluster.exit_node(node_a_pubkey);
+                self.stage = Stage::Stability;
+            }
+        }
+
+        fn handle_experiment_start(
+            &mut self,
+            vote: &Vote,
+            node_c_turbine_disabled: &Arc<AtomicBool>,
+        ) {
+            // Phase 2: Start network partition experiment at slot 20
+            if vote.slot() >= 20 && self.stage == Stage::Stability {
+                info!(
+                    "Starting network partition experiment at slot {}",
+                    vote.slot()
+                );
+                node_c_turbine_disabled.store(true, Ordering::Relaxed);
+                self.stage = Stage::ObserveNotarFallbacks;
+            }
+        }
+
+        fn handle_notar_fallback(
+            &mut self,
+            vote: &Vote,
+            node_name: usize,
+            node_c_turbine_disabled: &Arc<AtomicBool>,
+        ) {
+            // Track NotarizeFallback votes from Node C
+            if self.stage == Stage::ObserveNotarFallbacks
+                && node_name == 2
+                && vote.is_notarize_fallback()
+            {
+                self.notar_fallbacks.insert(vote.slot());
+                info!(
+                    "Node C issued NotarizeFallback for slot {}, total fallbacks: {}",
+                    vote.slot(),
+                    self.notar_fallbacks.len()
+                );
+
+                // Phase 3: End partition after observing sufficient NotarizeFallback votes
+                if self.notar_fallbacks.len() >= 5 {
+                    info!("Sufficient NotarizeFallback votes observed, ending partition");
+                    node_c_turbine_disabled.store(false, Ordering::Relaxed);
+                    self.stage = Stage::ObserveLiveness;
+                }
+            }
+        }
+
+        fn record_certificate(&mut self, slot: u64) {
+            self.post_experiment_roots.insert(slot);
+        }
+
+        fn sufficient_roots_created(&self) -> bool {
+            self.post_experiment_roots.len() >= 8
+        }
+    }
+
+    // Start vote listener thread to monitor and control the experiment
+    let vote_listener_thread = std::thread::spawn({
+        let mut buf = [0u8; 65_535];
+        let node_c_turbine_disabled = node_c_turbine_disabled.clone();
+        let mut experiment_state = ExperimentState::new(vote_pubkeys.len());
+        let timer = std::time::Instant::now();
+
+        move || {
+            loop {
+                let n_bytes = vote_listener_socket.recv(&mut buf).unwrap();
+
+                let bls_message = bincode::deserialize::<BLSMessage>(&buf[0..n_bytes]).unwrap();
+
+                match bls_message {
+                    BLSMessage::Vote(vote_message) => {
+                        let vote = &vote_message.vote;
+                        let node_name = vote_message.rank as usize;
+
+                        // Stage timeouts
+                        let elapsed_time = timer.elapsed();
+
+                        for stage in Stage::all() {
+                            if experiment_state.stage == stage && elapsed_time > stage.timeout() {
+                                panic!(
+                                    "Timeout during {:?}. node_c_turbine_disabled: {:#?}. Latest vote: {:#?}. Experiment state: {:#?}",
+                                    stage,
+                                    node_c_turbine_disabled.load(Ordering::Acquire),
+                                    vote,
+                                    experiment_state
+                                );
+                            }
+                        }
+
+                        // Handle experiment phase transitions
+                        experiment_state.wait_for_nodes_ready(
+                            vote,
+                            node_name,
+                            &mut cluster,
+                            &validator_keys[0].pubkey(),
+                        );
+                        experiment_state.handle_experiment_start(vote, &node_c_turbine_disabled);
+                        experiment_state.handle_notar_fallback(
+                            vote,
+                            node_name,
+                            &node_c_turbine_disabled,
+                        );
+                    }
+
+                    BLSMessage::Certificate(cert_message) => {
+                        // Wait until the final stage before looking for finalization certificates.
+                        if experiment_state.stage != Stage::ObserveLiveness {
+                            continue;
+                        }
+                        // Observing finalization certificates to ensure liveness.
+                        if [CertificateType::Finalize, CertificateType::FinalizeFast]
+                            .contains(&cert_message.certificate.certificate_type())
+                        {
+                            experiment_state.record_certificate(cert_message.certificate.slot());
+
+                            if experiment_state.sufficient_roots_created() {
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    });
+
+    vote_listener_thread.join().unwrap();
+}
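The NotarizeFallback condition 2 described in the doc comment above reduces to a simple stake check. A minimal sketch, assuming the thresholds are expressed as percentages of total stake; the actual rule lives in the Alpenglow consensus implementation:

    /// Sketch of "SafeToNotar" condition 2: enough combined participation
    /// (notarize + skip >= 60%) and a non-trivial notarize share (>= 20%).
    fn safe_to_notar_condition_2(notar_pct: u64, skip_pct: u64) -> bool {
        notar_pct >= 20 && notar_pct + skip_pct >= 60
    }

    fn main() {
        // Node B notarizes with 30% + eps while Node C skips with 30%:
        assert!(safe_to_notar_condition_2(30, 30));
        // Without C's skip votes, 30% notarize participation is not enough:
        assert!(!safe_to_notar_condition_2(30, 0));
    }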

+ 8 - 0
metrics/src/datapoint.rs

@@ -81,6 +81,11 @@ impl DataPoint {
         self
     }
 
+    pub fn add_field_u64(&mut self, name: &'static str, value: u64) -> &mut Self {
+        self.fields.push((name, value.to_string() + "u"));
+        self
+    }
+
     pub fn add_field_f64(&mut self, name: &'static str, value: f64) -> &mut Self {
         self.fields.push((name, value.to_string()));
         self
@@ -108,6 +113,9 @@ macro_rules! create_datapoint {
     (@field $point:ident $name:expr, $value:expr, i64) => {
         $point.add_field_i64($name, $value as i64);
     };
+    (@field $point:ident $name:expr, $value:expr, u64) => {
+        $point.add_field_u64($name, $value as u64);
+    };
     (@field $point:ident $name:expr, $value:expr, f64) => {
         $point.add_field_f64($name, $value as f64);
     };
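With the new u64 arm, callers can report unsigned fields through the datapoint macros; the value is serialized with InfluxDB's trailing u marker for unsigned integers. A hedged usage sketch, assuming the usual datapoint_info! wrapper over create_datapoint!; the metric and field names here are illustrative:

    // Hypothetical call site; metric and field names are illustrative.
    let (slot, num_notar_fallbacks) = (123u64, 4i64);
    datapoint_info!(
        "alpenglow_vote_stats",
        ("slot", slot, u64),                               // rendered as "123u"
        ("num_notar_fallbacks", num_notar_fallbacks, i64), // existing signed path
    );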

+ 3 - 0
multinode-demo/setup.sh

@@ -32,6 +32,8 @@ else
   $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/bootstrap-validator/vote-account.json
 fi
 
+BLS_PUBKEY=$($solana_keygen bls_pubkey "$SOLANA_CONFIG_DIR"/bootstrap-validator/identity.json)
+
 args=(
   "$@"
   --max-genesis-archive-unpacked-size 1073741824
@@ -39,6 +41,7 @@ args=(
   --bootstrap-validator "$SOLANA_CONFIG_DIR"/bootstrap-validator/identity.json
                         "$SOLANA_CONFIG_DIR"/bootstrap-validator/vote-account.json
                         "$SOLANA_CONFIG_DIR"/bootstrap-validator/stake-account.json
+  --bootstrap-validator-bls-pubkey "$BLS_PUBKEY"
 )
 
 "$SOLANA_ROOT"/fetch-core-bpf.sh

+ 11 - 2
multinode-demo/validator.sh

@@ -19,6 +19,7 @@ vote_account=
 no_restart=0
 gossip_entrypoint=
 ledger_dir=
+alpenglow=
 
 usage() {
   if [[ -n $1 ]]; then
@@ -191,6 +192,9 @@ while [[ -n $1 ]]; do
     elif [[ $1 == --wen-restart-coordinator ]]; then
       args+=("$1" "$2")
       shift 2
+    elif [[ $1 == --alpenglow ]]; then
+      alpenglow="--alpenglow"
+      shift
     elif [[ $1 = -h ]]; then
       usage "$@"
     else
@@ -329,8 +333,13 @@ setup_validator_accounts() {
       ) || return $?
     fi
 
-    echo "Creating validator vote account"
-    wallet create-vote-account "$vote_account" "$identity" "$authorized_withdrawer" || return $?
+    if [[ -n "$alpenglow" ]]; then
+      echo "Creating Alpenglow validator vote account"
+      wallet create-vote-account "$alpenglow" "$vote_account" "$identity" "$authorized_withdrawer" || return $?
+    else
+      echo "Creating POH validator vote account"
+      wallet create-vote-account "$vote_account" "$identity" "$authorized_withdrawer" || return $?
+    fi
   fi
   echo "Validator vote account configured"
 

+ 2 - 2
net-utils/src/sockets.rs

@@ -14,8 +14,8 @@ use {
 // base port for deconflicted allocations
 const BASE_PORT: u16 = 5000;
 // how much to allocate per individual process.
-// we expect to have at most 64 concurrent tests in CI at any moment on a given host.
-const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64;
+// we expect to have at most 256 concurrent tests in CI at any moment on a given host.
+const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 256;
 /// When running under nextest, this will try to provide
 /// a unique slice of port numbers (assuming no other nextest processes
 /// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable
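For reference, the new slice size works out to (65535 - 5000) / 256 = 236 ports per process. A sketch of how a NEXTEST_TEST_GLOBAL_SLOT value would map to a disjoint port window, assuming the straightforward slot-times-slice layout suggested by the comment; the helper name is illustrative:

    const BASE_PORT: u16 = 5000;
    const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 256; // = 236

    fn port_range_for_slot(slot: u16) -> (u16, u16) {
        let start = BASE_PORT + slot * SLICE_PER_PROCESS;
        (start, start + SLICE_PER_PROCESS) // half-open [start, end)
    }

    fn main() {
        assert_eq!(SLICE_PER_PROCESS, 236);
        assert_eq!(port_range_for_slot(0), (5000, 5236));
        assert_eq!(port_range_for_slot(255), (65_180, 65_416));
    }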

+ 26 - 3
net/net.sh

@@ -319,6 +319,7 @@ startBootstrapLeader() {
   declare ipAddress=$1
   declare nodeIndex="$2"
   declare logFile="$3"
+  declare alpenglow="$4"
   echo "--- Starting bootstrap validator: $ipAddress"
   echo "start log: $logFile"
 
@@ -330,6 +331,21 @@ startBootstrapLeader() {
 
     deployBootstrapValidator "$ipAddress"
 
+    # TODO: once we cut a public release of alpenglow-vote, we can eliminate this block
+    # below. For now though, as we're developing alpenglow in tandem with alpenglow-vote,
+    # we auto-generate spl_alpenglow-vote.so while building alpenglow. This block here
+    # copies over this auto-generated spl_alpenglow-vote.so over to the bootstrap
+    # validator.
+    if $alpenglow; then
+      declare remoteHome
+      remoteHome=$(remoteHomeDir "$ipAddress")
+      local remoteSolanaHome="${remoteHome}/solana"
+
+      rsync -vPrc -e "ssh ${sshOptions[*]}" \
+        "$SOLANA_ROOT"/target/alpenglow-vote-so/spl_alpenglow-vote.so \
+        "$ipAddress":"$remoteSolanaHome"/ > /dev/null
+    fi
+
     ssh "${sshOptions[@]}" -n "$ipAddress" \
       "./solana/net/remote/remote-node.sh \
          $deployMethod \
@@ -356,6 +372,7 @@ startBootstrapLeader() {
          \"$disableQuic\" \
          \"$enableUdp\" \
          \"$maybeWenRestart\" \
+         \"$alpenglow\" \
       "
 
   ) >> "$logFile" 2>&1 || {
@@ -369,6 +386,7 @@ startNode() {
   declare ipAddress=$1
   declare nodeType=$2
   declare nodeIndex="$3"
+  declare alpenglow="$4"
 
   initLogDir
   declare logFile="$netLogDir/validator-$ipAddress.log"
@@ -431,6 +449,7 @@ startNode() {
          \"$disableQuic\" \
          \"$enableUdp\" \
          \"$maybeWenRestart\" \
+         \"$alpenglow\" \
       "
   ) >> "$logFile" 2>&1 &
   declare pid=$!
@@ -642,7 +661,7 @@ deploy() {
     if $bootstrapLeader; then
       SECONDS=0
       declare bootstrapNodeDeployTime=
-      startBootstrapLeader "$nodeAddress" "$nodeIndex" "$netLogDir/bootstrap-validator-$ipAddress.log"
+      startBootstrapLeader "$nodeAddress" "$nodeIndex" "$netLogDir/bootstrap-validator-$ipAddress.log" "$alpenglow"
       bootstrapNodeDeployTime=$SECONDS
       $metricsWriteDatapoint "testnet-deploy net-bootnode-leader-started=1"
 
@@ -650,7 +669,7 @@ deploy() {
       SECONDS=0
       pids=()
     else
-      startNode "$ipAddress" "$nodeType" "$nodeIndex"
+      startNode "$ipAddress" "$nodeType" "$nodeIndex" "$alpenglow"
 
       # Stagger additional node start time. If too many nodes start simultaneously
       # the bootstrap node gets more rsync requests from the additional nodes than
@@ -853,6 +872,7 @@ enableUdp=false
 clientType=tpu-client
 maybeUseUnstakedConnection=""
 maybeWenRestart=""
+alpenglow=false
 
 command=$1
 [[ -n $command ]] || usage
@@ -1010,6 +1030,9 @@ while [[ -n $1 ]]; do
       skipSetup=true
       maybeWenRestart="$2"
       shift 2
+    elif [[ $1 = --alpenglow ]]; then
+      alpenglow=true
+      shift 1
     else
       usage "Unknown long option: $1"
     fi
@@ -1119,7 +1142,7 @@ if [[ "$numClientsRequested" -eq 0 ]]; then
   numClientsRequested=$numClients
 else
   if [[ "$numClientsRequested" -gt "$numClients" ]]; then
-    echo "Error: More clients requested ($numClientsRequested) then available ($numClients)"
+    echo "Error: More clients requested ($numClientsRequested) than available ($numClients)"
     exit 1
   fi
 fi

+ 16 - 0
net/remote/remote-node.sh

@@ -31,6 +31,7 @@ tmpfsAccounts="${22:false}"
 disableQuic="${23}"
 enableUdp="${24}"
 maybeWenRestart="${25}"
+alpenglow="${26}"
 
 set +x
 
@@ -236,9 +237,17 @@ EOF
                                        "$(solana-keygen pubkey "config/validator-vote-$i.json")"
                                        "$(solana-keygen pubkey "config/validator-stake-$i.json")"
           )
+          args+=(--bootstrap-validator-bls-pubkey "$(solana-keygen bls_pubkey "config/validator-identity-$i.json")")
         done
       fi
 
+      if $alpenglow; then
+        echo "Consensus method: Alpenglow"
+        args+=(--alpenglow "$HOME"/solana/spl_alpenglow-vote.so)
+      else
+        echo "Consensus method: POH"
+      fi
+
       multinode-demo/setup.sh "${args[@]}"
 
       maybeWaitForSupermajority=
@@ -440,6 +449,13 @@ EOF
       args+=(--wen-restart-coordinator "$maybeWenRestart")
     fi
 
+    if $alpenglow; then
+      echo "Consensus method: Alpenglow"
+      args+=(--alpenglow)
+    else
+      echo "Consensus method: POH"
+    fi
+
 cat >> ~/solana/on-reboot <<EOF
     $maybeSkipAccountsCreation
     nohup multinode-demo/validator.sh ${args[@]} > validator.log.\$now 2>&1 &

Too many files changed in this diff, so some files are not shown.