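//! Benchmarks for shredding entries into Merkle shreds and reassembling them.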
#![allow(clippy::arithmetic_side_effects)]
use {
    bencher::{benchmark_group, benchmark_main, Bencher},
    rand::Rng,
    solana_entry::entry::{create_ticks, Entry},
    solana_hash::Hash,
    solana_keypair::Keypair,
    solana_ledger::shred::{
        get_data_shred_bytes_per_batch_typical, max_entries_per_n_shred, max_ticks_per_n_shreds,
        recover, ProcessShredsStats, ReedSolomonCache, Shred, Shredder,
        CODING_SHREDS_PER_FEC_BLOCK, DATA_SHREDS_PER_FEC_BLOCK,
    },
    solana_perf::test_tx,
    std::hint::black_box,
};
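
// Builds a single Entry containing `txs_per_entry` copies of a test transaction.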
fn make_test_entry(txs_per_entry: u64) -> Entry {
    Entry {
        num_hashes: 100_000,
        hash: Hash::default(),
        transactions: vec![test_tx::test_tx().into(); txs_per_entry as usize],
    }
}
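
// Builds `num_entries` identical test entries; their hashes are left as defaults
// (not PoH-chained), which is sufficient for shredding benchmarks.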
fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Entry> {
    (0..num_entries)
        .map(|_| make_test_entry(txs_per_entry))
        .collect()
}
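
// Approximate per-shred data capacity: the typical data bytes per FEC batch
// divided by the number of data shreds in a batch.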
const SHRED_SIZE_TYPICAL: usize = {
    let batch_payload = get_data_shred_bytes_per_batch_typical() as usize;
    batch_payload / DATA_SHREDS_PER_FEC_BLOCK
};
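
// Shreds ~1 MB of tick-only entries into Merkle shreds on every iteration.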
fn bench_shredder_ticks(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let num_shreds = 1_000_000_usize.div_ceil(SHRED_SIZE_TYPICAL);
    // ~1Mb
    let num_ticks = max_ticks_per_n_shreds(1, Some(SHRED_SIZE_TYPICAL)) * num_shreds as u64;
    let entries = create_ticks(num_ticks, 0, Hash::default());
    let reed_solomon_cache = ReedSolomonCache::default();
    let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, 0, 0).unwrap();
        shredder.entries_to_merkle_shreds_for_tests(
            &kp,
            &entries,
            true,
            chained_merkle_root,
            0,
            0,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );
    })
}
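
// Shreds ~1 MB of transaction-heavy entries (128 transactions per entry).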
fn bench_shredder_large_entries(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let shred_size = SHRED_SIZE_TYPICAL;
    let num_shreds = 1_000_000_usize.div_ceil(shred_size);
    let txs_per_entry = 128;
    let num_entries = max_entries_per_n_shred(
        &make_test_entry(txs_per_entry),
        num_shreds as u64,
        Some(shred_size),
    );
    let entries = make_large_unchained_entries(txs_per_entry, num_entries);
    let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    let reed_solomon_cache = ReedSolomonCache::default();
    // 1Mb
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, 0, 0).unwrap();
        shredder.entries_to_merkle_shreds_for_tests(
            &kp,
            &entries,
            true,
            chained_merkle_root,
            0,
            0,
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        );
    })
}
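
// Reassembles the original entry payload from ~10 MB worth of data shreds.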
fn bench_deshredder(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let shred_size = SHRED_SIZE_TYPICAL;
    // ~10Mb
    let num_shreds = 10_000_000_usize.div_ceil(shred_size);
    let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
    let entries = create_ticks(num_ticks, 0, Hash::default());
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    let (data_shreds, _) = shredder.entries_to_merkle_shreds_for_tests(
        &kp,
        &entries,
        true,
        chained_merkle_root,
        0,
        0,
        &ReedSolomonCache::default(),
        &mut ProcessShredsStats::default(),
    );
    bencher.iter(|| {
        let data_shreds = data_shreds.iter().map(Shred::payload);
        let raw = &mut Shredder::deshred(data_shreds).unwrap();
        assert_ne!(raw.len(), 0);
    })
}
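
// Deserializes a single data shred from its serialized payload.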
fn bench_deserialize_hdr(bencher: &mut Bencher) {
    let keypair = Keypair::new();
    let shredder = Shredder::new(2, 1, 0, 0).unwrap();
    let merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    let mut stats = ProcessShredsStats::default();
    let reed_solomon_cache = ReedSolomonCache::default();
    let mut shreds = shredder
        .make_merkle_shreds_from_entries(
            &keypair,
            &[],
            true, // is_last_in_slot
            merkle_root,
            1, // next_shred_index
            0, // next_code_index
            &reed_solomon_cache,
            &mut stats,
        )
        .filter(Shred::is_data)
        .collect::<Vec<_>>();
    let shred = shreds.remove(0);
    bencher.iter(|| {
        let payload = shred.payload().clone();
        let _ = Shred::new_from_serialized_shred(payload).unwrap();
    })
}
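
// Entries shared by the coding and decoding benchmarks below.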
fn make_entries() -> Vec<Entry> {
    let txs_per_entry = 128;
    let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), 200, Some(1000));
    make_large_unchained_entries(txs_per_entry, num_entries)
}
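
// Generates a full set of Merkle shreds (data plus Reed-Solomon coding shreds).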
fn bench_shredder_coding(bencher: &mut Bencher) {
    let entries = make_entries();
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let reed_solomon_cache = ReedSolomonCache::default();
    let merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    bencher.iter(|| {
        let result: Vec<_> = shredder
            .make_merkle_shreds_from_entries(
                &Keypair::new(),
                &entries,
                true, // is_last_in_slot
                merkle_root,
                0, // next_shred_index
                0, // next_code_index
                &reed_solomon_cache,
                &mut ProcessShredsStats::default(),
            )
            .collect();
        black_box(result);
    })
}
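
// Recovers the shreds of one FEC set from its coding shreds alone.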
fn bench_shredder_decoding(bencher: &mut Bencher) {
    let entries = make_entries();
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let reed_solomon_cache = ReedSolomonCache::default();
    let merkle_root = Hash::new_from_array(rand::thread_rng().gen());
    let (_data_shreds, mut coding_shreds): (Vec<_>, Vec<_>) = shredder
        .make_merkle_shreds_from_entries(
            &Keypair::new(),
            &entries,
            true, // is_last_in_slot
            merkle_root,
            0, // next_shred_index
            0, // next_code_index
            &reed_solomon_cache,
            &mut ProcessShredsStats::default(),
        )
        .partition(Shred::is_data);
    coding_shreds.truncate(CODING_SHREDS_PER_FEC_BLOCK);
    bencher.iter(|| {
        for shred in recover(coding_shreds.clone(), &reed_solomon_cache).unwrap() {
            black_box(shred.unwrap());
        }
    })
}

benchmark_group!(
    benches,
    bench_shredder_ticks,
    bench_shredder_large_entries,
    bench_deshredder,
    bench_deserialize_hdr,
    bench_shredder_coding,
    bench_shredder_decoding
);
benchmark_main!(benches);