entry.rs

  1. //! The `entry` module is a fundamental building block of Proof of History. Each Entry
  2. //! carries a unique ID: the hash of the previous Entry combined with the hash of the
  3. //! transactions within it. Entries cannot be reordered, and each Entry's `num_hashes`
  4. //! field represents an approximate amount of time since the previous Entry was created.
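//!
//! A minimal sketch of how entries chain together (illustrative only, not compiled as a
//! doctest; it assumes the items defined in this module are in scope):
//!
//! ```ignore
//! use solana_sdk::hash::Hash;
//!
//! let start = Hash::default();
//! // One hash and no transactions makes a tick.
//! let tick = Entry::new(&start, 1, vec![]);
//! assert!(tick.verify(&start));
//! // The next entry is chained off the previous entry's hash.
//! let next = Entry::new(&tick.hash, 1, vec![]);
//! assert!(next.verify(&tick.hash));
//! ```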
  5. use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE};
  6. use crate::poh::Poh;
  7. use crate::result::Result;
  8. use bincode::{deserialize, serialized_size};
  9. use chrono::prelude::Utc;
  10. use rayon::prelude::*;
  11. use rayon::ThreadPool;
  12. use solana_budget_api::budget_instruction;
  13. use solana_merkle_tree::MerkleTree;
  14. use solana_metrics::inc_new_counter_warn;
  15. use solana_sdk::hash::Hash;
  16. use solana_sdk::signature::{Keypair, KeypairUtil};
  17. use solana_sdk::timing;
  18. use solana_sdk::transaction::Transaction;
  19. use std::borrow::Borrow;
  20. use std::cell::RefCell;
  21. use std::sync::mpsc::{Receiver, Sender};
  22. use std::sync::{Arc, RwLock};
  23. #[cfg(feature = "cuda")]
  24. use crate::sigverify::poh_verify_many;
  25. use solana_rayon_threadlimit::get_thread_count;
  26. #[cfg(feature = "cuda")]
  27. use std::sync::Mutex;
  28. #[cfg(feature = "cuda")]
  29. use std::thread;
  30. use std::time::Instant;
  31. pub const NUM_THREADS: u32 = 10;
  32. thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
  33. .num_threads(get_thread_count())
  34. .build()
  35. .unwrap()));
  36. pub type EntrySender = Sender<Vec<Entry>>;
  37. pub type EntryReceiver = Receiver<Vec<Entry>>;
  38. /// Each Entry contains three pieces of data. The `num_hashes` field is the number
  39. /// of hashes performed since the previous entry. The `hash` field is the result
  40. /// of hashing `hash` from the previous entry `num_hashes` times. The `transactions`
  41. /// field points to Transactions that took place shortly before `hash` was generated.
  42. ///
  43. /// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
  44. /// get a duration estimate since the last Entry. Since processing power increases
  45. /// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
  46. /// An upper bound on the duration can be estimated by assuming each hash was generated by the
  47. /// world's fastest processor at the time the entry was recorded. Put another way, no shorter
  48. /// duration could have elapsed if the hashes were computed by the world's fastest processor of
  49. /// that era. The hash chain is both
  50. /// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
  51. /// Work consensus!)
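///
/// A rough upper-bound estimate, as a sketch (`entry` is a placeholder binding and the hash
/// rate below is a hypothetical figure, not something this module provides):
///
/// ```ignore
/// // Assume the fastest known hasher performs `hashes_per_second` SHA-256 hashes.
/// let hashes_per_second: f64 = 2_000_000.0; // hypothetical
/// let max_elapsed_secs = entry.num_hashes as f64 / hashes_per_second;
/// ```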
  52. #[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq, Clone)]
  53. pub struct Entry {
  54. /// The number of hashes since the previous Entry ID.
  55. pub num_hashes: u64,
  56. /// The SHA-256 hash `num_hashes` after the previous Entry ID.
  57. pub hash: Hash,
  58. /// An unordered list of transactions that were observed before the Entry ID was
  59. /// generated. They may have been observed before a previous Entry ID but were
  60. /// pushed back into this list to ensure deterministic interpretation of the ledger.
  61. pub transactions: Vec<Transaction>,
  62. }
  63. impl Entry {
  64. /// Creates the next Entry `num_hashes` after `prev_hash`.
  65. pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
  66. assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
  67. if num_hashes == 0 && transactions.is_empty() {
  68. Entry {
  69. num_hashes: 0,
  70. hash: *prev_hash,
  71. transactions,
  72. }
  73. } else if num_hashes == 0 {
  74. // If transactions were passed in with num_hashes == 0, perform a single
  75. // hash over them and record num_hashes = 1
  76. let hash = next_hash(prev_hash, 1, &transactions);
  77. Entry {
  78. num_hashes: 1,
  79. hash,
  80. transactions,
  81. }
  82. } else {
  83. // Otherwise, build the Entry `num_hashes` after `prev_hash`.
  84. // For a tick, for instance, pass in num_hashes = 1
  85. // and an empty transactions vector
  86. let hash = next_hash(prev_hash, num_hashes, &transactions);
  87. Entry {
  88. num_hashes,
  89. hash,
  90. transactions,
  91. }
  92. }
  93. }
  94. pub fn to_shared_blob(&self) -> SharedBlob {
  95. let blob = self.to_blob();
  96. Arc::new(RwLock::new(blob))
  97. }
  98. pub fn to_blob(&self) -> Blob {
  99. Blob::from_serializable(&vec![&self])
  100. }
  101. /// Returns the serialized size of a one-Entry vector containing the given transactions,
  102. /// since Blobs carry `Vec<Entry>`.
  103. /// The total is computed without constructing the full Entry, which would require
  104. /// cloning the transactions.
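///
/// Sketch of intended use (illustrative only; `txs` and `prev_hash` are placeholder bindings):
/// ```ignore
/// // Check that a batch of transactions fits in a single blob before building the Entry.
/// if Entry::serialized_to_blob_size(&txs) <= BLOB_DATA_SIZE as u64 {
///     let entry = Entry::new(&prev_hash, 1, txs);
/// }
/// ```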
  105. pub fn serialized_to_blob_size(transactions: &[Transaction]) -> u64 {
  106. let txs_size: u64 = transactions
  107. .iter()
  108. .map(|tx| serialized_size(tx).unwrap())
  109. .sum();
  110. serialized_size(&vec![Entry {
  111. num_hashes: 0,
  112. hash: Hash::default(),
  113. transactions: vec![],
  114. }])
  115. .unwrap()
  116. + txs_size
  117. }
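/// Like `new`, but advances `start_hash` to the new entry's hash and resets `num_hashes`
/// to 0 so the caller can keep recording against the same mutable PoH state. A sketch
/// (illustrative only; `prev_hash` is a placeholder binding):
/// ```ignore
/// let mut hash = *prev_hash;
/// let mut num_hashes = 5;
/// let entry = Entry::new_mut(&mut hash, &mut num_hashes, vec![]);
/// assert_eq!(hash, entry.hash);
/// assert_eq!(num_hashes, 0);
/// ```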
  118. pub fn new_mut(
  119. start_hash: &mut Hash,
  120. num_hashes: &mut u64,
  121. transactions: Vec<Transaction>,
  122. ) -> Self {
  123. assert!(Self::serialized_to_blob_size(&transactions) <= BLOB_DATA_SIZE as u64);
  124. let entry = Self::new(start_hash, *num_hashes, transactions);
  125. *start_hash = entry.hash;
  126. *num_hashes = 0;
  127. entry
  128. }
  129. #[cfg(test)]
  130. pub fn new_tick(num_hashes: u64, hash: &Hash) -> Self {
  131. Entry {
  132. num_hashes,
  133. hash: *hash,
  134. transactions: vec![],
  135. }
  136. }
  137. /// Verifies that self.hash is the result of hashing `start_hash` `self.num_hashes` times.
  138. /// If the entry is not a tick, the hash of its transactions is mixed in as well.
  139. pub fn verify(&self, start_hash: &Hash) -> bool {
  140. let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
  141. if self.hash != ref_hash {
  142. warn!(
  143. "next_hash is invalid expected: {:?} actual: {:?}",
  144. self.hash, ref_hash
  145. );
  146. return false;
  147. }
  148. true
  149. }
  150. pub fn is_tick(&self) -> bool {
  151. self.transactions.is_empty()
  152. }
  153. }
  154. pub fn hash_transactions(transactions: &[Transaction]) -> Hash {
  155. // a hash of a slice of transactions only needs to hash the signatures
  156. let signatures: Vec<_> = transactions
  157. .iter()
  158. .flat_map(|tx| tx.signatures.iter())
  159. .collect();
  160. let merkle_tree = MerkleTree::new(&signatures);
  161. if let Some(root_hash) = merkle_tree.get_root() {
  162. *root_hash
  163. } else {
  164. Hash::default()
  165. }
  166. }
  167. /// Creates the hash `num_hashes` after `start_hash`. If there are transactions, the final
  168. /// hash also mixes in the merkle root of their signatures (see `hash_transactions`).
  169. /// If `num_hashes` is zero and there are no transactions,
  170. /// `start_hash` is returned.
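///
/// A sketch of the relationship to `Entry::new` (illustrative only; `prev_hash`, `num_hashes`,
/// and `txs` are placeholder bindings):
/// ```ignore
/// // An Entry's hash is exactly `next_hash` applied to the previous hash.
/// let entry = Entry::new(&prev_hash, num_hashes, txs.clone());
/// assert_eq!(entry.hash, next_hash(&prev_hash, entry.num_hashes, &txs));
/// ```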
  171. pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
  172. if num_hashes == 0 && transactions.is_empty() {
  173. return *start_hash;
  174. }
  175. let mut poh = Poh::new(*start_hash, None);
  176. poh.hash(num_hashes.saturating_sub(1));
  177. if transactions.is_empty() {
  178. poh.tick().unwrap().hash
  179. } else {
  180. poh.record(hash_transactions(transactions)).unwrap().hash
  181. }
  182. }
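/// Deserializes a stream of blobs back into the entries they carry, returning the entries
/// together with the number of ticks among them. A round-trip sketch (illustrative only;
/// mirrors the tests below):
/// ```ignore
/// let blobs = entries.to_blobs();
/// let (recovered, num_ticks) = reconstruct_entries_from_blobs(blobs)?;
/// assert_eq!(recovered, entries);
/// ```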
  183. pub fn reconstruct_entries_from_blobs<I>(blobs: I) -> Result<(Vec<Entry>, u64)>
  184. where
  185. I: IntoIterator,
  186. I::Item: Borrow<Blob>,
  187. {
  188. let mut entries: Vec<Entry> = vec![];
  189. let mut num_ticks = 0;
  190. for blob in blobs.into_iter() {
  191. let new_entries: Vec<Entry> = {
  192. let msg_size = blob.borrow().size();
  193. deserialize(&blob.borrow().data()[..msg_size])?
  194. };
  195. let num_new_ticks: u64 = new_entries.iter().map(|entry| entry.is_tick() as u64).sum();
  196. num_ticks += num_new_ticks;
  197. entries.extend(new_entries)
  198. }
  199. Ok((entries, num_ticks))
  200. }
  201. // an EntrySlice is a slice of Entries
  202. pub trait EntrySlice {
  203. /// Verifies that the hashes and counts of a slice of entries are all consistent.
  204. fn verify_cpu(&self, start_hash: &Hash) -> bool;
  205. fn verify(&self, start_hash: &Hash) -> bool;
  206. fn to_shared_blobs(&self) -> Vec<SharedBlob>;
  207. fn to_blobs(&self) -> Vec<Blob>;
  208. fn to_single_entry_blobs(&self) -> Vec<Blob>;
  209. fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob>;
  210. }
  211. impl EntrySlice for [Entry] {
  212. fn verify_cpu(&self, start_hash: &Hash) -> bool {
  213. let now = Instant::now();
  214. let genesis = [Entry {
  215. num_hashes: 0,
  216. hash: *start_hash,
  217. transactions: vec![],
  218. }];
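// Pair every entry with its predecessor by prepending a synthetic "genesis" entry that
// carries `start_hash`; entry i is then verified against the hash of entry i-1.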
  219. let entry_pairs = genesis.par_iter().chain(self).zip(self);
  220. let res = PAR_THREAD_POOL.with(|thread_pool| {
  221. thread_pool.borrow().install(|| {
  222. entry_pairs.all(|(x0, x1)| {
  223. let r = x1.verify(&x0.hash);
  224. if !r {
  225. warn!(
  226. "entry invalid!: x0: {:?}, x1: {:?} num txs: {}",
  227. x0.hash,
  228. x1.hash,
  229. x1.transactions.len()
  230. );
  231. }
  232. r
  233. })
  234. })
  235. });
  236. inc_new_counter_warn!(
  237. "entry_verify-duration",
  238. timing::duration_as_ms(&now.elapsed()) as usize
  239. );
  240. res
  241. }
  242. #[cfg(not(feature = "cuda"))]
  243. fn verify(&self, start_hash: &Hash) -> bool {
  244. self.verify_cpu(start_hash)
  245. }
  246. #[cfg(feature = "cuda")]
  247. fn verify(&self, start_hash: &Hash) -> bool {
  248. inc_new_counter_warn!("entry_verify-num_entries", self.len() as usize);
  249. // Use CPU verify if the batch length is < 1K
  250. if self.len() < 1024 {
  251. return self.verify_cpu(start_hash);
  252. }
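// GPU path, as a sketch of the flow: a worker thread re-derives each entry's hash chain on
// the GPU via `poh_verify_many` while the rayon pool hashes each entry's transactions on
// the CPU; the two results are then zipped against the recorded entries below.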
  253. let start = Instant::now();
  254. let genesis = [Entry {
  255. num_hashes: 0,
  256. hash: *start_hash,
  257. transactions: vec![],
  258. }];
  259. let hashes: Vec<Hash> = genesis
  260. .iter()
  261. .chain(self)
  262. .map(|entry| entry.hash)
  263. .take(self.len())
  264. .collect();
  265. let num_hashes_vec: Vec<u64> = self
  266. .into_iter()
  267. .map(|entry| entry.num_hashes.saturating_sub(1))
  268. .collect();
  269. let length = self.len();
  270. let hashes = Arc::new(Mutex::new(hashes));
  271. let hashes_clone = hashes.clone();
  272. let gpu_wait = Instant::now();
  273. let gpu_verify_thread = thread::spawn(move || {
  274. let mut hashes = hashes_clone.lock().unwrap();
  275. let res;
  276. unsafe {
  277. res = poh_verify_many(
  278. hashes.as_mut_ptr() as *mut u8,
  279. num_hashes_vec.as_ptr(),
  280. length,
  281. 1,
  282. );
  283. }
  284. if res != 0 {
  285. panic!("GPU PoH verify many failed");
  286. }
  287. });
  288. let tx_hashes: Vec<Option<Hash>> = PAR_THREAD_POOL.with(|thread_pool| {
  289. thread_pool.borrow().install(|| {
  290. self.into_par_iter()
  291. .map(|entry| {
  292. if entry.transactions.is_empty() {
  293. None
  294. } else {
  295. Some(hash_transactions(&entry.transactions))
  296. }
  297. })
  298. .collect()
  299. })
  300. });
  301. gpu_verify_thread.join().unwrap();
  302. inc_new_counter_warn!(
  303. "entry_verify-gpu_thread",
  304. timing::duration_as_ms(&gpu_wait.elapsed()) as usize
  305. );
  306. let hashes = Arc::try_unwrap(hashes).unwrap().into_inner().unwrap();
  307. let res =
  308. PAR_THREAD_POOL.with(|thread_pool| {
  309. thread_pool.borrow().install(|| {
  310. hashes.into_par_iter().zip(tx_hashes).zip(self).all(
  311. |((hash, tx_hash), answer)| {
  312. if answer.num_hashes == 0 {
  313. hash == answer.hash
  314. } else {
  315. let mut poh = Poh::new(hash, None);
  316. if let Some(mixin) = tx_hash {
  317. poh.record(mixin).unwrap().hash == answer.hash
  318. } else {
  319. poh.tick().unwrap().hash == answer.hash
  320. }
  321. }
  322. },
  323. )
  324. })
  325. });
  326. inc_new_counter_warn!(
  327. "entry_verify-duration",
  328. timing::duration_as_ms(&start.elapsed()) as usize
  329. );
  330. res
  331. }
  332. fn to_blobs(&self) -> Vec<Blob> {
  333. split_serializable_chunks(
  334. &self,
  335. BLOB_DATA_SIZE as u64,
  336. &|s| bincode::serialized_size(&s).unwrap(),
  337. &mut |entries: &[Entry]| Blob::from_serializable(entries),
  338. )
  339. }
  340. fn to_shared_blobs(&self) -> Vec<SharedBlob> {
  341. self.to_blobs()
  342. .into_iter()
  343. .map(|b| Arc::new(RwLock::new(b)))
  344. .collect()
  345. }
  346. fn to_single_entry_shared_blobs(&self) -> Vec<SharedBlob> {
  347. self.to_single_entry_blobs()
  348. .into_iter()
  349. .map(|b| Arc::new(RwLock::new(b)))
  350. .collect()
  351. }
  352. fn to_single_entry_blobs(&self) -> Vec<Blob> {
  353. self.iter().map(Entry::to_blob).collect()
  354. }
  355. }
  356. pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
  357. let entry = Entry::new(&start, num_hashes, transactions);
  358. *start = entry.hash;
  359. entry
  360. }
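/// Binary-searches for the largest prefix of `serializables` whose serialized size fits in
/// `max_size`. A sketch with a fixed 4-byte-per-item size function (illustrative only):
/// ```ignore
/// let n = num_will_fit(&[1u32, 2, 3], 8, &|s| 4 * s.len() as u64);
/// assert_eq!(n, 2); // two 4-byte items fit in 8 bytes, three do not
/// ```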
  361. pub fn num_will_fit<T, F>(serializables: &[T], max_size: u64, serialized_size: &F) -> usize
  362. where
  363. F: Fn(&[T]) -> u64,
  364. {
  365. if serializables.is_empty() {
  366. return 0;
  367. }
  368. let mut num = serializables.len();
  369. let mut upper = serializables.len();
  370. let mut lower = 1; // if one won't fit, we have a lot of TODOs
  371. loop {
  372. let next;
  373. if serialized_size(&serializables[..num]) <= max_size {
  374. next = (upper + num) / 2;
  375. lower = num;
  376. } else {
  377. if num == 1 {
  378. // if not even one will fit, bail
  379. num = 0;
  380. break;
  381. }
  382. next = (lower + num) / 2;
  383. upper = num;
  384. }
  385. // same as last time
  386. if next == num {
  387. break;
  388. }
  389. num = next;
  390. }
  391. num
  392. }
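/// Splits `serializables` into consecutive chunks that each serialize to at most `max_size`
/// bytes and maps every chunk through `converter`. A sketch (illustrative only):
/// ```ignore
/// // Ten 4-byte items split into five chunks of two items each.
/// let lens: Vec<usize> = split_serializable_chunks(
///     &[0u32; 10],
///     8,
///     &|s| 4 * s.len() as u64,
///     &mut |chunk: &[u32]| chunk.len(),
/// );
/// assert_eq!(lens, vec![2, 2, 2, 2, 2]);
/// ```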
  393. pub fn split_serializable_chunks<T, R, F1, F2>(
  394. serializables: &[T],
  395. max_size: u64,
  396. serialized_size: &F1,
  397. converter: &mut F2,
  398. ) -> Vec<R>
  399. where
  400. F1: Fn(&[T]) -> u64,
  401. F2: FnMut(&[T]) -> R,
  402. {
  403. let mut result = vec![];
  404. let mut chunk_start = 0;
  405. while chunk_start < serializables.len() {
  406. let chunk_end =
  407. chunk_start + num_will_fit(&serializables[chunk_start..], max_size, serialized_size);
  408. result.push(converter(&serializables[chunk_start..chunk_end]));
  409. chunk_start = chunk_end;
  410. }
  411. result
  412. }
  413. /// Creates the next entries for the given transactions; updates `start_hash` to the hash
  414. /// of the last Entry and resets `num_hashes` to 0
  415. fn next_entries_mut(
  416. start_hash: &mut Hash,
  417. num_hashes: &mut u64,
  418. transactions: Vec<Transaction>,
  419. ) -> Vec<Entry> {
  420. split_serializable_chunks(
  421. &transactions[..],
  422. BLOB_DATA_SIZE as u64,
  423. &Entry::serialized_to_blob_size,
  424. &mut |txs: &[Transaction]| Entry::new_mut(start_hash, num_hashes, txs.to_vec()),
  425. )
  426. }
  427. /// Creates the next Entries for given transactions
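/// Each returned Entry fits in a single blob; an oversized transaction batch is split across
/// consecutive entries. A sketch (illustrative only; `prev_hash` and `transactions` are
/// placeholder bindings):
/// ```ignore
/// let entries = next_entries(&prev_hash, 0, transactions);
/// assert!(entries
///     .iter()
///     .all(|e| Entry::serialized_to_blob_size(&e.transactions) <= BLOB_DATA_SIZE as u64));
/// ```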
  428. pub fn next_entries(
  429. start_hash: &Hash,
  430. num_hashes: u64,
  431. transactions: Vec<Transaction>,
  432. ) -> Vec<Entry> {
  433. let mut hash = *start_hash;
  434. let mut num_hashes = num_hashes;
  435. next_entries_mut(&mut hash, &mut num_hashes, transactions)
  436. }
  437. pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
  438. let mut ticks = Vec::with_capacity(num_ticks as usize);
  439. for _ in 0..num_ticks {
  440. let new_tick = next_entry_mut(&mut hash, 1, vec![]);
  441. ticks.push(new_tick);
  442. }
  443. ticks
  444. }
  445. pub fn make_tiny_test_entries_from_hash(start: &Hash, num: usize) -> Vec<Entry> {
  446. let keypair = Keypair::new();
  447. let pubkey = keypair.pubkey();
  448. let mut hash = *start;
  449. let mut num_hashes = 0;
  450. (0..num)
  451. .map(|_| {
  452. let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
  453. let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], *start);
  454. Entry::new_mut(&mut hash, &mut num_hashes, vec![tx])
  455. })
  456. .collect()
  457. }
  458. pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
  459. let zero = Hash::default();
  460. let one = solana_sdk::hash::hash(&zero.as_ref());
  461. make_tiny_test_entries_from_hash(&one, num)
  462. }
  463. pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
  464. let zero = Hash::default();
  465. let one = solana_sdk::hash::hash(&zero.as_ref());
  466. let keypair = Keypair::new();
  467. let pubkey = keypair.pubkey();
  468. let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
  469. let tx = Transaction::new_signed_instructions(&[&keypair], vec![ix], one);
  470. let serialized_size = serialized_size(&tx).unwrap();
  471. let num_txs = BLOB_DATA_SIZE / serialized_size as usize;
  472. let txs = vec![tx; num_txs];
  473. let entry = next_entries(&one, 1, txs)[0].clone();
  474. vec![entry; num_entries]
  475. }
  476. #[cfg(test)]
  477. pub fn make_consecutive_blobs(
  478. id: &solana_sdk::pubkey::Pubkey,
  479. num_blobs_to_make: u64,
  480. start_height: u64,
  481. start_hash: Hash,
  482. addr: &std::net::SocketAddr,
  483. ) -> Vec<SharedBlob> {
  484. let entries = create_ticks(num_blobs_to_make, start_hash);
  485. let blobs = entries.to_single_entry_shared_blobs();
  486. let mut index = start_height;
  487. for blob in &blobs {
  488. let mut blob = blob.write().unwrap();
  489. blob.set_index(index);
  490. blob.set_id(id);
  491. blob.meta.set_addr(addr);
  492. index += 1;
  493. }
  494. blobs
  495. }
  496. #[cfg(test)]
  497. /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
  498. pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
  499. assert!(num_hashes > 0 || transactions.is_empty());
  500. Entry {
  501. num_hashes,
  502. hash: next_hash(prev_hash, num_hashes, &transactions),
  503. transactions,
  504. }
  505. }
  506. #[cfg(test)]
  507. mod tests {
  508. use super::*;
  509. use crate::entry::Entry;
  510. use crate::packet::{to_blobs, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
  511. use solana_sdk::hash::hash;
  512. use solana_sdk::instruction::Instruction;
  513. use solana_sdk::pubkey::Pubkey;
  514. use solana_sdk::signature::{Keypair, KeypairUtil};
  515. use solana_sdk::system_transaction;
  516. use std::net::{IpAddr, Ipv4Addr, SocketAddr};
  517. fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
  518. let pubkey = keypair.pubkey();
  519. let ixs = budget_instruction::payment(&pubkey, &pubkey, 1);
  520. Transaction::new_signed_instructions(&[keypair], ixs, hash)
  521. }
  522. fn create_sample_timestamp(keypair: &Keypair, hash: Hash) -> Transaction {
  523. let pubkey = keypair.pubkey();
  524. let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
  525. Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
  526. }
  527. fn create_sample_apply_signature(keypair: &Keypair, hash: Hash) -> Transaction {
  528. let pubkey = keypair.pubkey();
  529. let ix = budget_instruction::apply_signature(&pubkey, &pubkey, &pubkey);
  530. Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
  531. }
  532. #[test]
  533. fn test_entry_verify() {
  534. let zero = Hash::default();
  535. let one = hash(&zero.as_ref());
  536. assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used
  537. assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
  538. assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
  539. assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
  540. }
  541. #[test]
  542. fn test_transaction_reorder_attack() {
  543. let zero = Hash::default();
  544. // First, verify entries
  545. let keypair = Keypair::new();
  546. let tx0 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero);
  547. let tx1 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 1, zero);
  548. let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
  549. assert!(e0.verify(&zero));
  550. // Next, swap two transactions and ensure verification fails.
  551. e0.transactions[0] = tx1; // <-- attack
  552. e0.transactions[1] = tx0;
  553. assert!(!e0.verify(&zero));
  554. }
  555. #[test]
  556. fn test_witness_reorder_attack() {
  557. let zero = Hash::default();
  558. // First, verify entries
  559. let keypair = Keypair::new();
  560. let tx0 = create_sample_timestamp(&keypair, zero);
  561. let tx1 = create_sample_apply_signature(&keypair, zero);
  562. let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
  563. assert!(e0.verify(&zero));
  564. // Next, swap two witness transactions and ensure verification fails.
  565. e0.transactions[0] = tx1; // <-- attack
  566. e0.transactions[1] = tx0;
  567. assert!(!e0.verify(&zero));
  568. }
  569. #[test]
  570. fn test_next_entry() {
  571. let zero = Hash::default();
  572. let tick = next_entry(&zero, 1, vec![]);
  573. assert_eq!(tick.num_hashes, 1);
  574. assert_ne!(tick.hash, zero);
  575. let tick = next_entry(&zero, 0, vec![]);
  576. assert_eq!(tick.num_hashes, 0);
  577. assert_eq!(tick.hash, zero);
  578. let keypair = Keypair::new();
  579. let tx0 = create_sample_timestamp(&keypair, zero);
  580. let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
  581. assert_eq!(entry0.num_hashes, 1);
  582. assert_eq!(entry0.hash, next_hash(&zero, 1, &vec![tx0]));
  583. }
  584. #[test]
  585. #[should_panic]
  586. fn test_next_entry_panic() {
  587. let zero = Hash::default();
  588. let keypair = Keypair::new();
  589. let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero);
  590. next_entry(&zero, 0, vec![tx]);
  591. }
  592. #[test]
  593. fn test_serialized_to_blob_size() {
  594. let zero = Hash::default();
  595. let keypair = Keypair::new();
  596. let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero);
  597. let entry = next_entry(&zero, 1, vec![tx.clone()]);
  598. assert_eq!(
  599. Entry::serialized_to_blob_size(&[tx]),
  600. serialized_size(&vec![entry]).unwrap() // blobs are Vec<Entry>
  601. );
  602. }
  603. #[test]
  604. fn test_verify_slice() {
  605. solana_logger::setup();
  606. let zero = Hash::default();
  607. let one = hash(&zero.as_ref());
  608. assert!(vec![][..].verify(&zero)); // base case
  609. assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
  610. assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
  611. assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
  612. let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
  613. bad_ticks[1].hash = one;
  614. assert!(!bad_ticks.verify(&zero)); // inductive step, bad
  615. }
  616. #[test]
  617. fn test_verify_slice_with_hashes() {
  618. solana_logger::setup();
  619. let zero = Hash::default();
  620. let one = hash(&zero.as_ref());
  621. let two = hash(&one.as_ref());
  622. assert!(vec![][..].verify(&one)); // base case
  623. assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1
  624. assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad
  625. let mut ticks = vec![next_entry(&one, 1, vec![])];
  626. ticks.push(next_entry(&ticks.last().unwrap().hash, 1, vec![]));
  627. assert!(ticks.verify(&one)); // inductive step
  628. let mut bad_ticks = vec![next_entry(&one, 1, vec![])];
  629. bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![]));
  630. bad_ticks[1].hash = one;
  631. assert!(!bad_ticks.verify(&one)); // inductive step, bad
  632. }
  633. #[test]
  634. fn test_verify_slice_with_hashes_and_transactions() {
  635. solana_logger::setup();
  636. let zero = Hash::default();
  637. let one = hash(&zero.as_ref());
  638. let two = hash(&one.as_ref());
  639. let alice_pubkey = Keypair::default();
  640. let tx0 = create_sample_payment(&alice_pubkey, one);
  641. let tx1 = create_sample_timestamp(&alice_pubkey, one);
  642. assert!(vec![][..].verify(&one)); // base case
  643. assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one)); // singleton case 1
  644. assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two)); // singleton case 2, bad
  645. let mut ticks = vec![next_entry(&one, 1, vec![tx0.clone()])];
  646. ticks.push(next_entry(
  647. &ticks.last().unwrap().hash,
  648. 1,
  649. vec![tx1.clone()],
  650. ));
  651. assert!(ticks.verify(&one)); // inductive step
  652. let mut bad_ticks = vec![next_entry(&one, 1, vec![tx0])];
  653. bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![tx1]));
  654. bad_ticks[1].hash = one;
  655. assert!(!bad_ticks.verify(&one)); // inductive step, bad
  656. }
  657. fn blob_sized_entries(num_entries: usize) -> Vec<Entry> {
  658. // rough guess
  659. let mut magic_len = BLOB_DATA_SIZE
  660. - serialized_size(&vec![Entry {
  661. num_hashes: 0,
  662. hash: Hash::default(),
  663. transactions: vec![],
  664. }])
  665. .unwrap() as usize;
  666. loop {
  667. let entries = vec![Entry {
  668. num_hashes: 0,
  669. hash: Hash::default(),
  670. transactions: vec![Transaction::new_unsigned_instructions(vec![
  671. Instruction::new(Pubkey::default(), &vec![0u8; magic_len as usize], vec![]),
  672. ])],
  673. }];
  674. let size = serialized_size(&entries).unwrap() as usize;
  675. if size < BLOB_DATA_SIZE {
  676. magic_len += BLOB_DATA_SIZE - size;
  677. } else if size > BLOB_DATA_SIZE {
  678. magic_len -= size - BLOB_DATA_SIZE;
  679. } else {
  680. break;
  681. }
  682. }
  683. vec![
  684. Entry {
  685. num_hashes: 0,
  686. hash: Hash::default(),
  687. transactions: vec![Transaction::new_unsigned_instructions(vec![
  688. Instruction::new(Pubkey::default(), &vec![0u8; magic_len], vec![]),
  689. ])],
  690. };
  691. num_entries
  692. ]
  693. }
  694. #[test]
  695. fn test_entries_to_blobs() {
  696. solana_logger::setup();
  697. let entries = blob_sized_entries(10);
  698. let blobs = entries.to_blobs();
  699. for blob in &blobs {
  700. assert_eq!(blob.size(), BLOB_DATA_SIZE);
  701. }
  702. assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
  703. }
  704. #[test]
  705. fn test_multiple_entries_to_blobs() {
  706. solana_logger::setup();
  707. let num_blobs = 10;
  708. let serialized_size =
  709. bincode::serialized_size(&make_tiny_test_entries_from_hash(&Hash::default(), 1))
  710. .unwrap();
  711. let num_entries = (num_blobs * BLOB_DATA_SIZE as u64) / serialized_size;
  712. let entries = make_tiny_test_entries_from_hash(&Hash::default(), num_entries as usize);
  713. let blob_q = entries.to_blobs();
  714. assert_eq!(blob_q.len() as u64, num_blobs);
  715. assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap().0, entries);
  716. }
  717. #[test]
  718. fn test_bad_blobs_attack() {
  719. solana_logger::setup();
  720. let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
  721. let blobs_q = to_blobs(vec![(0, addr)]).unwrap(); // <-- attack!
  722. assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
  723. }
  724. #[test]
  725. fn test_next_entries() {
  726. solana_logger::setup();
  727. let hash = Hash::default();
  728. let next_hash = solana_sdk::hash::hash(&hash.as_ref());
  729. let keypair = Keypair::new();
  730. let tx_small = create_sample_timestamp(&keypair, next_hash);
  731. let tx_large = create_sample_payment(&keypair, next_hash);
  732. let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
  733. let tx_large_size = serialized_size(&tx_large).unwrap() as usize;
  734. let entry_size = serialized_size(&Entry {
  735. num_hashes: 0,
  736. hash: Hash::default(),
  737. transactions: vec![],
  738. })
  739. .unwrap() as usize;
  740. assert!(tx_small_size < tx_large_size);
  741. assert!(tx_large_size < PACKET_DATA_SIZE);
  742. let threshold = (BLOB_DATA_SIZE - entry_size) / tx_small_size;
  743. // verify no split
  744. let transactions = vec![tx_small.clone(); threshold];
  745. let entries0 = next_entries(&hash, 0, transactions.clone());
  746. assert_eq!(entries0.len(), 1);
  747. assert!(entries0.verify(&hash));
  748. // verify the split with uniform transactions
  749. let transactions = vec![tx_small.clone(); threshold * 2];
  750. let entries0 = next_entries(&hash, 0, transactions.clone());
  751. assert_eq!(entries0.len(), 2);
  752. assert!(entries0.verify(&hash));
  753. // verify the split with small transactions followed by large
  754. // transactions
  755. let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / tx_small_size];
  756. let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / tx_large_size];
  757. transactions.extend(large_transactions);
  758. let entries0 = next_entries(&hash, 0, transactions.clone());
  759. assert!(entries0.len() >= 2);
  760. assert!(entries0.verify(&hash));
  761. }
  762. #[test]
  763. fn test_num_will_fit_empty() {
  764. let serializables: Vec<u32> = vec![];
  765. let result = num_will_fit(&serializables[..], 8, &|_| 4);
  766. assert_eq!(result, 0);
  767. }
  768. #[test]
  769. fn test_num_will_fit() {
  770. let serializables_vec: Vec<u8> = (0..10).map(|_| 1).collect();
  771. let serializables = &serializables_vec[..];
  772. let sum = |i: &[u8]| (0..i.len()).into_iter().sum::<usize>() as u64;
  773. // the first item serializes to 0 bytes (<= 0), but the first two serialize to 1, so only 1 fits
  774. let result = num_will_fit(serializables, 0, &sum);
  775. assert_eq!(result, 1);
  776. // the first 4 items serialize to 6 bytes (<= 8), but the first 5 serialize to 10 (> 8), so 4 fit
  777. let result = num_will_fit(serializables, 8, &sum);
  778. assert_eq!(result, 4);
  779. // the first 2 items serialize to 1 byte (<= 1), but the first 3 serialize to 3 (> 1), so 2 fit
  780. let result = num_will_fit(serializables, 1, &sum);
  781. assert_eq!(result, 2);
  782. // all 10 items serialize to 45 bytes, so all fit exactly
  783. let result = num_will_fit(serializables, 45, &sum);
  784. assert_eq!(result, 10);
  785. // the first 9 items serialize to 36 bytes (<= 44), but all 10 serialize to 45, so the last is dropped
  786. let result = num_will_fit(serializables, 44, &sum);
  787. assert_eq!(result, 9);
  788. // all 10 items serialize to 45 bytes (<= 46), so all fit
  789. let result = num_will_fit(serializables, 46, &sum);
  790. assert_eq!(result, 10);
  791. // too small to fit a single u64
  792. let result = num_will_fit(&[0u64], (std::mem::size_of::<u64>() - 1) as u64, &|i| {
  793. (std::mem::size_of::<u64>() * i.len()) as u64
  794. });
  795. assert_eq!(result, 0);
  796. }
  797. }