@@ -282,13 +282,23 @@ where
             WormholePayload::Merkle(proof) => {
                 tracing::info!(slot = proof.slot, "Storing VAA Merkle Proof.");
 
-                store_wormhole_merkle_verified_message(
+                // Store the wormhole merkle verified message and check if it was already stored
+                let is_new = store_wormhole_merkle_verified_message(
                     self,
                     proof.clone(),
                     update_vaa.to_owned(),
                 )
                 .await?;
 
+                // If the message was already stored, return early
+                if !is_new {
+                    tracing::info!(
+                        slot = proof.slot,
+                        "VAA Merkle Proof already stored, skipping."
+                    );
+                    return Ok(());
+                }
+
                 self.into()
                     .data
                     .write()
@@ -304,9 +314,22 @@ where
                 let slot = accumulator_messages.slot;
                 tracing::info!(slot = slot, "Storing Accumulator Messages.");
 
-                self.store_accumulator_messages(accumulator_messages)
+                // Store the accumulator messages and, in the same operation, check whether
+                // they were already stored. Doing both in one step avoids the race where two
+                // threads each see nothing stored and then both store the same messages.
+                let is_new = self
+                    .store_accumulator_messages(accumulator_messages)
                     .await?;
 
+                // If the messages were already stored, return early
+                if !is_new {
+                    tracing::info!(
+                        slot = slot,
+                        "Accumulator Messages already stored, skipping."
+                    );
+                    return Ok(());
+                }
+
                 self.into()
                     .data
                     .write()
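The early returns above rely on `store_wormhole_merkle_verified_message` and `store_accumulator_messages` reporting whether the write actually inserted a new entry; those implementations are not part of this diff. A minimal sketch of the assumed contract, using a hypothetical map guarded by a tokio `RwLock`, could look like this:

    // Hypothetical sketch (not part of this diff): insert an entry for `slot` and report
    // whether it was new. Performing the lookup and the insert under a single write lock
    // is what makes the returned `is_new` answer race-free.
    use std::collections::hash_map::{Entry, HashMap};
    use tokio::sync::RwLock;

    pub async fn store_if_new<V>(store: &RwLock<HashMap<u64, V>>, slot: u64, value: V) -> bool {
        let mut guard = store.write().await;
        match guard.entry(slot) {
            Entry::Vacant(vacant) => {
                vacant.insert(value);
                true // newly stored
            }
            Entry::Occupied(_) => false, // already stored; the caller can return early
        }
    }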
@@ -351,28 +374,23 @@ where
         // Update the aggregate state
         let mut aggregate_state = self.into().data.write().await;
 
-        // Send update event to subscribers. We are purposefully ignoring the result
-        // because there might be no subscribers.
-        let _ = match aggregate_state.latest_completed_slot {
+        // Check and update the latest completed slot while holding the write lock
+        let event = match aggregate_state.latest_completed_slot {
             None => {
-                aggregate_state.latest_completed_slot.replace(slot);
-                self.into()
-                    .api_update_tx
-                    .send(AggregationEvent::New { slot })
+                aggregate_state.latest_completed_slot = Some(slot);
+                AggregationEvent::New { slot }
             }
             Some(latest) if slot > latest => {
                 self.prune_removed_keys(message_state_keys).await;
-                aggregate_state.latest_completed_slot.replace(slot);
-                self.into()
-                    .api_update_tx
-                    .send(AggregationEvent::New { slot })
+                aggregate_state.latest_completed_slot = Some(slot);
+                AggregationEvent::New { slot }
             }
-            _ => self
-                .into()
-                .api_update_tx
-                .send(AggregationEvent::OutOfOrder { slot }),
+            _ => AggregationEvent::OutOfOrder { slot },
         };
 
+        // Only send the event after the state has been updated
+        let _ = self.into().api_update_tx.send(event);
+
         aggregate_state.latest_completed_slot = aggregate_state
             .latest_completed_slot
             .map(|latest| latest.max(slot))
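Computing the event in the same `match` that updates `latest_completed_slot`, while the write lock on the aggregate state is held, and only then sending it, means a subscriber woken by the event cannot observe a stale `latest_completed_slot`. A simplified, self-contained illustration of that ordering follows; the `complete_slot` helper and its types are hypothetical, while the event variants and field names mirror the diff:

    use tokio::sync::{broadcast, RwLock};

    #[derive(Clone, Debug, PartialEq)]
    enum AggregationEvent {
        New { slot: u64 },
        OutOfOrder { slot: u64 },
    }

    // Hypothetical helper: decide which event to send and update the state under the
    // write lock, then send. The send result is ignored because there may be no
    // subscribers yet.
    async fn complete_slot(
        latest_completed_slot: &RwLock<Option<u64>>,
        api_update_tx: &broadcast::Sender<AggregationEvent>,
        slot: u64,
    ) {
        let mut latest = latest_completed_slot.write().await;
        let event = match *latest {
            Some(l) if slot <= l => AggregationEvent::OutOfOrder { slot },
            _ => {
                *latest = Some(slot);
                AggregationEvent::New { slot }
            }
        };
        let _ = api_update_tx.send(event);
    }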
@@ -1374,6 +1392,115 @@ mod test {
 
         assert_eq!(result.unwrap_err().to_string(), "Message not found");
     }
+
+    /// Test that verifies only one event is sent per slot, even when updates arrive out of order
+    /// or when a slot is processed multiple times.
+    #[tokio::test]
+    pub async fn test_out_of_order_updates_send_single_event_per_slot() {
+        let (state, mut update_rx) = setup_state(10).await;
+
+        // Create price feed messages
+        let price_feed_100 = create_dummy_price_feed_message(100, 10, 9);
+        let price_feed_101 = create_dummy_price_feed_message(100, 11, 10);
+
+        // First, process slot 100
+        store_multiple_concurrent_valid_updates(
+            state.clone(),
+            generate_update(vec![Message::PriceFeedMessage(price_feed_100)], 100, 20),
+        )
+        .await;
+
+        // Check that we received the New event for slot 100
+        assert_eq!(
+            update_rx.recv().await,
+            Ok(AggregationEvent::New { slot: 100 })
+        );
+
+        // Next, process slot 101
+        store_multiple_concurrent_valid_updates(
+            state.clone(),
+            generate_update(vec![Message::PriceFeedMessage(price_feed_101)], 101, 21),
+        )
+        .await;
+
+        // Check that we received the New event for slot 101
+        assert_eq!(
+            update_rx.recv().await,
+            Ok(AggregationEvent::New { slot: 101 })
+        );
+
+        // Now, process slot 100 again
+        store_multiple_concurrent_valid_updates(
+            state.clone(),
+            generate_update(vec![Message::PriceFeedMessage(price_feed_100)], 100, 22),
+        )
+        .await;
+
+        // Try to receive another event with a timeout to ensure no more events were sent.
+        // No OutOfOrder event is expected: the slot 100 update was already stored, so re-processing returns early.
+        let timeout_result =
+            tokio::time::timeout(std::time::Duration::from_millis(100), update_rx.recv()).await;
+
+        // The timeout should occur, indicating no more events were received
+        assert!(
+            timeout_result.is_err(),
+            "Received unexpected additional event"
+        );
+
+        // Verify that the price feed was stored correctly (both messages share feed id 100)
+        let price_feed_ids = (*state).get_price_feed_ids().await;
+        assert_eq!(price_feed_ids.len(), 1);
+        assert!(price_feed_ids.contains(&PriceIdentifier::new([100; 32])));
+    }
+
+    /// Test that verifies only one event is sent when multiple concurrent updates
+    /// for the same slot are processed.
+    #[tokio::test]
+    pub async fn test_concurrent_updates_same_slot_sends_single_event() {
+        let (state, mut update_rx) = setup_state(10).await;
+
+        // Create a single price feed message
+        let price_feed = create_dummy_price_feed_message(100, 10, 9);
+
+        // Generate 100 updates for the same slot, differing only in sequence number
+        let mut all_updates = Vec::new();
+        for seq in 0..100 {
+            let updates = generate_update(vec![Message::PriceFeedMessage(price_feed)], 10, seq);
+            all_updates.extend(updates);
+        }
+
+        // Process the updates concurrently. We don't care if some of them fail due to the
+        // race condition; the important thing is that only one event is sent.
+        let state_arc = Arc::clone(&state);
+        let futures = all_updates.into_iter().map(move |u| {
+            let state_clone = Arc::clone(&state_arc);
+            async move {
+                let _ = state_clone.store_update(u).await;
+            }
+        });
+        futures::future::join_all(futures).await;
+
+        // Check that only one AggregationEvent::New is received
+        assert_eq!(
+            update_rx.recv().await,
+            Ok(AggregationEvent::New { slot: 10 })
+        );
+
+        // Try to receive another event with a timeout to ensure no more events were sent
+        let timeout_result =
+            tokio::time::timeout(std::time::Duration::from_millis(100), update_rx.recv()).await;
+
+        // The timeout should occur, indicating no more events were received
+        assert!(
+            timeout_result.is_err(),
+            "Received unexpected additional event"
+        );
+
+        // Verify that the price feed was stored correctly
+        let price_feed_ids = (*state).get_price_feed_ids().await;
+        assert_eq!(price_feed_ids.len(), 1);
+        assert!(price_feed_ids.contains(&PriceIdentifier::new([100; 32])));
+    }
 }
 #[cfg(test)]
 /// Unit tests for the core TWAP calculation logic in `calculate_twap`