
zero-copy slot hashes sysvar (with checked alternatives) (#152)

Adds SlotHashes sysvar support. Checked and unchecked zero-copy paths are both included.

Binary search is the default lookup, but a zero-copy iterator and copy-into-buffer helpers are also implemented for callers with different SlotHashes access needs.
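
A minimal usage sketch (hedged; `check_recent_slot` is a hypothetical caller, and the paths follow the modules added in this change):

use pinocchio::{
    account_info::AccountInfo,
    program_error::ProgramError,
    sysvars::{clock::Slot, slot_hashes::SlotHashes},
};

fn check_recent_slot(sysvar_account: &AccountInfo, slot: Slot) -> Result<bool, ProgramError> {
    // Checked zero-copy path: verifies the sysvar key and borrows the account data in place.
    let slot_hashes = SlotHashes::from_account_info(sysvar_account)?;

    // Binary search over the descending-sorted entries.
    let found = slot_hashes.get_hash(slot).is_some();

    // Zero-copy iteration over all entries is also available.
    for entry in &slot_hashes {
        let _ = (entry.slot(), entry.hash);
    }

    Ok(found)
}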

---------

Co-authored-by: Illia Bobyr <illia.bobyr@gmail.com>
Co-authored-by: Fernando Otero <febo@anza.xyz>
Peter Keay 3 months ago
parent
commit
7c40b9d6fc

+ 1 - 1
Cargo.lock

@@ -161,4 +161,4 @@ dependencies = [
 name = "unicode-ident"
 version = "1.0.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
+checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"

+ 5 - 1
scripts/setup/solana.dic

@@ -64,4 +64,8 @@ RPC
 ed25519
 performant
 syscall/S
-bitmask
+bitmask
+pinocchio
+mainnet
+getters
+PRNG

+ 3 - 0
sdk/pinocchio/Cargo.toml

@@ -19,3 +19,6 @@ unexpected_cfgs = { level = "warn", check-cfg = [
 
 [features]
 std = []
+
+[dev-dependencies]
+five8_const = { workspace = true }

+ 1 - 0
sdk/pinocchio/src/sysvars/mod.rs

@@ -10,6 +10,7 @@ pub mod clock;
 pub mod fees;
 pub mod instructions;
 pub mod rent;
+pub mod slot_hashes;
 
 /// Return value indicating that the `offset + length` is greater than the length of
 /// the sysvar data.

+ 333 - 0
sdk/pinocchio/src/sysvars/slot_hashes/mod.rs

@@ -0,0 +1,333 @@
+//! Efficient, zero-copy access to `SlotHashes` sysvar data.
+
+pub mod raw;
+#[doc(inline)]
+pub use raw::{fetch_into, fetch_into_unchecked, validate_fetch_offset};
+
+#[cfg(test)]
+mod test;
+#[cfg(test)]
+mod test_edge;
+#[cfg(test)]
+mod test_raw;
+#[cfg(test)]
+mod test_utils;
+
+use crate::{
+    account_info::{AccountInfo, Ref},
+    program_error::ProgramError,
+    pubkey::Pubkey,
+    sysvars::clock::Slot,
+};
+use core::{mem, ops::Deref, slice::from_raw_parts};
+#[cfg(feature = "std")]
+use std::boxed::Box;
+
+/// `SysvarS1otHashes111111111111111111111111111`
+pub const SLOTHASHES_ID: Pubkey = [
+    6, 167, 213, 23, 25, 47, 10, 175, 198, 242, 101, 227, 251, 119, 204, 122, 218, 130, 197, 41,
+    208, 190, 59, 19, 110, 45, 0, 85, 32, 0, 0, 0,
+];
+/// Number of bytes in a hash.
+pub const HASH_BYTES: usize = 32;
+/// Sysvar data layout:
+/// `len`     (8 bytes): little-endian entry count (`≤ 512`)
+/// `entries` (`len × 40` bytes): consecutive `(u64 slot, [u8; 32] hash)` pairs
+/// Size of the entry count field at the beginning of the sysvar data.
+pub const NUM_ENTRIES_SIZE: usize = mem::size_of::<u64>();
+/// Size of a slot number in bytes.
+pub const SLOT_SIZE: usize = mem::size_of::<Slot>();
+/// Size of a single slot hash entry.
+pub const ENTRY_SIZE: usize = SLOT_SIZE + HASH_BYTES;
+/// Maximum number of slot hash entries that can be stored in the sysvar.
+pub const MAX_ENTRIES: usize = 512;
+/// Maximum size of the sysvar data in bytes (20,488). On mainnet the sysvar account is always this full size (never smaller).
+pub const MAX_SIZE: usize = NUM_ENTRIES_SIZE + MAX_ENTRIES * ENTRY_SIZE;
+/// A single hash.
+pub type Hash = [u8; HASH_BYTES];
+
+/// A single entry in the `SlotHashes` sysvar.
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[repr(C)]
+pub struct SlotHashEntry {
+    /// The slot number stored as little-endian bytes.
+    slot_le: [u8; 8],
+    /// The hash corresponding to the slot.
+    pub hash: Hash,
+}
+
+// Fail compilation if `SlotHashEntry` is not byte-aligned.
+const _: [(); 1] = [(); mem::align_of::<SlotHashEntry>()];
+
+/// `SlotHashes` provides read-only, zero-copy access to `SlotHashes` sysvar bytes.
+pub struct SlotHashes<T: Deref<Target = [u8]>> {
+    data: T,
+}
+
+/// Log a `Hash` from a program.
+pub fn log(hash: &Hash) {
+    crate::pubkey::log(hash);
+}
+
+/// Reads the entry count from the first 8 bytes of data.
+/// Returns None if the data is too short.
+#[inline(always)]
+pub(crate) fn read_entry_count_from_bytes(data: &[u8]) -> Option<usize> {
+    if data.len() < NUM_ENTRIES_SIZE {
+        return None;
+    }
+    Some(unsafe {
+        // SAFETY: `data` is guaranteed to be at least `NUM_ENTRIES_SIZE` bytes long by the
+        // preceding length check, so it is sound to read the first 8 bytes and interpret
+        // them as a little-endian `u64`.
+        u64::from_le_bytes(*(data.as_ptr() as *const [u8; NUM_ENTRIES_SIZE]))
+    } as usize)
+}
+
+/// Reads the entry count from the first 8 bytes of data.
+///
+/// # Safety
+/// Caller must ensure data has at least `NUM_ENTRIES_SIZE` bytes.
+#[inline(always)]
+pub(crate) unsafe fn read_entry_count_from_bytes_unchecked(data: &[u8]) -> usize {
+    u64::from_le_bytes(*(data.as_ptr() as *const [u8; NUM_ENTRIES_SIZE])) as usize
+}
+
+/// Validates `SlotHashes` data format.
+///
+/// The function checks:
+/// 1. The buffer is large enough to contain the entry count.
+/// 2. The buffer length is sufficient to hold the declared number of entries.
+///
+/// It returns `Ok(())` if the data is well-formed, otherwise an appropriate
+/// `ProgramError` describing the issue.
+#[inline]
+fn parse_and_validate_data(data: &[u8]) -> Result<(), ProgramError> {
+    if data.len() < NUM_ENTRIES_SIZE {
+        return Err(ProgramError::AccountDataTooSmall);
+    }
+
+    // SAFETY: We've confirmed that data has enough bytes to read the entry count.
+    let num_entries = unsafe { read_entry_count_from_bytes_unchecked(data) };
+
+    let min_size = NUM_ENTRIES_SIZE + num_entries * ENTRY_SIZE;
+    if data.len() < min_size {
+        return Err(ProgramError::AccountDataTooSmall);
+    }
+
+    Ok(())
+}
+
+impl SlotHashEntry {
+    /// Returns the slot number as a `u64`.
+    #[inline(always)]
+    pub fn slot(&self) -> Slot {
+        u64::from_le_bytes(self.slot_le)
+    }
+}
+
+impl<T: Deref<Target = [u8]>> SlotHashes<T> {
+    /// Creates a `SlotHashes` instance with validation of the entry count and buffer size.
+    ///
+    /// This constructor validates that the buffer has at least enough bytes to contain
+    /// the declared number of entries. The buffer can be any size above the minimum required,
+    /// making it suitable for both full `MAX_SIZE` buffers and smaller test data.
+    /// Does not validate that entries are sorted in descending order.
+    #[inline(always)]
+    pub fn new(data: T) -> Result<Self, ProgramError> {
+        parse_and_validate_data(&data)?;
+        // SAFETY: `parse_and_validate_data` verifies that the data slice has at least
+        // `NUM_ENTRIES_SIZE` bytes for the entry count and enough additional bytes to
+        // contain the declared number of entries, thus upholding all invariants required
+        // by `SlotHashes::new_unchecked`.
+        Ok(unsafe { Self::new_unchecked(data) })
+    }
+
+    /// Creates a `SlotHashes` instance without validation.
+    ///
+    /// This is an unsafe constructor that bypasses all validation checks for performance.
+    /// In debug builds, it still runs `parse_and_validate_data` as a sanity check.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because it does not validate the data size or format.
+    /// The caller must ensure:
+    /// 1. The underlying byte slice in `data` represents valid `SlotHashes` data
+    ///    (length prefix plus entries, where entries are sorted in descending order by slot).
+    /// 2. The data slice has at least `NUM_ENTRIES_SIZE + (declared_entries * ENTRY_SIZE)` bytes.
+    /// 3. The first 8 bytes contain a valid entry count in little-endian format.
+    ///
+    #[inline(always)]
+    pub unsafe fn new_unchecked(data: T) -> Self {
+        if cfg!(debug_assertions) {
+            parse_and_validate_data(&data)
+                .expect("`data` matches all the same requirements as for `new()`");
+        }
+
+        SlotHashes { data }
+    }
+
+    /// Returns the number of `SlotHashEntry` items accessible.
+    #[inline(always)]
+    pub fn len(&self) -> usize {
+        // SAFETY: `SlotHashes::new` and `new_unchecked` guarantee that `self.data` has at
+        // least `NUM_ENTRIES_SIZE` bytes, so reading the entry count without additional
+        // checks is safe.
+        unsafe { read_entry_count_from_bytes_unchecked(&self.data) }
+    }
+
+    /// Returns if the sysvar is empty.
+    #[inline(always)]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Returns a `&[SlotHashEntry]` view into the underlying data.
+    ///
+    /// Call once and reuse the slice if you need many look-ups.
+    ///
+    /// The constructor (in the safe path that called `parse_and_validate_data`)
+    /// or caller (if unsafe `new_unchecked` path) is responsible for ensuring
+    /// the slice is big enough and properly aligned.
+    #[inline(always)]
+    pub fn entries(&self) -> &[SlotHashEntry] {
+        unsafe {
+            // SAFETY: The slice begins `NUM_ENTRIES_SIZE` bytes into `self.data`, which
+            // is guaranteed by parse_and_validate_data() to have at least `len * ENTRY_SIZE`
+            // additional bytes. The pointer is properly aligned for `SlotHashEntry` (which
+            // a compile-time assertion ensures is alignment 1).
+            from_raw_parts(
+                self.data.as_ptr().add(NUM_ENTRIES_SIZE) as *const SlotHashEntry,
+                self.len(),
+            )
+        }
+    }
+
+    /// Gets a reference to the entry at `index` or `None` if out of bounds.
+    #[inline(always)]
+    pub fn get_entry(&self, index: usize) -> Option<&SlotHashEntry> {
+        if index >= self.len() {
+            return None;
+        }
+        Some(unsafe { self.get_entry_unchecked(index) })
+    }
+
+    /// Finds the hash for a specific slot using binary search.
+    ///
+    /// Returns the hash if the slot is found, or `None` if not found.
+    /// Assumes entries are sorted by slot in descending order.
+    /// If calling repeatedly, prefer getting `entries()` in caller
+    /// to avoid repeated slice construction.
+    #[inline(always)]
+    pub fn get_hash(&self, target_slot: Slot) -> Option<&Hash> {
+        self.position(target_slot)
+            .map(|index| unsafe { &self.get_entry_unchecked(index).hash })
+    }
+
+    /// Finds the position (index) of a specific slot using binary search.
+    ///
+    /// Returns the index if the slot is found, or `None` if not found.
+    /// Assumes entries are sorted by slot in descending order.
+    /// If calling repeatedly, prefer getting `entries()` in caller
+    /// to avoid repeated slice construction.
+    #[inline(always)]
+    pub fn position(&self, target_slot: Slot) -> Option<usize> {
+        self.entries()
+            .binary_search_by(|probe_entry| probe_entry.slot().cmp(&target_slot).reverse())
+            .ok()
+    }
+
+    /// Returns a reference to the entry at `index` **without** bounds checking.
+    ///
+    /// # Safety
+    /// Caller must guarantee that `index < self.len()`.
+    #[inline(always)]
+    pub unsafe fn get_entry_unchecked(&self, index: usize) -> &SlotHashEntry {
+        debug_assert!(index < self.len());
+        // SAFETY: Caller guarantees `index < self.len()`. The data pointer is valid
+        // and aligned for `SlotHashEntry`. The offset calculation points to a
+        // valid entry within the allocated data.
+        let entries_ptr = self.data.as_ptr().add(NUM_ENTRIES_SIZE) as *const SlotHashEntry;
+        &*entries_ptr.add(index)
+    }
+}
+
+impl<'a, T: Deref<Target = [u8]>> IntoIterator for &'a SlotHashes<T> {
+    type Item = &'a SlotHashEntry;
+    type IntoIter = core::slice::Iter<'a, SlotHashEntry>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.entries().iter()
+    }
+}
+
+impl<'a> SlotHashes<Ref<'a, [u8]>> {
+    /// Creates a `SlotHashes` instance by safely borrowing data from an `AccountInfo`.
+    ///
+    /// This function verifies that:
+    /// - The account key matches the `SLOTHASHES_ID`
+    /// - The account data can be successfully borrowed
+    ///
+    /// Returns a `SlotHashes` instance that borrows the account's data for zero-copy access.
+    /// The returned instance is valid for the lifetime of the borrow.
+    ///
+    /// # Errors
+    /// - `ProgramError::InvalidArgument` if the account key doesn't match the `SlotHashes` sysvar ID
+    /// - `ProgramError::AccountBorrowFailed` if the account data is already mutably borrowed
+    #[inline(always)]
+    pub fn from_account_info(account_info: &'a AccountInfo) -> Result<Self, ProgramError> {
+        if account_info.key() != &SLOTHASHES_ID {
+            return Err(ProgramError::InvalidArgument);
+        }
+
+        let data_ref = account_info.try_borrow_data()?;
+
+        // SAFETY: The account was validated to be the `SlotHashes` sysvar.
+        Ok(unsafe { SlotHashes::new_unchecked(data_ref) })
+    }
+}
+
+#[cfg(feature = "std")]
+impl SlotHashes<Box<[u8]>> {
+    /// Fills the provided buffer with the full `SlotHashes` sysvar data.
+    ///
+    /// # Safety
+    /// The caller must ensure the buffer pointer is valid for `MAX_SIZE` bytes.
+    /// The syscall will write exactly `MAX_SIZE` bytes to the buffer.
+    #[inline(always)]
+    unsafe fn fill_from_sysvar(buffer_ptr: *mut u8) -> Result<(), ProgramError> {
+        crate::sysvars::get_sysvar_unchecked(buffer_ptr, &SLOTHASHES_ID, 0, MAX_SIZE)?;
+
+        // Host builds: the syscall stub does not fill the buffer, so zero the length header for tests.
+        #[cfg(not(target_os = "solana"))]
+        core::ptr::write_bytes(buffer_ptr, 0, NUM_ENTRIES_SIZE);
+
+        Ok(())
+    }
+
+    /// Allocates a `MAX_SIZE` buffer and fills it with the sysvar data via syscall.
+    #[inline(always)]
+    fn allocate_and_fetch() -> Result<Box<[u8]>, ProgramError> {
+        let mut buf = std::vec::Vec::with_capacity(MAX_SIZE);
+        unsafe {
+            // SAFETY: `buf` was allocated with capacity `MAX_SIZE` so its
+            // pointer is valid for exactly that many bytes. `fill_from_sysvar`
+            // writes `MAX_SIZE` bytes, and we immediately set the length to
+            // `MAX_SIZE`, marking the entire buffer as initialized before it is
+            // turned into a boxed slice.
+            Self::fill_from_sysvar(buf.as_mut_ptr())?;
+            buf.set_len(MAX_SIZE);
+        }
+        Ok(buf.into_boxed_slice())
+    }
+
+    /// Fetches the `SlotHashes` sysvar data directly via syscall. This copies
+    /// the full sysvar data (`MAX_SIZE` bytes).
+    #[inline(always)]
+    pub fn fetch() -> Result<Self, ProgramError> {
+        let data_init = Self::allocate_and_fetch()?;
+
+        // SAFETY: The data was initialized by the syscall.
+        Ok(unsafe { SlotHashes::new_unchecked(data_init) })
+    }
+}

+ 129 - 0
sdk/pinocchio/src/sysvars/slot_hashes/raw.rs

@@ -0,0 +1,129 @@
+//! Raw / caller-supplied buffer helpers for the `SlotHashes` sysvar.
+//!
+//! This sub-module exposes lightweight functions that let a program copy
+//! `SlotHashes` data directly into an arbitrary buffer **without** constructing
+//! a `SlotHashes<T>` view. Use these when you only need a byte snapshot or
+//! when including the sysvar account is infeasible.
+#![allow(clippy::inline_always)]
+
+use super::*;
+
+/// Validates buffer format for `SlotHashes` data and calculates entry capacity.
+///
+/// Validates that the buffer follows the correct format:
+/// - If `offset == 0`: Buffer must have `8 + (N × 40)` format (header and entries)
+/// - If `offset != 0`: Buffer must be a multiple of 40 bytes (entries only)
+///
+/// Does not validate that `offset + buffer_len ≤ MAX_SIZE`; this is checked
+/// separately in `validate_fetch_offset`, and the syscall will fail anyway if
+/// `offset + buffer_len > MAX_SIZE`.
+///
+/// Returns the number of entries that can fit in the buffer.
+#[inline(always)]
+pub(crate) fn get_valid_buffer_capacity(
+    buffer_len: usize,
+    offset: usize,
+) -> Result<usize, ProgramError> {
+    if offset == 0 {
+        // Buffer includes header: must have 8 + (N × 40) format
+        if buffer_len == MAX_SIZE {
+            return Ok(MAX_ENTRIES);
+        }
+
+        if buffer_len < NUM_ENTRIES_SIZE {
+            return Err(ProgramError::AccountDataTooSmall);
+        }
+
+        let entry_data_len = buffer_len - NUM_ENTRIES_SIZE;
+        if entry_data_len % ENTRY_SIZE != 0 {
+            return Err(ProgramError::InvalidArgument);
+        }
+
+        Ok(entry_data_len / ENTRY_SIZE)
+    } else {
+        // Buffer contains only entry data: must be multiple of ENTRY_SIZE
+        if buffer_len % ENTRY_SIZE != 0 {
+            return Err(ProgramError::InvalidArgument);
+        }
+
+        Ok(buffer_len / ENTRY_SIZE)
+    }
+}
+
+/// Validates offset parameters for fetching `SlotHashes` data.
+///
+/// * `offset` - Byte offset within the `SlotHashes` sysvar data.
+/// * `buffer_len` - Length of the destination buffer.
+#[inline(always)]
+pub fn validate_fetch_offset(offset: usize, buffer_len: usize) -> Result<(), ProgramError> {
+    if offset >= MAX_SIZE {
+        return Err(ProgramError::InvalidArgument);
+    }
+    if offset != 0 && (offset < NUM_ENTRIES_SIZE || (offset - NUM_ENTRIES_SIZE) % ENTRY_SIZE != 0) {
+        return Err(ProgramError::InvalidArgument);
+    }
+    // Perhaps redundant, as the syscall will fail later if
+    // `buffer.len() + offset > MAX_SIZE`, but this is for
+    // checked paths.
+    if offset.saturating_add(buffer_len) > MAX_SIZE {
+        return Err(ProgramError::InvalidArgument);
+    }
+
+    Ok(())
+}
+
+/// Copies `SlotHashes` sysvar bytes into `buffer`, performing validation.
+///
+/// # Arguments
+///
+/// * `buffer` - Destination buffer to copy sysvar data into
+/// * `offset` - Byte offset within the `SlotHashes` sysvar data to start copying from
+///
+/// # Returns
+///
+/// Returns the number of entries:
+/// - If `offset == 0`: The actual entry count read from the sysvar header
+/// - If `offset != 0`: The number of entries that can fit in the buffer
+///
+/// The return value helps callers understand the structure of the copied data.
+#[inline(always)]
+pub fn fetch_into(buffer: &mut [u8], offset: usize) -> Result<usize, ProgramError> {
+    let num_entries = get_valid_buffer_capacity(buffer.len(), offset)?;
+
+    validate_fetch_offset(offset, buffer.len())?;
+
+    // SAFETY: Buffer format and offset alignment validated above.
+    unsafe { fetch_into_unchecked(buffer, offset) }?;
+
+    if offset == 0 {
+        // Buffer includes header: return actual entry count from sysvar data
+        Ok(read_entry_count_from_bytes(buffer).unwrap_or(0))
+    } else {
+        // Buffer excludes header: return calculated entry capacity
+        Ok(num_entries)
+    }
+}
+
+/// Copies `SlotHashes` sysvar bytes into `buffer` **without** validation.
+///
+/// The caller is responsible for ensuring that:
+/// 1. `buffer` is large enough for the requested `offset + buffer.len()` range and
+///    properly laid out (see `get_valid_buffer_capacity` and `validate_fetch_offset`).
+/// 2. `offset + buffer.len()` is not greater than `MAX_SIZE`, or the syscall will
+///    fail.
+/// 3. The memory behind `buffer` is writable for its full length.
+///
+/// # Safety
+/// Internally this function performs an unchecked Solana syscall that writes
+/// raw bytes into the provided pointer.
+#[inline(always)]
+pub unsafe fn fetch_into_unchecked(buffer: &mut [u8], offset: usize) -> Result<(), ProgramError> {
+    crate::sysvars::get_sysvar_unchecked(
+        buffer.as_mut_ptr(),
+        &SLOTHASHES_ID,
+        offset,
+        buffer.len(),
+    )?;
+
+    Ok(())
+}

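For illustration, the raw helpers above might be used roughly like this (a hedged sketch; `copy_recent_entries` is a hypothetical caller, and the constants come from the parent `slot_hashes` module):

use pinocchio::{
    program_error::ProgramError,
    sysvars::slot_hashes::{fetch_into, ENTRY_SIZE, NUM_ENTRIES_SIZE},
};

fn copy_recent_entries() -> Result<usize, ProgramError> {
    // Buffer sized for two entries; the non-zero offset skips the 8-byte
    // length header, so the buffer receives entry data only.
    let mut buffer = [0u8; 2 * ENTRY_SIZE];
    let entries_that_fit = fetch_into(&mut buffer, NUM_ENTRIES_SIZE)?;
    Ok(entries_that_fit)
}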
+ 485 - 0
sdk/pinocchio/src/sysvars/slot_hashes/test.rs

@@ -0,0 +1,485 @@
+use super::test_utils::*;
+use crate::{
+    account_info::{Account, AccountInfo},
+    program_error::ProgramError,
+    sysvars::{clock::Slot, slot_hashes::*},
+};
+use core::{
+    mem::{align_of, size_of},
+    ptr,
+};
+
+extern crate std;
+use std::io::Write;
+use std::vec::Vec;
+
+#[test]
+fn test_layout_constants() {
+    assert_eq!(NUM_ENTRIES_SIZE, size_of::<u64>());
+    assert_eq!(SLOT_SIZE, size_of::<u64>());
+    assert_eq!(HASH_BYTES, 32);
+    assert_eq!(ENTRY_SIZE, size_of::<u64>() + 32);
+    assert_eq!(MAX_SIZE, 20_488);
+    assert_eq!(size_of::<SlotHashEntry>(), ENTRY_SIZE);
+    assert_eq!(align_of::<SlotHashEntry>(), align_of::<[u8; 8]>());
+    assert_eq!(
+        SLOTHASHES_ID,
+        [
+            6, 167, 213, 23, 25, 47, 10, 175, 198, 242, 101, 227, 251, 119, 204, 122, 218, 130,
+            197, 41, 208, 190, 59, 19, 110, 45, 0, 85, 32, 0, 0, 0,
+        ]
+    );
+
+    pub fn check_base58(input_bytes: &[u8], expected_b58: &str) {
+        assert_eq!(five8_const::decode_32_const(expected_b58), input_bytes);
+    }
+
+    check_base58(
+        &SLOTHASHES_ID,
+        "SysvarS1otHashes111111111111111111111111111",
+    );
+}
+
+#[test]
+fn test_binary_search_no_std() {
+    const TEST_NUM_ENTRIES: usize = 512;
+    const START_SLOT: u64 = 2000;
+
+    let entries =
+        generate_mock_entries(TEST_NUM_ENTRIES, START_SLOT, DecrementStrategy::Average1_05);
+    let data = create_mock_data(&entries);
+    let entry_count = entries.len();
+
+    let first_slot = entries[0].0;
+    let mid_index = entry_count / 2;
+    let mid_slot = entries[mid_index].0;
+    let last_slot = entries[entry_count - 1].0;
+
+    let slot_hashes = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    assert_eq!(slot_hashes.position(first_slot), Some(0));
+
+    let expected_mid_index = Some(mid_index);
+    let actual_pos_mid = slot_hashes.position(mid_slot);
+
+    // Extract surrounding entries for context in case of failure
+    let start_idx = mid_index.saturating_sub(2);
+    let end_idx = core::cmp::min(entry_count, mid_index.saturating_add(3));
+    let surrounding_slots: Vec<_> = entries[start_idx..end_idx].iter().map(|e| e.0).collect();
+    assert_eq!(
+        actual_pos_mid, expected_mid_index,
+        "position({}) failed! Surrounding slots: {:?}",
+        mid_slot, surrounding_slots
+    );
+
+    assert_eq!(slot_hashes.position(last_slot), Some(entry_count - 1));
+
+    assert_eq!(slot_hashes.position(START_SLOT + 1), None);
+
+    // Find an actual gap to test a guaranteed non-existent internal slot
+    let mut missing_internal_slot = None;
+    for i in 0..(entries.len() - 1) {
+        if entries[i].0 > entries[i + 1].0 + 1 {
+            missing_internal_slot = Some(entries[i + 1].0 + 1);
+            break;
+        }
+    }
+    assert!(
+        missing_internal_slot.is_some(),
+        "Test requires at least one gap between slots"
+    );
+    assert_eq!(slot_hashes.position(missing_internal_slot.unwrap()), None);
+
+    assert_eq!(slot_hashes.get_hash(first_slot), Some(&entries[0].1));
+    assert_eq!(slot_hashes.get_hash(mid_slot), Some(&entries[mid_index].1));
+    assert_eq!(
+        slot_hashes.get_hash(last_slot),
+        Some(&entries[entry_count - 1].1)
+    );
+    assert_eq!(slot_hashes.get_hash(START_SLOT + 1), None);
+
+    // Test empty list explicitly
+    let empty_entries = generate_mock_entries(0, START_SLOT, DecrementStrategy::Strictly1);
+    let empty_data = create_mock_data(&empty_entries);
+    let empty_hashes = unsafe { SlotHashes::new_unchecked(empty_data.as_slice()) };
+    assert_eq!(empty_hashes.get_hash(100), None);
+
+    let pos_start_plus_1 = slot_hashes.position(START_SLOT + 1);
+    assert!(
+        pos_start_plus_1.is_none(),
+        "position(START_SLOT + 1) should be None"
+    );
+}
+
+#[test]
+fn test_basic_getters_and_iterator_no_std() {
+    const NUM_ENTRIES: usize = 512;
+    const START_SLOT: u64 = 2000;
+    let entries = generate_mock_entries(NUM_ENTRIES, START_SLOT, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let slot_hashes = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    assert_eq!(slot_hashes.len(), NUM_ENTRIES);
+
+    let entry0 = slot_hashes.get_entry(0);
+    assert!(entry0.is_some());
+    assert_eq!(entry0.unwrap().slot(), START_SLOT); // Check against start slot
+    assert_eq!(entry0.unwrap().hash, [0u8; HASH_BYTES]); // First generated hash is [0u8; 32]
+
+    let entry2 = slot_hashes.get_entry(NUM_ENTRIES - 1); // Last entry
+    assert!(entry2.is_some());
+    assert_eq!(entry2.unwrap().slot(), entries[NUM_ENTRIES - 1].0);
+    assert_eq!(entry2.unwrap().hash, entries[NUM_ENTRIES - 1].1);
+    assert!(slot_hashes.get_entry(NUM_ENTRIES).is_none()); // Out of bounds
+
+    for (i, entry) in slot_hashes.into_iter().enumerate() {
+        assert_eq!(entry.slot(), entries[i].0);
+        assert_eq!(entry.hash, entries[i].1);
+    }
+    assert!(slot_hashes.into_iter().nth(NUM_ENTRIES).is_none());
+
+    // Test ExactSizeIterator hint
+    let mut iter_hint = slot_hashes.into_iter();
+    assert_eq!(iter_hint.len(), NUM_ENTRIES);
+    iter_hint.next();
+    assert_eq!(iter_hint.len(), NUM_ENTRIES - 1);
+    // Skip to end
+    for _ in 1..NUM_ENTRIES {
+        iter_hint.next();
+    }
+    iter_hint.next();
+    assert_eq!(iter_hint.len(), 0);
+
+    // Test empty case
+    let empty_data = create_mock_data(&[]);
+    let empty_hashes = unsafe { SlotHashes::new_unchecked(empty_data.as_slice()) };
+    assert_eq!(empty_hashes.len(), 0);
+    assert!(empty_hashes.get_entry(0).is_none());
+    assert!(empty_hashes.into_iter().next().is_none());
+}
+
+#[test]
+fn test_entry_count_no_std() {
+    // Valid data (2 entries)
+    let entries: &[(Slot, Hash)] = &[(100, [1u8; HASH_BYTES]), (98, [2u8; HASH_BYTES])];
+    let data = create_mock_data(entries);
+    let slot_hashes = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+    assert_eq!(slot_hashes.len(), 2);
+
+    // Exact-size buffer (header + declared entries) should succeed with `new()`.
+    let num_entries = entries.len() as u64;
+    let data_len = NUM_ENTRIES_SIZE + entries.len() * ENTRY_SIZE;
+    let mut small_data = std::vec![0u8; data_len];
+    small_data[0..NUM_ENTRIES_SIZE].copy_from_slice(&num_entries.to_le_bytes());
+    let mut offset = NUM_ENTRIES_SIZE;
+    for (slot, hash) in entries {
+        small_data[offset..offset + SLOT_SIZE].copy_from_slice(&slot.to_le_bytes());
+        small_data[offset + SLOT_SIZE..offset + ENTRY_SIZE].copy_from_slice(hash);
+        offset += ENTRY_SIZE;
+    }
+    let res1 = SlotHashes::new(small_data.as_slice());
+    assert!(
+        res1.is_ok(),
+        "SlotHashes::new should succeed with a correctly sized buffer"
+    );
+    let slot_hashes_from_small = res1.unwrap();
+    assert_eq!(slot_hashes_from_small.len(), entries.len());
+
+    // Empty data is valid
+    let empty_data = create_mock_data(&[]);
+    let empty_hashes = unsafe { SlotHashes::new_unchecked(empty_data.as_slice()) };
+    assert_eq!(empty_hashes.len(), 0);
+}
+
+#[test]
+fn test_get_entry_unchecked_no_std() {
+    let single_entry: &[(Slot, Hash)] = &[(100, [1u8; HASH_BYTES])];
+    let data = create_mock_data(single_entry);
+    let slot_hashes = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    let entry = unsafe { slot_hashes.get_entry_unchecked(0) };
+    assert_eq!(entry.slot(), 100);
+    assert_eq!(entry.hash, [1u8; HASH_BYTES]);
+}
+
+#[test]
+fn test_get_entry_unchecked_last_no_std() {
+    const COUNT: usize = 8;
+    const START_SLOT: u64 = 600;
+    let entries = generate_mock_entries(COUNT, START_SLOT, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let sh = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    let last = unsafe { sh.get_entry_unchecked(COUNT - 1) };
+    assert_eq!(last.slot(), entries[COUNT - 1].0);
+    assert_eq!(last.hash, entries[COUNT - 1].1);
+}
+
+#[test]
+fn test_iterator_into_ref_no_std() {
+    const NUM: usize = 16;
+    const START: u64 = 100;
+    let entries = generate_mock_entries(NUM, START, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let sh = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    // Collect slots via iterator
+    let mut sum: u64 = 0;
+    for e in &sh {
+        sum += e.slot();
+    }
+    let expected_sum: u64 = entries.iter().map(|(s, _)| *s).sum();
+    assert_eq!(sum, expected_sum);
+
+    let iter = (&sh).into_iter();
+    assert_eq!(iter.len(), sh.len());
+}
+
+// Tests to verify mock data helpers
+#[test]
+fn test_mock_data_max_entries_boundary() {
+    let entries = generate_mock_entries(MAX_ENTRIES, 1000, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let sh = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+    assert_eq!(sh.len(), MAX_ENTRIES);
+}
+
+#[test]
+fn test_mock_data_raw_byte_layout() {
+    let entries = &[(100u64, [0xAB; 32])];
+    let data = create_mock_data(entries);
+    // length prefix
+    assert_eq!(&data[0..8], &1u64.to_le_bytes());
+    // slot bytes
+    assert_eq!(&data[8..16], &100u64.to_le_bytes());
+    // hash bytes
+    assert_eq!(&data[16..48], &[0xAB; 32]);
+}
+
+#[test]
+fn test_read_entry_count_from_bytes() {
+    let entry_count = 42u64;
+    let mut data = [0u8; 16];
+    data[0..8].copy_from_slice(&entry_count.to_le_bytes());
+
+    let result = read_entry_count_from_bytes(&data);
+    assert_eq!(result, Some(42));
+
+    let zero_count = 0u64;
+    let mut zero_data = [0u8; 8];
+    zero_data.copy_from_slice(&zero_count.to_le_bytes());
+
+    let zero_result = read_entry_count_from_bytes(&zero_data);
+    assert_eq!(zero_result, Some(0));
+
+    let max_count = MAX_ENTRIES as u64;
+    let mut max_data = [0u8; 8];
+    max_data.copy_from_slice(&max_count.to_le_bytes());
+
+    let max_result = read_entry_count_from_bytes(&max_data);
+    assert_eq!(max_result, Some(MAX_ENTRIES));
+}
+
+fn mock_fetch_into_unchecked(
+    mock_sysvar_data: &[u8],
+    buffer: &mut [u8],
+    offset: u64,
+) -> Result<(), ProgramError> {
+    let offset = offset as usize;
+    if offset >= mock_sysvar_data.len() {
+        return Err(ProgramError::InvalidArgument);
+    }
+
+    let available_len = mock_sysvar_data.len() - offset;
+    let copy_len = core::cmp::min(buffer.len(), available_len);
+
+    buffer[..copy_len].copy_from_slice(&mock_sysvar_data[offset..offset + copy_len]);
+    Ok(())
+}
+
+/// Verifies that the mock byte-copy helper (`mock_fetch_into_unchecked`) obeys
+/// the same offset semantics we expect from the real `raw::fetch_into_*` API.
+///
+/// This is purely an internal byte-math test; it does not call the
+/// production syscall wrapper and therefore does not attest that the runtime
+/// offset logic works. Its value is in guarding against mistakes
+/// in the offset arithmetic used by other in-test helpers.
+#[test]
+fn test_mock_offset_copy() {
+    // Create mock sysvar data: 8-byte length + 3 entries
+    let entries = &[
+        (100u64, [1u8; HASH_BYTES]),
+        (99u64, [2u8; HASH_BYTES]),
+        (98u64, [3u8; HASH_BYTES]),
+    ];
+    let mock_sysvar_data = create_mock_data(entries);
+
+    // Test offset 0 (full data)
+    let mut buffer_full = std::vec![0u8; mock_sysvar_data.len()];
+    mock_fetch_into_unchecked(&mock_sysvar_data, &mut buffer_full, 0).unwrap();
+    assert_eq!(buffer_full, mock_sysvar_data);
+
+    // Test offset 8 (skip length prefix, get entries only)
+    let entries_size = 3 * ENTRY_SIZE;
+    let mut buffer_entries = std::vec![0u8; entries_size];
+    mock_fetch_into_unchecked(&mock_sysvar_data, &mut buffer_entries, 8).unwrap();
+    assert_eq!(buffer_entries, &mock_sysvar_data[8..8 + entries_size]);
+
+    // Test offset 8 + ENTRY_SIZE (skip first entry)
+    let remaining_entries_size = 2 * ENTRY_SIZE;
+    let mut buffer_skip_first = std::vec![0u8; remaining_entries_size];
+    let skip_first_offset = 8 + ENTRY_SIZE;
+    mock_fetch_into_unchecked(
+        &mock_sysvar_data,
+        &mut buffer_skip_first,
+        skip_first_offset as u64,
+    )
+    .unwrap();
+    assert_eq!(
+        buffer_skip_first,
+        &mock_sysvar_data[skip_first_offset..skip_first_offset + remaining_entries_size]
+    );
+
+    // Test partial read with small buffer
+    let mut small_buffer = [0u8; 16]; // Only 16 bytes
+    mock_fetch_into_unchecked(&mock_sysvar_data, &mut small_buffer, 0).unwrap();
+    assert_eq!(small_buffer, &mock_sysvar_data[0..16]);
+
+    // Test offset beyond data (should fail)
+    let mut buffer_beyond = [0u8; 10];
+    let beyond_offset = mock_sysvar_data.len() as u64;
+    assert!(
+        mock_fetch_into_unchecked(&mock_sysvar_data, &mut buffer_beyond, beyond_offset).is_err()
+    );
+}
+
+#[test]
+fn test_entries_exposed_no_std() {
+    let entries = generate_mock_entries(8, 80, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let sh = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    let slice = sh.entries();
+    assert_eq!(slice.len(), entries.len());
+    for (i, e) in slice.iter().enumerate() {
+        assert_eq!(e.slot(), entries[i].0);
+        assert_eq!(e.hash, entries[i].1);
+    }
+}
+
+#[test]
+fn test_safe_vs_unsafe_getters_consistency() {
+    let entries = generate_mock_entries(16, 200, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+    let sh = unsafe { SlotHashes::new_unchecked(data.as_slice()) };
+
+    for i in 0..entries.len() {
+        let safe_entry = sh.get_entry(i).unwrap();
+        let unsafe_entry = unsafe { sh.get_entry_unchecked(i) };
+        assert_eq!(safe_entry, unsafe_entry);
+    }
+
+    assert_eq!(sh.len(), entries.len());
+}
+
+#[test]
+fn test_entry_count_header_too_short() {
+    let short = [0u8; 4];
+    assert!(SlotHashes::new(&short[..]).is_err());
+    assert_eq!(read_entry_count_from_bytes(&short), None);
+}
+
+#[test]
+fn test_log_function() {
+    let test_hash: Hash = [
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+        26, 27, 28, 29, 30, 31, 32,
+    ];
+
+    // Should not panic
+    log(&test_hash);
+}
+
+#[test]
+fn test_from_account_info_constructor() {
+    std::io::stderr().flush().unwrap();
+
+    const NUM_ENTRIES: usize = 3;
+    const START_SLOT: u64 = 1234;
+
+    let mock_entries = generate_mock_entries(NUM_ENTRIES, START_SLOT, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&mock_entries);
+
+    let mut aligned_backing: Vec<u64>;
+    let acct_ptr;
+
+    unsafe {
+        let header_size = core::mem::size_of::<AccountLayout>();
+        let total_size = header_size + data.len();
+        let word_len = (total_size + 7) / 8;
+        aligned_backing = std::vec![0u64; word_len];
+        let base_ptr = aligned_backing.as_mut_ptr() as *mut u8;
+
+        let header_ptr = base_ptr as *mut AccountLayout;
+        ptr::write(
+            header_ptr,
+            AccountLayout {
+                borrow_state: crate::NON_DUP_MARKER,
+                is_signer: 0,
+                is_writable: 0,
+                executable: 0,
+                resize_delta: 0,
+                key: SLOTHASHES_ID,
+                owner: [0u8; 32],
+                lamports: 0,
+                data_len: data.len() as u64,
+            },
+        );
+
+        ptr::copy_nonoverlapping(data.as_ptr(), base_ptr.add(header_size), data.len());
+
+        acct_ptr = base_ptr as *mut Account;
+    }
+
+    let account_info = AccountInfo { raw: acct_ptr };
+
+    let slot_hashes = SlotHashes::from_account_info(&account_info)
+        .expect("from_account_info should succeed with well-formed data");
+
+    assert_eq!(slot_hashes.len(), NUM_ENTRIES);
+    for (i, entry) in slot_hashes.into_iter().enumerate() {
+        assert_eq!(entry.slot(), mock_entries[i].0);
+        assert_eq!(entry.hash, mock_entries[i].1);
+    }
+}
+
+/// Host-side sanity test: ensure the `SlotHashes::fetch()` helper compiles and
+/// allocates a MAX_SIZE-sized buffer without panicking.
+///
+/// On non-Solana targets the underlying syscall is stubbed; the returned buffer
+/// is zero-initialized and contains zero entries.  We overwrite
+/// that buffer with deterministic fixture data and then exercise the normal
+/// `SlotHashes` getters to make sure the view itself works.  We do not verify
+/// that the syscall populated real on-chain bytes, as doing so requires an
+/// environment outside the scope of host `cargo test`.
+#[cfg(feature = "std")]
+#[test]
+fn test_fetch_allocates_buffer_host() {
+    const START_SLOT: u64 = 500;
+    let entries = generate_mock_entries(5, START_SLOT, DecrementStrategy::Strictly1);
+    let data = create_mock_data(&entries);
+
+    // This should allocate a 20_488-byte boxed slice and *not* panic.
+    let mut slot_hashes =
+        SlotHashes::<std::boxed::Box<[u8]>>::fetch().expect("fetch() should allocate");
+
+    // Overwrite the stubbed contents with known data so we can reuse the
+    // remainder of the test harness.
+    slot_hashes.data[..data.len()].copy_from_slice(&data);
+
+    assert_eq!(slot_hashes.len(), entries.len());
+    for (i, entry) in slot_hashes.into_iter().enumerate() {
+        assert_eq!(entry.slot(), entries[i].0);
+        assert_eq!(entry.hash, entries[i].1);
+    }
+}

+ 89 - 0
sdk/pinocchio/src/sysvars/slot_hashes/test_edge.rs

@@ -0,0 +1,89 @@
+use crate::{program_error::ProgramError, sysvars::slot_hashes::*};
+extern crate std;
+use super::test_utils::{build_slot_hashes_bytes as raw_slot_hashes, make_account_info};
+
+#[test]
+fn test_wrong_key_from_account_info() {
+    let bytes = raw_slot_hashes(0, &[]);
+    let (info, _backing) = unsafe { make_account_info([1u8; 32], &bytes, crate::NON_DUP_MARKER) };
+    assert!(matches!(
+        SlotHashes::from_account_info(&info),
+        Err(ProgramError::InvalidArgument)
+    ));
+}
+
+#[test]
+fn test_wrong_size_buffer_rejected() {
+    // Buffer that declares 1 entry but is 1 byte too small to hold it.
+    let num_entries: u64 = 1;
+    let required_size = NUM_ENTRIES_SIZE + (num_entries as usize) * ENTRY_SIZE;
+    let mut small_buffer = std::vec![0u8; required_size - 1];
+    small_buffer[0..NUM_ENTRIES_SIZE].copy_from_slice(&num_entries.to_le_bytes());
+
+    assert!(matches!(
+        SlotHashes::new(small_buffer.as_slice()),
+        Err(ProgramError::AccountDataTooSmall)
+    ));
+
+    // Buffer too small to even contain the length header.
+    let too_small_for_header = [0u8; NUM_ENTRIES_SIZE - 1];
+    assert!(matches!(
+        SlotHashes::new(too_small_for_header.as_slice()),
+        Err(ProgramError::AccountDataTooSmall)
+    ));
+}
+
+#[test]
+fn test_truncated_payload_with_max_size_buffer_is_valid() {
+    let entry = (123u64, [7u8; HASH_BYTES]);
+    let bytes = raw_slot_hashes(2, &[entry]); // says 2 but provides 1, rest is zeros
+
+    // With MAX_SIZE buffers, this is now valid - the second entry is just zeros
+    let slot_hashes = SlotHashes::new(bytes.as_slice()).expect("Should be valid");
+    assert_eq!(slot_hashes.len(), 2);
+
+    // First entry should match what we provided
+    let first_entry = slot_hashes.get_entry(0).unwrap();
+    assert_eq!(first_entry.slot(), 123);
+    assert_eq!(first_entry.hash, [7u8; HASH_BYTES]);
+
+    // Second entry should be all zeros (default padding)
+    let second_entry = slot_hashes.get_entry(1).unwrap();
+    assert_eq!(second_entry.slot(), 0);
+    assert_eq!(second_entry.hash, [0u8; HASH_BYTES]);
+}
+
+#[test]
+fn test_duplicate_slots_binary_search_safe() {
+    let entries = &[
+        (200, [0u8; HASH_BYTES]),
+        (200, [1u8; HASH_BYTES]),
+        (199, [2u8; HASH_BYTES]),
+    ];
+    let bytes = raw_slot_hashes(entries.len() as u64, entries);
+    let sh = unsafe { SlotHashes::new_unchecked(&bytes[..]) };
+    let dup_pos = sh.position(200).expect("slot 200 must exist");
+    assert!(
+        dup_pos <= 1,
+        "binary_search should return one of the duplicate indices (0 or 1)"
+    );
+    assert_eq!(sh.get_hash(199), Some(&entries[2].1));
+}
+
+#[test]
+fn test_zero_len_minimal_slice_iterates_empty() {
+    let zero_data = raw_slot_hashes(0, &[]);
+    let sh = unsafe { SlotHashes::new_unchecked(&zero_data[..]) };
+    assert_eq!(sh.len(), 0);
+    assert!(sh.into_iter().next().is_none());
+}
+
+#[test]
+fn test_borrow_state_failure_from_account_info() {
+    let bytes = raw_slot_hashes(0, &[]);
+    let (info, _backing) = unsafe { make_account_info(SLOTHASHES_ID, &bytes, 0) };
+    assert!(matches!(
+        SlotHashes::from_account_info(&info),
+        Err(ProgramError::AccountBorrowFailed)
+    ));
+}

+ 204 - 0
sdk/pinocchio/src/sysvars/slot_hashes/test_raw.rs

@@ -0,0 +1,204 @@
+//! Tests focusing on low-level `slot_hashes::raw` helpers.
+
+use super::raw;
+use super::*;
+extern crate std;
+
+#[test]
+fn test_validate_buffer_size() {
+    // ===== Tests with offset = 0 (buffer includes header) =====
+
+    // Too small to fit header
+    let small_len = 4;
+    assert!(raw::get_valid_buffer_capacity(small_len, 0).is_err());
+
+    // Misaligned: header + partial entry
+    let misaligned_len = NUM_ENTRIES_SIZE + 39;
+    assert!(raw::get_valid_buffer_capacity(misaligned_len, 0).is_err());
+
+    // Valid cases with offset = 0
+    let valid_empty_len = NUM_ENTRIES_SIZE;
+    assert_eq!(
+        raw::get_valid_buffer_capacity(valid_empty_len, 0).unwrap(),
+        0
+    );
+
+    let valid_one_len = NUM_ENTRIES_SIZE + ENTRY_SIZE;
+    assert_eq!(raw::get_valid_buffer_capacity(valid_one_len, 0).unwrap(), 1);
+
+    let valid_max_len = NUM_ENTRIES_SIZE + MAX_ENTRIES * ENTRY_SIZE;
+    assert_eq!(
+        raw::get_valid_buffer_capacity(valid_max_len, 0).unwrap(),
+        MAX_ENTRIES
+    );
+
+    // Edge case: exactly at the boundary (MAX_SIZE)
+    assert_eq!(
+        raw::get_valid_buffer_capacity(MAX_SIZE, 0).unwrap(),
+        MAX_ENTRIES
+    );
+
+    // ===== Tests with offset != 0 (buffer doesn't include header) =====
+
+    // Valid cases with non-zero offset - buffer contains only entry data
+
+    // Buffer for exactly 1 entry
+    assert_eq!(raw::get_valid_buffer_capacity(ENTRY_SIZE, 8).unwrap(), 1);
+
+    // Buffer for exactly 2 entries
+    assert_eq!(
+        raw::get_valid_buffer_capacity(2 * ENTRY_SIZE, 8).unwrap(),
+        2
+    );
+
+    // Buffer for maximum entries (without header space)
+    assert_eq!(
+        raw::get_valid_buffer_capacity(MAX_ENTRIES * ENTRY_SIZE, 8).unwrap(),
+        MAX_ENTRIES
+    );
+
+    // Buffer for 10 entries
+    assert_eq!(
+        raw::get_valid_buffer_capacity(10 * ENTRY_SIZE, 48).unwrap(),
+        10
+    );
+
+    // Error cases with non-zero offset
+
+    // Misaligned buffer - not a multiple of ENTRY_SIZE
+    assert!(raw::get_valid_buffer_capacity(ENTRY_SIZE + 1, 8).is_err());
+    assert!(raw::get_valid_buffer_capacity(ENTRY_SIZE - 1, 8).is_err());
+    assert!(raw::get_valid_buffer_capacity(39, 8).is_err()); // 39 is not divisible by 40
+
+    // Large buffers that would exceed MAX_SIZE - these pass `get_valid_buffer_capacity`
+    // (the syscall will fail later, but that's acceptable)
+    assert_eq!(
+        raw::get_valid_buffer_capacity((MAX_ENTRIES + 1) * ENTRY_SIZE, 8).unwrap(),
+        MAX_ENTRIES + 1
+    );
+    assert_eq!(
+        raw::get_valid_buffer_capacity((MAX_ENTRIES + 10) * ENTRY_SIZE, 48).unwrap(),
+        MAX_ENTRIES + 10
+    );
+
+    // Empty buffer with offset (valid - 0 entries)
+    assert_eq!(raw::get_valid_buffer_capacity(0, 8).unwrap(), 0);
+
+    // ===== Additional edge cases =====
+
+    // Large offset values (should still work for buffer size validation)
+    assert_eq!(
+        raw::get_valid_buffer_capacity(5 * ENTRY_SIZE, 1000).unwrap(),
+        5
+    );
+    assert!(raw::get_valid_buffer_capacity(5 * ENTRY_SIZE + 1, 2000).is_err());
+    // misaligned
+}
+
+#[test]
+fn test_fetch_into_offset_validation() {
+    let buffer_len = 200;
+
+    // Offset 0 (start of data) - should pass validation
+    assert!(validate_fetch_offset(0, buffer_len).is_ok());
+
+    // Offset 8 (start of first entry) - should pass validation
+    assert!(validate_fetch_offset(8, buffer_len).is_ok());
+
+    // Offset 48 (start of second entry) - should pass validation
+    assert!(validate_fetch_offset(48, buffer_len).is_ok());
+
+    // Offset 88 (start of third entry) - should pass validation
+    assert!(validate_fetch_offset(88, buffer_len).is_ok());
+
+    // Invalid offsets that should fail validation
+
+    // Offset beyond MAX_SIZE
+    assert!(validate_fetch_offset(MAX_SIZE, buffer_len).is_err());
+
+    // Offset pointing mid-entry (not aligned)
+    assert!(validate_fetch_offset(12, buffer_len).is_err()); // 8 + 4, mid-entry
+    assert!(validate_fetch_offset(20, buffer_len).is_err()); // 8 + 12, mid-entry
+    assert!(validate_fetch_offset(35, buffer_len).is_err()); // 8 + 27, mid-entry
+
+    // Offset in header but not at start
+    assert!(validate_fetch_offset(4, buffer_len).is_err()); // Mid-header
+    assert!(validate_fetch_offset(7, buffer_len).is_err()); // End of header
+
+    // Test buffer + offset exceeding MAX_SIZE
+    assert!(validate_fetch_offset(1, MAX_SIZE).is_err());
+    assert!(validate_fetch_offset(MAX_SIZE - 100, 200).is_err());
+
+    // Last entry
+    assert!(validate_fetch_offset(8 + 511 * ENTRY_SIZE, 40).is_ok());
+
+    // One past last valid entry
+    assert!(validate_fetch_offset(8 + 512 * ENTRY_SIZE, 40).is_err());
+}
+
+/// Host-only smoke test for `raw::fetch_into`.
+///
+/// On a host build the underlying sysvar syscall is stubbed out.
+#[test]
+fn test_fetch_into_host_stub() {
+    // 1. Full-size buffer, offset 0.
+    let mut full = std::vec![0u8; MAX_SIZE];
+    let n = raw::fetch_into(&mut full, 0).expect("fetch_into(full, 0)");
+    assert_eq!(n, 0);
+
+    // 2. Header-only buffer.
+    let mut header_only = std::vec![0u8; NUM_ENTRIES_SIZE];
+    let n2 = raw::fetch_into(&mut header_only, 0).expect("fetch_into(header_only, 0)");
+    assert_eq!(n2, 0);
+
+    // 3. One-entry buffer.
+    let mut one_entry = std::vec![0u8; NUM_ENTRIES_SIZE + ENTRY_SIZE];
+    let n3 = raw::fetch_into(&mut one_entry, 0).expect("fetch_into(one_entry, 0)");
+    assert_eq!(n3, 0);
+
+    // 4. Header-skipped fetch should succeed and return the number of entries that fit.
+    let mut skip_header = std::vec![0u8; ENTRY_SIZE];
+    let entries_count = raw::fetch_into(&mut skip_header, 8).expect("fetch_into(skip_header, 8)");
+    assert_eq!(entries_count, 1); // Buffer can fit exactly 1 entry
+
+    // 5. Mis-aligned buffer size should error.
+    let mut misaligned = std::vec![0u8; NUM_ENTRIES_SIZE + 39];
+    assert!(raw::fetch_into(&mut misaligned, 0).is_err());
+
+    // 6. Mid-entry offset should error.
+    let mut buf = std::vec![0u8; 64];
+    assert!(raw::fetch_into(&mut buf, 12).is_err());
+
+    // 7. Offset + len overflow should error.
+    let mut small = std::vec![0u8; 200];
+    assert!(raw::fetch_into(&mut small, MAX_SIZE - 199).is_err());
+}
+
+/// Test that `fetch_into` with offset correctly avoids interpreting slot
+/// data as entry count.
+#[cfg(test)]
+#[test]
+fn test_fetch_into_offset_avoids_incorrect_entry_count() {
+    // When fetch_into is called with offset != 0, the first 8 bytes of the buffer
+    // contain entry (slot) data, not the header, so they must not be read as an entry count.
+    let mut buffer = std::vec![0u8; 3 * ENTRY_SIZE];
+
+    // Call fetch_into with offset 8 (skipping the 8-byte header)
+    let result = raw::fetch_into(&mut buffer, 8);
+
+    assert!(
+        result.is_ok(),
+        "fetch_into should succeed with offset that skips header"
+    );
+
+    let entries_that_fit = result.unwrap();
+    assert_eq!(
+        entries_that_fit, 3,
+        "Should return number of entries that fit in buffer, not some slot number"
+    );
+
+    // Buffer for exactly 1 entry starting from offset 48 (2nd entry)
+    let mut second_entry_buffer = std::vec![0u8; ENTRY_SIZE];
+    let second_result = raw::fetch_into(&mut second_entry_buffer, 48).unwrap();
+    assert_eq!(second_result, 1);
+}

+ 201 - 0
sdk/pinocchio/src/sysvars/slot_hashes/test_utils.rs

@@ -0,0 +1,201 @@
+//! Shared helpers for `SlotHashes` sysvar tests.
+//! This module is compiled only when `cfg(test)` is active so `std` can be used
+//! freely while production code remains `#![no_std]`.
+
+use super::*;
+extern crate std;
+use crate::account_info::{Account, AccountInfo};
+use crate::pubkey::Pubkey;
+use core::{mem, ptr};
+use std::vec::Vec;
+
+/// Matches the pinocchio Account struct.
+/// Account fields are private, so this struct allows more readable
+/// use of them in tests.
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct AccountLayout {
+    pub borrow_state: u8,
+    pub is_signer: u8,
+    pub is_writable: u8,
+    pub executable: u8,
+    pub resize_delta: i32,
+    pub key: Pubkey,
+    pub owner: Pubkey,
+    pub lamports: u64,
+    pub data_len: u64,
+}
+
+/// Strategy that decides how much the slot number is decremented between
+/// successive entries in `generate_mock_entries`.
+#[allow(dead_code)]
+#[derive(Clone, Copy, Debug)]
+pub enum DecrementStrategy {
+    /// Always decrement by exactly 1.
+    Strictly1,
+    /// Mostly a decrement of 1 with occasional decrement of 2 so that the
+    /// *average* decrement is `1.05`.
+    Average1_05,
+    /// Average decrement of 2.
+    Average2,
+}
+
+/// Tiny deterministic PRNG (linear-congruential) good enough for unit tests.
+#[inline]
+pub fn simple_prng(seed: u64) -> u64 {
+    const A: u64 = 16_807;
+    const M: u64 = 2_147_483_647; // 2^31 - 1
+    let s = if seed == 0 { 1 } else { seed };
+    (A.wrapping_mul(s)) % M
+}
+
+/// Produce `num_entries` mock `(slot, hash)` pairs sorted by slot descending.
+pub fn generate_mock_entries(
+    num_entries: usize,
+    start_slot: u64,
+    strategy: DecrementStrategy,
+) -> Vec<(u64, Hash)> {
+    let mut entries = Vec::with_capacity(num_entries);
+    let mut current_slot = start_slot;
+    for i in 0..num_entries {
+        let hash_byte = (i % 256) as u8;
+        let hash = [hash_byte; HASH_BYTES];
+        entries.push((current_slot, hash));
+
+        let random_val = simple_prng(i as u64);
+        let dec = match strategy {
+            DecrementStrategy::Strictly1 => 1,
+            DecrementStrategy::Average1_05 => {
+                if random_val % 20 == 0 {
+                    2
+                } else {
+                    1
+                }
+            }
+            DecrementStrategy::Average2 => {
+                if random_val % 2 == 0 {
+                    1
+                } else {
+                    3
+                }
+            }
+        };
+        current_slot = current_slot.saturating_sub(dec);
+    }
+    entries
+}
+
+/// Build a `Vec<u8>` the size of the full mainnet `SlotHashes` sysvar (20,488 bytes)
+/// containing the supplied `entries` and with the `declared_len` header.
+pub fn build_slot_hashes_bytes(declared_len: u64, entries: &[(u64, Hash)]) -> Vec<u8> {
+    let mut data = std::vec![0u8; MAX_SIZE];
+    data[..NUM_ENTRIES_SIZE].copy_from_slice(&declared_len.to_le_bytes());
+    let mut offset = NUM_ENTRIES_SIZE;
+    for (slot, hash) in entries {
+        data[offset..offset + SLOT_SIZE].copy_from_slice(&slot.to_le_bytes());
+        data[offset + SLOT_SIZE..offset + ENTRY_SIZE].copy_from_slice(hash);
+        offset += ENTRY_SIZE;
+    }
+    data
+}
+
+/// Convenience wrapper where `declared_len == entries.len()`.
+#[inline]
+pub fn create_mock_data(entries: &[(u64, Hash)]) -> Vec<u8> {
+    build_slot_hashes_bytes(entries.len() as u64, entries)
+}
+
+/// Allocate a heap-backed `AccountInfo` whose data region is initialized with
+/// `data` and whose key is `key`.
+///
+/// The function also returns the backing `Vec<u64>` so the caller can keep it
+/// alive for the duration of the test (otherwise the memory would be freed and
+/// the raw pointer inside `AccountInfo` would dangle).
+///
+/// # Safety
+/// The caller must ensure the returned `AccountInfo` is used only for reading
+/// or according to borrow rules because the Solana runtime invariants are not
+/// fully enforced in this hand-rolled representation.
+pub unsafe fn make_account_info(
+    key: Pubkey,
+    data: &[u8],
+    borrow_state: u8,
+) -> (AccountInfo, Vec<u64>) {
+    let hdr_size = mem::size_of::<AccountLayout>();
+    let total = hdr_size + data.len();
+    let words = (total + 7) / 8;
+    let mut backing: Vec<u64> = std::vec![0u64; words];
+    assert!(
+        mem::align_of::<u64>() >= mem::align_of::<AccountLayout>(),
+        "`backing` should be properly aligned to store an `AccountLayout` instance"
+    );
+
+    let hdr_ptr = backing.as_mut_ptr() as *mut AccountLayout;
+    ptr::write(
+        hdr_ptr,
+        AccountLayout {
+            borrow_state,
+            is_signer: 0,
+            is_writable: 0,
+            executable: 0,
+            resize_delta: 0,
+            key,
+            owner: [0u8; 32],
+            lamports: 0,
+            data_len: data.len() as u64,
+        },
+    );
+
+    ptr::copy_nonoverlapping(
+        data.as_ptr(),
+        (hdr_ptr as *mut u8).add(hdr_size),
+        data.len(),
+    );
+
+    (
+        AccountInfo {
+            raw: hdr_ptr as *mut Account,
+        },
+        backing,
+    )
+}
+
+#[cfg(test)]
+#[test]
+fn test_account_layout_compatibility() {
+    assert_eq!(
+        mem::size_of::<AccountLayout>(),
+        mem::size_of::<Account>(),
+        "Header size must match Account size"
+    );
+    assert_eq!(
+        mem::align_of::<AccountLayout>(),
+        mem::align_of::<Account>(),
+        "Header alignment must match Account alignment"
+    );
+
+    unsafe {
+        let test_header = AccountLayout {
+            borrow_state: 42,
+            is_signer: 1,
+            is_writable: 1,
+            executable: 0,
+            resize_delta: 100,
+            key: [1u8; 32],
+            owner: [2u8; 32],
+            lamports: 1000,
+            data_len: 256,
+        };
+
+        let account_ptr = &test_header as *const AccountLayout as *const Account;
+        let account_ref = &*account_ptr;
+        assert_eq!(
+            account_ref.borrow_state, 42,
+            "borrow_state field should be accessible and match"
+        );
+        assert_eq!(
+            account_ref.data_len, 256,
+            "data_len field should be accessible and match"
+        );
+    }
+}