
feat(fortuna): Support Postgres DB backend (#2841)

* use AnyPool for generic SQL connections, add postgres migrations

* migrations

* fix sqlite issues

* fix sqlite issues

* remove old migrations, update migrations!()

* bump ver
Tejas Badadare, 4 months ago
commit 18c4a44a01

+ 19 - 1
Cargo.lock

@@ -2300,6 +2300,12 @@ dependencies = [
  "const-random",
 ]
 
+[[package]]
+name = "dotenv"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
+
 [[package]]
 name = "dotenvy"
 version = "0.15.7"
@@ -3052,7 +3058,7 @@ dependencies = [
 
 [[package]]
 name = "fortuna"
-version = "8.1.0"
+version = "8.2.0"
 dependencies = [
  "anyhow",
  "axum 0.6.20",
@@ -3064,6 +3070,7 @@ dependencies = [
  "byteorder",
  "chrono",
  "clap",
+ "dotenv",
  "ethabi",
  "ethers",
  "futures",
@@ -9361,6 +9368,7 @@ dependencies = [
  "memchr",
  "once_cell",
  "percent-encoding",
+ "rustls 0.23.28",
  "serde",
  "serde_json",
  "sha2 0.10.9",
@@ -9370,6 +9378,7 @@ dependencies = [
  "tokio-stream",
  "tracing",
  "url",
+ "webpki-roots 0.26.11",
 ]
 
 [[package]]
@@ -10702,6 +10711,15 @@ version = "0.25.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
 
+[[package]]
+name = "webpki-roots"
+version = "0.26.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
+dependencies = [
+ "webpki-roots 1.0.1",
+]
+
 [[package]]
 name = "webpki-roots"
 version = "1.0.1"

+ 1 - 0
apps/fortuna/.gitignore

@@ -4,3 +4,4 @@
 *private-key*
 .envrc
 fortuna.db*
+.env*

+ 0 - 12
apps/fortuna/.sqlx/query-03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8.json

@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "UPDATE request SET state = ?, last_updated_at = ?, info = ?, provider_random_number = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ? AND state = 'Pending'",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 8
-    },
-    "nullable": []
-  },
-  "hash": "03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8"
-}

+ 0 - 12
apps/fortuna/.sqlx/query-4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0.json

@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "UPDATE request SET state = ?, last_updated_at = ?, reveal_block_number = ?, reveal_tx_hash = ?, provider_random_number =?, gas_used = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ?",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 10
-    },
-    "nullable": []
-  },
-  "hash": "4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0"
-}

+ 0 - 12
apps/fortuna/.sqlx/query-b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10.json

@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 12
-    },
-    "nullable": []
-  },
-  "hash": "b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10"
-}
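
The deleted `.sqlx/` files are cached metadata for sqlx's compile-time checked query macros, which are tied to one concrete backend; with the generic `AnyPool`, queries move to the runtime-checked `sqlx::query()` form, so nothing needs to be generated with `cargo sqlx prepare` or committed. A minimal sketch of that style, assuming a connected `AnyPool` named `pool` (the helper function and column alias are illustrative):

```rust
use sqlx::{AnyPool, Row};

// Illustrative helper: count pending requests with a runtime-checked query.
async fn pending_count(pool: &AnyPool) -> Result<i64, sqlx::Error> {
    // The SQL is validated when it executes, not at compile time,
    // so no .sqlx metadata has to be cached.
    let row = sqlx::query("SELECT COUNT(*) AS n FROM request WHERE state = $1")
        .bind("Pending")
        .fetch_one(pool)
        .await?;
    row.try_get("n")
}
```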

+ 10 - 2
apps/fortuna/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "fortuna"
-version = "8.1.0"
+version = "8.2.0"
 edition = "2021"
 
 [lib]
@@ -46,8 +46,16 @@ chrono = { version = "0.4.38", features = [
 backoff = { version = "0.4.0", features = ["futures", "tokio"] }
 thiserror = "1.0.61"
 futures-locks = "0.7.1"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "chrono"] }
+sqlx = { version = "0.8", features = [
+  "runtime-tokio",
+  "tls-rustls",
+  "sqlite",
+  "any",
+  "postgres",
+  "chrono",
+] }
 num-traits = "0.2.19"
+dotenv = "0.15.0"
 
 [dev-dependencies]
 axum-test = "13.1.1"
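
The new sqlx features carry the backend switch: `any` provides the backend-agnostic `AnyPool`, `sqlite` and `postgres` are the two concrete drivers, and `tls-rustls` supplies the TLS stack for encrypted Postgres connections (which is why `rustls` and `webpki-roots` appear in `Cargo.lock` above). With the `any` feature, drivers must be registered at runtime before connecting; a minimal sketch, with an illustrative URL:

```rust
use sqlx::any::{install_default_drivers, AnyPoolOptions};

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Register the sqlite and postgres drivers enabled in Cargo.toml;
    // without this call, AnyPool cannot resolve any URL scheme.
    install_default_drivers();

    // The same code path serves either backend; the URL is illustrative.
    let pool = AnyPoolOptions::new()
        .max_connections(5)
        .connect("sqlite:fortuna.db?mode=rwc")
        .await?;

    let one: i64 = sqlx::query_scalar("SELECT 1").fetch_one(&pool).await?;
    assert_eq!(one, 1);
    Ok(())
}
```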

+ 19 - 16
apps/fortuna/README.md

@@ -10,35 +10,38 @@ Each blockchain is configured in `config.yaml`.
 
 ## Build & Test
 
-We use sqlx query macros to check the SQL queries at compile time. This requires
-a database to be available at build time. Create a `.env` file in the root of the project with the following content:
+Fortuna uses Cargo for building and dependency management.
+Simply run `cargo build` and `cargo test` to build and test the project.
+To run Fortuna locally, see the [Local Development](#local-development) section below.
 
+### Connect a database
+Fortuna stores request history in a SQL database and serves it from its explorer API.
+Any SQLite or Postgres database is supported. The database connection is sourced from the `DATABASE_URL` env var.
+Create a `.env` file in the root of the project with a DB connection string.
 ```
 DATABASE_URL="sqlite:fortuna.db?mode=rwc"
 ```
+If not provided, Fortuna will create and use a SQLite file-based database at `./fortuna.db`, as in the example above.
+
+### Database migrations
+Fortuna will automatically apply the schema migrations in the `./migrations` directory when connecting to the database.
+To manually administer the migrations, use the `sqlx` tool for cargo. The tool automatically uses the
+database connection in the `.env` file.
 
-Install sqlx for cargo with:
+Install the `sqlx` command-line tool (the CLI ships in the `sqlx-cli` crate):
```bash
cargo install sqlx-cli
```
 
-Next, you need to create the database and apply the schema migrations. You can do this by running:
-
+To create the database if needed and apply the migrations:
 ```bash
-cargo sqlx migrate run # automatically picks up the .env file
+cargo sqlx migrate run
 ```
-This will create a SQLite database file called `fortuna.db` in the root of the project and apply the schema migrations to it.
-This will allow `cargo check` to check the queries against the existing database.
-
-Fortuna uses Cargo for building and dependency management.
-Simply run `cargo build` and `cargo test` to build and test the project.
-
-If you have changed any queries in the code, you need to update the .sqlx folder with the new queries:
 
+To restore the database to a fresh state (drop, recreate, apply migrations):
 ```bash
-cargo sqlx prepare
+cargo sqlx database reset
 ```
-Please add the changed files in the `.sqlx` folder to your git commit.
 
 ## Command-Line Interface
 
@@ -124,7 +127,7 @@ To start an instance of the webserver for local testing, you first need to perfo
 1. Run `cargo run -- setup-provider` to register a randomness provider for this service. This command
    will update the on-chain contracts such that the configured provider key is a randomness provider,
    and its on-chain configuration matches `config.yaml`.
-
+1. Review the [Connect a database](#connect-a-database) section above. The default configuration will create a file-based SQLite database.
+
 Once you've completed the setup, simply run the following command to start the service:
 
 ```bash

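The `.env` example above uses SQLite; for Postgres the same mechanism applies with a `postgres` scheme connection string. A sketch with illustrative credentials, host, and database name:

```
DATABASE_URL="postgres://fortuna_user:secret@localhost:5432/fortuna"
```
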
+ 0 - 1
apps/fortuna/migrations/20250502164500_init.down.sql

@@ -1 +0,0 @@
-DROP TABLE request;

+ 0 - 26
apps/fortuna/migrations/20250502164500_init.up.sql

@@ -1,26 +0,0 @@
--- we use VARCHAR(40) for addresses and VARCHAR(64) for tx_hashes and 32 byte numbers
-CREATE TABLE request(
-                    chain_id VARCHAR(20) NOT NULL,
-                    network_id INTEGER NOT NULL,
-                    provider VARCHAR(40) NOT NULL,
-                    sequence INTEGER NOT NULL,
-                    created_at DATETIME NOT NULL,
-                    last_updated_at DATETIME NOT NULL,
-                    state VARCHAR(10) NOT NULL,
-                    request_block_number INT NOT NULL,
-                    request_tx_hash VARCHAR(64) NOT NULL,
-                    user_random_number VARCHAR(64) NOT NULL,
-                    sender VARCHAR(40) NOT NULL,
-                    reveal_block_number INT,
-                    reveal_tx_hash VARCHAR(64),
-                    provider_random_number VARCHAR(64),
-                    info TEXT,
-                    PRIMARY KEY (network_id, sequence, provider, request_tx_hash)
-);
-
-CREATE INDEX idx_request_sequence ON request (sequence);
-CREATE INDEX idx_request_network_id_created_at ON request (network_id, created_at);
-CREATE INDEX idx_request_created_at ON request (created_at);
-CREATE INDEX idx_request_request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL;
-CREATE INDEX idx_request_reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL;
-CREATE INDEX idx_request_sender ON request (sender) WHERE sender IS NOT NULL;

+ 0 - 4
apps/fortuna/migrations/20250521203448_gas.down.sql

@@ -1,4 +0,0 @@
-ALTER TABLE request
-DROP COLUMN gas_used;
-ALTER TABLE request
-DROP COLUMN gas_limit;

+ 0 - 5
apps/fortuna/migrations/20250521203448_gas.up.sql

@@ -1,5 +0,0 @@
--- U256 max value is 78 digits, so 100 is a safe upper bound
-ALTER TABLE request
-ADD COLUMN gas_used VARCHAR(100);
-ALTER TABLE request
-ADD COLUMN gas_limit VARCHAR(100) NOT NULL;

+ 0 - 22
apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.down.sql

@@ -1,22 +0,0 @@
--- Add down migration script here
-
-DROP INDEX request__network_id__state__created_at;
-DROP INDEX request__network_id__created_at;
-DROP INDEX request__sender__network_id__state__created_at;
-DROP INDEX request__sender__network_id__created_at;
-DROP INDEX request__sender__state__created_at;
-DROP INDEX request__sender__created_at;
-DROP INDEX request__sequence__network_id__state__created_at;
-DROP INDEX request__sequence__network_id__created_at;
-DROP INDEX request__sequence__state__created_at;
-DROP INDEX request__sequence__created_at;
-DROP INDEX request__state__created_at;
-DROP INDEX request__created_at;
-
-
-CREATE INDEX idx_request_sequence ON request (sequence);
-CREATE INDEX idx_request_network_id_created_at ON request (network_id, created_at);
-CREATE INDEX idx_request_created_at ON request (created_at);
-CREATE INDEX idx_request_request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL;
-CREATE INDEX idx_request_reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL;
-CREATE INDEX idx_request_sender ON request (sender) WHERE sender IS NOT NULL;

+ 0 - 4
apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.down.sql

@@ -1,4 +0,0 @@
--- Add down migration script here
-
-DROP INDEX request__request_tx_hash;
-DROP INDEX request__reveal_tx_hash;

+ 0 - 4
apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.up.sql

@@ -1,4 +0,0 @@
--- Add up migration script here
-
-CREATE INDEX request__request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL;
-CREATE INDEX request__reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL;

+ 1 - 0
apps/fortuna/migrations/20250707000000_init.down.sql

@@ -0,0 +1 @@
+DROP TABLE IF EXISTS request;

+ 23 - 9
apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.up.sql → apps/fortuna/migrations/20250707000000_init.up.sql

@@ -1,12 +1,23 @@
--- Add up migration script here
-
-DROP INDEX idx_request_sequence;
-DROP INDEX idx_request_network_id_created_at;
-DROP INDEX idx_request_created_at;
-DROP INDEX idx_request_request_tx_hash;
-DROP INDEX idx_request_reveal_tx_hash;
-DROP INDEX idx_request_sender;
-
+CREATE TABLE request(
+    chain_id VARCHAR(20) NOT NULL,
+    network_id INTEGER NOT NULL,
+    provider VARCHAR(40) NOT NULL,
+    sequence INTEGER NOT NULL,
+    created_at INTEGER NOT NULL,
+    last_updated_at INTEGER NOT NULL,
+    state VARCHAR(10) NOT NULL,
+    request_block_number INTEGER NOT NULL,
+    request_tx_hash VARCHAR(64) NOT NULL,
+    user_random_number VARCHAR(64) NOT NULL,
+    sender VARCHAR(40) NOT NULL,
+    reveal_block_number INTEGER,
+    reveal_tx_hash VARCHAR(64),
+    provider_random_number VARCHAR(64),
+    info TEXT,
+    gas_used VARCHAR(100),
+    gas_limit VARCHAR(100) NOT NULL,
+    PRIMARY KEY (network_id, sequence, provider, request_tx_hash)
+);
 
 CREATE INDEX request__network_id__state__created_at ON request(network_id, state, created_at);
 CREATE INDEX request__network_id__created_at ON request(network_id, created_at);
@@ -20,3 +31,6 @@ CREATE INDEX request__sequence__state__created_at ON request(sequence, state, cr
 CREATE INDEX request__sequence__created_at ON request(sequence, created_at);
 CREATE INDEX request__state__created_at ON request(state, created_at);
 CREATE INDEX request__created_at ON request(created_at);
+
+CREATE INDEX request__request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL;
+CREATE INDEX request__reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL;
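
Note that `created_at` and `last_updated_at` change from `DATETIME` to `INTEGER`: the consolidated schema stores Unix timestamps in seconds, presumably because the backend-agnostic `Any` driver offers no portable datetime type. The Rust side converts with chrono, roughly as in this standalone sketch (function names are illustrative):

```rust
use chrono::{DateTime, Utc};

// Store: seconds since the Unix epoch; sub-second precision is dropped.
fn to_db(ts: DateTime<Utc>) -> i64 {
    ts.timestamp()
}

// Load: None signals an out-of-range value instead of panicking.
fn from_db(secs: i64) -> Option<DateTime<Utc>> {
    DateTime::from_timestamp(secs, 0)
}
```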

+ 10 - 0
apps/fortuna/src/api.rs

@@ -42,6 +42,16 @@ pub enum StateTag {
     Failed,
 }
 
+impl std::fmt::Display for StateTag {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            StateTag::Pending => write!(f, "Pending"),
+            StateTag::Completed => write!(f, "Completed"),
+            StateTag::Failed => write!(f, "Failed"),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)]
 pub struct RequestLabel {
     pub value: String,

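This `Display` impl gives `StateTag` a stable string form so it can be bound to queries as plain `TEXT` (`history.rs` below binds `state.to_string()`), since the `Any` driver only binds primitive SQL types. A hypothetical unit test pinning the mapping, assuming `StateTag` is in scope:

```rust
#[test]
fn state_tag_matches_db_strings() {
    // These strings must match the values written to the `state` column
    // ("Pending", "Completed", "Failed"), or state filters silently miss rows.
    assert_eq!(StateTag::Pending.to_string(), "Pending");
    assert_eq!(StateTag::Completed.to_string(), "Completed");
    assert_eq!(StateTag::Failed.to_string(), "Failed");
}
```
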
+ 1 - 1
apps/fortuna/src/api/explorer.rs

@@ -110,7 +110,7 @@ pub struct ExplorerQueryParams {
 #[derive(Debug, serde::Serialize, utoipa::ToSchema)]
 pub struct ExplorerResponse {
     pub requests: Vec<RequestStatus>,
-    pub total_results: u64,
+    pub total_results: i64,
 }
 
 /// Returns the logs of all requests captured by the keeper.

+ 2 - 0
apps/fortuna/src/command/run.rs

@@ -84,6 +84,8 @@ pub async fn run_api(
 }
 
 pub async fn run(opts: &RunOptions) -> Result<()> {
+    // Load environment variables from a .env file, if present (a missing file is fine)
+    let _ = dotenv::dotenv();
     let config = Config::load(&opts.config.config)?;
     let secret = config.provider.secret.load()?.ok_or(anyhow!(
         "Please specify a provider secret in the config file."

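`dotenv::dotenv()` returns an `Err` when no `.env` file exists, which is why the result is discarded rather than propagated with `?`; the `dotenv` crate also leaves variables that are already set in the process environment untouched, so a real `DATABASE_URL` takes precedence over the file. A standalone sketch of the intended behavior (illustrative, not the crate's code):

```rust
fn main() {
    // A missing .env file is fine: DATABASE_URL may be set directly
    // in the environment instead, and dotenv never overrides it.
    if dotenv::dotenv().is_err() {
        eprintln!("no .env file found; using the process environment only");
    }

    // Same fallback as History::new() in history.rs below.
    let url = std::env::var("DATABASE_URL")
        .unwrap_or_else(|_| "sqlite:fortuna.db?mode=rwc".to_string());
    println!("database: {url}");
}
```
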
+ 170 - 60
apps/fortuna/src/history.rs

@@ -1,7 +1,7 @@
 use {
     crate::api::{ChainId, NetworkId, StateTag},
     anyhow::Result,
-    chrono::{DateTime, NaiveDateTime},
+    chrono::DateTime,
     ethers::{
         core::utils::hex::ToHex,
         prelude::TxHash,
@@ -10,13 +10,16 @@ use {
     },
     serde::Serialize,
     serde_with::serde_as,
-    sqlx::{migrate, FromRow, Pool, QueryBuilder, Sqlite, SqlitePool},
+    sqlx::{any::AnyPoolOptions, migrate, AnyPool, FromRow},
     std::{str::FromStr, sync::Arc},
     tokio::{spawn, sync::mpsc},
     utoipa::ToSchema,
 };
 
 const LOG_RETURN_LIMIT: u64 = 1000;
+const ONE_DAY: u64 = 60 * 60 * 24;
+const ONE_HOUR: u64 = 60 * 60;
+const DEFAULT_DATABASE_URL: &str = "sqlite:fortuna.db?mode=rwc";
 
 #[serde_as]
 #[derive(Clone, Debug, Serialize, ToSchema, PartialEq)]
@@ -105,8 +108,8 @@ struct RequestRow {
     network_id: i64,
     provider: String,
     sequence: i64,
-    created_at: NaiveDateTime,
-    last_updated_at: NaiveDateTime,
+    created_at: i64,      // Unix timestamp
+    last_updated_at: i64, // Unix timestamp
     state: String,
     request_block_number: i64,
     request_tx_hash: String,
@@ -128,8 +131,10 @@ impl TryFrom<RequestRow> for RequestStatus {
         let network_id = row.network_id as u64;
         let provider = row.provider.parse()?;
         let sequence = row.sequence as u64;
-        let created_at = row.created_at.and_utc();
-        let last_updated_at = row.last_updated_at.and_utc();
+        let created_at = DateTime::from_timestamp(row.created_at, 0)
+            .ok_or(anyhow::anyhow!("Invalid created_at timestamp"))?;
+        let last_updated_at = DateTime::from_timestamp(row.last_updated_at, 0)
+            .ok_or(anyhow::anyhow!("Invalid last_updated_at timestamp"))?;
         let request_block_number = row.request_block_number as u64;
         let user_random_number = hex::FromHex::from_hex(row.user_random_number)?;
         let request_tx_hash = row.request_tx_hash.parse()?;
@@ -211,7 +216,7 @@ impl From<RequestRow> for Option<RequestStatus> {
 }
 
 pub struct History {
-    pool: Pool<Sqlite>,
+    pool: AnyPool,
     write_queue: mpsc::Sender<RequestStatus>,
     _writer_thread: Arc<tokio::task::JoinHandle<()>>,
 }
@@ -219,20 +224,46 @@ pub struct History {
 impl History {
     const MAX_WRITE_QUEUE: usize = 1_000;
     pub async fn new() -> Result<Self> {
-        Self::new_with_url("sqlite:fortuna.db?mode=rwc").await
+        let database_url =
+            std::env::var("DATABASE_URL").unwrap_or_else(|_| DEFAULT_DATABASE_URL.to_string());
+        Self::new_with_url(&database_url).await
     }
 
+    /// Create a History instance with an ephemeral in-memory DB.
+    /// Useful for testing.
     pub async fn new_in_memory() -> Result<Self> {
-        Self::new_with_url("sqlite::memory:").await
+        sqlx::any::install_default_drivers();
+        // Connect to an in-memory SQLite database
+        // Don't let the pool drop the connection, otherwise the in-memory database is destroyed
+        let pool = AnyPoolOptions::new()
+            .min_connections(1)
+            .max_connections(1)
+            .idle_timeout(None)
+            .max_lifetime(None)
+            .connect("sqlite::memory:")
+            .await?;
+        let migrator = migrate!(); // defaults to "./migrations"
+        migrator.run(&pool).await?;
+        Self::new_with_pool(pool).await
     }
 
+    /// Create a History instance with production DB parameters
     pub async fn new_with_url(url: &str) -> Result<Self> {
-        let pool = SqlitePool::connect(url).await?;
+        sqlx::any::install_default_drivers();
+        let pool = AnyPoolOptions::new()
+            .min_connections(0)
+            .max_connections(10)
+            // Allow the cloud DB to spin down after 1 hour of inactivity (cost savings)
+            .idle_timeout(std::time::Duration::from_secs(ONE_HOUR))
+            // Retire connections after 1 day so long-lived sessions can't accumulate memory on the DB server
+            .max_lifetime(std::time::Duration::from_secs(ONE_DAY))
+            .connect(url)
+            .await?;
         let migrator = migrate!("./migrations");
         migrator.run(&pool).await?;
         Self::new_with_pool(pool).await
     }
-    pub async fn new_with_pool(pool: Pool<Sqlite>) -> Result<Self> {
+    pub async fn new_with_pool(pool: AnyPool) -> Result<Self> {
         let (sender, mut receiver) = mpsc::channel(Self::MAX_WRITE_QUEUE);
         let pool_write_connection = pool.clone();
         let writer_thread = spawn(async move {
@@ -247,7 +278,7 @@ impl History {
         })
     }
 
-    async fn update_request_status(pool: &Pool<Sqlite>, new_status: RequestStatus) {
+    async fn update_request_status(pool: &AnyPool, new_status: RequestStatus) {
         let sequence = new_status.sequence as i64;
         let chain_id = new_status.chain_id;
         let network_id = new_status.network_id as i64;
@@ -259,13 +290,13 @@ impl History {
                 let block_number = new_status.request_block_number as i64;
                 let sender: String = new_status.sender.encode_hex();
                 let user_random_number: String = new_status.user_random_number.encode_hex();
-                sqlx::query("INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+                sqlx::query("INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)")
                     .bind(chain_id.clone())
                     .bind(network_id)
                     .bind(provider.clone())
                     .bind(sequence)
-                    .bind(new_status.created_at)
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.created_at.timestamp())
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind("Pending")
                     .bind(block_number)
                     .bind(request_tx_hash.clone())
@@ -286,9 +317,9 @@ impl History {
                 let reveal_tx_hash: String = reveal_tx_hash.encode_hex();
                 let provider_random_number: String = provider_random_number.encode_hex();
                 let gas_used: String = gas_used.to_string();
-                let result = sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, reveal_block_number = ?, reveal_tx_hash = ?, provider_random_number =?, gas_used = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ?")
+                let result = sqlx::query("UPDATE request SET state = $1, last_updated_at = $2, reveal_block_number = $3, reveal_tx_hash = $4, provider_random_number = $5, gas_used = $6 WHERE network_id = $7 AND sequence = $8 AND provider = $9 AND request_tx_hash = $10")
                     .bind("Completed")
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind(reveal_block_number)
                     .bind(reveal_tx_hash)
                     .bind(provider_random_number)
@@ -312,9 +343,9 @@ impl History {
             } => {
                 let provider_random_number: Option<String> = provider_random_number
                     .map(|provider_random_number| provider_random_number.encode_hex());
-                sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, info = ?, provider_random_number = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ? AND state = 'Pending'")
+                sqlx::query("UPDATE request SET state = $1, last_updated_at = $2, info = $3, provider_random_number = $4 WHERE network_id = $5 AND sequence = $6 AND provider = $7 AND request_tx_hash = $8 AND state = 'Pending'")
                     .bind("Failed")
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind(reason)
                     .bind(provider_random_number)
                     .bind(network_id)
@@ -343,7 +374,7 @@ impl History {
 
 #[derive(Debug, Clone)]
 pub struct RequestQueryBuilder<'a> {
-    pool: &'a Pool<Sqlite>,
+    pool: &'a AnyPool,
     pub search: Option<SearchField>,
     pub network_id: Option<i64>,
     pub state: Option<StateTag>,
@@ -354,7 +385,7 @@ pub struct RequestQueryBuilder<'a> {
 }
 
 impl<'a> RequestQueryBuilder<'a> {
-    fn new(pool: &'a Pool<Sqlite>) -> Self {
+    fn new(pool: &'a AnyPool) -> Self {
         Self {
             pool,
             search: None,
@@ -426,14 +457,76 @@ impl<'a> RequestQueryBuilder<'a> {
     }
 
     pub async fn execute(&self) -> Result<Vec<RequestStatus>> {
-        let mut query_builder = self.build_query("*");
-        query_builder.push(" LIMIT ");
-        query_builder.push_bind(self.limit);
-        query_builder.push(" OFFSET ");
-        query_builder.push_bind(self.offset);
+        let mut sql = "SELECT * FROM request WHERE created_at BETWEEN $1 AND $2".to_string();
+        let mut param_count = 2;
+
+        // Build the SQL string with parameter placeholders
+        match &self.search {
+            Some(SearchField::TxHash(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND (request_tx_hash = ${param_count}"));
+                param_count += 1;
+                sql.push_str(&format!(" OR reveal_tx_hash = ${param_count})"));
+            }
+            Some(SearchField::Sender(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sender = ${param_count}"));
+            }
+            Some(SearchField::SequenceNumber(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sequence = ${param_count}"));
+            }
+            None => (),
+        }
+
+        if self.network_id.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND network_id = ${param_count}"));
+        }
+
+        if self.state.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND state = ${param_count}"));
+        }
+
+        sql.push_str(" ORDER BY created_at DESC");
+
+        param_count += 1;
+        sql.push_str(&format!(" LIMIT ${param_count}"));
+        param_count += 1;
+        sql.push_str(&format!(" OFFSET ${param_count}"));
+
+        // Now bind all parameters in order
+        let mut query = sqlx::query_as::<_, RequestRow>(&sql)
+            .bind(self.min_timestamp.timestamp())
+            .bind(self.max_timestamp.timestamp());
+
+        match &self.search {
+            Some(SearchField::TxHash(tx_hash)) => {
+                let tx_hash: String = tx_hash.encode_hex();
+                query = query.bind(tx_hash.clone()).bind(tx_hash);
+            }
+            Some(SearchField::Sender(sender)) => {
+                let sender: String = sender.encode_hex();
+                query = query.bind(sender);
+            }
+            Some(SearchField::SequenceNumber(sequence_number)) => {
+                query = query.bind(sequence_number);
+            }
+            None => (),
+        }
+
+        if let Some(network_id) = &self.network_id {
+            query = query.bind(network_id);
+        }
+
+        if let Some(state) = &self.state {
+            query = query.bind(state.to_string());
+        }
+
+        query = query.bind(self.limit).bind(self.offset);
 
-        let result: sqlx::Result<Vec<RequestRow>> =
-            query_builder.build_query_as().fetch_all(self.pool).await;
+        let result: sqlx::Result<Vec<RequestRow>> = query.fetch_all(self.pool).await;
 
         if let Err(e) = &result {
             tracing::error!("Failed to fetch request: {}", e);
@@ -442,55 +535,68 @@ impl<'a> RequestQueryBuilder<'a> {
         Ok(result?.into_iter().filter_map(|row| row.into()).collect())
     }
 
-    pub async fn count_results(&self) -> Result<u64> {
-        self.build_query("COUNT(*) AS count")
-            .build_query_scalar::<u64>()
-            .fetch_one(self.pool)
-            .await
-            .map_err(|err| err.into())
-    }
+    pub async fn count_results(&self) -> Result<i64> {
+        let mut sql = "SELECT COUNT(*) FROM request WHERE created_at BETWEEN $1 AND $2".to_string();
+        let mut param_count = 2;
+
+        // Build the SQL string with parameter placeholders
+        match &self.search {
+            Some(SearchField::TxHash(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND (request_tx_hash = ${param_count}"));
+                param_count += 1;
+                sql.push_str(&format!(" OR reveal_tx_hash = ${param_count})"));
+            }
+            Some(SearchField::Sender(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sender = ${param_count}"));
+            }
+            Some(SearchField::SequenceNumber(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sequence = ${param_count}"));
+            }
+            None => (),
+        }
+
+        if self.network_id.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND network_id = ${param_count}"));
+        }
+
+        if self.state.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND state = ${param_count}"));
+        }
 
-    fn build_query(&self, columns: &str) -> QueryBuilder<Sqlite> {
-        let mut query_builder = QueryBuilder::new(format!(
-            "SELECT {columns} FROM request WHERE created_at BETWEEN "
-        ));
-        query_builder.push_bind(self.min_timestamp);
-        query_builder.push(" AND ");
-        query_builder.push_bind(self.max_timestamp);
+        // Now bind all parameters in order
+        let mut query = sqlx::query_scalar::<_, i64>(&sql)
+            .bind(self.min_timestamp.timestamp())
+            .bind(self.max_timestamp.timestamp());
 
         match &self.search {
             Some(SearchField::TxHash(tx_hash)) => {
                 let tx_hash: String = tx_hash.encode_hex();
-                query_builder.push(" AND (request_tx_hash = ");
-                query_builder.push_bind(tx_hash.clone());
-                query_builder.push(" OR reveal_tx_hash = ");
-                query_builder.push_bind(tx_hash);
-                query_builder.push(")");
+                query = query.bind(tx_hash.clone()).bind(tx_hash);
             }
             Some(SearchField::Sender(sender)) => {
                 let sender: String = sender.encode_hex();
-                query_builder.push(" AND sender = ");
-                query_builder.push_bind(sender);
+                query = query.bind(sender);
             }
             Some(SearchField::SequenceNumber(sequence_number)) => {
-                query_builder.push(" AND sequence = ");
-                query_builder.push_bind(sequence_number);
+                query = query.bind(sequence_number);
             }
             None => (),
         }
 
         if let Some(network_id) = &self.network_id {
-            query_builder.push(" AND network_id = ");
-            query_builder.push_bind(network_id);
+            query = query.bind(network_id);
         }
 
         if let Some(state) = &self.state {
-            query_builder.push(" AND state = ");
-            query_builder.push_bind(state);
+            query = query.bind(state.to_string());
         }
 
-        query_builder.push(" ORDER BY created_at DESC");
-        query_builder
+        query.fetch_one(self.pool).await.map_err(|err| err.into())
     }
 }
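
`QueryBuilder` was parameterized over the `Sqlite` backend, so the dynamic search queries are now assembled by hand: the SQL string accumulates numbered `$n` placeholders (Postgres syntax, which SQLite also accepts), and the binds are applied afterwards in the same order. The two passes must stay in sync or every later parameter shifts by one. A condensed sketch of the pattern (the function is illustrative, not part of the crate):

```rust
use sqlx::AnyPool;

// Illustrative reduction of execute()/count_results(): build the SQL with
// counted placeholders first, then bind values in exactly the same order.
async fn count_for_network(pool: &AnyPool, network_id: Option<i64>) -> Result<i64, sqlx::Error> {
    let mut sql = String::from("SELECT COUNT(*) FROM request WHERE created_at BETWEEN $1 AND $2");
    let mut n = 2;
    if network_id.is_some() {
        n += 1;
        sql.push_str(&format!(" AND network_id = ${n}"));
    }

    let mut query = sqlx::query_scalar::<_, i64>(&sql)
        .bind(0_i64)     // $1: min created_at
        .bind(i64::MAX); // $2: max created_at
    if let Some(id) = network_id {
        query = query.bind(id); // $3, bound only if its placeholder was appended
    }
    query.fetch_one(pool).await
}
```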
 
@@ -510,7 +616,11 @@ pub enum SearchField {
 
 #[cfg(test)]
 mod test {
-    use {super::*, chrono::Duration, tokio::time::sleep};
+    use {
+        super::*,
+        chrono::{Duration, Timelike},
+        tokio::time::sleep,
+    };
 
     fn get_random_request_status() -> RequestStatus {
         RequestStatus {
@@ -518,8 +628,8 @@ mod test {
             network_id: 121,
             provider: Address::random(),
             sequence: 1,
-            created_at: chrono::Utc::now(),
-            last_updated_at: chrono::Utc::now(),
+            created_at: chrono::Utc::now().with_nanosecond(0).unwrap(),
+            last_updated_at: chrono::Utc::now().with_nanosecond(0).unwrap(),
             request_block_number: 1,
             request_tx_hash: TxHash::random(),
             user_random_number: [20; 32],