use std::collections::{HashMap, HashSet, VecDeque};
use std::future::Future;
use std::num::NonZeroUsize;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use camino::Utf8PathBuf;
use futures::stream::FuturesUnordered;
use futures::{Stream, StreamExt as _};
use pageserver_api::key::Key;
use pageserver_api::keyspace::KeySpaceAccum;
use pageserver_api::pagestream_api::{PagestreamGetPageRequest, PagestreamRequest};
use pageserver_api::reltag::RelTag;
use pageserver_api::shard::TenantShardId;
use pageserver_client_grpc::{self as client_grpc, ShardSpec};
use pageserver_page_api as page_api;
use rand::prelude::*;
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;
use tracing::info;
use url::Url;
use utils::id::TenantTimelineId;
use utils::lsn::Lsn;
use utils::shard::ShardIndex;

use crate::util::tokio_thread_local_stats::AllThreadLocalStats;
use crate::util::{request_stats, tokio_thread_local_stats};

/// GetPage@LatestLSN, uniformly distributed across the compute-accessible keyspace.
#[derive(clap::Parser)]
pub(crate) struct Args {
    /// Pageserver HTTP management API endpoint, used for target discovery and
    /// keyspace enumeration.
    #[clap(long, default_value = "http://localhost:9898")]
    mgmt_api_endpoint: String,
    /// Pageserver connection string. Supports postgresql:// and grpc:// protocols.
    #[clap(long, default_value = "postgres://postgres@localhost:64000")]
    page_service_connstring: String,
    /// Use the rich gRPC Pageserver client `client_grpc::PageserverClient`, rather than the basic
    /// no-frills `page_api::Client`. Only valid with grpc:// connstrings.
    #[clap(long)]
    rich_client: bool,
    /// JWT token used to authenticate against the pageserver management API.
    #[clap(long)]
    pageserver_jwt: Option<String>,
    /// Number of concurrent clients to run per target timeline.
    #[clap(long, default_value = "1")]
    num_clients: NonZeroUsize,
    /// How long to run the benchmark before printing results. Runs forever if unset.
    #[clap(long)]
    runtime: Option<humantime::Duration>,
    /// If true, enable compression (only for gRPC).
    #[clap(long)]
    compression: bool,
    /// Each client sends requests at the given rate.
    ///
    /// If a request takes too long and we should be issuing a new request already,
    /// we skip that request and account it as `MISSED`.
    #[clap(long)]
    per_client_rate: Option<usize>,
    /// Probability for sending `latest=true` in the request (uniform distribution).
    #[clap(long, default_value = "1")]
    req_latest_probability: f64,
    /// Limit the benchmark to the first N discovered targets.
    #[clap(long)]
    limit_to_first_n_targets: Option<usize>,
    /// For large pageserver installations, enumerating the keyspace takes a lot of time.
    /// If specified, the specified path is used to maintain a cache of the keyspace enumeration result.
    /// The cache is tagged and auto-invalided by the tenant/timeline ids only.
    /// It doesn't get invalidated if the keyspace changes under the hood, e.g., due to new ingested data or compaction.
    #[clap(long)]
    keyspace_cache: Option<Utf8PathBuf>,
    /// Before starting the benchmark, live-reconfigure the pageserver to use the given
    /// [`pageserver_api::models::virtual_file::IoEngineKind`].
    #[clap(long)]
    set_io_engine: Option<pageserver_api::models::virtual_file::IoEngineKind>,

    /// Before starting the benchmark, live-reconfigure the pageserver to use specified io mode (buffered vs. direct).
    #[clap(long)]
    set_io_mode: Option<pageserver_api::models::virtual_file::IoMode>,

    /// Queue depth generated in each client.
    #[clap(long, default_value = "1")]
    queue_depth: NonZeroUsize,

    /// Batch size of contiguous pages generated by each client. This is equivalent to how Postgres
    /// will request page batches (e.g. prefetches or vectored reads). A batch counts as 1 RPS and
    /// 1 queue depth.
    ///
    /// The libpq protocol does not support client-side batching, and will submit batches as many
    /// individual requests, in the hope that the server will batch them. Each batch still counts as
    /// 1 RPS and 1 queue depth.
    #[clap(long, default_value = "1")]
    batch_size: NonZeroUsize,

    /// Restrict the benchmark to pages of the given relnode (filters the enumerated keyspace).
    #[clap(long)]
    only_relnode: Option<u32>,

    /// Explicit tenant/timeline targets. If omitted, targets are discovered via the
    /// management API.
    targets: Option<Vec<TenantTimelineId>>,
}

/// State shared by all clients
#[derive(Debug)]
struct SharedState {
    /// Ensures all workers, the live-stats dumper, and the main task start work simultaneously.
    start_work_barrier: tokio::sync::Barrier,
    /// Rolling counters, periodically dumped (and reset) by the stats task.
    live_stats: LiveStats,
}

/// Rolling live statistics counters; swapped to zero each time they are reported.
#[derive(Debug, Default)]
struct LiveStats {
    /// Number of completed request batches since the last report.
    completed_requests: AtomicU64,
    /// Number of rate-limit periods that elapsed without issuing a request
    /// (only incremented when `--per-client-rate` is set).
    missed: AtomicU64,
}

impl LiveStats {
    /// Records one completed request batch.
    fn request_done(&self) {
        self.completed_requests.fetch_add(1, Ordering::Relaxed);
    }

    /// Records `count` missed rate-limit periods.
    fn missed(&self, count: u64) {
        self.missed.fetch_add(count, Ordering::Relaxed);
    }
}

/// A contiguous half-open range of keys within one timeline, captured at a given LSN.
#[derive(Clone, serde::Serialize, serde::Deserialize)]
struct KeyRange {
    timeline: TenantTimelineId,
    /// LSN at which the keyspace was enumerated.
    timeline_lsn: Lsn,
    /// Range start, in the compact i128 key representation (inclusive).
    start: i128,
    /// Range end (exclusive).
    end: i128,
}

impl KeyRange {
    /// Number of keys in this half-open range.
    fn len(&self) -> i128 {
        let Self { start, end, .. } = self;
        end - start
    }
}

/// Identifies a benchmark worker: one worker exists per (timeline, client) pair.
#[derive(PartialEq, Eq, Hash, Copy, Clone)]
struct WorkerId {
    timeline: TenantTimelineId,
    num_client: usize, // from 0..args.num_clients
}

/// Final JSON output printed when the benchmark finishes.
#[derive(serde::Serialize)]
struct Output {
    /// Request statistics aggregated across all worker threads.
    total: request_stats::Output,
}

// Per-thread request stats; aggregated across all runtime threads at the end of the run.
tokio_thread_local_stats::declare!(STATS: request_stats::Stats);

/// Benchmark entry point: sets up the runtime with per-thread stats collection
/// (via the `tokio_thread_local_stats` macro) and runs [`main_impl`].
pub(crate) fn main(args: Args) -> anyhow::Result<()> {
    tokio_thread_local_stats::main!(STATS, move |thread_local_stats| {
        main_impl(args, thread_local_stats)
    })
}

/// Main benchmark driver: discovers targets, enumerates (or loads the cached) keyspace,
/// spawns one worker per (timeline, client) pair, runs until the configured `--runtime`
/// elapses (or forever), then prints aggregated request stats as JSON.
async fn main_impl(
    args: Args,
    all_thread_local_stats: AllThreadLocalStats<request_stats::Stats>,
) -> anyhow::Result<()> {
    // Leak the args so worker tasks can hold &'static references without cloning.
    let args: &'static Args = Box::leak(Box::new(args));

    let mgmt_api_client = Arc::new(pageserver_client::mgmt_api::Client::new(
        reqwest::Client::new(), // TODO: support ssl_ca_file for https APIs in pagebench.
        args.mgmt_api_endpoint.clone(),
        args.pageserver_jwt.as_deref(),
    ));

    // Optionally live-reconfigure the pageserver's I/O engine / I/O mode before the run.
    if let Some(engine_str) = &args.set_io_engine {
        mgmt_api_client.put_io_engine(engine_str).await?;
    }

    if let Some(mode) = &args.set_io_mode {
        mgmt_api_client.put_io_mode(mode).await?;
    }

    // discover targets
    let timelines: Vec<TenantTimelineId> = crate::util::cli::targets::discover(
        &mgmt_api_client,
        crate::util::cli::targets::Spec {
            limit_to_first_n_targets: args.limit_to_first_n_targets,
            targets: args.targets.clone(),
        },
    )
    .await?;

    // On-disk format of the keyspace cache file. The `tag` (set of timelines) is the
    // only invalidation mechanism; see the `--keyspace-cache` arg docs.
    #[derive(serde::Deserialize)]
    struct KeyspaceCacheDe {
        tag: Vec<TenantTimelineId>,
        data: Vec<KeyRange>,
    }
    #[derive(serde::Serialize)]
    struct KeyspaceCacheSer<'a> {
        tag: &'a [TenantTimelineId],
        data: &'a [KeyRange],
    }
    // Try to load cached key ranges; a missing file or tag mismatch yields None.
    let cache = args
        .keyspace_cache
        .as_ref()
        .map(|keyspace_cache_file| {
            let contents = match std::fs::read(keyspace_cache_file) {
                Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                    return anyhow::Ok(None);
                }
                x => x.context("read keyspace cache file")?,
            };
            let cache: KeyspaceCacheDe =
                serde_json::from_slice(&contents).context("deserialize cache file")?;
            // Cache is valid only if it covers exactly the discovered timeline set.
            let tag_ok = HashSet::<TenantTimelineId>::from_iter(cache.tag.into_iter())
                == HashSet::from_iter(timelines.iter().cloned());
            info!("keyspace cache file matches tag: {tag_ok}");
            anyhow::Ok(if tag_ok { Some(cache.data) } else { None })
        })
        .transpose()?
        .flatten();
    let all_ranges: Vec<KeyRange> = if let Some(cached) = cache {
        info!("using keyspace cache file");
        cached
    } else {
        // Enumerate each timeline's keyspace concurrently via the mgmt API, keeping
        // only rel-block keys (optionally restricted to --only-relnode).
        let mut js = JoinSet::new();
        for timeline in &timelines {
            js.spawn({
                let mgmt_api_client = Arc::clone(&mgmt_api_client);
                let timeline = *timeline;
                async move {
                    let partitioning = mgmt_api_client
                        .keyspace(
                            TenantShardId::unsharded(timeline.tenant_id),
                            timeline.timeline_id,
                        )
                        .await?;
                    let lsn = partitioning.at_lsn;
                    let start = Instant::now();
                    let mut filtered = KeySpaceAccum::new();
                    // let's hope this is inlined and vectorized...
                    // TODO: turn this loop into a is_rel_block_range() function.
                    for r in partitioning.keys.ranges.iter() {
                        let mut i = r.start;
                        while i != r.end {
                            let mut include = true;
                            include &= i.is_rel_block_key();
                            if let Some(only_relnode) = args.only_relnode {
                                include &= i.is_rel_block_of_rel(only_relnode);
                            }
                            if include {
                                filtered.add_key(i);
                            }
                            i = i.next();
                        }
                    }
                    let filtered = filtered.to_keyspace();
                    let filter_duration = start.elapsed();

                    anyhow::Ok((
                        filter_duration,
                        filtered.ranges.into_iter().map(move |r| KeyRange {
                            timeline,
                            timeline_lsn: lsn,
                            start: r.start.to_i128(),
                            end: r.end.to_i128(),
                        }),
                    ))
                }
            });
        }
        let mut total_filter_duration = Duration::from_secs(0);
        let mut all_ranges: Vec<KeyRange> = Vec::new();
        while let Some(res) = js.join_next().await {
            let (filter_duration, range) = res.unwrap().unwrap();
            all_ranges.extend(range);
            total_filter_duration += filter_duration;
        }
        info!("filter duration: {}", total_filter_duration.as_secs_f64());
        // Persist the enumeration result for subsequent runs, if requested.
        if let Some(cachefile) = args.keyspace_cache.as_ref() {
            let cache = KeyspaceCacheSer {
                tag: &timelines,
                data: &all_ranges,
            };
            let bytes = serde_json::to_vec(&cache).context("serialize keyspace for cache file")?;
            std::fs::write(cachefile, bytes).context("write keyspace cache file to disk")?;
            info!("successfully wrote keyspace cache file");
        }
        all_ranges
    };

    // Barrier participants: the live-stats dumper task, every worker task, and this
    // function itself. The counts below must match what is actually spawned.
    let num_live_stats_dump = 1;
    let num_work_sender_tasks = args.num_clients.get() * timelines.len();
    let num_main_impl = 1;

    let shared_state = Arc::new(SharedState {
        start_work_barrier: tokio::sync::Barrier::new(
            num_live_stats_dump + num_work_sender_tasks + num_main_impl,
        ),
        live_stats: LiveStats::default(),
    });
    let cancel = CancellationToken::new();

    // Live-stats dumper: once per second, log the RPS and MISSED rates and reset the
    // counters (swap to 0).
    let ss = shared_state.clone();
    tokio::spawn({
        async move {
            ss.start_work_barrier.wait().await;
            loop {
                let start = std::time::Instant::now();
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
                let stats = &ss.live_stats;
                let completed_requests = stats.completed_requests.swap(0, Ordering::Relaxed);
                let missed = stats.missed.swap(0, Ordering::Relaxed);
                let elapsed = start.elapsed();
                info!(
                    "RPS: {:.0}   MISSED: {:.0}",
                    completed_requests as f64 / elapsed.as_secs_f64(),
                    missed as f64 / elapsed.as_secs_f64()
                );
            }
        }
    });

    let rps_period = args
        .per_client_rate
        .map(|rps_limit| Duration::from_secs_f64(1.0 / (rps_limit as f64)));
    // Worker factory: selects the key ranges belonging to the worker's timeline, builds
    // a length-weighted sampling distribution over them, and constructs the transport
    // client based on the connstring scheme.
    let make_worker: &dyn Fn(WorkerId) -> Pin<Box<dyn Send + Future<Output = ()>>> = &|worker_id| {
        let ss = shared_state.clone();
        let cancel = cancel.clone();
        let ranges: Vec<KeyRange> = all_ranges
            .iter()
            .filter(|r| r.timeline == worker_id.timeline)
            .cloned()
            .collect();
        let weights =
            rand::distr::weighted::WeightedIndex::new(ranges.iter().map(|v| v.len())).unwrap();

        Box::pin(async move {
            // Connstrings without a scheme fail URL parsing with RelativeUrlWithoutBase;
            // those are treated as postgresql.
            let scheme = match Url::parse(&args.page_service_connstring) {
                Ok(url) => url.scheme().to_lowercase().to_string(),
                Err(url::ParseError::RelativeUrlWithoutBase) => "postgresql".to_string(),
                Err(err) => panic!("invalid connstring: {err}"),
            };
            let client: Box<dyn Client> = match scheme.as_str() {
                "postgresql" | "postgres" => {
                    assert!(!args.compression, "libpq does not support compression");
                    assert!(!args.rich_client, "rich client requires grpc://");
                    Box::new(
                        LibpqClient::new(&args.page_service_connstring, worker_id.timeline)
                            .await
                            .unwrap(),
                    )
                }

                "grpc" if args.rich_client => Box::new(
                    RichGrpcClient::new(
                        &args.page_service_connstring,
                        worker_id.timeline,
                        args.compression,
                    )
                    .await
                    .unwrap(),
                ),

                "grpc" => Box::new(
                    GrpcClient::new(
                        &args.page_service_connstring,
                        worker_id.timeline,
                        args.compression,
                    )
                    .await
                    .unwrap(),
                ),

                scheme => panic!("unsupported scheme {scheme}"),
            };
            run_worker(args, client, ss, cancel, rps_period, ranges, weights).await
        })
    };

    info!("spawning workers");
    let mut workers = JoinSet::new();
    for timeline in timelines.iter().cloned() {
        for num_client in 0..args.num_clients.get() {
            let worker_id = WorkerId {
                timeline,
                num_client,
            };
            workers.spawn(make_worker(worker_id));
        }
    }
    let workers = async move {
        while let Some(res) = workers.join_next().await {
            res.unwrap();
        }
    };

    info!("waiting for everything to become ready");
    shared_state.start_work_barrier.wait().await;
    info!("work started");
    if let Some(runtime) = args.runtime {
        tokio::time::sleep(runtime.into()).await;
        info!("runtime over, signalling cancellation");
        cancel.cancel();
        workers.await;
        info!("work sender exited");
    } else {
        // Without --runtime, cancellation never fires and workers run until the
        // process is killed; the stats output below is never reached.
        workers.await;
        unreachable!("work sender never terminates");
    }

    // Aggregate the per-thread stats into a single output and print it as JSON.
    let output = Output {
        total: {
            let mut agg_stats = request_stats::Stats::new();
            for stats in all_thread_local_stats.lock().unwrap().iter() {
                let stats = stats.lock().unwrap();
                agg_stats.add(&stats);
            }
            agg_stats.output()
        },
    };

    let output = serde_json::to_string_pretty(&output).unwrap();
    println!("{output}");

    anyhow::Ok(())
}

/// A single benchmark worker: keeps up to `queue_depth` GetPage batches in flight
/// against one timeline, records per-batch latency into the thread-local STATS, and
/// optionally paces itself to one batch per `rps_period`.
async fn run_worker(
    args: &Args,
    mut client: Box<dyn Client>,
    shared_state: Arc<SharedState>,
    cancel: CancellationToken,
    rps_period: Option<Duration>,
    ranges: Vec<KeyRange>,
    weights: rand::distr::weighted::WeightedIndex<i128>,
) {
    // Synchronize the start across all workers and the stats dumper.
    shared_state.start_work_barrier.wait().await;
    let client_start = Instant::now();
    let mut ticks_processed = 0;
    let mut req_id = 0;
    let batch_size: usize = args.batch_size.into();

    // Track inflight requests by request ID and start time. This times the request duration, and
    // ensures responses match requests. We don't expect responses back in any particular order.
    //
    // NB: this does not check that all requests received a response, because we don't wait for the
    // inflight requests to complete when the duration elapses.
    let mut inflight: HashMap<u64, Instant> = HashMap::new();

    while !cancel.is_cancelled() {
        // Detect if a request took longer than the RPS rate
        if let Some(period) = &rps_period {
            let periods_passed_until_now =
                usize::try_from(client_start.elapsed().as_micros() / period.as_micros()).unwrap();

            // Every rate period that elapsed without a completed tick counts as MISSED.
            if periods_passed_until_now > ticks_processed {
                shared_state
                    .live_stats
                    .missed((periods_passed_until_now - ticks_processed) as u64);
            }
            ticks_processed = periods_passed_until_now;
        }

        // Top up the pipeline to the configured queue depth.
        while inflight.len() < args.queue_depth.get() {
            req_id += 1;
            let start = Instant::now();
            let (req_lsn, mod_lsn, rel, blks) = {
                /// Converts a compact i128 key to a relation tag and block number.
                fn key_to_block(key: i128) -> (RelTag, u32) {
                    let key = Key::from_i128(key);
                    assert!(key.is_rel_block_key());
                    key.to_rel_block()
                        .expect("we filter non-rel-block keys out above")
                }

                // Pick a random page from a random relation.
                let mut rng = rand::rng();
                let r = &ranges[weights.sample(&mut rng)];
                let key: i128 = rng.random_range(r.start..r.end);
                let (rel_tag, block_no) = key_to_block(key);

                let mut blks = VecDeque::with_capacity(batch_size);
                blks.push_back(block_no);

                // If requested, populate a batch of sequential pages. This is how Postgres will
                // request page batches (e.g. prefetches). If we hit the end of the relation, we
                // grow the batch towards the start too.
                for i in 1..batch_size {
                    let (r, b) = key_to_block(key + i as i128);
                    if r != rel_tag {
                        break; // went outside relation
                    }
                    blks.push_back(b)
                }

                if blks.len() < batch_size {
                    // Grow batch backwards if needed.
                    for i in 1..batch_size {
                        let (r, b) = key_to_block(key - i as i128);
                        if r != rel_tag {
                            break; // went outside relation
                        }
                        blks.push_front(b)
                    }
                }

                // We assume that the entire batch can fit within the relation.
                assert_eq!(blks.len(), batch_size, "incomplete batch");

                // With probability `req_latest_probability`, read the latest page version
                // (Lsn::MAX); otherwise read at the LSN the keyspace was enumerated at.
                let req_lsn = if rng.random_bool(args.req_latest_probability) {
                    Lsn::MAX
                } else {
                    r.timeline_lsn
                };
                (req_lsn, r.timeline_lsn, rel_tag, blks.into())
            };
            client
                .send_get_page(req_id, req_lsn, mod_lsn, rel, blks)
                .await
                .unwrap();
            let old = inflight.insert(req_id, start);
            assert!(old.is_none(), "duplicate request ID {req_id}");
        }

        // Wait for the next response (in any completion order) and record its latency.
        let (req_id, pages) = client.recv_get_page().await.unwrap();
        assert_eq!(pages.len(), batch_size, "unexpected page count");
        assert!(pages.iter().all(|p| !p.is_empty()), "empty page");
        let start = inflight
            .remove(&req_id)
            .expect("response for unknown request ID");
        let end = Instant::now();
        shared_state.live_stats.request_done();
        ticks_processed += 1;
        STATS.with(|stats| {
            stats
                .borrow()
                .lock()
                .unwrap()
                .observe(end.duration_since(start))
                .unwrap();
        });

        // If rate limiting is enabled, sleep until the next scheduled tick.
        if let Some(period) = &rps_period {
            let next_at = client_start
                + Duration::from_micros(
                    (ticks_processed) as u64 * u64::try_from(period.as_micros()).unwrap(),
                );
            tokio::time::sleep_until(next_at.into()).await;
        }
    }
}

/// A benchmark client, to allow switching out the transport protocol.
///
/// For simplicity, this just uses separate asynchronous send/recv methods. The send method could
/// return a future that resolves when the response is received, but we don't really need it.
#[async_trait]
trait Client: Send {
    /// Sends an asynchronous GetPage request to the pageserver.
    ///
    /// `req_id` identifies the batch; the caller keeps it unique among inflight requests.
    /// `req_lsn` is the request LSN, `mod_lsn` the not-modified-since LSN, and `blks` the
    /// block numbers to fetch from `rel`.
    async fn send_get_page(
        &mut self,
        req_id: u64,
        req_lsn: Lsn,
        mod_lsn: Lsn,
        rel: RelTag,
        blks: Vec<u32>,
    ) -> anyhow::Result<()>;

    /// Receives the next GetPage response from the pageserver.
    ///
    /// Returns the request ID and one page image per requested block. Responses are not
    /// expected back in any particular order.
    async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec<Bytes>)>;
}

/// A libpq-based Pageserver client.
///
/// libpq has no client-side batching, so each logical batch is sent as individual
/// requests and reassembled into one response on receive.
struct LibpqClient {
    inner: pageserver_client::page_service::PagestreamClient,
    // Track sent batches, so we know how many responses to expect.
    batch_sizes: VecDeque<usize>,
}

impl LibpqClient {
    /// Connects to the pageserver over libpq and switches the connection into
    /// pagestream mode for the given tenant/timeline.
    async fn new(connstring: &str, ttid: TenantTimelineId) -> anyhow::Result<Self> {
        let raw = pageserver_client::page_service::Client::new(connstring.to_string()).await?;
        let pagestream = raw.pagestream(ttid.tenant_id, ttid.timeline_id).await?;
        Ok(Self {
            inner: pagestream,
            batch_sizes: VecDeque::new(),
        })
    }
}

#[async_trait]
impl Client for LibpqClient {
    /// Sends one GetPage request per block.
    ///
    /// libpq doesn't support client-side batches, so we submit individual requests in
    /// the hope that the server batches them for us. All requests in the batch share
    /// the same request ID, because we return a single batch response.
    async fn send_get_page(
        &mut self,
        req_id: u64,
        req_lsn: Lsn,
        mod_lsn: Lsn,
        rel: RelTag,
        blks: Vec<u32>,
    ) -> anyhow::Result<()> {
        self.batch_sizes.push_back(blks.len());
        for blkno in blks {
            self.inner
                .getpage_send(PagestreamGetPageRequest {
                    hdr: PagestreamRequest {
                        reqid: req_id,
                        request_lsn: req_lsn,
                        not_modified_since: mod_lsn,
                    },
                    rel,
                    blkno,
                })
                .await?;
        }
        Ok(())
    }

    /// Collects the individual responses belonging to the oldest outstanding batch and
    /// returns them as a single batch response.
    async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec<Bytes>)> {
        let expected = self.batch_sizes.pop_front().unwrap();
        let mut pages = Vec::with_capacity(expected);
        let mut batch_req_id: Option<u64> = None;
        for _ in 0..expected {
            let resp = self.inner.getpage_recv().await?;
            // All responses in the batch must carry the ID of the first one.
            let id = *batch_req_id.get_or_insert(resp.req.hdr.reqid);
            assert_eq!(id, resp.req.hdr.reqid, "request ID mismatch");
            pages.push(resp.page);
        }
        Ok((batch_req_id.unwrap(), pages))
    }
}

/// A gRPC Pageserver client.
struct GrpcClient {
    /// Sends GetPage requests into the bidirectional stream.
    req_tx: tokio::sync::mpsc::Sender<page_api::GetPageRequest>,
    /// Receives GetPage responses from the stream.
    resp_rx: Pin<Box<dyn Stream<Item = Result<page_api::GetPageResponse, tonic::Status>> + Send>>,
}

impl GrpcClient {
    /// Connects to the pageserver over gRPC and opens a bidirectional GetPage stream
    /// for the given tenant/timeline, optionally with zstd compression.
    async fn new(
        connstring: &str,
        ttid: TenantTimelineId,
        compression: bool,
    ) -> anyhow::Result<Self> {
        let codec = compression.then_some(tonic::codec::CompressionEncoding::Zstd);
        let mut client = page_api::Client::connect(
            connstring.to_string(),
            ttid.tenant_id,
            ttid.timeline_id,
            ShardIndex::unsharded(),
            None,
            codec,
        )
        .await?;

        // The channel has a buffer size of 1, since 0 is not allowed. It does not matter, since the
        // benchmark will control the queue depth (i.e. in-flight requests) anyway, and requests are
        // buffered by Tonic and the OS too.
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        let responses = client
            .get_pages(tokio_stream::wrappers::ReceiverStream::new(rx))
            .await?;

        Ok(Self {
            req_tx: tx,
            resp_rx: Box::pin(responses),
        })
    }
}

#[async_trait]
impl Client for GrpcClient {
    /// Pushes a GetPage request onto the outgoing stream.
    async fn send_get_page(
        &mut self,
        req_id: u64,
        req_lsn: Lsn,
        mod_lsn: Lsn,
        rel: RelTag,
        blks: Vec<u32>,
    ) -> anyhow::Result<()> {
        let read_lsn = page_api::ReadLsn {
            request_lsn: req_lsn,
            not_modified_since_lsn: Some(mod_lsn),
        };
        self.req_tx
            .send(page_api::GetPageRequest {
                request_id: req_id.into(),
                request_class: page_api::GetPageClass::Normal,
                read_lsn,
                rel,
                block_numbers: blks,
            })
            .await?;
        Ok(())
    }

    /// Pulls the next response off the incoming stream and unwraps the page images.
    async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec<Bytes>)> {
        let resp = self.resp_rx.next().await.unwrap().unwrap();
        if resp.status_code != page_api::GetPageStatusCode::Ok {
            anyhow::bail!("unexpected status code: {}", resp.status_code);
        }
        let pages = resp.pages.into_iter().map(|p| p.image).collect();
        Ok((resp.request_id.id, pages))
    }
}

/// A rich gRPC Pageserver client.
struct RichGrpcClient {
    inner: Arc<client_grpc::PageserverClient>,
    /// In-flight GetPage calls; yielded by `recv_get_page` in completion order.
    requests: FuturesUnordered<
        Pin<Box<dyn Future<Output = anyhow::Result<page_api::GetPageResponse>> + Send>>,
    >,
}

impl RichGrpcClient {
    /// Creates a rich Pageserver client for a single unsharded tenant/timeline,
    /// optionally with zstd compression.
    async fn new(
        connstring: &str,
        ttid: TenantTimelineId,
        compression: bool,
    ) -> anyhow::Result<Self> {
        let shards = ShardSpec::new(
            [(ShardIndex::unsharded(), connstring.to_string())].into(),
            None,
        )?;
        let client = client_grpc::PageserverClient::new(
            ttid.tenant_id,
            ttid.timeline_id,
            shards,
            None,
            compression.then_some(tonic::codec::CompressionEncoding::Zstd),
        )?;
        Ok(Self {
            inner: Arc::new(client),
            requests: FuturesUnordered::new(),
        })
    }
}

#[async_trait]
impl Client for RichGrpcClient {
    async fn send_get_page(
        &mut self,
        req_id: u64,
        req_lsn: Lsn,
        mod_lsn: Lsn,
        rel: RelTag,
        blks: Vec<u32>,
    ) -> anyhow::Result<()> {
        let req = page_api::GetPageRequest {
            request_id: req_id.into(),
            request_class: page_api::GetPageClass::Normal,
            read_lsn: page_api::ReadLsn {
                request_lsn: req_lsn,
                not_modified_since_lsn: Some(mod_lsn),
            },
            rel,
            block_numbers: blks,
        };
        let inner = self.inner.clone();
        self.requests.push(Box::pin(async move {
            inner
                .get_page(req)
                .await
                .map_err(|err| anyhow::anyhow!("{err}"))
        }));
        Ok(())
    }

    async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec<Bytes>)> {
        let resp = self.requests.next().await.unwrap()?;
        Ok((
            resp.request_id.id,
            resp.pages.into_iter().map(|p| p.image).collect(),
        ))
    }
}
