//! Core processor worker.

use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

use aspect_mq_proc::{MsgBatch, NamespaceProcessor};
use aspect_msgqueue::{QueueIdent, QueueNamespace};
use tokio::sync::mpsc;
use tracing::*;

use crate::{ProcJob, ProcStateContainer, ProcWorkerContext};

/// State shared between the worker and the worker handle.
struct WorkerShared {
    /// The namespace that we're operating on.
    namespace: QueueNamespace,

    /// The index of the last message that we've saved as processed.
    // Atomic so the handle can read progress while the worker thread updates it.
    cur_last_msg: AtomicU64,
}

impl WorkerShared {
    /// Returns the index of the last message recorded as processed.
    pub fn last_msg(&self) -> u64 {
        self.cur_last_msg.load(Ordering::Relaxed)
    }

    /// Records `v` as the last processed message index.
    // Relaxed is used here: this value is a progress indicator, not a
    // synchronization point — NOTE(review): confirm no reader relies on
    // ordering with other writes.
    fn set_last_msg(&self, v: u64) {
        self.cur_last_msg.store(v, Ordering::Relaxed);
    }
}

/// Handle to a processor worker used to control it.
pub struct WorkerHandle {
    /// State shared with the worker (namespace and processing progress).
    shared: Arc<WorkerShared>,
    /// Channel used to submit jobs to the worker.
    proc_tx: mpsc::Sender<ProcJob>,
}

impl WorkerHandle {
    /// The queue namespace that the worker is processing.
    pub fn namespace(&self) -> &QueueNamespace {
        &self.shared.namespace
    }

    // TODO convert this so that not sending the job is an error
    /// Submits a job to the worker, blocking the calling thread.
    ///
    /// Returns `Ok(false)` when the worker's receiving end has been dropped.
    fn send_job(&self, job: ProcJob) -> anyhow::Result<bool> {
        match self.proc_tx.blocking_send(job) {
            Ok(()) => Ok(true),
            Err(_) => Ok(false),
        }
    }
}

/// Input side of the worker: the channel it receives jobs from.
pub struct WorkerInput {
    /// State shared with the worker handle (namespace and progress).
    shared: Arc<WorkerShared>,
    /// Channel the worker receives jobs on.
    proc_rx: mpsc::Receiver<ProcJob>,
}

impl WorkerInput {
    /// Waits for a new job input to process.
    ///
    /// Blocks the calling thread; returns `None` once the channel is closed
    /// and drained (i.e. all `WorkerHandle` senders have been dropped).
    fn wait_for_job(&mut self) -> Option<ProcJob> {
        self.proc_rx.blocking_recv()
    }
}

// TODO bootstrapper

/// Main loop of a processor worker: drains jobs from `input` until the
/// channel closes, processing each one and publishing progress.
fn processor_worker(
    mut input: WorkerInput,
    context: &impl ProcWorkerContext,
    proc: &impl NamespaceProcessor,
) -> anyhow::Result<()> {
    // Scope all log output to this worker's namespace.
    let namespace = &input.shared.namespace;
    let span = debug_span!("mqproc", ?namespace);
    let _g = span.enter();

    loop {
        // A `None` means every sender is gone, so the worker shuts down.
        let Some(job) = input.wait_for_job() else {
            break;
        };

        let new_last = job.last_msg_idx();
        let job_span = trace_span!("job", %new_last);
        let _job_g = job_span.enter();

        match process_job(job, context, proc) {
            // Only publish progress after the job fully succeeded.
            Ok(()) => input.shared.set_last_msg(new_last),
            Err(err) => {
                // TODO also log the queue ident
                error!(%err, "error processing queue job");
                // TODO add retry and stuff
            }
        }
    }

    Ok(())
}

/// Processes a single job: fetches the messages between the processor's last
/// recorded position and the job's target index, hands them to the processor,
/// and persists the new read position.
///
/// Signature uses `impl Trait` for both capabilities, consistent with
/// `processor_worker` (the original mixed a named `T` with `impl Trait`).
fn process_job(
    job: ProcJob,
    context: &impl ProcWorkerContext,
    proc: &impl NamespaceProcessor,
) -> anyhow::Result<()> {
    // The id is passed by value several times below; copy it once up front.
    let id = *job.id();

    // Load the queue processing state.
    let last_processed_idx = proc.fetch_last_processed_msg(id)?;

    // Compute the half-open interval [batch_start, batch_end) to fetch.
    let batch_start = last_processed_idx + 1;
    let job_last_msg = job.last_msg_idx();
    let batch_end = job_last_msg + 1;

    // Fetch and process the messages.
    let batch = context.fetch_message_range(id, batch_start, batch_end)?;
    proc.process_batch(id, &batch)?;

    // Print a warning if the processor's stored index doesn't match what the
    // job asked for — we still persist whatever the processor reported.
    let new_last_msg = proc.fetch_last_processed_msg(id)?;
    if new_last_msg != job_last_msg {
        warn!(%job_last_msg, %new_last_msg, "finished job, but stored read index mismatch requested");
    }

    // Now persist the new state and then update the view.
    context.update_read_position(id, new_last_msg)?;

    Ok(())
}
