use core::time;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;

use tokio::sync::mpsc;
use tracing::*;

use super::db;
use super::errors::STATUS_DECODE_FAILURE;
use super::executor;
use super::id::JobId;
use super::state;
use super::state::PendingJobEntry;
use super::traits::Job;
use crate::errors::*;

/// Per-job-type handle used by the retry scanner to feed pending jobs back
/// into that type's executor queue.
pub struct RetryQueue {
    /// Sends pending job entries to the type's `retry_queue_worker` task.
    tx: mpsc::Sender<PendingJobEntry>,

    /// Retry/backoff configuration, used to compute the next-try delay.
    job_config: Arc<state::JobConfig>,
}

/// Top-level handle over the job system's retry machinery.  Created by
/// `QueueSysBuilder::finish`, which also spawns the `retry_worker` task
/// that reads these fields.
pub struct JobManager {
    /// Backing store queried for ready (retry-due) jobs.
    datastore: Arc<db::JobDatastore>,

    /// One retry queue per registered job type, keyed by type name.
    retry_queues: HashMap<String, RetryQueue>,

    /// Number of ready jobs to queue at once.
    retry_batch_size: u64,

    /// Milliseconds between database fetches.
    retry_attempt_period_ms: u32,
}

/// Queue handle used to submit jobs to be completed.
pub struct JobQueue<J: Job> {
    /// Job handler instance.
    // FIXME is this actually needed here or only in the workers?
    inst: Arc<J>,

    /// Dispatch manager.
    datastore: Arc<db::JobDatastore>,

    /// Outgoing queue.
    order_tx: mpsc::Sender<executor::JobOrder<J>>,
}

impl<J: Job> JobQueue<J> {
    fn new(
        inst: Arc<J>,
        datastore: Arc<db::JobDatastore>,
        order_tx: mpsc::Sender<executor::JobOrder<J>>,
    ) -> Self {
        Self {
            inst,
            datastore,
            order_tx,
        }
    }

    /// Queues a payload as a new job.
    pub async fn queue_job(&self, payload: J::Payload) -> Result<JobId, Error> {
        let job_meta = self
            .datastore
            .create_job(self.inst.as_ref(), &payload)
            .await?;
        let id = job_meta.id();
        let rc = state::RunContext::new_fresh();
        let order = executor::JobOrder::<J>::new(job_meta, payload, rc);

        if self.order_tx.send(order).await.is_err() {
            return Err(Error::JobWorkersExited(self.inst.type_name().to_owned()));
        }

        Ok(id)
    }

    /// Queues a job order that's already been assembled.  Only used internally
    /// by the retry logic at the moment.
    pub async fn requeue_job(&self, order: executor::JobOrder<J>) -> Result<(), Error> {
        if self.order_tx.send(order).await.is_err() {
            return Err(Error::JobWorkersExited(self.inst.type_name().to_owned()));
        }

        Ok(())
    }
}

/// Background task that periodically scans the datastore for jobs whose
/// retry time has arrived and feeds them back into their per-type queues.
///
/// Runs forever; a failed scan is logged and the next tick is attempted
/// anyway.
async fn retry_worker(manager: Arc<JobManager>) {
    let retry_attempt_period =
        time::Duration::from_millis(manager.retry_attempt_period_ms as u64);
    let mut interval = tokio::time::interval(retry_attempt_period);

    loop {
        // Tick at the top of the loop: a tokio interval's first tick
        // completes immediately, so this still scans right away on startup
        // but then waits a full period between scans.  (With the tick at the
        // bottom, the first two scans would run back-to-back.)
        interval.tick().await;

        let span = debug_span!("retryjobs");
        if let Err(e) = attempt_retry(&manager).instrument(span).await {
            error!(err = %e, "failed to retry pending jobs");
        }
    }
}

/// Performs one retry pass: fetches up to `retry_batch_size` ready jobs,
/// pushes each job's next-try time forward in the datastore, then hands the
/// job to the retry queue for its type.
///
/// Returns an error if the ready-jobs query or any retry-status update
/// fails; failures to submit to a queue are logged and deferred instead.
async fn attempt_retry(manager: &Arc<JobManager>) -> Result<(), Error> {
    let jobs = manager
        .datastore
        .query_ready_jobs(manager.retry_batch_size)
        .await?;

    let pending = jobs.len();
    let mut queued = 0;
    // Job types whose queue was found full during this pass; later jobs of
    // those types are skipped without touching their retry status.
    let mut skip_jobs = HashSet::new();

    for j in jobs {
        let id = j.id();
        let ty = j.data().job_type().to_owned();

        // If we failed to submit it once then there's no point in trying again.
        if skip_jobs.contains(&ty) {
            continue;
        }

        let Some(queue) = manager.retry_queues.get(&ty) else {
            error!(%id, %ty, "found pending job of unknown type");
            // TODO should we mark the job as completed?
            continue;
        };

        // Also update the next try time so that we won't requeue the job
        // immediately and can at least try to run it.
        // TODO should probably batch these updates
        // NOTE(review): the `as u16` cast silently truncates large try
        // counts — presumably fine for realistic retry limits, but confirm.
        let delay_dur = executor::get_retry_duration(j.num_tries() as u16, &queue.job_config);
        // Base the next attempt on the last try time when one exists,
        // otherwise on the current time.
        let next_try = j
            .last_try()
            .cloned()
            .unwrap_or_else(|| chrono::Utc::now().naive_utc())
            + delay_dur;
        manager
            .datastore
            .update_job_retry_status(&id, j.num_tries(), Some(next_try))
            .await?;

        // Use try_send here so that we don't get stuck on a clogged queue.
        match queue.tx.try_send(j) {
            Ok(_) => {
                queued += 1;
            }
            Err(mpsc::error::TrySendError::Full(_)) => {
                // The retry status above was already pushed back, so the job
                // just waits for a later pass.
                warn!(%ty, "job retry queue full, deferring job");
                skip_jobs.insert(ty);
            }
            Err(mpsc::error::TrySendError::Closed(_)) => {
                warn!(%ty, "job retry queue closed, probably exiting soon")
            }
        }
    }

    debug!(%pending, %queued, "finished queueing jobs for retry");

    Ok(())
}

/// Takes jobs that should be retried and inserts them into the workqueue.
async fn retry_queue_worker<J: Job>(
    mut entries_rx: mpsc::Receiver<state::PendingJobEntry>,
    target_queue: Arc<JobQueue<J>>,
) {
    let datastore = target_queue.datastore.as_ref();
    while let Some(entry) = entries_rx.recv().await {
        let id = entry.id();
        if let Err(e) = retry_job(entry, target_queue.as_ref(), datastore).await {
            warn!(%id, err = %e, "failed to requeue job, will probably try again later");
        }
    }
}

/// Retries a pending job entry obtained from the datastore.
async fn retry_job<J: Job>(
    entry: PendingJobEntry,
    target_queue: &JobQueue<J>,
    ds: &db::JobDatastore,
) -> Result<(), Error> {
    // Try to decode the payload.  If this fails we should just mark the job as
    // completed since there's no way we could recover from it.
    let payload = match aspect_codec::decode(entry.data().payload()) {
        Ok(p) => p,
        Err(e) => {
            error!(id = %entry.id(), err = %e, "error decoding job payload for retry");

            // Mark the job as failed.
            let jcd = state::JobCompletionData::new_from_err(STATUS_DECODE_FAILURE, &e);
            if let Err(e) = ds.complete_job(&entry.id(), jcd, false).await {
                error!(id = %entry.id(), err = %e, "failed to mark failed job as failed");
            }

            return Ok(());
        }
    };

    // If we *did* decode it properly, then we can just pass it down to the main worker queue.
    let ctx = entry.get_run_context();
    let jo = executor::JobOrder::<J>::new(entry.data().meta().clone(), payload, ctx);
    target_queue.requeue_job(jo).await?;
    Ok(())
}

/// Builder type for a job queue executor system.
pub struct QueueSysBuilder {
    datastore: Arc<db::JobDatastore>,
    retry_queues: HashMap<String, RetryQueue>,
    retry_batch_size: u64,
    retry_attempt_period_ms: u32,
}

impl QueueSysBuilder {
    /// Creates a builder with no job types registered yet.
    pub fn new(
        datastore: Arc<db::JobDatastore>,
        retry_batch_size: u64,
        retry_attempt_period_ms: u32,
    ) -> Self {
        Self {
            datastore,
            retry_queues: HashMap::new(),
            retry_batch_size,
            retry_attempt_period_ms,
        }
    }

    /// Starts a job queue dispatcher task if a job type with the given name
    /// does not already exist.
    pub fn start_queue_executor<J: Job>(
        &mut self,
        type_inst: Arc<J>,
        job_config: Arc<state::JobConfig>,
    ) -> Option<Arc<JobQueue<J>>> {
        let tyname = type_inst.type_name().to_owned();
        if self.retry_queues.contains_key(&tyname) {
            return None;
        }

        let (order_tx, order_rx) = mpsc::channel(64);
        let (retry_tx, retry_rx) = mpsc::channel(64);

        // Executor side: drains orders and dispatches them to workers.
        let rx_queue = executor::JobQueueRx::new(order_rx, job_config.clone());
        tokio::spawn(executor::worker_dispatch_task(
            rx_queue,
            type_inst.clone(),
            self.datastore.clone(),
        ));

        let queue = Arc::new(JobQueue::<J>::new(
            type_inst,
            self.datastore.clone(),
            order_tx,
        ));

        // Relay side: moves pending entries from the retry scanner back into
        // the main work queue.
        tokio::spawn(retry_queue_worker(retry_rx, queue.clone()));

        // Record the retry queue so the scanner can find this job type.
        self.retry_queues.insert(
            tyname,
            RetryQueue {
                tx: retry_tx,
                job_config,
            },
        );

        Some(queue)
    }

    /// Finishes setting up the job manager, launching the retry worker.
    pub fn finish(self) -> Arc<JobManager> {
        let manager = Arc::new(JobManager {
            datastore: self.datastore,
            retry_queues: self.retry_queues,
            retry_batch_size: self.retry_batch_size,
            retry_attempt_period_ms: self.retry_attempt_period_ms,
        });

        tokio::spawn(retry_worker(manager.clone()));

        manager
    }
}

#[cfg(test)]
mod tests {
    use core::time;
    use std::future::Future;
    use std::sync::Arc;

    use sea_orm_migration::MigratorTrait;

    use crate::job_queue::state::JobConfig;
    use crate::job_queue::traits::Job;

    use super::super::errors::JobError;
    use super::db;
    use super::state;
    use super::QueueSysBuilder;

    /// Minimal job type that just prints its configured prefix and payload.
    struct FooJob {
        prefix: String,
    }

    impl Job for FooJob {
        type Payload = String;

        fn type_name(&self) -> &str {
            "foo"
        }

        fn exec_job_payload(
            &self,
            _job_meta: &state::JobMeta,
            payload: &Self::Payload,
        ) -> impl Future<Output = Result<(), JobError>> + Sync + Send {
            let pfx = self.prefix.clone();
            async move {
                println!("DOING JOB {pfx} {payload}");
                Ok(())
            }
        }
    }

    /// End-to-end smoke test: queue one job against an in-memory SQLite
    /// database and check that it completes.
    #[tokio_macros::test]
    async fn test_queue() {
        let url = "sqlite::memory:".to_owned();
        eprintln!("using database: {url}");

        let dbc = sea_orm::Database::connect(url)
            .await
            .expect("test: init database connection");

        aspect_db_sql_migration::Migrator::up(&dbc, None)
            .await
            .expect("test: apply migrations");

        let datastore = db::JobDatastore::new(dbc).expect("test: init job datastore");
        let ds = Arc::new(datastore);

        let mut builder = QueueSysBuilder::new(ds.clone(), 10, 10);

        let jt = FooJob {
            prefix: "asdf".to_owned(),
        };
        let jc = JobConfig {
            base_backoff_time_ms: 1000,
            backoff_factor: 200,
            retry_limit: 3,
            parallel_jobs: 1,
            keep_ok_payloads: true,
            keep_err_payloads: true,
        };

        let jq = builder
            .start_queue_executor(Arc::new(jt), Arc::new(jc))
            .expect("test: start_queue_executor");

        let man = builder.finish();

        let id = jq
            .queue_job("hello".to_owned())
            .await
            .expect("test: queue job");
        eprintln!("created job {id}");

        // Give the spawned worker a moment to execute the job.
        tokio::time::sleep(time::Duration::from_secs(1)).await;

        let done = ds
            .query_complete_jobs("foo", None, None)
            .await
            .expect("test: query jobs");

        assert_eq!(done.len(), 1);
        assert_eq!(done[0], id);
    }
}
