use std::collections::HashMap;
use std::os::unix::fs::chroot;
use std::sync::Arc;
use std::time;

use chrono::NaiveDateTime;
use tokio::sync::mpsc;
use tokio::sync::{Mutex, OwnedSemaphorePermit, Semaphore};
use tracing::*;
use uuid::Uuid;

use super::db;
use super::errors::*;
use super::id::JobId;
use super::state;
use super::traits::Job;

/// Comfortable hard limit on the number of times we can retry a job.
/// Exceeding this in `get_retry_duration` is treated as a bug and panics.
pub const HARD_RETRY_LIMIT: u16 = 1000;

/// Number of milliseconds to keep processed jobs in memory to avoid spawning
/// duplicate exec tasks.
pub const DEDUP_POOL_MS: u64 = 60000;

/// Active order to execute a job.
pub struct JobOrder<J: Job> {
    // Persistent job metadata; also carries the job's id.
    meta: state::JobMeta,
    // Job-type-specific payload handed to the job implementation.
    payload: J::Payload,
    // Per-run context, eg. how many tries have already happened.
    ctx: state::RunContext,
}

impl<J: Job> JobOrder<J> {
    pub fn new(meta: state::JobMeta, payload: J::Payload, ctx: state::RunContext) -> Self {
        Self { meta, payload, ctx }
    }

    pub fn id(&self) -> JobId {
        self.meta.id()
    }
}

/// Receiving half of the job queue: yields `JobOrder`s for the dispatcher.
pub struct JobQueueRx<J: Job> {
    // Channel the orders arrive on.
    order_rx: mpsc::Receiver<JobOrder<J>>,
    // Shared job configuration (parallelism, retry policy, etc.).
    config: Arc<state::JobConfig>,
}

impl<J: Job> JobQueueRx<J> {
    pub(crate) fn new(
        order_rx: mpsc::Receiver<JobOrder<J>>,
        config: Arc<state::JobConfig>,
    ) -> Self {
        Self { order_rx, config }
    }

    /// Takes the next item from the queue, waiting if necessary, unless the
    /// channel closes.
    pub async fn next(&mut self) -> Option<JobOrder<J>> {
        self.order_rx.recv().await
    }
}

/// Tracks recently seen job orders so the dispatcher can drop duplicates.
pub struct RecentJobTracker {
    // Rotating pools mapping recently seen job ids to their try index; the
    // pool for a given epoch is `epoch % pools.len()`.
    recent_job_pools: Vec<HashMap<JobId, u16>>,
    // Length of one epoch (one pool's lifetime) in milliseconds.
    pool_dur_ms: i64,
    // Epoch observed at the last refresh; selects the "current" pool.
    last_refresh_epoch: usize,
}

impl RecentJobTracker {
    /// Creates a tracker with `pools` rotating dedup pools, each spanning
    /// `pool_dur` of wall-clock time.
    ///
    /// Panics if `pools` is zero or `pool_dur` is under a millisecond, since
    /// both would cause divide/modulo-by-zero later.
    pub fn new(pools: usize, pool_dur: time::Duration) -> Self {
        assert!(pools > 0, "executor: tracker needs at least one pool");
        let pool_dur_ms = pool_dur.as_millis() as i64;
        assert!(pool_dur_ms > 0, "executor: pool duration must be >= 1ms");

        let mut t = Self {
            recent_job_pools: vec![HashMap::new(); pools],
            pool_dur_ms,
            last_refresh_epoch: 0,
        };

        // Two-step init so we can reuse `get_cur_epoch`, which needs
        // `pool_dur_ms` to already be set.
        t.last_refresh_epoch = t.get_cur_epoch();
        t
    }

    /// Current epoch index, derived from wall-clock milliseconds.
    fn get_cur_epoch(&self) -> usize {
        let cur_millis = chrono::Utc::now().naive_utc().timestamp_millis();
        (cur_millis / self.pool_dur_ms) as usize
    }

    /// Clears stale pools, using a specified current epoch, then advances the
    /// tracker to that epoch.
    fn refresh_inner(&mut self, cur_epoch: usize) {
        // Also guards against the clock stepping backwards: never rewind.
        if cur_epoch <= self.last_refresh_epoch {
            return;
        }

        let npools = self.recent_job_pools.len();

        // Clear at most `npools` pools: after one full cycle around the ring
        // every pool has been wiped once, so more iterations are redundant.
        let span = (cur_epoch - self.last_refresh_epoch).min(npools);
        for epoch in (cur_epoch + 1 - span)..=cur_epoch {
            self.recent_job_pools[epoch % npools].clear();
        }

        // BUGFIX: the epoch was never advanced before, leaving the tracker
        // permanently stuck on its construction epoch.
        self.last_refresh_epoch = cur_epoch;
    }

    /// Clears stale pools.
    pub fn refresh(&mut self) {
        let cur_epoch = self.get_cur_epoch();
        self.refresh_inner(cur_epoch);
    }

    /// Finds the stored try index from the list of recent pools, returning
    /// the pool slot it lives in and the stored attempt.  Uses the last
    /// refresh epoch as the current epoch.
    fn find_job_try(&self, id: &JobId) -> Option<(usize, u16)> {
        // Scan newest-first since hits are most likely in the recent pools
        // and we'd like to short circuit that.
        let npools = self.recent_job_pools.len();
        for i in 0..npools {
            // `+ npools` keeps the subtraction from underflowing for small
            // epochs; it cancels out under the modulo.
            let slot = (self.last_refresh_epoch + npools - i) % npools;
            if let Some(attempt) = self.recent_job_pools[slot].get(id) {
                return Some((slot, *attempt));
            }
        }

        None
    }

    /// Unconditionally inserts a job in the current epoch pool.  Uses the last
    /// refresh epoch as the current epoch.
    pub fn insert_job(&mut self, id: JobId, attempt: u16) {
        let pool = self.last_refresh_epoch % self.recent_job_pools.len();
        self.recent_job_pools[pool].insert(id, attempt);
    }

    /// Checks if the job order we've received should be run.  Ie. if the try
    /// number is greater than the one we've stored, meaning that it's really a
    /// distinct order.  Uses the last refresh epoch as the current epoch.
    pub fn check_job_ready(&mut self, id: &JobId, attempt: u16) -> bool {
        match self.find_job_try(id) {
            // BUGFIX: the comparison was inverted — a strictly newer attempt
            // is a distinct order and must run; anything else is a duplicate.
            Some((slot, stored)) if attempt > stored => {
                // Move the record into the current pool so the newer attempt
                // gets a full dedup window.
                let cur_slot = self.last_refresh_epoch % self.recent_job_pools.len();
                if slot != cur_slot {
                    self.recent_job_pools[slot].remove(id);
                }
                self.insert_job(*id, attempt);
                true
            }

            // Already saw this attempt (or a newer one) recently: stale order.
            Some(_) => false,

            // Never seen: record it and let it run.
            None => {
                self.insert_job(*id, attempt);
                true
            }
        }
    }
}

/// Worker task that takes pending jobs from a queue and spawns independent
/// tasks to carry them out.  Runs until the queue's channel closes.
pub async fn worker_dispatch_task<J: Job>(
    mut queue: JobQueueRx<J>,
    inst: Arc<J>,
    dc: Arc<db::JobDatastore>,
) {
    // TODO make the tracker configurable
    let mut tracker = RecentJobTracker::new(2, time::Duration::from_millis(DEDUP_POOL_MS));
    let sem = Arc::new(Semaphore::new(queue.config.parallel_jobs));

    while let Some(order) = queue.next().await {
        // Throw away the old jobs we can safely drop off.
        tracker.refresh();

        // Check if we've already seen the job recently.  BUGFIX: previously
        // this only logged and then fell through, executing the stale order
        // anyway; now we actually skip it.
        if !tracker.check_job_ready(&order.id(), order.ctx.num_prev_tries()) {
            trace!(id = %order.id(), "dropping stale job order");
            continue;
        }

        // Acquire a permit so we don't spew tasks.  This only errors if the
        // semaphore is closed, which we never do.
        let permit = sem
            .clone()
            .acquire_owned()
            .await
            .expect("executor: acquire permit");

        // Then clone the other stuff we need, this is fairly cheap since they're all `Arc`s.
        let inst = inst.clone();
        let config = queue.config.clone();
        let dc = dc.clone();

        // Execute the job on its own task, holding the permit until it's done.
        tokio::spawn(async move {
            let _permit = permit;
            process_order(inst.as_ref(), &order, &config, &dc).await;
        });
    }
}

/// Processes a job order, updating the datastore accordingly.
async fn process_order<J: Job>(
    inst: &J,
    order: &JobOrder<J>,
    config: &state::JobConfig,
    dc: &db::JobDatastore,
) {
    let id = order.id();
    let status = exec_job(inst, &order.meta, &order.payload, &order.ctx, config).await;

    match status {
        JobStatus::Retry(wait_dur) => {
            let next_try_time = chrono::Utc::now().naive_utc() + wait_dur;
            let cur_tries = order.ctx.num_prev_tries() + 1;
            if let Err(e) = dc
                .update_job_retry_status(&id, cur_tries as i16, Some(next_try_time))
                .await
            {
                error!(%id, err = %e, "failed to update job retry status");
            } else {
                debug!(%id, ?wait_dur, "scheduling job for retry");
            }
        }

        JobStatus::Finish(jcd) => {
            let success = jcd.is_successful();
            let keep_payload = if success {
                config.keep_ok_payloads
            } else {
                config.keep_err_payloads
            };

            if let Err(e) = dc.complete_job(&id, jcd, keep_payload).await {
                error!(%id, %success, err = %e, "failed to mark job completed");
            }
        }
    }
}

/// Executes a job once, returning a status as to what to do with it next.
async fn exec_job<J: Job>(
    inst: &J,
    meta: &state::JobMeta,
    payload: &J::Payload,
    rc: &state::RunContext,
    config: &state::JobConfig,
) -> JobStatus {
    // Success finishes immediately.
    let err = match do_exec_job(inst, meta, payload).await {
        Ok(_) => return JobStatus::Finish(state::JobCompletionData::new_success_now()),
        Err(e) => e,
    };

    // Non-retryable failures also finish, with the error recorded.
    if !err.is_retryable() {
        return JobStatus::Finish(state::JobCompletionData::from_job_err(&err));
    }

    // This failure counts as another attempt, hence the +1.
    let attempt = rc.num_prev_tries() + 1;
    if attempt >= config.retry_limit {
        JobStatus::Finish(state::JobCompletionData::new_from_err(
            STATUS_TOO_MANY_RETRIES,
            &err,
        ))
    } else {
        JobStatus::Retry(get_retry_duration(attempt, config))
    }
}

/// Low level executor that just wraps the call in a span for logging.
// TODO merge into exec_job?
async fn do_exec_job<J: Job>(
    inst: &J,
    meta: &state::JobMeta,
    payload: &J::Payload,
) -> Result<(), JobError> {
    // Run the job inside a span so its progress can be followed in the logs.
    let span = debug_span!("jobexec", id = %meta.id(), ty = %inst.type_name());
    inst.exec_job_payload(meta, payload).instrument(span).await?;
    Ok(())
}

/// Indication of how the executor should treat the job.
#[derive(Clone, Debug)]
pub enum JobStatus {
    /// The job should be retried after waiting the given duration.
    Retry(time::Duration),

    /// The job is finished and should be moved to the completed jobs pool.
    Finish(state::JobCompletionData),
}

/// Returns the duration that we should wait until retrying the job, with
/// lightly exponential backoff: the base backoff time multiplied by
/// `backoff_factor` percent once per prior attempt.
///
/// # Panics
///
/// Panics if `last_attempt_idx` exceeds [`HARD_RETRY_LIMIT`], which means a
/// caller failed to enforce its own retry limit.
pub fn get_retry_duration(last_attempt_idx: u16, jc: &state::JobConfig) -> time::Duration {
    if last_attempt_idx > HARD_RETRY_LIMIT {
        panic!(
            "executor: absurd retry count, should have failed by now (at {last_attempt_idx} tries)"
        );
    }

    // Saturate instead of overflowing: a large factor over many attempts can
    // exceed u64 (which would panic in debug builds and wrap in release).
    let factor = jc.backoff_factor as u64;
    let mut dur_ms = jc.base_backoff_time_ms as u64;
    for _ in 0..last_attempt_idx {
        dur_ms = dur_ms.saturating_mul(factor) / 100;
    }
    time::Duration::from_millis(dur_ms)
}
