//! Scheduler implementation.

use alloc::{sync::Arc, vec::Vec};
use core::{
    hint::{likely, unlikely},
    sync::atomic::{AtomicU64, Ordering},
};

use crate::{
    irq::irqflags::{irqs_disabled, local_irq_disable, local_irq_enable},
    processor::{barrier, nr_cpus, this_processor_id},
    sched::{
        preempt::{
            PREEMPT_OFFSET, in_atomic_preempt_off, preempt_count, preempt_disable,
            preempt_enable_no_resched, preemptible,
        },
        schedule::{
            fair::FAIR_SCHED_CLASS, idle::IDLE_SCHED_CLASS, pelt::SchedAvg, rt::RT_SCHED_CLASS,
        },
        task::{TASK_RUNNING, Task, current, current_fast, thread::switch_to},
    },
    sync::spinlock::Spinlock,
    time::{
        hrtimer::{Hrtimer, HrtimerMode, HrtimerRestart},
        sched_clock::sched_clock,
    },
};

mod fair;
mod idle;
mod pelt;
mod rt;

bitflags::bitflags! {
    /// Flags passed to the `SchedClass` hooks to describe why an
    /// enqueue/dequeue/preemption-check is happening.
    #[derive(Clone, Copy)]
    struct SchedClassFlags: u32 {
        /// Task is being enqueued because it just woke up.
        const ENQUEUE_WAKEUP = 0b0000_0001;
        /// Task is being dequeued because it is going to sleep.
        const DEQUEUE_SLEEP  = 0b0000_0010;
    }
}

/// Interface implemented by each scheduling class (RT, fair, idle).
///
/// `pick_next_task` is consulted class-by-class in priority order
/// (see `Rq::pick_next_task`): RT first, then fair, then idle.
#[allow(unused)]
trait SchedClass {
    /// Add `tsk` to this class's runqueue on `rq`.
    fn enqueue_task(&self, rq: &Rq, tsk: Arc<Task>, flags: SchedClassFlags);
    /// Remove `tsk` from this class's runqueue on `rq`.
    fn dequeue_task(&self, rq: &Rq, tsk: Arc<Task>, flags: SchedClassFlags);

    /// Current task voluntarily yields the CPU within its class.
    fn yield_task(&self, rq: &Rq);

    /// Decide whether `tsk` should preempt the currently running task.
    fn check_preempt_curr(&self, rq: &Rq, tsk: Arc<Task>, flags: SchedClassFlags);

    /// Choose the next task of this class to run, or `None` if the class
    /// has no runnable task on `rq`.
    fn pick_next_task(&self, rq: &Rq, prev_tsk: &Task) -> Option<Arc<Task>>;
    /// Bookkeeping when `prev_tsk` stops being the running task.
    fn put_prev_task(&self, rq: &Rq, prev_tsk: Arc<Task>);
    /// Bookkeeping when `next_tsk` becomes the running task.
    fn set_curr_task(&self, rq: &Rq, next_tsk: Arc<Task>);

    /// Periodic-tick hook for this class on `rq`.
    fn task_tick(&self, rq: &Rq);

    /// Update runtime accounting of the currently running task.
    fn update_curr(&self, rq: &Rq);
}

/// Mutable per-runqueue state, protected by the `Rq::rq_inter` spinlock.
#[allow(unused)]
struct Rqinter {
    // Task currently running on this runqueue's CPU.
    curr: Arc<Task>,
    // Total number of context switches performed on this CPU.
    nr_switch: u64,
    // PELT load-tracking accumulator for this runqueue.
    sched_avg: SchedAvg,
    // Total runnable tasks; the two fields below break it down by class
    // (presumably nr_running == rt_nr_running + cfs_nr_running — TODO confirm).
    nr_running: usize,
    rt_nr_running: usize,
    cfs_nr_running: usize,
}

/// Per-CPU runqueue.
///
/// 64-byte aligned — presumably to keep each `Rq` on its own cache line
/// and avoid false sharing between CPUs (TODO confirm cache-line size).
#[repr(align(64))]
#[allow(unused)]
struct Rq {
    // Owning CPU id; must match this runqueue's index in `RUNQUEUES`.
    cpu: usize,
    // Cached scheduler clock, refreshed by `update_rq_clock` (Relaxed ops).
    clock: AtomicU64,
    // High-resolution tick timer for this runqueue.
    hrtimer: Arc<Hrtimer>,
    // Lock-protected mutable state (see `Rqinter`).
    rq_inter: Spinlock<Rqinter>,
}

impl Rq {
    /// Build a runqueue for `cpu` with `curr` as its initial running task.
    ///
    /// The clock is seeded from `sched_clock()`; all counters start at 0.
    fn init(cpu: usize, curr: Arc<Task>, hrtimer: Arc<Hrtimer>) -> Self {
        Self {
            cpu,
            clock: AtomicU64::new(sched_clock()),
            hrtimer,
            rq_inter: Spinlock::new(Rqinter {
                curr,
                nr_switch: 0,
                sched_avg: SchedAvg::new(),
                nr_running: 0,
                rt_nr_running: 0,
                cfs_nr_running: 0,
            }),
        }
    }
}

// One runqueue per CPU, indexed by processor id; populated once in
// `sched_init` before other CPUs touch it.
// NOTE(review): `static mut` accesses here are unsynchronized by the type
// system — soundness relies on init happening before concurrent use and on
// per-CPU indexing afterwards; verify this invariant holds for all callers.
static mut RUNQUEUES: Vec<Rq> = Vec::new();

impl Rq {
    /// Pre-schedule sanity checks: panics when scheduling from atomic
    /// context, and asserts this runqueue's `curr` really is `prev`
    /// (compared by badge).
    #[inline]
    fn schedule_debug(&self, prev: &Task) {
        assert!(
            !unlikely(in_atomic_preempt_off()),
            "BUG: scheduling while atomic: {}/{}/{:#08x}",
            prev.name(),
            prev.pid(),
            preempt_count()
        );
        let rq_inter = self.rq_inter.lock_irq_save();
        assert_eq!(rq_inter.curr.badge(), prev.badge());
    }

    /// Cancel this runqueue's high-resolution tick timer.
    #[inline(always)]
    fn hrtick_clear(&self) {
        self.hrtimer.cancel_hrtimer();
    }

    /// Refresh the cached runqueue clock from the scheduler clock.
    #[inline(always)]
    fn update_rq_clock(&self) {
        self.clock.store(sched_clock(), Ordering::Relaxed);
    }

    /// Pick the next task by consulting the classes in priority order:
    /// RT, then fair, then idle. Panics if none returns a task (the idle
    /// class is presumably expected to always supply one — TODO confirm).
    #[inline(always)]
    fn pick_next_task(&self, prev: &Task) -> Arc<Task> {
        let mut tsk = RT_SCHED_CLASS.pick_next_task(self, prev);
        if let Some(p) = tsk {
            return p;
        }
        tsk = FAIR_SCHED_CLASS.pick_next_task(self, prev);
        if let Some(p) = tsk {
            return p;
        }
        tsk = IDLE_SCHED_CLASS.pick_next_task(self, prev);
        if let Some(p) = tsk {
            return p;
        }
        panic!("BUG: pick next task fail!");
    }

    /// Mark `next` as on-CPU just before the context switch.
    #[allow(clippy::unused_self)]
    #[inline(always)]
    fn prepare_task_switch(&self, _prev: &Task, next: &Task) {
        next.set_on_cpu();
    }

    /// Post-switch bookkeeping: clear `prev`'s on-CPU flag, verify the
    /// preempt count is exactly one `PREEMPT_OFFSET`, then re-enable
    /// interrupts (they were disabled in `__schedule`).
    #[allow(clippy::unused_self)]
    #[inline(always)]
    fn finish_task_switch(&self, prev: &Task) {
        prev.clear_on_cpu();
        assert_eq!(
            preempt_count(),
            PREEMPT_OFFSET as u32,
            "corrupted preempt_count: {}/{}/{:#08x}",
            current_fast().name(),
            current_fast().pid(),
            preempt_count()
        );
        // Re-enable interrupts.
        local_irq_enable();
    }

    /// Switch from `prev` to `next`: flag `next` on-CPU, switch address
    /// space when `next` has an mm, then switch CPU context. `switch_to`
    /// returns the task we switched away from, which is finished here.
    #[inline(always)]
    fn context_switch(&self, prev: &Task, next: Arc<Task>) {
        self.prepare_task_switch(prev, &next);
        if let Some(mm) = next.mm() {
            mm.switch_mm();
        }
        let last = switch_to(prev, next);
        barrier();

        self.finish_task_switch(last);
    }
}

/// Core scheduling routine; `preempt` is true when invoked from a
/// preemption path rather than a voluntary call.
///
/// Must be entered with preemption disabled by the caller. Interrupts
/// are disabled here and re-enabled either by `finish_task_switch`
/// (switch taken) or explicitly below (no switch).
fn __schedule(preempt: bool) {
    let cpu = this_processor_id();
    let rq = unsafe { &RUNQUEUES[cpu] };

    rq.schedule_debug(current_fast());

    rq.hrtick_clear();

    local_irq_disable();
    rq.update_rq_clock();

    let next = rq.pick_next_task(current_fast());
    current_fast().clear_need_resched();

    if likely(current() != next) {
        {
            // Plain `lock()` (not irq-save) — interrupts are already off here.
            let mut rq_inter = rq.rq_inter.lock();
            rq_inter.nr_switch += 1;
            rq_inter.curr = next.clone();
        }
        // Voluntary switch (blocking) vs. involuntary switch accounting.
        if !preempt && current_fast().state() != TASK_RUNNING {
            current_fast().inc_n_switch_count();
        } else {
            current_fast().inc_nv_switch_count();
        }

        rq.context_switch(current_fast(), next);
    } else {
        // Nothing to switch to; restore interrupts ourselves.
        local_irq_enable();
    }
}

/// hrtick timer callback: always asks the timer core to re-arm the timer.
fn hrtick_timer(_timer: &Hrtimer) -> HrtimerRestart {
    HrtimerRestart::Restart
}

/// One-time scheduler initialization: creates one runqueue per CPU
/// (each seeded with the boot CPU's current task and a fresh hrtimer),
/// then runs per-CPU setup for the calling CPU.
///
/// Presumably called on the boot CPU before secondary CPUs start
/// scheduling — TODO confirm; `RUNQUEUES` pushes are unsynchronized.
#[allow(static_mut_refs)]
pub(crate) fn sched_init() {
    unsafe {
        for cpu in 0..nr_cpus() {
            let timer = Arc::new(Hrtimer::create(HrtimerMode::Rel, hrtick_timer, None));
            let rq = Rq::init(cpu, current(), timer);
            RUNQUEUES.push(rq);
        }
    }
    sched_init_cpu();
}

/// Per-CPU scheduler setup: replaces the hrtimer created in `sched_init`
/// with one owned by this CPU and records this CPU's current task as the
/// runqueue's `curr`.
pub(crate) fn sched_init_cpu() {
    let rq = unsafe { &mut RUNQUEUES[this_processor_id()] };
    // The runqueue at index i must belong to CPU i.
    assert_eq!(this_processor_id(), rq.cpu);
    let timer = Arc::new(Hrtimer::create(HrtimerMode::Rel, hrtick_timer, None));
    rq.hrtimer = timer;
    let mut rq_inter = rq.rq_inter.lock();
    rq_inter.curr = current();
}

/// Handler for a scheduler IPI: refreshes the scheduler clock and folds a
/// load sample into the runqueue's PELT average.
pub(crate) fn schedule_ipi() {
    sched_clock();
    // NOTE(review): this always updates CPU 0's runqueue regardless of which
    // CPU received the IPI — confirm intentional; every other access in this
    // file indexes RUNQUEUES by this_processor_id().
    let rq = unsafe { &mut RUNQUEUES[0] };
    let mut rq_inter = rq.rq_inter.lock();
    // NOTE(review): magic arguments (1024, 2, 1) — document their meaning at
    // the update_load_avg definition (load/runnable/running weights?).
    rq_inter.sched_avg.update_load_avg(sched_clock(), 1024, 2, 1);
}

/// Main voluntary scheduling entry point.
///
/// Runs `__schedule` with preemption disabled, and repeats as long as the
/// current task still has its need-resched flag set afterwards.
///
/// # Panics
/// Panics when memory is exhausted.
pub fn schedule() {
    let mut resched_pending = true;
    while resched_pending {
        preempt_disable();
        __schedule(false);
        preempt_enable_no_resched();
        resched_pending = current_fast().need_resched();
    }
}

/// Scheduling entry point for the idle path (no preempt-count changes).
///
/// Repeats `__schedule` until the current task's need-resched flag clears.
pub fn schedule_idle() {
    // do-while: always schedule at least once, then re-check the flag.
    while {
        __schedule(false);
        current_fast().need_resched()
    } {}
}

/// Preemption-triggered schedule.
///
/// Returns immediately when preemption is currently not allowed
/// (the common case on this path, hence the `likely` hint); otherwise
/// schedules repeatedly while the need-resched flag remains set.
pub fn preempt_schedule() {
    if likely(!preemptible()) {
        return;
    }

    let mut resched_pending = true;
    while resched_pending {
        preempt_disable();
        __schedule(true);
        preempt_enable_no_resched();

        resched_pending = current_fast().need_resched();
    }
}

/// Preemption on return from interrupt.
///
/// Must be entered with interrupts disabled and a zero preempt count
/// (both asserted). Interrupts are re-enabled only across the actual
/// `__schedule` call and disabled again before returning to the irq
/// exit path.
///
/// # Panics
/// Panics when the entry preconditions are violated (system error).
pub fn preempt_schedule_irq() {
    assert_eq!(preempt_count(), 0);
    assert!(irqs_disabled());

    loop {
        preempt_disable();
        // Enable irqs only for the duration of the schedule itself.
        local_irq_enable();
        __schedule(true);
        local_irq_disable();
        preempt_enable_no_resched();

        if !current_fast().need_resched() {
            break;
        }
    }
}

/// PELT load average of the runqueue on `cpu`.
pub fn rq_load_avg(cpu: usize) -> u64 {
    let runqueue = unsafe { &RUNQUEUES[cpu] };
    let inner = runqueue.rq_inter.lock();
    inner.sched_avg.load_avg()
}

/// PELT runnable average of the runqueue on `cpu`.
pub fn rq_runnable_avg(cpu: usize) -> u64 {
    let runqueue = unsafe { &RUNQUEUES[cpu] };
    let inner = runqueue.rq_inter.lock();
    inner.sched_avg.runnable_avg()
}

/// PELT utilization average of the runqueue on `cpu`.
pub fn rq_util_avg(cpu: usize) -> u64 {
    let runqueue = unsafe { &RUNQUEUES[cpu] };
    let inner = runqueue.rq_inter.lock();
    inner.sched_avg.util_avg()
}
