use alloc::sync::Arc;
use core::mem::offset_of;

use aarch64_define::TIF_NEED_RESCHED;

use crate::{
    processor::this_processor_id,
    sched::task::{Task, current_fast, switch_entry_task},
    sync::spinlock::Spinlock,
};

/// Callee-saved register state preserved across a context switch.
///
/// Layout contract: `cpu_switch_to` (assembly, end of this file) stores
/// and loads these fields with sequential `stp`/`ldp` pairs, so the
/// field order and `#[repr(C)]` layout here MUST match the asm's
/// store/load order exactly: x19..x28, then fp (x29), sp, and pc (lr).
#[repr(C)]
struct CpuContext {
    x19: u64,
    x20: u64,
    x21: u64,
    x22: u64,
    x23: u64,
    x24: u64,
    x25: u64,
    x26: u64,
    x27: u64,
    x28: u64,
    fp: u64,
    sp: u64,
    // Resume address: the asm saves/restores lr into this slot, so
    // "pc" here is the address execution continues from after a switch.
    pc: u64,
}

/// Saved EL0 FP/SIMD register file.
///
/// NOTE(review): currently only allocated and zero-initialized —
/// `fpsimd_thread_switch` is a no-op, so nothing in this file reads or
/// writes this state yet. Layout presumably mirrors the hardware
/// register set (confirm against the intended save/restore code).
#[repr(C)]
struct UserFpsimdState {
    // 32 x 128-bit SIMD vector registers (v0..v31).
    vregs: [u128; 32],
    // Floating-point status and control registers.
    fpsr: u32,
    fpcr: u32,
    // Padding to keep the struct size 16-byte aligned.
    reserved: [u32; 2],
}

/// Per-thread user-visible register state saved lazily on switch.
#[repr(C)]
struct Uw {
    // Saved EL0 thread pointer (tpidr_el0); see `tls_thread_switch`.
    tp_value: u64,
    // NOTE(review): presumably the second thread pointer (tpidr2_el0,
    // SME); never read or written in this file — confirm intended use.
    tp2_value: u64,
    fpsimd_state: UserFpsimdState,
}

/// Architecture-specific per-task thread state embedded in `Task`.
///
/// `cpu_context` must stay the first field: `THREAD_CPU_CONTEXT`
/// (computed with `offset_of!` below) hands its offset to the
/// `cpu_switch_to` assembly, which indexes it relative to the `Task`
/// base pointer.
#[repr(C)]
pub(crate) struct ThreadStruct {
    cpu_context: CpuContext,
    // Lock-protected user register state (TLS pointers, FP/SIMD).
    uw: Spinlock<Uw>,
}

impl ThreadStruct {
    pub(crate) const fn new() -> Self {
        ThreadStruct {
            cpu_context: CpuContext {
                x19: 0,
                x20: 0,
                x21: 0,
                x22: 0,
                x23: 0,
                x24: 0,
                x25: 0,
                x26: 0,
                x27: 0,
                x28: 0,
                fp: 0,
                sp: 0,
                pc: 0,
            },
            uw: Spinlock::new(Uw {
                tp_value: 0,
                tp2_value: 0,
                fpsimd_state: UserFpsimdState {
                    vregs: [0; 32],
                    fpsr: 0,
                    fpcr: 0,
                    reserved: [0; 2],
                },
            }),
        }
    }
}

pub(super) const ARCH_NEED_RESCHED: u32 = TIF_NEED_RESCHED as u32;

/// FP/SIMD state handoff on context switch.
///
/// Currently a no-op: FP/SIMD registers are neither saved nor restored,
/// even though `ThreadStruct` reserves storage for them.
#[inline(always)]
fn fpsimd_thread_switch(_: &Task) {}

/// Switch the EL0 thread pointer (TLS base) on context switch: save the
/// live `tpidr_el0` into the outgoing task's thread state, then install
/// the incoming task's saved value.
#[inline(always)]
fn tls_thread_switch(next: &Task) {
    // Capture the outgoing task's current EL0 thread pointer.
    let tp: u64;
    unsafe {
        // SAFETY: reading tpidr_el0 has no side effects; the asm
        // touches no memory, stack, or flags.
        core::arch::asm!(
            "mrs {0}, tpidr_el0",
            out(reg) tp,
            options(nomem, nostack, preserves_flags)
        );
    }
    current_fast().thread.uw.lock().tp_value = tp;

    // Read the incoming task's saved thread pointer and release the
    // spinlock immediately — the original held `next`'s lock across
    // the msr and until function exit for no reason.
    let next_tp = next.thread.uw.lock().tp_value;
    unsafe {
        // SAFETY: writing tpidr_el0 only changes EL0 TLS addressing;
        // no memory, stack, or flags are affected.
        core::arch::asm!(
            "msr tpidr_el0, {0}",
            in(reg) next_tp,
            options(nomem, nostack, preserves_flags)
        );
    }
}

/// Context-ID handoff on context switch.
///
/// Currently a no-op: contextidr_el1 is not updated here.
#[inline(always)]
fn contextidr_thread_switch(_: &Task) {}

/// Architecture-specific half of a context switch from `prev` to
/// `next`. Does not return until this task is scheduled again; the
/// returned reference is the task that ran last before we resumed
/// (the value `cpu_switch_to` leaves in x0).
pub(crate) fn arch_switch_to(prev: &Task, next: Arc<Task>) -> &Task {
    // Per-subsystem register handoff (fpsimd and contextidr are
    // currently no-ops; only TLS is actually switched).
    fpsimd_thread_switch(&next);
    tls_thread_switch(&next);
    contextidr_thread_switch(&next);
    // Publish `next` as this CPU's current task before switching
    // stacks. The clone keeps a second strong reference alive in the
    // per-CPU slot; the local `next` stays on our (old) stack too,
    // so the raw pointer taken below remains valid across the switch.
    switch_entry_task(this_processor_id(), next.clone());
    unsafe {
        // Data synchronization barrier, inner-shareable domain: make
        // the stores above globally observable before the switch.
        core::arch::aarch64::__dsb(core::arch::aarch64::ISH);
    }
    unsafe extern "C" {
        // Assembly routine defined in the global_asm! at the end of
        // this file; takes/returns raw `*const Task` as u64.
        fn cpu_switch_to(prev: u64, next: u64) -> u64;
    }

    let next_tsk = next.as_ref() as *const Task as u64;
    unsafe {
        // SAFETY: both pointers refer to live `Task`s kept alive for
        // the duration of the switch (prev by our caller's borrow,
        // next by the Arc clones above). The returned value is the
        // task pointer left in x0 by cpu_switch_to when we resume.
        let last = cpu_switch_to(prev as *const Task as u64, next_tsk);
        &*(last as *const Task)
    }
}

// Byte offset of `thread.cpu_context` within `Task`, exported to the
// assembly below so it can address the saved-register area from a raw
// `Task` pointer.
const THREAD_CPU_CONTEXT: usize = offset_of!(Task, thread.cpu_context);

// cpu_switch_to(prev: x0, next: x1) -> last: x0
//
// Saves prev's callee-saved registers (x19..x28), fp (x29), sp, and lr
// (as the resume pc) into prev->thread.cpu_context, then restores the
// same set from next->thread.cpu_context and returns on next's stack.
// The stp/ldp sequence must match the field order of `CpuContext`.
// x0 is never clobbered, so the `ret` delivers the prev pointer as the
// return value in the resumed context.
//
// NOTE(review): `msr sp_el0, x1` stashes the incoming task pointer in
// sp_el0 — presumably the kernel's current-task register convention;
// confirm against the exception-entry code that reads sp_el0.
core::arch::global_asm!(
    r"
THREAD_CPU_CONTEXT = {}

.global cpu_switch_to; .align 2; cpu_switch_to:
    mov	x10, #THREAD_CPU_CONTEXT
    add	x8, x0, x10
    mov	x9, sp
    stp	x19, x20, [x8], #16		// store callee-saved registers
    stp	x21, x22, [x8], #16
    stp	x23, x24, [x8], #16
    stp	x25, x26, [x8], #16
    stp	x27, x28, [x8], #16
    stp	x29, x9, [x8], #16
    str	lr, [x8]
    add	x8, x1, x10
    ldp	x19, x20, [x8], #16		// restore callee-saved registers
    ldp	x21, x22, [x8], #16
    ldp	x23, x24, [x8], #16
    ldp	x25, x26, [x8], #16
    ldp	x27, x28, [x8], #16
    ldp	x29, x9, [x8], #16
    ldr	lr, [x8]
    mov	sp, x9
    msr	sp_el0, x1
    ret
.type cpu_switch_to, @function; .size cpu_switch_to, .-cpu_switch_to
", const THREAD_CPU_CONTEXT
);
