use core::sync::atomic::{AtomicUsize, Ordering};

use alloc::{
    collections::{btree_map::BTreeMap, vec_deque::VecDeque},
    sync::{Arc, Weak},
    vec::Vec,
};
use object::{File, Object, ObjectSegment};
use spin::{Lazy, Mutex, RwLock};
use x86_64::{
    structures::{
        gdt::SegmentSelector,
        paging::{OffsetPageTable, PageTable},
    },
    PhysAddr, VirtAddr,
};

use crate::arch::{
    kmm::{convert_physical_to_virtual, MemoryManager},
    trap::intr::InterruptIndex,
};

use super::{
    kmm::{ExtendedPageTable, MappingType, KERNEL_PAGE_TABLE, PHYSICAL_MEMORY_OFFSET},
    smp::CPUS,
    trap::{apic::get_lapic_id, gdt::Selectors},
};

/// Saved CPU register state for a thread.
///
/// The layout is ABI: it must match the stack image produced by
/// `push_context!` exactly. That macro pushes rax..r15 and finally cr3, so
/// after the pushes `cr3` sits at the lowest address (first field here) and
/// `rax` at the highest of the pushed registers. The trailing
/// `rip`/`cs`/`rflags`/`rsp`/`ss` fields match the order of the x86-64
/// interrupt stack frame (NOTE(review): assumes this struct is always built
/// on top of a hardware interrupt frame — confirm at the trap entry points).
/// Do not reorder fields.
#[derive(Debug, Clone, Copy, Default)]
#[repr(C, packed)]
#[allow(dead_code)]
pub struct Context {
    // Physical address of the thread's level-4 page table (loaded into CR3).
    pub cr3: usize,

    pub r15: usize,
    pub r14: usize,
    pub r13: usize,
    pub r12: usize,
    pub r11: usize,
    pub r10: usize,
    pub r9: usize,
    pub r8: usize,

    pub rbp: usize,
    pub rsi: usize,
    pub rdi: usize,

    pub rdx: usize,
    pub rcx: usize,
    pub rbx: usize,
    pub rax: usize,

    // Interrupt-frame portion: pushed by the CPU, not by `push_context!`.
    pub rip: usize,
    pub cs: usize,
    pub rflags: usize,
    pub rsp: usize,
    pub ss: usize,
}

impl Context {
    /// Prepares a fresh context so the thread starts executing at
    /// `entry_point` with interrupts enabled, on the given stack, under the
    /// given page table and segment selectors.
    ///
    /// The optional `args` tuple is loaded into the System V AMD64 argument
    /// registers (rdi, rsi, rdx, rcx, r8, r9).
    pub fn init(
        &mut self,
        entry_point: usize,
        stack_end_address: VirtAddr,
        page_table_address: PhysAddr,
        segment_selectors: (SegmentSelector, SegmentSelector),
        args: Option<(usize, usize, usize, usize, usize, usize)>,
    ) {
        let (code_selector, data_selector) = segment_selectors;

        // 0x200 sets only IF (interrupt enable) in RFLAGS.
        self.rflags = 0x200;
        self.rip = entry_point;
        self.rsp = stack_end_address.as_u64() as usize;
        self.cr3 = page_table_address.as_u64() as usize;
        self.cs = code_selector.0 as usize;
        self.ss = data_selector.0 as usize;

        if let Some((arg0, arg1, arg2, arg3, arg4, arg5)) = args {
            self.rdi = arg0;
            self.rsi = arg1;
            self.rdx = arg2;
            self.rcx = arg3;
            self.r8 = arg4;
            self.r9 = arg5;
        }
    }

    /// Builds an `OffsetPageTable` view over the page table referenced by
    /// this context's saved `cr3`.
    pub fn get_current_page_table(&self) -> OffsetPageTable<'static> {
        let frame_addr = PhysAddr::new(self.cr3 as u64);
        let offset = VirtAddr::new(*PHYSICAL_MEMORY_OFFSET);

        // SAFETY: `cr3` holds the physical address of this thread's live
        // level-4 page table, which is mapped in the physical-memory window.
        unsafe {
            let table: &mut PageTable =
                &mut *convert_physical_to_virtual(frame_addr).as_mut_ptr();
            OffsetPageTable::new(table, offset)
        }
    }
}

impl Context {
    /// Returns the virtual address at which this `Context` value lives.
    #[inline]
    pub fn address(&self) -> VirtAddr {
        let ptr: *const Context = self;
        VirtAddr::new(ptr as u64)
    }

    /// Reads a `Context` value from memory at `address`.
    ///
    /// `Context` is `#[repr(C, packed)]` (alignment 1), so the read is valid
    /// at any address; the caller must still guarantee the location actually
    /// holds a saved context.
    #[inline]
    pub fn from_address(address: VirtAddr) -> Context {
        unsafe { (address.as_u64() as *const Context).read() }
    }
}

/// Emits the assembly that spills all general-purpose registers plus CR3
/// onto the stack, in the exact reverse order of `pop_context!`, producing
/// the in-memory layout of `Context` (cr3 ends up at the lowest address,
/// i.e. at the final rsp).
#[macro_export]
macro_rules! push_context {
    () => {
        // A single raw string literal; `concat!` is unnecessary here.
        r#"
            push rax
            push rbx
            push rcx
            push rdx
            push rdi
            push rsi
            push rbp
            push r8
            push r9
            push r10
            push r11
            push r12
            push r13
            push r14
            push r15
            mov r15, cr3
            push r15
            "#
    };
}

/// Emits the assembly that restores CR3 and all general-purpose registers
/// from the stack, in the exact reverse order of `push_context!` (CR3 is
/// popped and reloaded first, then r15..rax).
#[macro_export]
macro_rules! pop_context {
    () => {
        concat!(
            r#"
            pop r15
            mov cr3, r15
            pop r15
            pop r14
            pop r13
            pop r12
            pop r11
            pop r10
            pop r9
            pop r8
            pop rbp
            pop rsi
            pop rdi
            pop rdx
            pop rcx
            pop rbx
            pop rax
			"#
        )
    };
}

/// Per-thread state that is not part of the register frame saved by
/// `push_context!`: the FS/GS segment bases and the CPU the thread was
/// assigned to.
#[derive(Debug, Clone, Copy, Default)]
#[repr(C, packed)]
#[allow(dead_code)]
pub struct OtherContext {
    // Saved FS segment base (IA32_FS_BASE).
    pub fsbase: usize,
    // Saved GS segment base (IA32_GS_BASE).
    pub gsbase: usize,
    // LAPIC id of the CPU chosen for this thread (round-robin in `Thread::new`).
    pub lapic_id: usize,
}

impl OtherContext {
    /// Records which CPU (by LAPIC id) this thread was assigned to.
    pub fn set_lapic_id(&mut self, lapic_id: usize) {
        self.lapic_id = lapic_id;
    }

    /// Whether the fast FSGSBASE instructions may be used instead of the
    /// IA32_FS_BASE / IA32_GS_BASE MSRs (CR4.FSGSBASE must be set).
    ///
    /// # Safety
    /// Reads CR4; must run in ring 0.
    unsafe fn fsgsbase_enabled() -> bool {
        x86::controlregs::cr4().contains(x86::controlregs::Cr4::CR4_ENABLE_FSGSBASE)
    }

    /// Saves the current FS base into this context.
    pub unsafe fn save_fsbase(&mut self) {
        self.fsbase = if Self::fsgsbase_enabled() {
            x86::current::segmentation::rdfsbase() as usize
        } else {
            x86::msr::rdmsr(x86::msr::IA32_FS_BASE) as usize
        };
    }

    /// Saves the current GS base into this context.
    pub unsafe fn save_gsbase(&mut self) {
        self.gsbase = if Self::fsgsbase_enabled() {
            x86::current::segmentation::rdgsbase() as usize
        } else {
            x86::msr::rdmsr(x86::msr::IA32_GS_BASE) as usize
        };
    }

    /// Restores the FS base saved in this context.
    pub unsafe fn restore_fsbase(&self) {
        let base = self.fsbase as u64;
        if Self::fsgsbase_enabled() {
            x86::current::segmentation::wrfsbase(base);
        } else {
            x86::msr::wrmsr(x86::msr::IA32_FS_BASE, base);
        }
    }

    /// Restores the GS base saved in this context.
    pub unsafe fn restore_gsbase(&self) {
        let base = self.gsbase as u64;
        if Self::fsgsbase_enabled() {
            x86::current::segmentation::wrgsbase(base);
        } else {
            x86::msr::wrmsr(x86::msr::IA32_GS_BASE, base);
        }
    }
}

/// A schedulable kernel or user thread.
///
/// `context` is the first field so that the thread's saved register state
/// sits at the thread's base address (NOTE(review): presumably relied on by
/// the context-switch path — confirm at the trap entry code).
#[derive(Debug, Clone)]
#[repr(C, packed)]
pub struct Thread {
    // Register state restored on the next switch to this thread.
    context: Context,
    // FS/GS bases and CPU assignment.
    other_context: OtherContext,
    // Unique, monotonically increasing thread id (from `NEXT_ID`).
    id: usize,
    // Base address of the heap-allocated kernel stack; freed in `Drop`.
    kernel_stack: VirtAddr,
    // Capacity of the Vec backing the kernel stack; needed to rebuild it in `Drop`.
    cap: usize,
    // True for user-mode threads.
    user: bool,
}

// Base virtual address where a user thread's stack is mapped.
pub const STACK_ADDR: usize = 0x07ff_fffe_0000;
// Size of every thread stack (user and kernel): 64 KiB.
pub const STACK_SIZE: usize = 0x0000_0001_0000;

impl Thread {
    #[allow(unused_assignments)]
    pub fn new(
        entry: usize,
        user: bool,
        file: Option<File>,
        args: Option<(usize, usize, usize, usize, usize, usize)>,
    ) -> ArcThread {
        let mut context = Context::default();

        let mut new_page_table = unsafe { KERNEL_PAGE_TABLE.lock().deep_copy() };

        let segment_selectors = if user {
            Selectors::get_user_segments()
        } else {
            Selectors::get_kernel_segments()
        };

        let mut capa = 0;
        let mut stack_virt_addr = VirtAddr::zero();

        let kernel_stack_addr = {
            let (ptr, len, cap) = alloc::vec![0u8; STACK_SIZE].into_raw_parts();
            assert_eq!(len, STACK_SIZE);
            capa = cap;
            VirtAddr::new(ptr as u64)
        };

        if user {
            let stack = VirtAddr::new(STACK_ADDR as u64);

            MemoryManager::alloc_range(
                stack,
                STACK_SIZE as u64,
                MappingType::UserData.flags(),
                &mut new_page_table,
            )
            .unwrap_or_else(|e| error!("Cannot map thread stack, error: {:#?}", e));

            stack_virt_addr = stack;
        } else {
            stack_virt_addr = kernel_stack_addr;
        }

        let entry_point = if user {
            for segment in file.as_ref().unwrap().segments() {
                MemoryManager::alloc_range(
                    VirtAddr::new(segment.address() as u64),
                    segment.size(),
                    MappingType::UserCode.flags(),
                    &mut new_page_table,
                )
                .expect("Failed to allocate memory for ELF segment");

                if let Ok(data) = segment.data() {
                    new_page_table.write_to_mapped_address(data, VirtAddr::new(segment.address()));
                }
            }

            file.as_ref().unwrap().entry() as usize
        } else {
            entry
        };

        context.init(
            entry_point,
            stack_virt_addr + STACK_SIZE as u64,
            new_page_table.physical_address(),
            segment_selectors,
            args,
        );

        static NEXT_LAPIC_ID_INDEX: AtomicUsize = AtomicUsize::new(0);

        let cpus = CPUS.read();
        let iter = cpus.iter_id().collect::<Vec<_>>();
        if NEXT_LAPIC_ID_INDEX.load(Ordering::SeqCst) >= iter.len() {
            NEXT_LAPIC_ID_INDEX.store(0, Ordering::SeqCst);
        }
        let idx = NEXT_LAPIC_ID_INDEX.fetch_add(1, Ordering::SeqCst);
        let lapic_id = *iter[idx] as usize;

        let mut other_context = OtherContext::default();
        other_context.set_lapic_id(lapic_id);

        static NEXT_ID: AtomicUsize = AtomicUsize::new(1);

        let arc_thread = Arc::new(RwLock::new(Thread {
            context,
            other_context,
            id: NEXT_ID.fetch_add(1, Ordering::SeqCst),
            kernel_stack: kernel_stack_addr,
            cap: capa,
            user,
        }));

        THREADS.lock().push_back(arc_thread.clone());
        arc_thread
    }

    pub fn new_idle() -> ArcThread {
        Self::new(idle_entry as usize, false, None, None)
    }

    pub fn get_context(&self) -> Context {
        self.context
    }

    pub fn set_context(&mut self, ctx: Context) {
        self.context = ctx;
    }

    pub fn is_user_thread(&self) -> bool {
        self.user
    }

    pub fn get_id(&self) -> usize {
        self.id
    }
}

impl Drop for Thread {
    /// Releases the thread's kernel stack and its user page-table pages.
    fn drop(&mut self) {
        // Rebuild the Vec from the raw parts saved in `Thread::new`
        // (pointer, STACK_SIZE, capacity) and drop it to free the kernel stack.
        drop(unsafe {
            Vec::from_raw_parts(self.kernel_stack.as_mut_ptr::<u8>(), STACK_SIZE, self.cap)
        });

        // Reconstruct an OffsetPageTable view over the thread's page table
        // (saved cr3) and free its user-space portion.
        let page_table_addr = PhysAddr::new(self.context.cr3 as u64);
        let page_table = convert_physical_to_virtual(page_table_addr).as_mut_ptr::<PageTable>();
        let physical_memory_offset = VirtAddr::new(*PHYSICAL_MEMORY_OFFSET);
        let offset_page_table =
            unsafe { OffsetPageTable::new(&mut *page_table, physical_memory_offset) };
        unsafe { offset_page_table.free_user_page_table() };
    }
}

/// Shared, lockable handle to a thread; holding one keeps the thread alive.
pub type ArcThread = Arc<RwLock<Thread>>;
/// Non-owning thread handle, as stored in the scheduler's queues.
pub type WeakThread = Weak<RwLock<Thread>>;

/// Entry point of per-CPU idle threads: enable interrupts and halt until the
/// next interrupt arrives, forever.
fn idle_entry() -> ! {
    loop {
        x86_64::instructions::interrupts::enable_and_hlt();
    }
}

/// Global scheduler state, lazily initialized on first use (which creates
/// one idle thread per CPU — see `ThreadManager::new`).
pub static THREAD_MANAGER: Lazy<Mutex<ThreadManager>> =
    Lazy::new(|| Mutex::new(ThreadManager::new()));

/// Round-robin thread scheduler: one "current" thread per CPU plus a shared
/// FIFO run queue of weak thread handles.
pub struct ThreadManager {
    // Thread currently running on each CPU, keyed by LAPIC id.
    currents: BTreeMap<usize, WeakThread>,
    // FIFO run queue of schedulable threads.
    threads: VecDeque<WeakThread>,
}

impl ThreadManager {
    /// Builds a manager with one fresh idle thread installed as the current
    /// thread of every CPU (keyed by LAPIC id).
    pub fn new() -> ThreadManager {
        let currents = CPUS
            .read()
            .iter_id()
            .map(|id| (*id as usize, Arc::downgrade(&Thread::new_idle())))
            .collect();

        ThreadManager {
            currents,
            threads: VecDeque::new(),
        }
    }

    /// Queues a thread for scheduling (stored as a weak reference; `THREADS`
    /// keeps it alive).
    pub fn add(&mut self, thread: ArcThread) {
        self.threads.push_back(Arc::downgrade(&thread));
    }

    /// Removes the queued thread with the given id, if present.
    ///
    /// Dead weak references (threads already dropped) no longer match the
    /// id and are skipped; the previous `upgrade().unwrap()` panicked on
    /// them.
    pub fn del(&mut self, id: usize) {
        let position = self.threads.iter().position(|thread| {
            thread
                .upgrade()
                .map_or(false, |thread| thread.read().get_id() == id)
        });
        if let Some(position) = position {
            self.threads.remove(position);
        }
    }

    /// Number of threads waiting in the run queue.
    pub fn len(&self) -> usize {
        self.threads.len()
    }

    /// Saves the interrupted context into the current thread, picks the next
    /// live thread from the run queue, and returns the address of the context
    /// to restore.
    pub fn schedule(&mut self, context: VirtAddr) -> VirtAddr {
        let lapic_id = get_lapic_id() as usize;

        // Save the interrupted register state into the current thread and
        // requeue it for a later time slice.
        if let Some(weak) = self.currents.get(&lapic_id) {
            if let Some(thread) = weak.upgrade() {
                let mut thread = thread.write();
                thread.context = Context::from_address(context);

                self.threads.push_back(weak.clone());
            }
        }

        // Pick the next *live* thread, pruning dead weak references as we
        // go. Installing a dead weak as current would make the unwrap below
        // panic.
        while let Some(next_thread) = self.threads.pop_front() {
            if next_thread.upgrade().is_some() {
                self.currents.insert(lapic_id, next_thread);
                break;
            }
        }

        let next_thread = self.currents[&lapic_id].upgrade().unwrap();
        let next_thread = next_thread.read();

        // Set the ring-0 stack used on the next user->kernel transition.
        let kernel_address = next_thread.kernel_stack + STACK_SIZE as u64;
        CPUS.write()
            .get_mut(lapic_id as u32)
            .set_ring0_rsp(kernel_address);

        next_thread.context.address()
    }

    /// Returns the thread currently running on this CPU.
    pub fn get_current(&self) -> ArcThread {
        self.currents[&(get_lapic_id() as usize)].upgrade().unwrap()
    }
}

/// Strong references to every live thread; scheduler queues hold only weak
/// handles, so removing a thread from this list is what lets it be freed.
pub static THREADS: Mutex<VecDeque<ArcThread>> = Mutex::new(VecDeque::new());

/// Initializes threading support: enables the FSGSBASE instructions when the
/// CPU advertises them (otherwise the MSR fallback paths in `OtherContext`
/// are used), then initializes the syscall machinery.
pub fn init() {
    let has_fsgsbase = raw_cpuid::CpuId::new()
        .get_extended_feature_info()
        .map_or(false, |info| info.has_fsgsbase());

    if has_fsgsbase {
        unsafe {
            x86::controlregs::cr4_write(
                x86::controlregs::cr4() | x86::controlregs::Cr4::CR4_ENABLE_FSGSBASE,
            )
        }
    }

    super::syscall::init();
}

/// Triggers a reschedule by raising the timer interrupt in software, which
/// enters the scheduler's interrupt path.
pub fn sched() {
    const IDX: u8 = InterruptIndex::Timer as u8;
    unsafe { x86_64::instructions::interrupts::software_interrupt::<IDX>() };
}

/// Returns the thread currently running on the calling CPU.
pub fn current() -> ArcThread {
    THREAD_MANAGER.lock().get_current()
}
