#include <mm/mm.h>
#include <mm/rmap.h>
#include <mm/swap.h>
#include <mm/valloc.h>
#include <mm/pgtable.h>
#include <sys/sysdef.h>
#include <sys/unistd.h>
#include <arch/cpu.h>
#include <process.h>
#include <sched.h>
#include <fs.h>
#include <log.h>

/*
 * Free an entire user address space rooted at the page directory whose
 * physical address is dir_pa: every mapped user page (or its swap
 * slot), every user page table, and finally the directory frame itself.
 *
 * The foreign directory is temporarily mapped at the PD_MP_1 window so
 * its tables become addressable through the recursive mapping; the
 * window is released before returning.
 */
static void free_pagedir(uptr dir_pa) {
    if (dir_pa == 0)
        return;

    // Map the target directory into the PD_MP_1 window.
    mapping_page_dir(PD_MP_1, dir_pa);
    // Slot 0x3ff of the window is the directory itself (recursive entry).
    pg_table_t* dir = (pg_table_t*)(PD_MP_1 | (0x3ff << 12));
    uptr vaddr = 0;
    // Walk only user-space directory slots; entries at and above
    // DIRECTORY_INDEX(KERNEL_MM_BASE) are shared kernel mappings and
    // must not be freed here.
    for (size_t i = 0; i < DIRECTORY_INDEX(KERNEL_MM_BASE); i++) {
        pg_entry_t dir_entry = dir->entry[i];
        if (!dir_entry.data || dir_entry.present == 0)
            continue;

        // Through the window, slot i of the directory exposes the
        // i-th page table at PD_MP_1 | (i << 12).
        pg_table_t* tbl = (pg_table_t*)(PD_MP_1 | (i << 12));

        for (size_t j = 0; j < PG_MAX_ENTRIES; j++) {
            pg_entry_t entry = tbl->entry[j];
            if (!entry.data)
                continue;

            // Reconstruct the virtual address this PTE maps (10/10/12 split).
            vaddr = i << 22 | j << 12;
            if (entry.swap.sign == 0x7) {
                // Swapped-out page: the entry holds a swap-slot
                // descriptor, not a frame. Release the slot only when
                // this is the last reference to it.
                if (count_ref(vaddr, entry) == 1) {
                    set_slot_free(NULL, &entry);
                }
            } else {
                // Resident page: return the frame to the allocator.
                // NOTE(review): entry.data still carries the low flag
                // bits (present/rw/...); free_kernel_stack() masks via
                // entry->frame << PG_SIZE_BITS instead — confirm
                // free_page() tolerates the flag bits.
                free_page(entry.data);
            }
        }

        // Free the page-table frame itself (same flag-bit caveat as above).
        free_page(dir_entry.data);
    }

    // Free the directory frame, then drop the temporary window mapping.
    free_page(dir_pa);

    unmapping_page_dir(PD_MP_1);
}

static int free_page_range(struct vma* vma) {
    pg_entry_t* old_entry;
    for (uptr pn = vma->start >> PG_SIZE_BITS; pn < vma->end >> PG_SIZE_BITS; pn++) {
        old_entry = &PDE_ENTRY(PD_MP_SELF, pn);
        if (!old_entry->data) {
            continue;
        }

        old_entry = &PTE_ENTRY(PD_MP_SELF, pn);

        if (!old_entry->data) {
            continue;
        }

        // this to be fix!
        // if (old_entry->swap.sign == 0x7) {
        //     if (count_ref(pn << PG_SIZE_BITS, &old_entry) == 1) {
        //         set_slot_free(NULL, old_entry);
        //     }
        // } else {
        //     free_page(old_entry->data);
        // }

        free_page(old_entry->data);
    }
    return 0;
}

/*
 * Tear down every VMA of an address space: drop each vma's anonymous-vm
 * chain, release the backing pages/swap slots, then free the vma
 * structures and finally the mm itself.
 *
 * Returns 0 on success, or the errno from free_page_range(); on failure
 * the mm and any remaining vmas are left alive.
 */
static int free_mmap(struct mm* mm) {
    struct vma* vma;
    struct vma* next_vma;
    int errno = 0;
    // Save ->next before vma_free(): the original advanced with
    // vma = vma->next after freeing vma — a use-after-free.
    for (vma = mm->vma_list; vma; vma = next_vma) {
        next_vma = vma->next;

        if (vma->avm) {
            // Shared anonymous memory teardown is not implemented yet;
            // loudly flag the unsupported case instead of corrupting it.
            if (atomic_load(&vma->avm->ref) > 1) {
                LOGE("NOSET: We don't support avm->ref > 1 yet");
            }
            avm_lock(vma->avm->root);

            // Unlink and free every chain element hanging off this vma.
            struct avm_chain* pavc, * next;
            list_for_each(pavc, next, &vma->avm_chain, same_vma) {
                list_remove(&pavc->same_avm);
                avm_free(pavc->avm);
                avm_chain_free(pavc);
            }
            avm_unlock(vma->avm->root);
        }

        if ((errno = free_page_range(vma))) {
            goto out;
        }

        vma_free(vma);
    }
    mm_free(mm);
out:
    return errno;
}

/*
 * Free an entire process address space: the mapped pages via
 * free_mmap(), then the user page-table frames recorded in the
 * directory, and finally the directory frame (proc->cr3) itself.
 *
 * NOTE(review): this helper is not referenced anywhere in this file
 * (do_exit() uses free_pagedir() instead); confirm it has external
 * callers before relying on it.
 */
static void free_proc_mm(struct process* proc) {
    if (proc->mm) {
        free_mmap(proc->mm);
    }

    // Map the target directory into the PD_MP_1 window so its entries
    // are addressable; slot 0x3ff is the directory itself.
    mapping_page_dir(PD_MP_1, proc->cr3);
    pg_table_t* dir = (pg_table_t*)(PD_MP_1 | (0x3ff << 12));
    // Only user-space slots: kernel page tables are shared.
    for (size_t i = 0; i < DIRECTORY_INDEX(KERNEL_MM_BASE); i++) {
        pg_entry_t dir_entry = dir->entry[i];
        if (!dir_entry.data || dir_entry.present == 0)
            continue;

        // NOTE(review): dir_entry.data still carries the low flag bits
        // — confirm free_page() masks them (free_kernel_stack masks
        // explicitly via entry->frame << PG_SIZE_BITS).
        free_page(dir_entry.data);
    }

    free_page(proc->cr3);

    // Release the temporary mapping window, matching free_pagedir();
    // the original returned without unmapping, leaking the PD_MP_1 slot.
    unmapping_page_dir(PD_MP_1);
}

/*
 * Return the physical frames of the kernel stack containing `sp` to the
 * page allocator and clear their page-table entries.
 *
 * The kernel stack area [KSTK_AREA_START, KSTK_AREA_END) is divided
 * into fixed-size KSTK_SIZE slots; the owning slot is located by a
 * linear scan for the one containing sp. Panics (assert_msg) if sp lies
 * outside the stack area.
 */
static void free_kernel_stack(uptr sp) {
    // Find the KSTK_SIZE-aligned slot that contains sp.
    uptr stk_start = KSTK_AREA_START;
    while (stk_start < KSTK_AREA_END) {
        if (sp >= stk_start && sp < stk_start + KSTK_SIZE) {
            break;
        }
        stk_start += KSTK_SIZE;
    }

    assert_msg(stk_start < KSTK_AREA_END, "kernel stack not found");

    // i iterates the slot's virtual page numbers (vaddr >> 12).
    for (uptr i = stk_start >> 12; i < (stk_start + KSTK_SIZE) >> 12; i++) {
        volatile pg_entry_t* entry = &PTE_ENTRY(TBL_BASE_VADDR, i);

        // Invalidate the TLB entry for this stack page.
        // NOTE(review): the flush happens before entry->data is cleared
        // below — confirm nothing can touch this vaddr in between, or
        // consider flushing after the clear.
        cpu_flush_page(i << 12);

        // Mask off the flag bits: free_page() receives a frame-aligned
        // physical address.
        uptr old_frame = entry->frame << PG_SIZE_BITS;
        free_page(old_frame);
        entry->data = 0;
    }
}

/*
 * Final teardown of an exiting process or thread.
 *
 * If proc->threads is empty, proc is the sole owner of its resources:
 * drop a reference on every open file, free the fd table, and release
 * the whole address space via free_pagedir(). Otherwise this is one
 * thread of several: unlink it from the thread list and free only its
 * kernel stack.
 *
 * The process struct is then zeroed and the scheduler is invoked.
 * NOTE(review): if proc is the currently running process, memset()ing
 * its struct and (in the thread branch) freeing its kernel stack while
 * still executing on it looks hazardous — presumably schedule() never
 * returns here and the saved context.esp belongs to a stack we are no
 * longer using; confirm.
 *
 * Returns 0 (only reached if schedule() returns).
 */
int do_exit(struct process* proc) {
    proc->state = PROCESS_UNUSED;

    if (list_empty(&proc->threads)) {
        // this process is the only one own this, so let it go
        fd_table_t* fd_table = proc->fd_table;
        if (fd_table) {
            // Drop one reference per open descriptor.
            // NOTE(review): nothing here closes the file when the
            // refcount reaches zero — confirm another layer does.
            for (int i = 0; i < FS_FD_MAX; i++) {
                if (!fd_table->fds[i]) {
                    continue;
                }
                
                atomic_fetch_sub(&fd_table->fds[i]->file->ref_count, 1);
            }
            vfree(fd_table);
        }
        // Release the entire address space rooted at cr3.
        uptr dir = proc->cr3;
        free_pagedir(dir);
    } else {
        // Just one thread of a live process: unlink and free its stack.
        list_remove(&proc->threads);
        free_kernel_stack(proc->context.esp);
    }

    // Recycle the process slot and pick the next runnable task.
    memset(proc, 0, sizeof(struct process));
    schedule();
    return 0;
}

/*
 * exit() syscall: log the exiting process's stats, wake a parent that
 * is blocked waiting for a child (handing it our pid as the wait()
 * return value in eax), then perform the final teardown via do_exit().
 */
__DEF_SYSCALL0(int, exit) {
    // Guard ->mm before dereferencing: free_proc_mm()'s NULL check
    // shows mm may legitimately be NULL (e.g. kernel threads); the
    // original read cur_proc->mm->page_count unconditionally.
    int page_count = cur_proc->mm ? cur_proc->mm->page_count : 0;
    LOGW("Process %d exit with code %d, pg_count: %d, pgf_count: %d\n", cur_proc->pid, cur_proc->exit_code, page_count, cur_proc->pgf_count);
    struct process* proc = cur_proc->parent;
    if (proc && proc->state == PROCESS_BLOCKING) {
        // Parent is blocked in wait(): deliver our pid and unblock it.
        proc->context.eax = cur_proc->pid;
        proc->state = PROCESS_READY;
    }
    return do_exit(cur_proc);
}