#include <arch/cpu.h>
#include <arch/interrupts.h>
#include <arch/vectors.h>
#include <log.h>
#include <string.h>
#include <mm/pgtable.h>
#include <mm/mm.h>
#include <mm/vma.h>
#include <mm/rmap.h>
#include <mm/swap.h>
#include <status.h>
#include <process.h>

#define LOG_TAG "PGF"
#include <log.h>

/*
 * Decoded x86 #PF error code (the value pushed by the CPU, read from
 * isr_param->ctx->err_code). Each bit mirrors the architectural layout;
 * access either field-wise or via .raw.
 *
 * NOTE(review): the struct mixes u8 and u32 bitfield base types; bitfield
 * packing across different base types is implementation-defined in C, so
 * this layout is only guaranteed on the compiler/ABI this kernel targets —
 * verify the union really spans exactly 32 bits.
 */
typedef union {
    struct {
        u8 present : 1; // 0 = caused by a non-present, 1 = caused by a page-level protection violation
        u8 by_write : 1; // 0 = caused when read, 1 = caused when write
        u8 by_user : 1; // 0 = caused by a supervisor-mode access, 1 = caused by a user-mode access
        u8 by_reserved : 1; // caused by a reserved bit set to 1
        u8 by_fetch : 1; // caused by instruction fetch
        u8 by_ss : 1; // caused by shadow stack access
        u8 during_HLAT : 1; // fault occurred during HLAT paging
        u32 reserved0 : 6;
        u8 sgx : 1;
        u32 reserved1 : 16;
    };
    u32 raw; // whole error code as pushed by the CPU
} pgf_cause_t;


/*
 * Context bundle handed from page_fault() to the fault sub-handlers
 * (do_cow / do_swapin / do_file_mapping / do_anonymous_mapping), so each
 * handler takes a single argument.
 */
typedef struct {
    uptr fault_addr;   // faulting linear address (CR2, possibly rescaled for page-table faults)
    pg_entry_t* entry; // PTE covering fault_addr, resolved via the recursive mapping
    pgf_cause_t cause; // decoded #PF error code
    struct vma* vma;   // VMA that contains fault_addr (never NULL when handlers run)
} pgf_param_t;

/*
 * Resolve a copy-on-write fault on param->entry.
 *
 * If the backing frame is shared (ref_counts > 1), duplicate it, drop one
 * reference on the original, and point the PTE at the private copy; a frame
 * we own exclusively is simply made writable in place. Swappable pages are
 * (re)linked into the VMA's anonymous-vm tracking.
 *
 * Returns 0 on success, ENOMEM if the copy could not be allocated.
 *
 * NOTE(review): no TLB invalidation is visible here — presumably the caller
 * or the mapping layer handles it; confirm. ref_counts is also updated
 * without any visible locking — verify single-CPU / interrupts-off context.
 */
static int do_cow(pgf_param_t* param) {
    pg_entry_t* pte = param->entry;
    uptr src_pa = pte->frame << PG_SIZE_BITS;
    page_t* pg = page_query(src_pa);

    if (pg->ref_counts > 1) {
        // Frame is shared: carve out a private copy before granting write.
        uptr copy_pa = copy_page(src_pa);
        if (!copy_pa) {
            return ENOMEM;
        }
        pg->ref_counts--;
        pte->frame = copy_pa >> PG_SIZE_BITS;
        pg = page_query(copy_pa);
    }

    pte->rw = 1;

    if (!(param->vma->flags & VMA_NO_SWAP)) {
        link_avm_page(param->vma->avm, pg, param->fault_addr);
    }
    return 0;
}

// Implemented in the swap subsystem (mm/swap) — brings the page backing
// `vaddr` back from swap and rewrites `pte`.
int do_swap_in(pg_entry_t* pte, uptr vaddr);

// Thin adapter so swap-in fits the pgf_param_t-based dispatch in page_fault().
static int do_swapin(pgf_param_t* param) {
    return do_swap_in(param->entry, param->fault_addr);
}

static int do_file_mapping(pgf_param_t* param) {
    uptr new = get_page(PG_LOCKED);
    if (!new) {
        return ENOMEM;
    }

    mapping_p2v(PG_MP_4, new, PG_WRITE, VMAP_NULL);

    size_t offset = PG_ALIGN(param->fault_addr - param->vma->start) + param->vma->file_offset;
    param->vma->file->ops->read(param->vma->file->inode, (void*)PG_MP_4, PG_SIZE, offset);
    unmapping_p2v(PG_MP_4);

    mapping_p2v(param->fault_addr, new, (vma_writeable(param->vma) ? PG_WRITE : 0) | PG_ALLOW_USER, VMAP_NULL);

    if (!(param->vma->flags & VMA_PRIVATE)) {
        if (!param->vma->avm) {
            param->vma->avm = avm_alloc();
            struct avm_chain* avc = avm_chain_alloc();
            avc->avm = param->vma->avm;
            avc->vma = param->vma;
            avm_chain_link(param->vma, avc, param->vma->avm);
        }
        link_avm_page(param->vma->avm, page_query(new), param->fault_addr);
    }
    set_page_addr_unlock(new);
    return 0;
}

static int do_anonymous_mapping(pgf_param_t* param) {
    uptr new = get_page(PG_LOCKED);
    if (!new) {
        return ENOMEM;
    }
    mapping_p2v(param->fault_addr, new, (vma_writeable(param->vma) ? PG_WRITE : 0) | PG_ALLOW_USER, VMAP_NULL);

    if (!(param->vma->flags & VMA_PRIVATE) && !(param->vma->flags & VMA_NO_SWAP)) {
        if (!param->vma->avm) {
            param->vma->avm = avm_alloc();
            struct avm_chain* avc = avm_chain_alloc();
            avc->avm = param->vma->avm;
            avc->vma = param->vma;
            avm_chain_link(param->vma, avc, param->vma->avm);
        }
        link_avm_page(param->vma->avm, page_query(new), param->fault_addr);
    }
    if (!(param->vma->flags & VMA_NO_SWAP)) {
        set_page_addr_unlock(new);
    }
    return 0;
}

/*
 * #PF (vector 14) handler.
 *
 * Resolution order:
 *   1. Faults inside the recursive page-table window (>= TBL_BASE_VADDR) are
 *      rescaled so they resolve like the data address they shadow.
 *   2. Supervisor faults in kernel space get a fresh kernel page on demand.
 *   3. User faults are resolved against the owning VMA: missing page
 *      directory, demand paging (file-backed or anonymous), swap-in,
 *      copy-on-write, or a stale user-bit in the paging structures.
 * Anything unresolved panics.
 */
static void page_fault(const isr_param* param) {
    pgf_cause_t cause;
    cause.raw = param->ctx->err_code;
    u32 fault_addr = cpu_rcr2();
    u32 raw = fault_addr; // keep the unmodified CR2 for diagnostics
    pgf_param_t p;
    pg_entry_t* entry;
    int errno = 0;

    // Fault in the self-mapped page-table area: rescale to the address the
    // table entry covers.
    if (fault_addr >= TBL_BASE_VADDR) {
        fault_addr <<= PG_INDEX_BITS;
    }

    // Kernel-space supervisor fault: grow kernel mappings on demand.
    if (fault_addr >= KERNEL_MM_BASE && !cause.by_user) {
        uptr new_page = get_page(PG_KERNEL);
        if (!new_page) {
            errno = ENOMEM;
            goto fatal;
        }
        mapping_p2v(fault_addr, new_page, PG_WRITE, VMAP_NULL);
        return;
    }

    struct mm* mm = cur_proc->mm;
    if (!mm) {
        errno = EFAULT;
        goto fatal;
    }

    p.vma = query_vma(mm, fault_addr);
    if (!p.vma) {
        errno = EFAULT;
        goto fatal;
    }

    // Missing page directory entry: install a zeroed page table.
    entry = &PDE_ENTRY(DIR_BASE_VADDR, fault_addr >> PG_SIZE_BITS);
    if (!entry->data) {
        uptr new_page = get_page_zero(PG_LOCKED);
        if (!new_page) {
            errno = ENOMEM;
            goto fatal;
        }
        *entry = (pg_entry_t){
            .frame = new_page >> PG_SIZE_BITS,
            .rw = 1,
            .user = 1,
            .present = 1
        };
        return;
    }

    entry = &PTE_ENTRY(TBL_BASE_VADDR, fault_addr >> PG_SIZE_BITS);
    p.fault_addr = fault_addr;
    p.entry = entry;
    p.cause = cause;

    if (!entry->data) {
        // Not-present, never populated: demand paging.
        if (p.vma->file) {
            errno = do_file_mapping(&p);
        } else {
            errno = do_anonymous_mapping(&p);
        }
    } else if (entry->swap.sign == 0x7) {
        // Swapped-out marker in the PTE.
        errno = do_swapin(&p);
    } else if (cause.by_write && !entry->rw && vma_writeable(p.vma)) {
        // Write to a read-only PTE in a writable VMA: copy-on-write.
        errno = do_cow(&p);
    } else if (cause.by_user && fault_addr < KERNEL_MM_BASE) {
        // User access to a present mapping missing the user bit; fix the
        // PTE first, then (on a repeat fault) the PDE.
        if (!entry->user) {
            entry->user = 1;
        } else {
            entry = &PDE_ENTRY(TBL_BASE_VADDR, fault_addr >> PG_SIZE_BITS);
            if (!entry->user) {
                entry->user = 1;
            }
        }
    } else {
        // errno is still 0 on this path; print the cause bits instead
        // (the original passed errno without a conversion specifier).
        panic("Unknown PGF cause=0x%x addr=0x%p\n", cause.raw, fault_addr);
    }

    // Handlers return 0 on success and an error code on failure. Test
    // against 0 rather than `< 0`: if ENOMEM is a positive constant, the
    // original `errno < 0` silently swallowed allocation failures.
    if (errno != 0) {
        goto fatal;
    }
    cur_proc->pgf_count++;
    return;

fatal:
    // TODO: FATAL error
    // NOTE(review): this probe uses PD_MP_SELF where the resolution path
    // above uses TBL_BASE_VADDR — confirm both windows alias the same tables.
    if (cause.present == 0 && PTE_ENTRY(PD_MP_SELF, fault_addr >> PG_SIZE_BITS).present == 1) {
        panic("What the Fuck\n");
    }
    panic("(%d) Unsolved Page Fault! Pid:%d Addr:0x%p Ip:0x%p\n", errno, cur_proc->pid, raw, cur_proc->context.ctx->ctx.eip);
}

// Install page_fault() as the ISR for the #PF vector. Called once during
// kernel bring-up, before paging faults can be taken in user context.
void page_init(void) {
    intr_register_isr(FAULT_PAGE_FAULT, &page_fault);
}