#include <mm/mm.h>
#include <mm/vma.h>
#include <mm/valloc.h>
#include <arch/cpu.h>
#include <arch/gdt.h>
#include <arch/interrupts.h>
#include <sys/sysdef.h>
#include <sys/unistd.h>
#include <process.h>
#include <log.h>

extern struct process process_table[MAX_PROCESS];

static volatile pid_t nextpid = 1;

extern lnode_t kthread_list;

/*
 * Populate a freshly allocated page directory (dir_pa) with the shared
 * kernel mappings: copies the kernel-space directory entries from the
 * current address space, installs a private page table for the kernel
 * stack area, and sets up the recursive self-mapping in the last slot.
 *
 * Returns 0 on success, ENOMEM if the kernel-stack page table could not
 * be allocated.
 */
static int copy_kernel_space(uptr dir_pa) {
    // TODO: should be read only and COW
    // Temporarily map the new directory at PG_MP_1 so we can write it.
    mapping_p2v(PG_MP_1, dir_pa, PG_WRITE, VMAP_NULL);

    pg_table_t* orig_dir = (pg_table_t*)DIR_BASE_VADDR;
    pg_table_t* dir = (pg_table_t*)PG_MP_1;

    // Share every kernel-space directory entry (from KERNEL_MM_BASE up
    // to, but not including, the kernel-stack area) with the parent.
    uptr kernel_i = DIRECTORY_INDEX(KERNEL_MM_BASE);
    for (size_t i = kernel_i; i < KSTK_AREA_DIR_IDX; i++) {
        dir->entry[i].data = orig_dir->entry[i].data;
    }

    // The kernel-stack area gets its own (empty) page table per process.
    uptr kstk_pa = get_page(PG_KERNEL);
    if (!kstk_pa) {
        // BUGFIX: undo the temporary mapping before bailing out; the
        // original leaked the PG_MP_1 mapping on this path.
        unmapping_p2v(PG_MP_1);
        return ENOMEM;
    }

    dir->entry[KSTK_AREA_DIR_IDX] = (pg_entry_t){
        .frame = (uptr)kstk_pa >> PG_SIZE_BITS,
        .present = 1,
        .rw = 1,
        .user = 0
    };

    // Recursive mapping: the directory maps itself into the last slot so
    // page tables stay addressable; write-through + cache-disable to keep
    // the MMU's view coherent.
    dir->entry[PG_LAST_TABLE] = (pg_entry_t){
        .frame = (uptr)dir_pa >> PG_SIZE_BITS,
        .present = 1,
        .rw = 1,
        .pwt = 1,
        .pcd = 1
    };

    unmapping_p2v(PG_MP_1);
    return 0;
}

/*
 * Deep-copy the current kernel stack (the one containing p->context.esp)
 * into the child address space rooted at dir_pa. The child's kernel-stack
 * page table is zeroed first, then only the active stack's pages are
 * duplicated frame by frame.
 *
 * Returns 0 on success, ENOMEM if a stack page could not be copied.
 */
static int copy_kernel_stack(uptr dir_pa, struct process* p) {

    // only copy one kernel stack
    uptr esp = p->context.esp;

    // Map the child's directory at the PD_MP_1 window.
    mapping_page_dir(PD_MP_1, dir_pa);

    // Start with an empty kernel-stack page table in the child.
    memset(PT_BASE(PD_MP_1, KSTK_AREA_DIR_IDX << PG_INDEX_BITS), 0, PG_SIZE);

    // Locate which KSTK_SIZE slot in the stack area contains esp.
    uptr stk_start = KSTK_AREA_START;
    while (stk_start < KSTK_AREA_END) {
        if (esp >= stk_start && esp < stk_start + KSTK_SIZE) {
            break;
        }
        stk_start += KSTK_SIZE;
    }

    assert_msg(stk_start < KSTK_AREA_END, "kernel stack not found");
    pg_entry_t* old_entry, * entry;
    for (uptr i = stk_start >> 12; i < (stk_start + KSTK_SIZE) >> 12; i++) {
        old_entry = &PTE_ENTRY(TBL_BASE_VADDR, i);
        entry = &PTE_ENTRY(PD_MP_1, i);

        // Flush stale TLB entries for the recursive-mapping windows
        // before touching the PTEs through them.
        cpu_flush_page((u32)old_entry);
        cpu_flush_page((u32)entry);

        entry->data = old_entry->data;
        uptr old_frame = entry->frame << PG_SIZE_BITS;
        uptr new_frame = copy_page(old_frame);
        if (!new_frame) {
            // BUGFIX: free *all* pages already copied for this stack
            // (the original freed only the first one, and passed the raw
            // PTE .data — frame | flag bits — to free_page instead of
            // the frame's physical address).
            for (uptr j = stk_start >> 12; j < i; j++) {
                free_page(PTE_ENTRY(PD_MP_1, j).frame << PG_SIZE_BITS);
            }
            // BUGFIX: release the PD_MP_1 window on the error path too.
            unmapping_page_dir(PD_MP_1);
            return ENOMEM;
        }
        entry->frame = (uptr)new_frame >> PG_SIZE_BITS;
    }
    unmapping_page_dir(PD_MP_1);
    return 0;
}

/*
 * Allocate and map a fresh kernel stack for a new thread inside the
 * shared kernel-stack area, and point p->context.esp just below its top.
 *
 * A slot is considered free when its first PTE is empty.
 *
 * Returns 0 on success, ENOMEM when the area is full or physical pages
 * cannot be obtained.
 */
static int alloc_kernel_stack(struct process* p) {
    // Scan the stack area for a free KSTK_SIZE-aligned slot. Check the
    // bound before reading the PTE so we never read past the area.
    uptr stk_start = KSTK_AREA_START;
    while (stk_start < KSTK_AREA_END
        && PTE_ENTRY(TBL_BASE_VADDR, stk_start >> 12).data) {
        stk_start += KSTK_SIZE;
    }

    // BUGFIX: was `>`, which accepted a slot starting exactly at
    // KSTK_AREA_END (i.e. entirely outside the stack area).
    if (stk_start >= KSTK_AREA_END) {
        return ENOMEM;
    }

    uptr stk_pages = get_pages(KSTK_SIZE >> PG_SIZE_BITS, PG_KERNEL);
    if (!stk_pages) {
        return ENOMEM;
    }

    // BUGFIX: the original loop's update expression recomputed the PTE
    // for (stk_start + PG_SIZE) on every iteration, so all stack pages
    // were written into the same two PTEs. Advance one PTE per page.
    for (uptr off = 0; off < KSTK_SIZE; off += PG_SIZE) {
        PTE_ENTRY(TBL_BASE_VADDR, (stk_start + off) >> 12) = (pg_entry_t){
            .frame = (stk_pages + off) >> PG_SIZE_BITS,
            .present = 1,
            .rw = 1,
            .user = 0
        };
    }

    // Leave a small pad (0x10 bytes) below the top of the stack.
    p->context.esp = stk_start + KSTK_SIZE - 0x10;

    return 0;
}

/*
 * Share the pages of one VMA between the current address space
 * (PD_MP_SELF) and the child address space mapped at PD_MP_1, marking
 * both sides read-only for copy-on-write.
 *
 * For each page in [vma->start, vma->end):
 *   - allocates a page table in the child if the PDE is empty,
 *   - copies the PTE, clears the rw bit on BOTH sides,
 *   - bumps the physical frame's reference count.
 *
 * Returns 0 on success, ENOMEM if a page table cannot be allocated
 * (NOTE(review): pages already shared in earlier iterations are not
 * unwound on this path — caller is expected to tear down the child mm).
 */
static int copy_page_range(struct vma* vma) {
    uptr p;
    pg_entry_t* old_entry, * new_entry;
    for (uptr pn = vma->start >> PG_SIZE_BITS; pn < vma->end >> PG_SIZE_BITS; pn++) {
        // Page-directory level: skip unmapped regions; lazily allocate
        // a zeroed page table for the child when needed.
        old_entry = &PDE_ENTRY(PD_MP_SELF, pn);
        new_entry = &PDE_ENTRY(PD_MP_1, pn);
        if (!old_entry->data) {
            continue;
        } else if (!new_entry->data) {
            if (!(p = get_page_zero(PG_LOCKED))) {
                return ENOMEM;
            }

            // Inherit the parent's PDE flags, but point at the new table.
            new_entry->data = old_entry->data;
            new_entry->frame = p >> PG_SIZE_BITS;
        }

        // Page-table level.
        old_entry = &PTE_ENTRY(PD_MP_SELF, pn);
        new_entry = &PTE_ENTRY(PD_MP_1, pn);

        if (!old_entry->data) {
            continue;
        }
        // Flush the recursive-mapping window before reading the PTE.
        cpu_flush_page((u32)old_entry);
        *new_entry = *old_entry;

        new_entry->rw = old_entry->rw = 0; // read only, COW
        // The parent's mapping for this page just became read-only —
        // invalidate its TLB entry.
        cpu_flush_page(pn << PG_SIZE_BITS);

        // Both address spaces now reference the same frame.
        ref_page(old_entry->frame << PG_SIZE_BITS);
    }
    return 0;
}

static int copy_mmap(struct mm* src, struct mm* dst) {
    mutex_lock(&src->lock);

    mutex_init(&dst->lock);
    atomic_init(&dst->ref_count, 1);

    mapping_page_dir(PD_MP_1, dst->proc->cr3);

    struct vma* vma, * tmp, * prev, ** pprev;
    pprev = &dst->vma_list;
    prev = NULL;
    int errno = 0;
    for (vma = src->vma_list; vma; vma = vma->next) {

        if (vma->flags & VMA_PRIVATE) {
            dst->page_count -= vma->end - vma->start;
            dst->page_count >>= PG_SIZE_BITS;
            continue;
        }

        tmp = vma_alloc();

        *tmp = *vma;
        list_head_init(&tmp->avm_chain);

        tmp->mm = dst;

        if ((errno = avm_fork(vma, tmp))) {
            vma_free(tmp);
            goto out;
        }

        if (vma->file) {
            // TODO: deal with file
        }

        *pprev = tmp;
        pprev = &tmp->next;
        tmp->prev = prev;
        prev = tmp;

        vma->mm = dst;
        dst->vma_count++;

        if ((errno = copy_page_range(vma))) {
            goto out;
        }
    }
    unmapping_page_dir(PD_MP_1);
    mutex_unlock(&src->lock);
out:
    return errno;
}

/*
 * Build a complete address space for a forked process p: a new page
 * directory with shared kernel mappings, a private copy of the current
 * kernel stack, and (if the parent has user mappings) a COW clone of
 * the parent's mm.
 *
 * Returns 0 on success or an errno.
 *
 * NOTE(review): cleanup on failure is shallow — e.g. the kernel-stack
 * page table allocated inside copy_kernel_space is not released when
 * copy_kernel_stack fails, and a failed copy_mmap leaves p->mm and
 * dir_pa allocated. Presumably the caller's exit path reclaims these;
 * verify.
 */
static int copy_memory_for_process(struct process* p) {
    int errno;
    // Zeroed page for the new page directory.
    uptr dir_pa = get_page_zero(PG_KERNEL);
    if (!dir_pa) {
        errno = ENOMEM;
        goto out;
    }

    if ((errno = copy_kernel_space(dir_pa))) {
        free_page(dir_pa);
        goto out;
    }

    if ((errno = copy_kernel_stack(dir_pa, p))) {
        free_page(dir_pa);
        goto out;
    }

    p->cr3 = dir_pa;

    // Kernel threads have no mm; only clone user mappings when the
    // parent actually has them.
    if (cur_proc->mm) {
        p->mm = mm_alloc(p);
        if (!p->mm) {
            errno = ENOMEM;
            goto out;
        }
        errno = copy_mmap(cur_proc->mm, p->mm);
    }
out:
    return errno;
}

/*
 * Set up memory for a new thread: threads share the parent's address
 * space (same cr3), so only a private kernel stack is needed.
 *
 * Returns 0 on success or an errno from alloc_kernel_stack.
 */
static int copy_memory_for_thread(struct process* p) {
    int errno = alloc_kernel_stack(p);
    if (errno) {
        return errno;
    }
    p->cr3 = cur_proc->cr3;
    return 0;
}

/*
 * Dispatch memory setup for a fork: threads share the parent's address
 * space, processes get a full copy.
 *
 * Returns 0 on success or an errno from the chosen helper.
 */
static int copy_memory(struct process* p, u32 is_thread) {
    return is_thread ? copy_memory_for_thread(p)
                     : copy_memory_for_process(p);
}

/*
 * Duplicate every open file descriptor of the current process into p's
 * (already allocated) fd table, preserving slot numbers.
 */
static void copy_fdtable(struct process* p) {
    fd_t** parent_fds = cur_proc->fd_table->fds;
    for (size_t slot = 0; slot < FS_FD_MAX; slot++) {
        if (parent_fds[slot]) {
            fs_dup_fd(parent_fds[slot], &p->fd_table->fds[slot]);
        }
    }
}

/*
 * Claim a free slot in the process table and initialize it, optionally
 * inheriting from parent (mm and fd_table are reset so the caller
 * decides how to share or copy them). Panics if the table is full.
 *
 * Returns the new process entry with a fresh pid and eax preset to 0
 * (the child's fork return value).
 */
struct process* alloc_process(struct process* parent) {
    struct process* child = 0;

    // Slot 0 is reserved; scan the rest of the table.
    for (int slot = 1; slot < MAX_PROCESS; slot++) {
        if (process_table[slot].state == PROCESS_UNUSED) {
            child = &process_table[slot];
            break;
        }
    }

    assert_msg(child != 0, "the number of processes is full\n");

    memset(child, 0, sizeof(struct process));

    if (parent) {
        // Start as a byte copy of the parent, then sever the fields the
        // child must not share implicitly.
        *child = *parent;
        child->parent = parent;
        child->mm = NULL;
        child->fd_table = NULL;
    }
    child->context.eax = 0;
    child->pid = nextpid++;
    list_head_init(&child->waitq);
    list_head_init(&child->threads);
    return child;
}

/*
 * Core fork implementation shared by fork(2), thread creation and
 * kernel threads. `option` selects the sharing policy:
 *   FORK_PROCESS — copy address space, duplicate fd table;
 *   thread/kthread — share address space and fd table.
 *
 * Returns the new process. Its result (child pid on success, errno on
 * failure) is stashed in exit_code, which callers read as the return
 * channel.
 */
static struct process* do_fork(int option) {
    struct process* process = alloc_process(cur_proc);
    int errno = 0;

    if ((errno = copy_memory(process, option != FORK_PROCESS))) {
        // Release the table slot on failure.
        process->state = PROCESS_UNUSED;
        goto out;
    }

    if (option == FORK_PROCESS) {
        // NOTE(review): vzalloc result is used unchecked by
        // copy_fdtable — verify it cannot fail, or add handling.
        process->fd_table = vzalloc(sizeof(fd_table_t));
        copy_fdtable(process);
    } else {
        // Threads share the parent's descriptor table.
        process->fd_table = cur_proc->fd_table;

    }

    if (option == FORK_KTHREAD) {
        // Kernel threads hang off the global kthread list and have no
        // parent process.
        list_append(&kthread_list, &process->threads);
        process->parent = NULL;
    }

    process->state = PROCESS_READY;
    errno = process->pid;
out:
    // exit_code doubles as the fork result: pid on success, errno on
    // failure.
    process->exit_code = errno;
    return process;
}

// fork(2) syscall entry: the child's pid (or an error code) is relayed
// through the new process's exit_code field, which do_fork uses as its
// result channel.
__DEF_SYSCALL0(pid_t, fork) {
    struct process* proc = do_fork(FORK_PROCESS);
    return proc->exit_code;
}

/*
 * Prepare p's saved context so that, when scheduled, it "returns" from
 * an interrupt into either kernel or user mode.
 *
 * return_to_kernel selects ring-0 vs ring-3 segment selectors. The +8
 * offset for kernel returns skips the esp/ss pair of the iret frame:
 * an intra-ring iret does not pop esp/ss, so the frame placed on the
 * stack is 8 bytes shorter.
 *
 * kstk_top must point at the (16-byte-ish padded) top of the thread's
 * kernel stack; the fabricated frame is laid down just below it.
 */
void assign_return_context(u32 return_to_kernel, struct process* p, uptr kstk_top) {
    u32 data_seg = return_to_kernel ? SS_R0_DATA : SS_R3_DATA;
    u32 code_seg = return_to_kernel ? SS_R0_CODE : SS_R3_CODE;
    u32 offset = return_to_kernel ? 8 : 0;

    // Reserve space for the interrupt frame below the stack top.
    kstk_top = kstk_top - sizeof(enter_ctx) + offset;
    p->context = (isr_param){
        .ds = data_seg,
        .es = data_seg,
        .fs = data_seg,
        .gs = data_seg,
        .esp = kstk_top
    };

    // Inherit the current eflags so interrupts stay in the same state.
    u32 eflags = cpu_reflags();
    p->context.ctx->ctx.cs = code_seg;
    p->context.ctx->ctx.eflags.val = eflags;
}

int do_exit(struct process* proc);
/*
 * Spawn a kernel thread running func(arg) on a freshly mapped one-page
 * kernel stack.
 *
 * Returns the kthread's pid on success, or an error code.
 *
 * NOTE(review): do_fork stores errno values like ENOMEM in exit_code —
 * the `errno < 0` test below only works if this codebase's errno
 * constants are negative; verify against sys/sysdef.h.
 */
int kthread_create(void (*func)(void*), void* arg) {
    struct process* p = do_fork(FORK_KTHREAD);
    int errno = p->exit_code;
    if (errno < 0) {
        goto out;
    }
    // One physical page for the thread's stack, mapped into kernel VA.
    uptr pa = get_page(PG_KERNEL);
    if (!pa) {
        do_exit(p);
        errno = ENOMEM;
        goto out;
    }
    uptr stk_top = mapping_pages(pa, 1, PG_WRITE, VMAP_NULL);
    // Point near the top of the page, leaving a 16-byte pad.
    stk_top += 0xFF0;

    // Fabricate a ring-0 interrupt frame that "returns" into func.
    assign_return_context(1, p, stk_top);

    // Place arg where func's calling convention expects its first
    // argument (one word above the fake return address slot).
    *(uptr*)(stk_top + sizeof(uptr)) = (uptr)arg;

    p->context.ctx->ctx.eip = (uptr)func;

out:
    return errno;
}