/*
 * TODO: add user-task permission checking for the page policies below.
 */
#include <arch.h>
#include <conio.h>
#include <types.h>
#include <paging.h>
#include <mman.h>
#include <string.h>
#include <schedule.h>
#include <multiboot.h>

#include <sys/task.h>
#include <sys/kmalloc.h>
#include <sys/thread.h>
#include <sys/vmm.h>
#include <sys/unistd.h>

#define curtask curth->owner

static int do_alloc(vm_map_t *, void **, size_t, int);
static int do_free(vm_map_t *, void *);
static int do_attribute(vm_map_t *, void *, int);
static int do_map(vm_map_t *, void *, size_t, void **);
static vm_map_t *do_dup(vm_map_t *);

extern task_t kern_task;
extern sysinfo_t sys;
static vm_map_t kernel_map;

/* flag == 1 : allocate anywhere (kernel picks the address);
   flag == 0 : allocate at the address passed in *addr; */
/*
 * Allocate memory in the given task's address space.
 *
 * *addr carries the requested address in (when flag == 0) and the
 * chosen address out.  Returns 0 on success, a negative code on error.
 */
int vm_alloc(task_t * task, void **addr, size_t size, int flag)
{
    void *va;
    int rc;

    sched_lock();

    if (!task_valid(task)) {
	rc = -1;
	goto out;
    }
    /* only the current task may modify its own map */
    if (task != curtask) {
	rc = -2;
	goto out;
    }
    /* fetch the user's requested address */
    if (copyin(addr, &va, sizeof(va))) {
	rc = -3;
	goto out;
    }

    rc = do_alloc(task->vmap, &va, size, flag);
    if (rc == 0 && copyout(&va, addr, sizeof(va)))
	rc = -3;

  out:
    sched_unlock();
    return (rc);
}

/* todo: change uint32_t to addr_t! */
/*
 * do_alloc - reserve a region, back it with physical pages and map it.
 *
 * vmm  : target map;  addr : in/out virtual address;
 * flag : non-zero = allocate anywhere, zero = at *addr.
 * Returns 0 on success, -1 on failure.
 *
 * todo: change uint32_t to addr_t!
 */
static int do_alloc(vm_map_t * vmm, void **addr, size_t size, int flag)
{
    memreg_t *mr;
    uint32_t start, end;
    uint32_t pa;

    if (size == 0)
	return 0;		/* nothing to do */

    /*
     * reserve a mem region
     */
    if (flag) {			/* anywhere */
	size = PAGE_ALIGN_UP(size);
	if ((mr = memreg_alloc(&vmm->reglst, size)) == NULL)
	    return -1;		/* no mem (previously returned 0 == success) */
    } else {			/* at the requested address */
	start = PAGE_ALIGN((uint32_t) * addr);
	end = PAGE_ALIGN_UP(start + size);
	size = (size_t) (end - start);

	if ((mr = memreg_lock((memreg_t *) & vmm->reglst, (addr_t) start,
			      size)) == NULL)
	    return -1;		/* busy or no mem (previously returned 0) */
    }
    mr->flags = MEMREG_READ | MEMREG_WRITE;

    /*
     * alloc physical pages, and map them into virtual address
     */
    if ((pa = alloc_pages(size / PAGE_SIZE)) == 0)
	goto err1;

    if (map_pages(mr->vbase, pa, vmm->pdir, size, PV_ALL) < 0)
	goto err2;
    mr->addr = pa;		/* phys addr */

    /* zero fill; NOTE(review): writes through the physical address —
       assumes the kernel identity-maps physical memory; confirm */
    memset((void *) pa, 0, mr->reg_size);
    vmm->map_size += size;
    *addr = (void *) mr->vbase;

    return 0;

  err2:
    free_pages(pa, pa + size);
  err1:
    memreg_free(&vmm->reglst, mr);

    return -1;
}

/*
 * Free the memory region of the current task that starts at vaddr.
 * Returns 0 on success, a negative code on error.
 */
int vm_free(task_t * task, void *vaddr)
{
    int rc = -1;

    sched_lock();
    if (task_valid(task)) {
	/* a task may only free its own memory */
	if (task != curtask)
	    rc = -2;
	else
	    rc = do_free(task->vmap, vaddr);
    }
    sched_unlock();

    return (rc);
}

/*
 * do_free - unmap and release the region whose base is vaddr.
 * Returns 0 on success, -1 if the region is absent or not in use.
 */
static int do_free(vm_map_t * vmm, void *vaddr)
{
    memreg_t *mr;
    addr_t va = PAGE_ALIGN((uint32_t) vaddr);

    /* the region must exist, start exactly at va, and be in use */
    mr = memreg_lookup(&vmm->reglst, va, 1);
    if (mr == NULL)
	return -1;
    if (mr->vbase != va)
	return -1;
    if (mr->flags & MEMREG_FREE)
	return -1;

    /* tear down the page mappings */
    unmap_pages(mr->vbase, mr->addr, vmm->pdir, mr->reg_size);

    /* release physical pages only if nobody else uses them */
    if ((mr->flags & (MEMREG_SHARED | MEMREG_MAPPED)) == 0)
	free_pages(mr->addr, (mr->addr + mr->reg_size));

    vmm->map_size -= mr->reg_size;
    memreg_free(&vmm->reglst, mr);

    return 0;
}

/*
 * Change attribute of specified virtual address.
 *
 * The "addr" argument points to a memory segment previously
 * allocated through a call to vm_allocate(). The attribute
 * type can be chosen a combination of PROT_READ, PROT_WRITE.
 * Note: PROT_EXEC is not supported, yet.
 */
/*
 * Change the protection attribute of a region (see comment above).
 * Returns 0 on success, -1 on error.
 */
int vm_attribute(task_t * task, void *vaddr, int attr)
{
    int error;

    /*
     * Validate attr BEFORE taking the scheduler lock.  The original
     * code returned from inside the locked section without calling
     * sched_unlock(), leaking the lock on a bad attribute.
     */
    if (attr == 0 || attr & ~(ATTR_RD | ATTR_WR))
	return -1;

    sched_lock();
    if (!task_valid(task)) {
	sched_unlock();
	return -1;
    }
    if (task != curtask) {
	sched_unlock();
	return -1;
    }
    error = do_attribute(task->vmap, vaddr, attr);
    sched_unlock();

    return (error);
}

/*
 * do_attribute - change the read/write protection of one region.
 *
 * vmap  : map to operate on; vaddr : base of the target region;
 * attr  : ATTR_RD / ATTR_WR combination (validated by the caller).
 * Returns 0 on success (or no change needed), -1 on error.
 *
 * A shared region that becomes writable is first copied to private
 * pages (copy-on-protection-change) and removed from its share ring.
 */
static int do_attribute(vm_map_t * vmap, void *vaddr, int attr)
{
    memreg_t *mr;
    int new_flags, map_type;
    addr_t old_pa, new_pa;
    addr_t va;

    va = (addr_t) PAGE_ALIGN((uint32_t) vaddr);

    /*
     * Find the target mem region.  It must start exactly at va
     * and be in use.
     */
    mr = memreg_lookup(&vmap->reglst, va, 1);
    if (mr == NULL || mr->vbase != va || (mr->flags & MEMREG_FREE)) {
	return -1;		/* not allocated */
    }
    /*
     * The attribute of the mapped segment can not be changed
     * (the pages belong to another task's map).
     */
    if (mr->flags & MEMREG_MAPPED)
	return -1;

    /*
     * Compute the new flags; new_flags stays 0 when the request
     * matches the current protection.
     */
    new_flags = 0;
    if (mr->flags & MEMREG_WRITE) {
	if (!(attr & ATTR_WR))
	    new_flags = MEMREG_READ;	/* downgrade RW -> RO */
    } else {
	if (attr & ATTR_WR)
	    new_flags = MEMREG_READ | MEMREG_WRITE;	/* upgrade RO -> RW */
    }
    if (new_flags == 0)
	return 0;		/* same attribute */

    map_type = (new_flags & MEMREG_WRITE) ? PV_WR : PV_RD;
    /*
     * If it is shared segment, duplicate it: give this map its own
     * private copy so the other sharers keep the old protection.
     */
    if (mr->flags & MEMREG_SHARED) {
	old_pa = mr->addr;
	/* Allocate new physical page. */
	if ((new_pa = alloc_pages(mr->reg_size / PAGE_SIZE)) == 0)
	    return -1;

	/* Copy source page
	   NOTE(review): copies via physical addresses — assumes the
	   kernel identity-maps physical memory; confirm */
	memcpy((void *) new_pa, (void *) old_pa, mr->reg_size);

	/* Map new mem region over the same virtual range */
	if (map_pages(mr->vbase, new_pa, vmap->pdir, mr->reg_size,
		      map_type | PV_PRESENT | PV_USER) != 0) {
	    free_pages(new_pa, new_pa + mr->reg_size);
	    return -1;
	}

	mr->addr = new_pa;
	/* Unlink from shared list */
	mr->sh_prev->sh_next = mr->sh_next;
	mr->sh_next->sh_prev = mr->sh_prev;
	/* if only one sharer remains, it is no longer shared */
	if (mr->sh_prev == mr->sh_next)
	    mr->sh_prev->flags &= ~MEMREG_SHARED;

	mr->sh_next = mr->sh_prev = mr;
    } else {
	/* private region: just remap in place with the new protection */
	if (map_pages(mr->vbase, mr->addr, vmap->pdir, mr->reg_size,
		      map_type | PV_PRESENT | PV_USER) != 0)
	    return -1;
    }

    mr->flags = new_flags;

    return 0;
}

/**
 * vm_map - map another task's memory to current task.
 *
 * Note: This routine does not support mapping to the specific address.
 */
/**
 * vm_map - map another task's memory into the current task.
 *
 * Note: This routine does not support mapping to a specific address.
 * Returns 0 on success, a negative code on error.
 */
int vm_map(task_t * target, void *addr, size_t size, void **alloc)
{
    int rc = -1;

    sched_lock();
    if (!task_valid(target))
	goto out;
    /* mapping a task's memory into itself makes no sense */
    if (target == curtask) {
	rc = -2;
	goto out;
    }
    rc = do_map(target->vmap, addr, size, alloc);

  out:
    sched_unlock();
    return (rc);
}

/*
 * do_map - map [addr, addr+size) of the target map vmm into the
 * current task, writing the new virtual address to user pointer alloc.
 * Returns 0 on success, a negative code on error.
 */
static int do_map(vm_map_t * vmm, void *addr, size_t size, void **alloc)
{
    memreg_t *cur, *tgt;
    vm_map_t *curmap;
    uint32_t start, end;
    uint32_t pa;
    int map_type;
    void *tmp;

    if (size == 0)
	return 0;		/* was "return NULL" in an int function */

    /* probe the user pointer for writability before doing any work */
    tmp = NULL;
    if (copyout(&tmp, alloc, sizeof(tmp)))
	return -1;

    start = PAGE_ALIGN((uint32_t) addr);
    end = PAGE_ALIGN_UP((uint32_t) addr + size);
    size = (size_t) (end - start);

    /*
     * find the mem region that includes target address
     */
    tgt = memreg_lookup(&vmm->reglst, (addr_t) start, size);
    if (tgt == NULL || (tgt->flags & MEMREG_FREE))
	return -2;		/* not allocated */

    /*
     * reserve a free segment in the current task
     */
    curmap = curtask->vmap;
    if ((cur = memreg_alloc(&curmap->reglst, size)) == NULL)
	return -2;		/* no mem */

    /* inherit the target's protection */
    map_type = (tgt->flags & MEMREG_WRITE) ? PV_WR : PV_RD;

    pa = tgt->addr + (addr_t) (start - tgt->vbase);
    if (map_pages(cur->vbase, pa, curmap->pdir,
		  size, map_type | PV_PRESENT | PV_USER) < 0) {
	/* undo the region reservation (was leaked on this path) */
	memreg_free(&curmap->reglst, cur);
	return -2;		/* error: no mem */
    }

    cur->flags = tgt->flags | MEMREG_MAPPED;
    cur->addr = pa;		/* phys addr */
    curmap->map_size += size;

    /*
     * Hand the mapped address back to the caller.  The original code
     * only ever wrote NULL into *alloc, so callers could never learn
     * where the memory was mapped.
     */
    tmp = (void *) cur->vbase;
    if (copyout(&tmp, alloc, sizeof(tmp))) {
	unmap_pages(cur->vbase, pa, curmap->pdir, size);
	curmap->map_size -= size;
	memreg_free(&curmap->reglst, cur);
	return -1;
    }

    return 0;
}

/*
 * create new virtual memory space.
 * No memory is inherited.
 *
 * Must be called with scheduler locked.
 */
/*
 * Create a new, empty virtual memory space (no memory inherited).
 * Returns the new map, or NULL on failure.
 * Must be called with the scheduler locked.
 */
vm_map_t *vm_create(void)
{
    vm_map_t *map = kmalloc(sizeof(vm_map_t), GFP_KERNEL);

    if (map == NULL)
	return NULL;

    map->refcount = 1;
    map->map_size = 0;

    /* fresh page directory for the new space */
    /* todo: need copy pdir from kernel! */
    map->pdir = new_pagedir();
    if (map->pdir == 0) {
	kfree(map);
	return NULL;
    }
    memreg_init(&map->reglst);

    return (map);
}

/*
 * terminate specified virtual memory space.
 * This is called when task is terminated.
 */
void vm_terminate(vm_map_t * vmm)
{
    memreg_t *mr, *tmp;

    /* drop one reference; the map lives on while others hold it.
       NOTE(review): the decrement happens before sched_lock() below —
       confirm callers serialize vm_terminate/vm_ref, otherwise racy */
    if (--vmm->refcount > 0)
	return;

    sched_lock();
    /* walk the circular region list, tearing down every in-use region */
    mr = &vmm->reglst;
    do {
	if (mr->flags != MEMREG_FREE) {
	    /* unmap mem region */
	    unmap_pages(mr->vbase, mr->addr, vmm->pdir, mr->reg_size);

	    /* free mem region if it is not shared and mapped */
	    if (!(mr->flags & MEMREG_SHARED) &&
		!(mr->flags & MEMREG_MAPPED)) {
		free_pages(mr->addr, mr->addr + mr->reg_size);
	    }
	}

	/* advance before destroying the current node */
	tmp = mr;
	mr = mr->next;
	memreg_destroy(&vmm->reglst, tmp);
    } while (mr != &vmm->reglst);

    if (vmm == curtask->vmap) {
	/*
	 * switch to the kernel page directory before
	 * deleting current page directory.
	 */
	load_cr3(kernel_map.pdir);
    }

    free_pagetables((uint32_t *) vmm->pdir);
    kfree(vmm);
    sched_unlock();
}

/*
 * duplicate specified virtual memory space.
 * This is called when new task is created.
 *
 * Returns new map id, NULL if it fails.
 *
 * All segments of original memory map are copied to new memory map.
 * If the segment is read-only, executable, or shared segment, it is
 * no need to copy. These segments are physically shared with the
 * original map.
 */
/*
 * Duplicate a virtual memory space (see comment above).
 * Returns the new map, or NULL on failure or NULL input.
 */
vm_map_t *vm_dup(vm_map_t * org_map)
{
    vm_map_t *new_map = NULL;

    if (org_map != NULL) {
	sched_lock();
	new_map = do_dup(org_map);
	sched_unlock();
    }

    return (new_map);
}

/*
 * do_dup - clone org_map into a freshly created map.
 *
 * Read-only, unmapped segments are shared with the original; writable
 * segments get private copies.  Returns the new map or NULL on failure.
 */
static vm_map_t *do_dup(vm_map_t * org_map)
{
    vm_map_t *new_map;
    memreg_t *tmp, *src, *dest;
    int map_type;

    if ((new_map = vm_create()) == NULL)
	return NULL;

    new_map->map_size = org_map->map_size;
    /*
     * copy all mem regions
     */
    tmp = &new_map->reglst;
    src = &org_map->reglst;

    /*
     * copy top mem region (the list head is embedded in the map)
     */
    (*tmp) = (*src);
    tmp->next = tmp->prev = tmp;

    if (src == src->next)	/* blank memory ? */
	return (new_map);

    do {
	assert(src != NULL);
	assert(src->next != NULL);

	if (src == &org_map->reglst) {
	    dest = tmp;
	} else {
	    /* create new mem region struct.
	       TODO(review): on any failure below the partially built
	       new_map (and its pages) is leaked; consider tearing it
	       down before returning NULL */
	    dest = kmalloc(sizeof(memreg_t), GFP_KERNEL);
	    if (dest == NULL)
		return NULL;

	    (*dest) = (*src);	/* memcpy */

	    /* splice after tmp in the new map's circular list */
	    dest->prev = tmp;
	    dest->next = tmp->next;
	    tmp->next->prev = dest;
	    tmp->next = dest;
	    tmp = dest;
	}

	if (src->flags == MEMREG_FREE) {
	    /*
	     * skip free segment
	     */
	} else {
	    /* Check if the segment can be shared */
	    if (!(src->flags & MEMREG_WRITE) &&
		!(src->flags & MEMREG_MAPPED)) {
		dest->flags |= MEMREG_SHARED;
	    }

	    if (!(dest->flags & MEMREG_SHARED)) {
		/* allocate new physical page. */
		dest->addr = alloc_pages(src->reg_size / PAGE_SIZE);
		if (dest->addr == 0)
		    return NULL;

		/* copy source page */
		memcpy((void *) dest->addr, (void *) src->addr,
		       src->reg_size);
	    }

	    /* map the mem region to virtual address */
	    if (dest->flags & MEMREG_WRITE)
		map_type = PV_WR;
	    else
		map_type = PV_RD;

	    /*
	     * BUGFIX: the arguments were scrambled here.  Every other
	     * call site uses map_pages(vaddr, paddr, pdir, size, flags);
	     * the original passed (addr, vbase, size, pdir, flags).
	     */
	    if (map_pages(dest->vbase, dest->addr, new_map->pdir,
			  dest->reg_size,
			  map_type | PV_PRESENT | PV_USER))
		return NULL;
	}

	src = src->next;
    } while (src != &org_map->reglst);

    /*
     * No error. Now, link all shared segments into the originals'
     * share rings (both lists have identical shape, so walk in step).
     */
    dest = &new_map->reglst;
    src = &org_map->reglst;
    do {
	if (dest->flags & MEMREG_SHARED) {
	    src->flags |= MEMREG_SHARED;
	    dest->sh_prev = src;
	    dest->sh_next = src->sh_next;
	    src->sh_next->sh_prev = dest;
	    src->sh_next = dest;
	}

	dest = dest->next;
	src = src->next;
    } while (src != &org_map->reglst);

    return (new_map);
}

/* Activate the page directory of the given map (no-op for the kernel map). */
static void vm_switch(vm_map_t * map)
{
    if (map == &kernel_map)
	return;
    load_cr3(map->pdir);
}

/* Take an additional reference on a map; NULL is tolerated. */
void vm_ref(vm_map_t * map)
{
    if (map == NULL)
	return;
    map->refcount++;
}

/*
 * load task image for boot task. (kernel servers)
 * Return 0 on success, or errno on failure.
 */
int vm_load(vm_map_t * map, module_t * mod, void **stack)
{
    /* NOTE(review): text/data/naddr are only used by the commented-out
       loader body below; they currently trigger unused-variable warnings */
    char *src;
    void *text, *data;
    addr_t naddr;

    kprintf("Loading task cmdline: %s\n", (char *) mod->cmdline);

    /*
     * we have to switch VM mapping to touch the virtual
     * memory space of a target task without page fault.
     */
    vm_switch(map);

    /* locate the module image in the target's address space;
       NOTE(review): the result is currently unused — the copy code
       below is disabled, so this function only switches maps and logs */
    src = virt_to_phys(mod->mod_start, map->pdir);

    /* text = (void *)mod->text;
       data = (void *)mod->data;
       naddr = do_alloc(map, &text, mod->textsz, 0);
       if (naddr == 0)
       return -1;
       memcpy(text, src, mod->textsz);
       error = do_attribute(map, text, PROT_READ);
       if (error)
       return error;

       if (mod->datasz + mod->bsssz != 0) {
       naddr = do_alloc(map, &data, mod->datasz + mod->bsssz, 0);
       if (naddr == 0)
       return -1;
       if (mod->datasz > 0) {
       src = src + (mod->data - mod->text);
       memcpy(data, src, mod->datasz);
       }
       }

       *stack = (void *)USER_STACK;
       naddr = do_alloc(map, stack, PAGE_SIZE * 2, 0);
       if (naddr == 0)
       return -1; */

    /* Free original pages */
    //free_pages(mod->addr, mod->addr + mod->size);

    return 0;
}

/*
 * translate virtual address of current task to physical address.
 * returns physical address on success, or NULL if no mapped memory.
 */
addr_t vm_trans(addr_t vaddr, size_t size)
{
    /* delegate to the pmap layer using the current task's page directory */
    return mem_convert(curtask->vmap->pdir, vaddr, size);
}

/* Initialize the VM subsystem: set up the global kernel map. */
void vm_init(void)
{
    kprintf("vm system init...");

    /* the kernel map wraps the boot-time page directory and memory size */
    kernel_map.refcount = 1;
    kernel_map.pdir = sys.kernel_pdir;
    kernel_map.map_size = sys.mem_size;
    memreg_init(&kernel_map.reglst);
    kern_task.vmap = &kernel_map;

    kprintf("Ok.\n");
}

/* Initialize a region-list head as a single free placeholder region. */
void memreg_init(memreg_t * mr)
{
    if (mr == NULL)
	return;

    mr->vbase = USER_MAX_ADDR - PAGE_SIZE;
    mr->addr = PAGE_SIZE;
    mr->reg_size = 0;
    mr->flags = MEMREG_FREE;

    /* both circular lists initially point back at the head itself */
    mr->next = mr->prev = mr;
    mr->sh_next = mr->sh_prev = mr;
}

/*
 * Allocate a new free region [vaddr, vaddr+size) and splice it into
 * the circular list right after prev.  Returns the region or NULL.
 */
memreg_t *memreg_create(memreg_t * prev, addr_t vaddr, size_t size)
{
    memreg_t *mr;

    if (prev == NULL)
	return NULL;

    mr = kmalloc(sizeof(memreg_t), GFP_KERNEL);
    if (mr == NULL)
	return NULL;
    bzero(mr, sizeof(memreg_t));

    mr->vbase = vaddr;
    mr->reg_size = size;
    mr->flags = MEMREG_FREE;
    mr->sh_next = mr->sh_prev = mr;

    /* splice after prev */
    mr->prev = prev;
    mr->next = prev->next;
    prev->next->prev = mr;
    prev->next = mr;

    return (mr);
}

/*
 * First-fit allocation: find a free region of at least size bytes,
 * split off any excess as a new free region, and return the fit.
 * The caller is responsible for setting the returned region's flags.
 */
memreg_t *memreg_alloc(memreg_t * head, size_t size)
{
    memreg_t *mr;

    if (head == NULL)
	return NULL;

    for (mr = head;; mr = mr->next) {
	if ((mr->flags & MEMREG_FREE) && mr->reg_size >= size) {
	    /* split off the unused tail as a new free region */
	    if (mr->reg_size != size &&
		memreg_create(mr, mr->vbase + size,
			      mr->reg_size - size) == NULL)
		return NULL;

	    mr->reg_size = size;
	    return (mr);
	}
	if (mr->next == head)
	    break;		/* wrapped around: no fit */
    }

    return NULL;
}

/*
 * delete specified mem region
 */
/*
 * Delete the specified mem region: detach it from its share ring
 * and free its memory (the embedded list head is never freed).
 */
void memreg_destroy(memreg_t * head, memreg_t * mr)
{
    /* a shared region must first leave its sharers' ring */
    if (mr->flags & MEMREG_SHARED) {
	mr->sh_prev->sh_next = mr->sh_next;
	mr->sh_next->sh_prev = mr->sh_prev;
	/* a lone remaining sharer is no longer shared */
	if (mr->sh_prev == mr->sh_next)
	    mr->sh_prev->flags &= ~MEMREG_SHARED;
    }

    if (mr != head)
	kfree(mr);
}

/*
 * find the mem region at the specified address
 */
/*
 * Find the region that fully contains [vaddr, vaddr + size).
 * Returns the region, or NULL if no region covers the span.
 */
memreg_t *memreg_lookup(memreg_t * head, addr_t vaddr, size_t size)
{
    memreg_t *mr = head;

    do {
	if (vaddr >= mr->vbase &&
	    vaddr + size <= mr->vbase + mr->reg_size)
	    return (mr);
	mr = mr->next;
    } while (mr != head);

    return NULL;
}


/*
 * delete specified free mem region
 */
/*
 * Return the specified in-use mem region to the free list,
 * coalescing with free neighbours.
 */
void memreg_free(memreg_t * head, memreg_t * mr)
{
    memreg_t *prev, *next;

    assert(mr->flags != MEMREG_FREE);

    /*
     * If it is a shared mem region, unlink it from the shared list.
     * This must happen BEFORE the flags are overwritten: the original
     * code set mr->flags = MEMREG_FREE first, so the MEMREG_SHARED
     * test below could never be true and shared regions were never
     * unlinked from their share ring (dangling ring pointers).
     */
    if (mr->flags & MEMREG_SHARED) {
	mr->sh_prev->sh_next = mr->sh_next;
	mr->sh_next->sh_prev = mr->sh_prev;
	if (mr->sh_prev == mr->sh_next)
	    mr->sh_prev->flags &= ~MEMREG_SHARED;
    }

    mr->flags = MEMREG_FREE;

    /*
     * if next mem region is free, merge with it.
     */
    next = mr->next;
    if (next != head && (next->flags & MEMREG_FREE)) {
	mr->next = next->next;
	next->next->prev = mr;
	mr->reg_size += next->reg_size;
	kfree(next);
    }
    /*
     * if previous mem region is free, merge with it.
     */
    prev = mr->prev;
    if (mr != head && (prev->flags & MEMREG_FREE)) {
	prev->next = mr->next;
	mr->next->prev = prev;
	prev->reg_size += mr->reg_size;
	kfree(mr);
    }
}

/*
 * lock the mem region at the specified address/size.
 */
/*
 * Lock (reserve) the mem region at the specified address/size,
 * splitting the surrounding free region as needed.
 * Returns the reserved region, or NULL on failure.
 */
memreg_t *memreg_lock(memreg_t * head, addr_t vaddr, size_t size)
{
    memreg_t *mr, *prev, *next;
    size_t diff;

    /*
     * find the free block which includes the requested span.
     */
    mr = memreg_lookup(head, vaddr, size);
    if (mr == NULL || !(mr->flags & MEMREG_FREE))
	return NULL;

    /*
     * split off the part before vaddr, if any.
     */
    prev = NULL;
    if (mr->vbase != vaddr) {
	prev = mr;
	diff = (size_t) (vaddr - mr->vbase);
	mr = memreg_create(prev, vaddr, prev->reg_size - diff);
	if (mr == NULL)
	    return NULL;

	prev->reg_size = diff;
    }

    /*
     * split off the part after vaddr + size, if any.
     *
     * BUGFIX: the original assigned the memreg_create() result to mr
     * (clobbering the region being locked) and then tested the
     * UNINITIALIZED variable `next`.  The remainder must go into
     * `next` while mr keeps pointing at the region to reserve.
     */
    if (mr->reg_size != size) {
	next = memreg_create(mr, mr->vbase + size, mr->reg_size - size);
	if (next == NULL) {
	    if (prev) {
		/* undo the leading split: merge mr back into prev */
		prev->reg_size += mr->reg_size;
		prev->next = mr->next;
		mr->next->prev = prev;
		kfree(mr);
	    }

	    return NULL;
	}

	mr->reg_size = size;
    }

    mr->flags = 0;		/* mark the region in use */

    return (mr);
}
