#include <utils/rbtree_augmented.h>
#include <seminix/syscall.h>
#include <seminix/param.h>
#include <seminix/slab.h>
#include <seminix/init.h>
#include <seminix/pgtable.h>
#include <seminix/vmacache.h>
#include <seminix/mmap.h>
#include <cap/cap.h>
#include <cap/frame.h>
#include <cap/cnode.h>
#include <cap/vspace.h>
#include <asm/mmu_context.h>

/* Slab cache backing cap_vspace_t allocations; created in vspace_cap_init(). */
static struct kmem_cache *cap_vspace_cache;

/*
 * Create the slab cache used for vspace capability objects.
 *
 * SLAB_PANIC: the system cannot operate without this cache, so creation
 * failure panics during init instead of returning an error.
 */
static __init int vspace_cap_init(void)
{
    cap_vspace_cache = KMEM_CACHE(cap_vspace, SLAB_PANIC);

    return 0;
}
userver_initcall(vspace_cap_init)

/*
 * Allocate and initialise a vspace capability object.
 *
 * Allocates a zeroed cap_vspace_t from the slab cache and gives it a
 * fresh mm_struct.  Returns the embedded generic capability on success,
 * or ERR_PTR(-SERRNO_ENOMEM) when either allocation fails; on failure
 * nothing is leaked.
 */
static cap_t *vspace_cap_create(seminix_object_t *object)
{
    cap_vspace_t *vspace = kmem_cache_alloc(cap_vspace_cache, GFP_ZERO);

    if (!vspace)
        return ERR_PTR(-SERRNO_ENOMEM);

    vspace->mm = mm_struct_create();
    if (!vspace->mm) {
        kmem_cache_free(cap_vspace_cache, vspace);
        return ERR_PTR(-SERRNO_ENOMEM);
    }

    return CAP_REF(vspace);
}

/*
 * Teardown hook for a vspace capability.
 *
 * NOTE(review): this frees nothing, so the mm_struct created in
 * vspace_cap_create() and the cap_vspace_t slab object both appear to
 * leak when the capability is deleted.  Confirm whether teardown is
 * handled elsewhere or whether this is an unfinished TODO.
 */
static void vspace_cap_delete(cap_t *cap)
{
}

/* Operations dispatch table for the vspace capability type. */
const struct cap_ops vspace_cap_ops __ro_after_init = {
    .cap_create = vspace_cap_create,
    .cap_delete = vspace_cap_delete
};











/*
 * vspace_mmap() - map a frame capability into a vspace at [addr, addr + len).
 * @vspace: cnode slot holding the target vspace capability.
 * @addr:   page-aligned virtual start address.
 * @len:    non-zero, page-aligned length of the mapping.
 * @prot:   SEMINIX_PROT_* protection bits.
 * @flags:  SEMINIX_MAP_* flags; SEMINIX_MAP_SHARED sets VM_SHARED.
 * @frame:  cnode slot holding the frame capability to map.
 *
 * Returns 0 on success or a negative SERRNO_* code.
 *
 * NOTE: vma lookup/linking and page-table population are still TODO, so
 * the vma is currently created but not inserted into the mm.
 */
SYSCALL_DEFINE6(vspace_mmap, int, vspace,
    unsigned long, addr, unsigned long, len,
    unsigned long, prot, unsigned long, flags,
    int, frame)
{
    int ret;
    cap_t *cap, *cap_frame;
    struct mm_struct *mm;
    struct vm_area_struct *vma;

    unsigned long vm_flags;

    /*
     * Both addr and len must be page aligned, and len non-zero.
     * (Fixed: the test on len was inverted -- it rejected aligned
     * lengths and accepted unaligned ones.)
     */
    if (!len || !PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len))
        return -SERRNO_EINVAL;

    /* Range validation, kept consistent with vspace_munmap(). */
    if (addr > TASK_SIZE || len > TASK_SIZE - addr)
        return -SERRNO_EINVAL;

    cap = cnode_capget(vspace, cap_vspace_cap);
    if (IS_ERR(cap))
        return PTR_ERR(cap);

    cap_frame = cnode_capget(frame, cap_frame_cap);
    if (IS_ERR(cap_frame)) {
        ret = PTR_ERR(cap_frame);
        goto put_cap;
    }

    mm = CAP_VSPACE_PTR(cap)->mm;
  //  if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
///        ret = -SERRNO_EINVAL;
   //     goto out;
 //   }

    /* Do simple checking here so the lower-level routines won't have
     * to. we assume access permissions have been handled by the open
     * of the memory object, so we don't do any here.
     */
    vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
    if (flags & SEMINIX_MAP_SHARED)
        vm_flags |= VM_SHARED;

    vma = vm_area_alloc(mm);
    if (!vma) {
        ret = -SERRNO_ENOMEM;
        goto out;
    }

    vma->vm_start = addr;
    vma->vm_end = addr + len;
    vma->vm_flags = vm_flags;
    vma->vm_page_prot = vm_get_page_prot(vm_flags);
  //  vma->cap_frame = CAP_FRAME_PTR(cap_frame);
 //   vma_link(mm, vma, prev, rb_link, rb_parent);
    vma_set_page_prot(vma);

    // TODO map_pgtable
    ret = 0;
out:
    cnode_capput(cap_frame);
put_cap:
    cnode_capput(cap);
    return ret;
}

/*
 * vspace_pgprot_modify() - change protection on an existing mapping.
 *
 * Stub: currently performs no validation and modifies nothing, but
 * reports success unconditionally (TODO).
 */
SYSCALL_DEFINE4(vspace_pgprot_modify, int, vspace,
    unsigned long, addr, unsigned long, len,
    unsigned long, prot)
{
    return 0;
}

/*
 * vspace_munmap() - unmap [addr, addr + len) from a vspace.
 * @vspace: cnode slot holding the target vspace capability.
 * @addr:   page-aligned start address within the task address space.
 * @len:    non-zero, page-aligned length; addr + len must not exceed
 *          TASK_SIZE.
 *
 * Returns 0 on success or a negative SERRNO_* code.
 *
 * NOTE: vma lookup/detach and page-table teardown are still TODO, so the
 * call currently always fails with -SERRNO_EINVAL after validation.
 */
SYSCALL_DEFINE3(vspace_munmap, int, vspace, unsigned long, addr, unsigned long, len)
{
    int ret;
    cap_t *cap;
    //unsigned long end;
    struct vm_area_struct *vma = NULL, *prev;
    struct mm_struct *mm;

    /*
     * addr and len must be page aligned and len non-zero.
     * (Fixed: the alignment test on len was inverted, and the redundant
     * offset_in_page(addr) check duplicated !PAGE_ALIGNED(addr).)
     */
    if (!len || !PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len))
        return -SERRNO_EINVAL;

    /* The whole range must lie within the task address space.
     * (Fixed: this path returned the bare -EINVAL instead of the
     * -SERRNO_* codes used everywhere else in this file.) */
    if (addr > TASK_SIZE || len > TASK_SIZE - addr)
        return -SERRNO_EINVAL;

    cap = cnode_capget(vspace, cap_vspace_cap);
    if (IS_ERR(cap))
        return PTR_ERR(cap);

    mm = CAP_VSPACE_PTR(cap)->mm;
 //   vma = find_exact_vma(NULL, mm, addr, addr + len);
    if (!vma) {
        ret = -SERRNO_EINVAL;
        goto put_cap;
    }
    prev = vma->vm_prev;
   // end = addr + len;
    vma = prev ? prev->vm_next : mm->mmap;

    /* Detach vmas from rbtree */
//	detach_vmas_to_be_unmapped(mm, vma, prev, end);

    // TODO free_pgtables
    ret = 0;
put_cap:
    cnode_capput(cap);
    return ret;
}
