#include <seminix/mm.h>
#include <seminix/mmap.h>
#include <seminix/slab.h>
#include <seminix/param.h>
#include <seminix/init.h>
#include <seminix/pgtable.h>
#include <asm/mmu_context.h>

static struct kmem_cache *mm_cachep;
static struct kmem_cache *pgd_cachep;
/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* Grab an uninitialized mm_struct from its dedicated SLAB cache. */
static inline struct mm_struct *alloc_mm_struct(void)
{
    struct mm_struct *mm = kmem_cache_alloc(mm_cachep, GFP_KERNEL);

    return mm;
}

/* Return an mm_struct to its SLAB cache (counterpart of alloc_mm_struct). */
static inline void free_mm_struct(struct mm_struct *mm)
{
    kmem_cache_free(mm_cachep, mm);
}

/*
 * Allocate a top-level page table from the pgd cache.
 * GFP_ZERO: a fresh pgd must start with all entries clear.
 */
static inline pgd_t *alloc_pgd_struct(void)
{
    pgd_t *pgd = kmem_cache_alloc(pgd_cachep, GFP_ZERO);

    return pgd;
}

/* Return a top-level page table to the pgd cache (counterpart of alloc_pgd_struct). */
static inline void free_pgd_struct(pgd_t *pgd)
{
    kmem_cache_free(pgd_cachep, pgd);
}

/*
 * Initialize a freshly allocated, already-zeroed mm_struct.
 *
 * Sets up the VMA rb-tree root, the user refcount, page-table byte
 * accounting, the core locks, the per-mm thread-tracking state and,
 * last, the architecture-specific context.  The caller
 * (mm_struct_create) has already zeroed the structure and installed
 * mm->pgd before calling this.
 */
static void mm_init(struct mm_struct *mm)
{
    mm->mm_rb = RB_ROOT;
    /* The creator holds the initial reference. */
    atomic_set(&mm->mm_users, 1);
    mm_pgtables_bytes_init(mm);
    mm->map_count = 0;
    spin_lock_init(&mm->page_table_lock);
    init_rwsem(&mm->mmap_sem);

    /* tl_lock protects thread_list; see mm_set_task()/mm_clear_task(). */
    spin_lock_init(&mm->tl_lock);
    INIT_LIST_HEAD(&mm->thread_list);
    init_tlb_flush_pending(mm);

    /* Architecture context (e.g. ASID) is set up last, once the rest
     * of the mm is in a consistent state. */
    init_new_context(mm);
}

/*
 * Allocate and fully initialize a new mm_struct, including its
 * top-level page table.
 *
 * Returns the new mm on success, or NULL if either allocation fails.
 * On the partial-failure path (pgd allocation failed) the mm_struct
 * itself is released again before returning.
 */
struct mm_struct *mm_struct_create(void)
{
    struct mm_struct *mm = alloc_mm_struct();

    if (!mm)
        return NULL;

    memset(mm, 0, sizeof(*mm));

    /* The top-level page table comes from its own (zero-filled) cache. */
    mm->pgd = alloc_pgd_struct();
    if (!mm->pgd) {
        free_mm_struct(mm);
        return NULL;
    }

    mm_init(mm);

    return mm;
}

/*
 * Teardown diagnostic: any page-table bytes still accounted to the mm
 * at this point indicate a page-table leak, so report it loudly.
 * (Assumes mm_pgtables_bytes() is a pure accessor, as its use with
 * %ld in the original suggests.)
 */
static void check_mm(struct mm_struct *mm)
{
    long leaked = mm_pgtables_bytes(mm);

    if (leaked)
        pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
                leaked);
}

/*
 * Final teardown of an mm_struct once its last user is gone.
 *
 * Run the leak diagnostics first, while the mm is still fully intact,
 * then release the top-level page table and the mm itself.
 *
 * Fix: previously check_mm() ran *after* free_pgd_struct(), i.e. the
 * sanity check inspected an object that was already partially torn
 * down.  check_mm() only reads the mm's accounting, but checking
 * before freeing is the robust order (and matches Linux's __mmput,
 * where check_mm precedes the pgd release in mmdrop).
 */
void __mmput(struct mm_struct *mm)
{
    check_mm(mm);
    free_pgd_struct(mm->pgd);
    free_mm_struct(mm);
}

/*
 * Attach @tsk to @mm: publish the mm pointer in the task, then link
 * the task onto the mm's thread list under tl_lock.
 *
 * NOTE(review): no mm_users reference is taken here — presumably the
 * caller holds or transfers one (mm_init starts the count at 1);
 * confirm against callers.
 */
void mm_set_task(struct mm_struct *mm, struct tcb *tsk)
{
    tsk->mm = mm;
    spin_lock(&mm->tl_lock);
    list_add(&tsk->thread_node, &mm->thread_list);
    spin_unlock(&mm->tl_lock);
}

/*
 * Detach @tsk from its mm: unlink it from the mm's thread list under
 * tl_lock, then clear the task's mm pointer.
 *
 * NOTE(review): the matching reference drop (mmput or similar) is not
 * done here — presumably the caller is responsible; confirm.
 */
void mm_clear_task(struct tcb *tsk)
{
    struct mm_struct *mm = tsk->mm;

    spin_lock(&mm->tl_lock);
    list_del(&tsk->thread_node);
    spin_unlock(&mm->tl_lock);
    /* Cleared only after the task is off the list. */
    tsk->mm = NULL;
}

/* Reset @vma to an all-zero state and record @mm as its owner. */
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
    memset(vma, 0, sizeof *vma);
    vma->vm_mm = mm;
}

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    if (vma)
        vma_init(vma, mm);
    return vma;
}

/*
 * Duplicate @orig into a newly allocated VMA.
 *
 * Performs a shallow field-by-field copy, then severs the copy from
 * @orig's VMA list so the caller can link it wherever it belongs.
 * Returns NULL if allocation fails.
 */
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
    struct vm_area_struct *new;

    new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    if (!new)
        return NULL;

    *new = *orig;           /* shallow copy of every field */
    new->vm_next = NULL;    /* detach from orig's list linkage */
    new->vm_prev = NULL;

    return new;
}

/* Return a VMA to its dedicated SLAB cache (counterpart of vm_area_alloc). */
void vm_area_free(struct vm_area_struct *vma)
{
    kmem_cache_free(vm_area_cachep, vma);
}

/*
 * Boot-time creation of the three SLAB caches used by this file.
 *
 * SLAB_PANIC means a failed cache creation panics the kernel, so no
 * return-value checks are needed here.  The pgd cache uses PGD_SIZE
 * for both object size and alignment, as the hardware walker requires
 * a naturally aligned top-level table.
 */
static int __init mm_cachep_init(void)
{
    mm_cachep = KMEM_CACHE(mm_struct, SLAB_PANIC);
    pgd_cachep = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE, SLAB_PANIC, NULL);
    vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);

    return 0;
}
/* Run early in boot, before any mm_struct can be created. */
core_initcall(mm_cachep_init)
