#ifndef SEMINIX_MMAP_H
#define SEMINIX_MMAP_H

#include <seminix/mm.h>

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		   0x00000000
#define VM_READ		   0x00000001	/* currently active flags */
#define VM_WRITE	   0x00000002
#define VM_EXEC		   0x00000004

#define VM_SHARED	   0x00000008   /* no pagefault */

/*
 * I/O mapping attributes.  Note this is a 3-bit *field* (VM_IO_MASK,
 * bits 4-6), not a set of independent flags: VM_IO_NP (0x30) is
 * numerically VM_IO | VM_IO_WC.  Compare against the field, not
 * individual bits.
 */
#define VM_IO_MASK     0x00000070
#define VM_IO          0x00000010
#define VM_IO_WC       0x00000020
#define VM_IO_NP       0x00000030
#define VM_IO_CACHE    0x00000040

#define VM_NOPAGEFAULT 0x00000200

/* Huge-page mapping; the page size is encoded in the mmap flags, see vm_hugetlb_size(). */
#define VM_HUGEPAGE    0x00000400
#define VM_SHARED_USER 0x00000800

/* NOTE(review): flags treated specially by mapping code — confirm exact semantics at the VM_SPECIAL call sites. */
#define VM_SPECIAL  (VM_IO_MASK | VM_SHARED_USER)

/* Return true when @flags marks a huge-page mapping (VM_HUGEPAGE set). */
static inline bool is_vm_hugetlb_page_flags(unsigned long flags)
{
    return (flags & VM_HUGEPAGE) != 0;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
    return is_vm_hugetlb_page_flags(vma->vm_flags);
}

/*
 * Return the mapping granule implied by @flags: the normal page size for
 * ordinary mappings, otherwise 2^order where order is the value stored in
 * the SEMINIX_MAP_HUGE_SHIFT/SEMINIX_MAP_HUGE_MASK bit-field of the flags.
 */
static inline unsigned long vm_hugetlb_size(unsigned long flags)
{
    if (is_vm_hugetlb_page_flags(flags)) {
        unsigned long order = (flags >> SEMINIX_MAP_HUGE_SHIFT) & SEMINIX_MAP_HUGE_MASK;

        return 1UL << order;
    }
    return UTILS_PAGE_SIZE;
}

/*
 * Optimisation macro.  It is equivalent to:
 *      (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * How it works: both masks are power-of-two constants, so the set bit of
 * (x & bit1) is moved from position bit1 to position bit2 by multiplying
 * (left move) or dividing (right move) by their exact ratio — no branch on
 * x is needed.  The leading (!(bit1) || !(bit2)) term folds the whole
 * expression to 0 at compile time when either mask is 0, and also guards
 * the divisions against a zero divisor.
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((!(bit1) || !(bit2)) ? 0 : \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2))))

/*
 * Translate the mmap "prot" argument (SEMINIX_PROT_*) into the internal
 * VM_READ/VM_WRITE/VM_EXEC vm_flags representation.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
    unsigned long vm_flags = 0;

    vm_flags |= _calc_vm_trans(prot, SEMINIX_PROT_READ,  VM_READ);
    vm_flags |= _calc_vm_trans(prot, SEMINIX_PROT_WRITE, VM_WRITE);
    vm_flags |= _calc_vm_trans(prot, SEMINIX_PROT_EXEC,  VM_EXEC);

    return vm_flags;
}

/*
 * Translate the mmap "flags" argument (SEMINIX_MAP_*) into the internal
 * vm_flags representation.
 *
 * NOTE(review): _calc_vm_trans is documented for single-bit masks, but
 * VM_IO_NP (0x30) is a two-bit value — confirm that SEMINIX_MAP_IO_NP has
 * the same two-bit shape so the multiply/divide translation stays exact.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
    unsigned long vm_flags = 0;

    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_SHARED,      VM_SHARED_USER);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_IO,          VM_IO);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_IO_WC,       VM_IO_WC);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_IO_NP,       VM_IO_NP);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_IO_CACHE,    VM_IO_CACHE);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_NOPAGEFAULT, VM_NOPAGEFAULT);
    vm_flags |= _calc_vm_trans(flags, SEMINIX_MAP_HUGEPAGE,    VM_HUGEPAGE);

    return vm_flags;
}

/*
 * Physical-range validation hooks, implemented elsewhere (per-arch).
 * NOTE(review): return conventions are not visible here — presumably
 * non-zero means "valid"; confirm against the definitions.
 */
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

/* Page-protection helpers mapping vm_flags to pgprot_t; defined out of line. */
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

/* Lifecycle of vm_area_struct objects: allocate for @mm, duplicate, free. */
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

struct vm_area_struct *__find_vma(struct tcb *tsk, struct mm_struct *mm, unsigned long addr);
/* Convenience wrapper: VMA lookup in @mm with no owning task (tsk == NULL). */
static inline struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
    struct vm_area_struct *vma;

    vma = __find_vma(NULL, mm, addr);
    return vma;
}
struct vm_area_struct *__find_vma_prev(struct tcb *tsk, struct mm_struct *mm, unsigned long addr,
    struct vm_area_struct **pprev);
/* As find_vma(), but also reports the preceding VMA through @pprev. */
static inline struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev)
{
    struct vm_area_struct *vma;

    vma = __find_vma_prev(NULL, mm, addr, pprev);
    return vma;
}

/* Number of pages spanned by @vma: byte length shifted down by the page shift. */
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
    unsigned long nbytes = vma->vm_end - vma->vm_start;

    return nbytes >> UTILS_PAGE_SHIFT;
}

/*
 * Look up the first VMA which exactly matches the interval
 * [vm_start, vm_end); any partial overlap yields NULL.
 */
static inline struct vm_area_struct *
__find_exact_vma(struct tcb *tsk, struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end)
{
    struct vm_area_struct *vma = __find_vma(tsk, mm, vm_start);

    if (!vma)
        return NULL;
    if (vma->vm_start == vm_start && vma->vm_end == vm_end)
        return vma;
    return NULL;
}
/* Exact-interval VMA lookup with no owning task (tsk == NULL). */
static inline struct vm_area_struct *
find_exact_vma(struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end)
{
    struct vm_area_struct *vma;

    vma = __find_exact_vma(NULL, mm, vm_start, vm_end);
    return vma;
}

/* Return true when [start, end] lies entirely inside @vma (false for NULL vma). */
static inline bool range_in_vma(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
    if (!vma)
        return false;
    return vma->vm_start <= start && end <= vma->vm_end;
}


int __munmap(struct tcb *tsk, struct mm_struct *mm, unsigned long addr, unsigned long len);
/* Task-less wrapper around __munmap(); returns its status code unchanged. */
static inline int munmap(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
    int ret = __munmap(NULL, mm, addr, len);

    return ret;
}
int __mmap(struct tcb *tsk, struct mm_struct *mm,
    unsigned long addr, unsigned long len,
    unsigned long prot, unsigned long flags);
/* Task-less wrapper around __mmap(); returns its status code unchanged. */
static inline int mmap(struct mm_struct *mm, unsigned long addr, unsigned long len,
    unsigned long prot, unsigned long flags)
{
    int ret = __mmap(NULL, mm, addr, len, prot, flags);

    return ret;
}
int __mmap_pgprot_modify(struct tcb *tsk, struct mm_struct *mm, unsigned long addr, unsigned long len,
    unsigned long prot);
/* Task-less wrapper around __mmap_pgprot_modify(); returns its status code unchanged. */
static inline int mmap_pgprot_modify(struct mm_struct *mm, unsigned long addr, unsigned long len,
    unsigned long prot)
{
    int ret = __mmap_pgprot_modify(NULL, mm, addr, len, prot);

    return ret;
}

#endif /* !SEMINIX_MMAP_H */
