#ifndef __I386_MM_PGTABLE_H__
#define __I386_MM_PGTABLE_H__

#include "i386/mm/pgtable-3level-defs.h"

extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_t(addr) pgd_index(addr)
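/*
 * Worked example (using the 3-level/PAE constants pulled in above,
 * i.e. PGDIR_SHIFT == 30 and PTRS_PER_PGD == 4):
 *
 *	pgd_index(0xC0100000) == (0xC0100000 >> 30) & 3 == 3
 *
 * i.e. kernel-space addresses land in the last pgd slot.
 */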


/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
  (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
  ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
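/*
 * Illustrative sketch (not part of this header's interface): walking
 * from a kernel virtual address down to its pte with the macros above.
 * pmd_offset() is assumed to come from the 3-level headers included at
 * the top; pmd_none()/pmd_bad() are defined further down in this file.
 *
 *	pgd_t *pgd = swapper_pg_dir + pgd_index(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// assumed helper
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 */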


/*
 * The 4MB page support is guesswork..  Detailed in the infamous "Chapter H"
 * of the Pentium details, but assuming Intel did the straightforward
 * thing, this bit set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_RW            1
#define _PAGE_BIT_USER          2
#define _PAGE_BIT_PWT           3
#define _PAGE_BIT_PCD           4
#define _PAGE_BIT_ACCESSED      5
#define _PAGE_BIT_DIRTY         6
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1       9       /* available for programmer */
#define _PAGE_BIT_UNUSED2       10
#define _PAGE_BIT_UNUSED3       11
#define _PAGE_BIT_NX            63      /* PAE only: bit 63 of a 64-bit entry */

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PSE       0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL    0x100   /* Global TLB entry PPro+ */
#define _PAGE_UNUSED1   0x200   /* available for programmer */
#define _PAGE_UNUSED2   0x400
#define _PAGE_UNUSED3   0x800
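
/*
 * Each _PAGE_xxx value above is just (1 << _PAGE_BIT_xxx); e.g.
 * _PAGE_PSE == (1 << _PAGE_BIT_PSE) == 0x080.
 */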

#define _PAGE_FILE      0x040   /* set:pagecache unset:swap */
#define _PAGE_PROTNONE  0x080   /* If not present */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX        (1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX        0
#endif

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
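
/*
 * How _PAGE_CHG_MASK is meant to be used (a sketch modeled on the
 * classic Linux pte_modify(); pgprot_t and pgprot_val() are assumed
 * to come from the included headers):
 *
 *	static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 *	{
 *		pte.pte_low &= _PAGE_CHG_MASK;		// keep pfn + A/D bits
 *		pte.pte_low |= pgprot_val(newprot);	// take new protections
 *		return pte;
 *	}
 */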

#define PAGE_NONE \
  __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
  __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
  __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
        PAGE_COPY_NOEXEC
#define PAGE_READONLY \
  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
  (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
  (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL             __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO          __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE     __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE       __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC  __pgprot(__PAGE_KERNEL_LARGE_EXEC)
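
/*
 * Example use of the kernel pgprots (illustrative only; pfn_pte() and
 * set_pte() are assumed to come from the pte-level headers): mapping a
 * device page uncached.
 *
 *	set_pte(pte, pfn_pte(mmio_pfn, PAGE_KERNEL_NOCACHE));
 */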

/*
 * The i386 can't do page protection for execute: it treats execute
 * permission the same as read permission.  Also, write permissions
 * imply read permissions.  This is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
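
/*
 * The sixteen entries above form the mmap() protection map: index
 * bits 0-2 are PROT_READ/WRITE/EXEC, bit 3 selects shared (__Sxxx)
 * over private (__Pxxx).  A sketch of the lookup the core mm code
 * performs (protection_map itself lives outside this header):
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */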
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

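/*
 * Note that a pte counts as "present" to the kernel even when only
 * _PAGE_PROTNONE is set: PROT_NONE mappings keep their page but stay
 * invisible to the MMU, since the hardware present bit is clear.
 */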
#define pte_present(x)  ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)   do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x)     (!pmd_val(x))
#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)


#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
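
/*
 * Converts a page count to megabytes:
 * (x << PAGE_SHIFT) >> 20 == x >> (20 - PAGE_SHIFT).
 */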

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)           { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)           { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)          { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)          { return (pte).pte_low & _PAGE_RW; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)           { return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)      { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)       { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)       { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { (pte).pte_low |= _PAGE_RW; return pte; }
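
/*
 * Typical use of the pte builders above (a sketch; set_pte() is
 * assumed to come from the pte-level headers): marking a page dirty
 * and young after a successful write fault.
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkdirty(pte_mkyoung(entry));
 *	set_pte(ptep, entry);
 */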

#endif  /* __I386_MM_PGTABLE_H__ */
