#pragma once

#include <linux/kconfig.h>

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
/*
 * Default no-op: an architecture that batches context-switch state
 * reloads supplies its own arch_start_context_switch() and defines
 * __HAVE_ARCH_START_CONTEXT_SWITCH to suppress this stub.
 */
#define arch_start_context_switch(prev) \
    do                                  \
    {                                   \
    } while (0)
#endif

#ifdef CONFIG_MMU

#include <linux/pfn.h>
#include <asm/pgtable.h>

struct mm_struct;

/* Page-Table Modification Mask */
/* Bitmask of PGTBL_*_MODIFIED flags; see the definitions below. */
typedef unsigned int pgtbl_mod_mask;

/* Report and clear a corrupt PGD entry; implemented out of line. */
void pgd_clear_bad(pgd_t *);

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
/*
 * Index of the PGD entry controlling virtual address @a: the bits
 * above PGDIR_SHIFT, masked to the number of PGD entries.
 */
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

/*
 * pgd_offset_pgd - address of the PGD entry covering @address
 * @pgd:     base of a page global directory (array of pgd_t)
 * @address: virtual address to look up
 *
 * Pure pointer arithmetic; does not dereference the entry.
 */
static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
    /* The stray ';' after the closing brace was removed: an extra
     * file-scope semicolon is not valid ISO C (-Wpedantic warns). */
    return pgd + pgd_index(address);
}

/*
 * a shortcut to get a pgd_t in a given mm
 */
#ifndef pgd_offset
/* Resolve @address within @mm's page global directory. */
#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

/*
 * The "- 1" on both sides of the comparison keeps the clamp correct
 * when __boundary has wrapped to 0: (0 - 1) is ULONG_MAX, so a wrapped
 * boundary never compares below a non-zero end.
 */
#define pgd_addr_end(addr, end)                                        \
    ({                                                                 \
        unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);             \
    })

/*
 * Return 1 if the PGD entry is empty, or was found corrupt (a corrupt
 * entry is reported and cleared via pgd_clear_bad()); return 0 when
 * the entry is usable and the caller may walk it.
 */
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
    if (pgd_none(*pgd))
        return 1;

    if (!unlikely(pgd_bad(*pgd)))
        return 0;

    pgd_clear_bad(pgd);
    return 1;
}

/*
 * pte_unmap - release a temporary mapping of a PTE page.
 *
 * NOTE(review): currently a no-op stub (see TODO below); presumably
 * nothing is required until highmem-style temporary mappings exist —
 * confirm before relying on this being free.
 */
static inline void pte_unmap(pte_t *pte)
{
    // TODO
}

/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */

/*
 * pte_index - index of the PTE controlling @address within its PTE page.
 */
static inline unsigned long pte_index(unsigned long address)
{
    unsigned long slot = address >> PAGE_SHIFT;

    return slot & (PTRS_PER_PTE - 1);
}

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
/* init_mm is the kernel's own mm_struct, declared elsewhere. */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the generic
 * vmalloc/ioremap code to track at which page-table levels entries have been
 * modified. Based on that the code can better decide when vmalloc and ioremap
 * mapping changes need to be synchronized to other page-tables in the system.
 */
/* Bit positions within pgtbl_mod_mask, one per page-table level. */
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
#define __PGTBL_PUD_MODIFIED 2
#define __PGTBL_PMD_MODIFIED 3
#define __PGTBL_PTE_MODIFIED 4

/* Mask values for pgtbl_mod_mask, built from the positions above. */
#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.
 *
 * In the general case, no lock is guaranteed to be held between entry and exit
 * of the lazy mode. So the implementation must assume preemption may be enabled
 * and cpu migration is possible; it must take steps to be robust against this.
 * (In practice, for user PTE updates, the appropriate page table lock(s) are
 * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
 * and the mode cannot be used in interrupt context.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
/*
 * Default no-op implementations: an architecture that can batch PTE
 * updates overrides all three hooks and defines
 * __HAVE_ARCH_ENTER_LAZY_MMU_MODE to suppress these stubs.
 */
#define arch_enter_lazy_mmu_mode() \
    do                             \
    {                              \
    } while (0)
#define arch_leave_lazy_mmu_mode() \
    do                             \
    {                              \
    } while (0)
#define arch_flush_lazy_mmu_mode() \
    do                             \
    {                              \
    } while (0)
#endif


#ifndef ptep_get
/*
 * ptep_get - read the PTE entry through @ptep.
 *
 * The generic version is a plain dereference; an architecture that
 * needs a special (e.g. tearing-free) read may override it.
 */
static inline pte_t ptep_get(pte_t *ptep)
{
    return *ptep;
}
/*
 * Define the macro so the #ifndef guard above is actually satisfied
 * once the generic version is in place — the same guard pattern used
 * by pte_offset_kernel and pmd_offset below.
 */
#define ptep_get ptep_get
#endif

/*
 * pXd_addr_end(): next pXd-level boundary after @addr, clamped to @end.
 * The "- 1" on both sides keeps the comparison correct if the
 * rounded-up boundary wrapped to 0 ((0 - 1) is ULONG_MAX).
 */
#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)                                    \
    ({                                                             \
        unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);         \
    })
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)                                    \
    ({                                                             \
        unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);         \
    })
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)                                    \
    ({                                                             \
        unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);         \
    })
#endif

#ifndef pte_advance_pfn
/*
 * pte_advance_pfn - return @pte with its page-frame number advanced
 * by @nr pages; all other bits are carried over unchanged.
 */
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
    unsigned long raw = pte_val(pte);

    raw += nr << PFN_PTE_SHIFT;
    return __pte(raw);
}
#endif

#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)

/*
 * Stub of the page_table_check hook: intentionally a no-op.
 * NOTE(review): presumably page-table checking is not built here —
 * confirm if validation of PTE updates is wanted in this kernel.
 */
static inline void page_table_check_ptes_set(struct mm_struct *mm,
                                             pte_t *ptep, pte_t pte, unsigned int nr)
{
}

#ifndef set_ptes
/**
 * set_ptes - Map consecutive pages to a contiguous range of addresses.
 * @mm: Address space to map the pages into.
 * @addr: Address to map the first page at.
 * @ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @nr: Number of pages to map; must be at least 1.
 *
 * When nr==1, initial state of pte may be present or not present, and new state
 * may be present or not present. When nr>1, initial state of all ptes must be
 * not present, and new state must be present.
 *
 * May be overridden by the architecture, or the architecture can define
 * set_pte() and PFN_PTE_SHIFT.
 *
 * Context: The caller holds the page table lock.  The pages all belong
 * to the same folio.  The PTEs are all in the same PMD.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte, unsigned int nr)
{
    page_table_check_ptes_set(mm, ptep, pte, nr);

    /*
     * Bracket the writes with the lazy-MMU hooks declared earlier in
     * this file so paravirt/shadow-mode hypervisors can batch the nr
     * PTE updates; the hooks are no-ops unless an arch overrides them.
     */
    arch_enter_lazy_mmu_mode();
    for (;;)
    {
        set_pte(ptep, pte);
        if (--nr == 0)
            break;
        ptep++;
        /* Each successive entry maps the next consecutive PFN. */
        pte = pte_next_pfn(pte);
    }
    arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

#ifndef pte_offset_kernel
/*
 * pte_offset_kernel - locate the PTE for @address within @pmd's PTE page.
 *
 * NOTE(review): unimplemented stub — always returns NULL (see TODO).
 * Callers must not rely on it until the lookup is written.
 */
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return NULL; /* TODO: implement the PTE-page lookup */
}
#define pte_offset_kernel pte_offset_kernel
#endif

/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
/*
 * pmd_offset - locate the PMD entry for @address within @pud's PMD page.
 *
 * NOTE(review): unimplemented stub — always returns NULL (see TODO).
 * Callers must not rely on it until the lookup is written.
 */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return NULL; /* TODO: implement the PMD-page lookup */
}
#define pmd_offset pmd_offset
#endif

#endif /* CONFIG_MMU */
