#include <linux/pgtable.h>

/*** Page table manipulation functions ***/
/*
 * Map the physical range starting at @phys_addr to the kernel virtual
 * range [@addr, @end) at leaf (PTE) level, under the given PMD.
 *
 * @pmd:            PMD entry covering @addr (a PTE table is allocated
 *                  under it if not already present)
 * @addr, @end:     virtual range to map; @end - @addr must be a multiple
 *                  of PAGE_SIZE and the loop requires addr < end
 * @phys_addr:      physical base address to map from (page-aligned)
 * @prot:           protection bits for the new entries
 * @max_page_shift: upper bound on mapping granularity, consulted by
 *                  arch_vmap_pte_range_map_size() for contiguous-PTE
 *                  ("huge PTE") mappings
 * @mask:           accumulates PGTBL_PTE_MODIFIED so the caller can decide
 *                  whether arch_sync_kernel_mappings() is needed
 *
 * Returns 0 on success, -ENOMEM if the PTE table cannot be allocated.
 * BUG()s if any target PTE is already populated (a vmap range must be
 * unmapped before it is remapped).
 */
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          phys_addr_t phys_addr, pgprot_t prot,
                          unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
    pte_t *pte;
    u64 pfn;
    struct page *page;
    unsigned long size = PAGE_SIZE;

    pfn = phys_addr >> PAGE_SHIFT;
    pte = pte_alloc_kernel_track(pmd, addr, mask);
    if (!pte)
        return -ENOMEM;

    /* Batch PTE updates where the architecture supports it. */
    arch_enter_lazy_mmu_mode();

    do
    {
        if (unlikely(!pte_none(ptep_get(pte))))
        {
            /*
             * Fatal: we are about to overwrite a live mapping. Dump the
             * backing page (when the pfn has a struct page) so the crash
             * log shows what was mapped here before we BUG().
             */
            if (pfn_valid(pfn))
            {
                page = pfn_to_page(pfn);
                dump_page(page, "remapping already mapped page");
            }
            BUG();
        }

#ifdef CONFIG_HUGETLB_PAGE
        /*
         * The arch may allow mapping several naturally-aligned pages with
         * one contiguous-PTE entry; size stays PAGE_SIZE otherwise.
         */
        size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
        if (size != PAGE_SIZE)
        {
            pte_t entry = pfn_pte(pfn, prot);

            entry = arch_make_huge_pte(entry, ilog2(size), 0);
            set_huge_pte_at(&init_mm, addr, pte, entry, size);
            pfn += PFN_DOWN(size);
            continue;
        }
#endif
        set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
        pfn++;
        /* Advance pte/addr by however much the last entry covered. */
    } while (pte += PFN_DOWN(size), addr += size, addr != end);

    arch_leave_lazy_mmu_mode();
    *mask |= PGTBL_PTE_MODIFIED;
    return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int max_page_shift)
{
    return 0;
}

/*
 * Walk/populate the PMD entries under @pud for [@addr, @end), mapping
 * from @phys_addr. Tries a huge PMD mapping per entry first, then falls
 * back to PTE-level mapping via vmap_pte_range().
 *
 * @mask accumulates PGTBL_*_MODIFIED bits for the caller.
 *
 * Returns 0 on success, -ENOMEM if a PMD table cannot be allocated, or
 * the error code propagated from vmap_pte_range().
 */
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          phys_addr_t phys_addr, pgprot_t prot,
                          unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
    pmd_t *pmd;
    unsigned long next;
    int err;

    pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
    if (!pmd)
        return -ENOMEM;
    do
    {
        next = pmd_addr_end(addr, end);

        if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
                              max_page_shift))
        {
            *mask |= PGTBL_PMD_MODIFIED;
            continue;
        }

        /*
         * Propagate the callee's error code rather than hard-coding
         * -ENOMEM (consistent with vmap_range_noflush()).
         */
        err = vmap_pte_range(pmd, addr, next, phys_addr, prot,
                             max_page_shift, mask);
        if (err)
            return err;
    } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);

    return 0;
}

/*
 * Stub: never installs a huge PUD mapping, so vmap_pud_range() always
 * falls through to PMD-level mapping. Unconditionally returns 0
 * ("not mapped"); mirrors vmap_try_huge_pmd() above.
 */
static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int max_page_shift)
{
    return 0;
}

/*
 * Walk/populate the PUD entries under @p4d for [@addr, @end), mapping
 * from @phys_addr. Tries a huge PUD mapping per entry first, then falls
 * back to PMD-level mapping via vmap_pmd_range().
 *
 * @mask accumulates PGTBL_*_MODIFIED bits for the caller.
 *
 * Returns 0 on success, -ENOMEM if a PUD table cannot be allocated, or
 * the error code propagated from vmap_pmd_range().
 */
static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          phys_addr_t phys_addr, pgprot_t prot,
                          unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
    pud_t *pud;
    unsigned long next;
    int err;

    pud = pud_alloc_track(&init_mm, p4d, addr, mask);
    if (!pud)
        return -ENOMEM;
    do
    {
        next = pud_addr_end(addr, end);

        if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
                              max_page_shift))
        {
            *mask |= PGTBL_PUD_MODIFIED;
            continue;
        }

        /*
         * Propagate the callee's error code rather than hard-coding
         * -ENOMEM (consistent with vmap_range_noflush()).
         */
        err = vmap_pmd_range(pud, addr, next, phys_addr, prot,
                             max_page_shift, mask);
        if (err)
            return err;
    } while (pud++, phys_addr += (next - addr), addr = next, addr != end);

    return 0;
}

/*
 * Stub: never installs a huge P4D mapping, so vmap_p4d_range() always
 * falls through to PUD-level mapping. Unconditionally returns 0
 * ("not mapped"); mirrors the pmd/pud stubs above.
 */
static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot,
                             unsigned int max_page_shift)
{
    return 0;
}

/*
 * Walk/populate the P4D entries under @pgd for [@addr, @end), mapping
 * from @phys_addr. Tries a huge P4D mapping per entry first, then falls
 * back to PUD-level mapping via vmap_pud_range().
 *
 * @mask accumulates PGTBL_*_MODIFIED bits for the caller.
 *
 * Returns 0 on success, -ENOMEM if a P4D table cannot be allocated, or
 * the error code propagated from vmap_pud_range().
 */
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          phys_addr_t phys_addr, pgprot_t prot,
                          unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
    p4d_t *p4d;
    unsigned long next;
    int err;

    p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
    if (!p4d)
        return -ENOMEM;
    do
    {
        next = p4d_addr_end(addr, end);

        if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
                              max_page_shift))
        {
            *mask |= PGTBL_P4D_MODIFIED;
            continue;
        }

        /*
         * Propagate the callee's error code rather than hard-coding
         * -ENOMEM (consistent with vmap_range_noflush()).
         */
        err = vmap_pud_range(p4d, addr, next, phys_addr, prot,
                             max_page_shift, mask);
        if (err)
            return err;
    } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);

    return 0;
}

/*
 * Map the physical range starting at @phys_addr to the kernel virtual
 * range [@addr, @end) by walking the kernel page tables top-down from
 * the PGD, allocating intermediate tables as needed.
 *
 * "noflush": no TLB flush is performed here — the caller is responsible
 * for any required flushing after this returns.
 *
 * Returns 0 on success or the first error from the per-level walkers
 * (the loop breaks on the first failure, leaving earlier entries mapped).
 * May sleep (table allocation); BUGs on an empty/inverted range.
 */
static int vmap_range_noflush(unsigned long addr, unsigned long end,
                              phys_addr_t phys_addr, pgprot_t prot,
                              unsigned int max_page_shift)
{
    pgd_t *pgd;
    unsigned long start;
    unsigned long next;
    int err;
    pgtbl_mod_mask mask = 0;

    might_sleep();
    BUG_ON(addr >= end);  /* guarantees the loop body runs and sets err */

    start = addr;
    pgd = pgd_offset_k(addr);
    do
    {
        next = pgd_addr_end(addr, end);
        err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
                             max_page_shift, &mask);
        if (err)
            break;
        /* Advance phys_addr in lockstep with the virtual range. */
    } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

    /* Archs that need it sync kernel mappings when upper levels changed. */
#if ARCH_PAGE_TABLE_SYNC_MASK
    if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
        arch_sync_kernel_mappings(start, end);
#endif

    return err;
}
