#ifndef ASM_GENERIC_TLB_H
#define ASM_GENERIC_TLB_H

/*
 * flush_tlb_range() takes a vma, not a mm, and can care about flags.
 * Build a minimal on-stack vma carrying just the fields it inspects
 * (designated initializers leave all other members zeroed).
 */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

/*
 * struct mmu_gather - state accumulated while tearing down page mappings,
 * used to batch TLB flushes instead of flushing per entry.
 */
struct mmu_gather {
    struct mm_struct    *mm;    /* address space being operated on */

    /* virtual address range pending a TLB flush: [start, end) */
    unsigned long		start;
    unsigned long		end;
    /*
     * we are in the middle of an operation to clear
     * a full mm and can make some optimizations
     */
    unsigned int		fullmm : 1;

    /*
     * we have removed page directories
     */
    unsigned int		freed_tables : 1;

    /*
     * at which levels have we cleared entries?
     */
    unsigned int		cleared_ptes : 1;
    unsigned int		cleared_pmds : 1;
    unsigned int		cleared_puds : 1;
    unsigned int		cleared_p4ds : 1;
};

/*
 * Widen the pending flush window so it covers [address, address + range_size).
 * Assumes __tlb_reset_range() armed start/end as an empty (start > end) range.
 */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                      unsigned long address,
                      unsigned int range_size)
{
    unsigned long first = address;
    unsigned long last = address + range_size;

    if (first < tlb->start)
        tlb->start = first;
    if (last > tlb->end)
        tlb->end = last;
}

/*
 * Re-arm the gather state after a flush: for a full-mm teardown the exact
 * range is irrelevant (park start/end on an all-ones sentinel); otherwise
 * set an empty range (start > end) for __tlb_adjust_range() to grow.
 * All level-tracking bits are cleared either way.
 */
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
    if (tlb->fullmm) {
        tlb->start = ~0UL;
        tlb->end = ~0UL;
    } else {
        tlb->start = TASK_SIZE;
        tlb->end = 0;
    }

    tlb->freed_tables = 0;
    tlb->cleared_ptes = 0;
    tlb->cleared_pmds = 0;
    tlb->cleared_puds = 0;
    tlb->cleared_p4ds = 0;
}

/*
 * Flush the TLB for the gathered range, then re-arm the gather state.
 *
 * Anything calling __tlb_adjust_range() also sets at least one of the
 * cleared_* bits (and table freeing sets freed_tables), so if none of
 * them is set there is nothing pending and the flush can be skipped
 * entirely. In particular, freed_tables alone must NOT suppress the
 * flush: freed page-table pages may be reused while stale TLB entries
 * still reference them, so that case needs the flush most of all.
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
    if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
          tlb->cleared_puds || tlb->cleared_p4ds))
        return;

    tlb_flush(tlb);
    __tlb_reset_range(tlb);
}

/*
 * Page-shift of the finest-grained level that had entries cleared,
 * checked smallest-to-largest so PTEs win over huge mappings; falls
 * back to the base page shift when no level bit is set.
 */
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
    return tlb->cleared_ptes ? UTILS_PAGE_SHIFT :
           tlb->cleared_pmds ? PMD_SHIFT :
           tlb->cleared_puds ? PUD_SHIFT :
           tlb->cleared_p4ds ? P4D_SHIFT :
                               UTILS_PAGE_SHIFT;
}

/* Byte size corresponding to tlb_get_unmap_shift(). */
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
    unsigned long shift = tlb_get_unmap_shift(tlb);

    return 1UL << shift;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
/*
 * Per-VMA cache maintenance before teardown. Skipped for a full-mm
 * flush, where the whole address space goes away anyway.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
    if (!tlb->fullmm)
        flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Per-VMA TLB maintenance after teardown. Flushing and resetting the
 * range at each VMA boundary keeps it from ballooning across the unused
 * gaps between consecutive VMAs. Skipped for a full-mm flush.
 */
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
    if (!tlb->fullmm)
        tlb_flush_mmu_tlbonly(tlb);
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
/*
 * Note that PTE-level entries in [address, address + size) were cleared
 * and fold the span into the pending flush range.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
                     unsigned long address, unsigned long size)
{
    tlb->cleared_ptes = 1;
    __tlb_adjust_range(tlb, address, size);
}

/*
 * Note that PMD-level entries in [address, address + size) were cleared
 * and fold the span into the pending flush range.
 */
static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
                     unsigned long address, unsigned long size)
{
    tlb->cleared_pmds = 1;
    __tlb_adjust_range(tlb, address, size);
}

/*
 * Note that PUD-level entries in [address, address + size) were cleared
 * and fold the span into the pending flush range.
 */
static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
                     unsigned long address, unsigned long size)
{
    tlb->cleared_puds = 1;
    __tlb_adjust_range(tlb, address, size);
}

/*
 * Note that P4D-level entries in [address, address + size) were cleared
 * and fold the span into the pending flush range.
 */
static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
                     unsigned long address, unsigned long size)
{
    tlb->cleared_p4ds = 1;
    __tlb_adjust_range(tlb, address, size);
}

/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */
#ifndef pte_free_tlb
/*
 * Free a PTE page: mark the covering PMD-level entry as cleared (the PMD
 * pointed at this table), record that a page-table page was freed, then
 * hand the page to the arch hook. Range size is PAGE_SIZE per the page
 * table cache note above.
 */
#define pte_free_tlb(tlb, ptep, address)			\
    do {							\
        tlb_flush_pmd_range(tlb, address, UTILS_PAGE_SIZE);	\
        tlb->freed_tables = 1;				\
        __pte_free_tlb(tlb, ptep, address);		\
    } while (0)
#endif

#ifndef pmd_free_tlb
/*
 * Free a PMD page: mark the covering PUD-level entry as cleared, record
 * that a page-table page was freed, then hand the page to the arch hook.
 */
#define pmd_free_tlb(tlb, pmdp, address)			\
    do {							\
        tlb_flush_pud_range(tlb, address, UTILS_PAGE_SIZE);	\
        tlb->freed_tables = 1;				\
        __pmd_free_tlb(tlb, pmdp, address);		\
    } while (0)
#endif

#ifndef pud_free_tlb
/*
 * Free a PUD page: mark the covering P4D-level entry as cleared, record
 * that a page-table page was freed, then hand the page to the arch hook.
 */
#define pud_free_tlb(tlb, pudp, address)			\
    do {							\
        tlb_flush_p4d_range(tlb, address, UTILS_PAGE_SIZE);	\
        tlb->freed_tables = 1;				\
        __pud_free_tlb(tlb, pudp, address);		\
    } while (0)
#endif

#ifndef p4d_free_tlb
/*
 * Free a P4D page: there is no PGD-level tlb_flush_*_range() helper, so
 * only grow the flush range and record the table free.
 * NOTE(review): the parameter is named "pudp" but receives a p4d pointer
 * — presumably a copy-paste of the pud variant; confirm before renaming.
 */
#define p4d_free_tlb(tlb, pudp, address)			\
    do {							\
        __tlb_adjust_range(tlb, address, UTILS_PAGE_SIZE);	\
        tlb->freed_tables = 1;				\
        __p4d_free_tlb(tlb, pudp, address);		\
    } while (0)
#endif

/* Initialise @tlb to gather flushes for [start, end) of @mm (defined elsewhere). */
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                unsigned long start, unsigned long end);
/* Finish a gather operation; presumably performs any pending flush — see definition. */
extern void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end);

/* Queue a page-table page for freeing via @tlb (semantics in the definition). */
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif /* !ASM_GENERIC_TLB_H */
