/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved.
 * Description: Header file for evmm_tlb module.
 */
#ifndef _LINUX_EVMMTLB_H
#define _LINUX_EVMMTLB_H

#include <asm/tlb.h>
#include <asm-generic/tlb.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/fs.h>

#ifdef CONFIG_EULEROS_EVMM_TLB
static inline bool is_evmm_tlb_page(struct vm_area_struct *vma)
{
	return vma != NULL && !!(vma->vm_flags & VM_EVMM_TLB);
}

/* Walk the page table for @addr in @mm and report whether the walk stops
 * before reaching a normal pte page: any non-present entry (pgd/p4d/pud/pmd)
 * or a huge pud/pmd mapping yields true. Only a fully-present walk ending
 * at a pmd that points to a pte page returns false.
 *
 * Called without the page-table lock; READ_ONCE() is used for a single
 * consistent snapshot of each entry.
 */
static inline bool is_evmm_huge_pte(struct mm_struct *mm,
		unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return true;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		/* Was "return NULL": wrong type for a bool function (evaluates
		 * to false) and inconsistent with the pgd case above, where a
		 * non-present entry returns true.
		 */
		return true;
	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_huge(pud) || !pud_present(pud))
		return true;
	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return true;
	return false;
}

static inline void evmm_unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
	/* The evmm/swapmm page table mapping mechanism is different
	 * from the normal page. Therefore, the unmapping
	 * process of the evmm/swapmm is different.
	 *
	 * `unmap_page_range()` is only used in task exiting
	 * process or oom killer (vma must be anonymous or
	 * non-shared). Because the evmm/swapmm vma must be shared
	 * and non-anonymous, we only need do here.
	 */
	if (vma->vm_file) {
		const struct file_operations *fops;

		fops = vma->vm_file->f_op;
		if (fops && fops->unmap_page_range)
			fops->unmap_page_range(tlb, vma, start, end);
	}
}

/* Return true when @vma is an evmm TLB vma and any PMD-sized step in
 * [@start, @end) hits a non-present or huge entry (see is_evmm_huge_pte()).
 * Non-evmm vmas always report false.
 */
static inline bool vma_has_evmm_huge_pte(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	unsigned long addr = start;

	if (!is_evmm_tlb_page(vma))
		return false;

	mm = vma->vm_mm;
	while (addr < end) {
		if (is_evmm_huge_pte(mm, addr))
			return true;
		addr += PMD_SIZE;
	}

	return false;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_PPC64) && !defined(CONFIG_PPC)
/* Walk the page table for @address in @vma's mm and return the struct page
 * backing it, taking a reference when FOLL_GET is set in @flags.
 *
 * Handles three mapping levels: huge pud, huge pmd, and normal pte; in each
 * case the pfn is computed from the level's entry plus the in-entry offset.
 * Returns ERR_PTR(-EFAULT) when any level is none/non-present or when the
 * pfn has no struct page (pfn_in_present_section() fails).
 *
 * Locking: the matching level lock (pud_lock/pmd_lock/pte lock) is held
 * from just before the pfn is read until after the optional get_page(),
 * and released on both the success and the `unlock` paths. The `no_page`
 * path is only reached before any lock is taken.
 */
static inline struct page *evmm_follow_page_mask(struct vm_area_struct *vma,
					unsigned long address, unsigned int flags)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;
	struct page *page;
	unsigned long pfn;
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;

	pgdp = pgd_offset(mm, address);
	if (pgd_none(*pgdp))
		goto no_page;

	p4dp = p4d_offset(pgdp, address);
	if (p4d_none(*p4dp))
		goto no_page;

	pudp = pud_offset(p4dp, address);
	pud = *pudp;
	if (pud_none(pud) || !pud_present(pud))
		goto no_page;
	if (pud_huge(pud)) {
		/* NOTE(review): `pud` was sampled before pud_lock() is taken;
		 * the entry could have changed in between — confirm whether a
		 * re-read under the lock is required for this use case.
		 */
		ptl = pud_lock(mm, pudp);
		pfn = pud_pfn(pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
		goto get_page;
	}

	pmdp = pmd_offset(pudp, address);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd) || !pmd_present(pmd))
		goto no_page;
	if (pmd_huge(pmd)) {
		ptl = pmd_lock(mm, pmdp);
		pfn = pmd_pfn(pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		goto get_page;
	}

	/* Normal pte: map the pte page and take its lock in one step. */
	ptep = pte_offset_map_lock(mm, pmdp, address, &ptl);
	pte = *ptep;
	if (pte_none(pte) || !pte_present(pte))
		goto unlock;
	pfn = pte_pfn(pte);

get_page:
	/* Only pfns within a present sparsemem section have a struct page. */
	if (!pfn_in_present_section(pfn))
		goto unlock;
	page = pfn_to_page(pfn);
	if (flags & FOLL_GET)
		get_page(page);

	/* NOTE(review): the pte path uses plain spin_unlock() rather than
	 * pte_unmap_unlock(), so the kmap from pte_offset_map_lock() is not
	 * undone — a leak on highmem configs. The label is shared with the
	 * huge-pud/pmd paths where no pte was mapped, so fixing it would
	 * require restructuring; verify whether highmem is a supported
	 * config here.
	 */
	spin_unlock(ptl);
	return page;

unlock:
	spin_unlock(ptl);
no_page:
	return ERR_PTR(-EFAULT);
}
#else
/* Fallback when sparsemem is unavailable (or on PPC): the pfn-to-page
 * lookup above relies on pfn_in_present_section(), so always fail here.
 */
static inline struct page *evmm_follow_page_mask(struct vm_area_struct *vma,
					unsigned long address, unsigned int flags)
{
	return ERR_PTR(-EFAULT);
}
#endif

#else
/* Stub for !CONFIG_EULEROS_EVMM_TLB: no vma is an evmm TLB mapping. */
static inline bool is_evmm_tlb_page(struct vm_area_struct *vma)
{
	return false;
}

/* Stub for !CONFIG_EULEROS_EVMM_TLB: never reports a huge/non-present pte. */
static inline bool is_evmm_huge_pte(struct mm_struct *mm,
		unsigned long addr)
{
	return false;
}

/* Stub for !CONFIG_EULEROS_EVMM_TLB: no evmm vmas, so never true. */
static inline bool vma_has_evmm_huge_pte(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	return false;
}

/* Stub for !CONFIG_EULEROS_EVMM_TLB: lookup always fails with -EFAULT. */
static inline struct page *evmm_follow_page_mask(struct vm_area_struct *vma,
					unsigned long address, unsigned int flags)
{
	return ERR_PTR(-EFAULT);
}

/* Stub for !CONFIG_EULEROS_EVMM_TLB: nothing to unmap. */
static inline void evmm_unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}
#endif

#endif
