/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright @ Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 * Description: Header file for swapmm_tlb module.
 */
#ifndef _LINUX_SWAPMMTLB_H
#define _LINUX_SWAPMMTLB_H

#include <linux/mm.h>

#ifdef CONFIG_EULEROS_SWAPMM_TLB
#include <linux/list.h>
#include <uapi/linux/swapmm_tlb.h>

#include <asm/pgalloc.h>
#include <asm/pgtable-prot.h>

/*
 * One swapmm-managed virtual address range and the page tables backing it.
 * Areas are chained into a singly linked list via @next.
 */
struct swapmm_area {
	unsigned long virt_start;	/* start of the virtual range */
	unsigned long virt_end;		/* end of the virtual range (inclusivity not visible here — see users) */
	unsigned long asid;		/* address-space ID the range belongs to */
	unsigned long *pgd_array;	/* page-table roots covering the range */
	struct swapmm_area *next;	/* next area in the list, NULL-terminated presumably — verify at users */
};

/*
 * A reserved physical memory region tracked on a list.
 * NOTE(review): "reverved" looks like a typo for "reserved", but the tag
 * is part of the public interface and is kept unchanged for compatibility.
 */
struct swapmm_reverved_mem {
	struct list_head list;	/* linkage into the owner's region list */
	phys_addr_t start;	/* first physical address of the region */
	phys_addr_t end;	/* end of the region (inclusivity not visible here) */
};

static inline bool is_swapmm_tlb_page(struct vm_area_struct *vma)
{
	return vma != NULL && !!(vma->vm_flags & VM_SWAPMM_TLB);
}

/*
 * Publish the P4D entry @p4d at @p4dp.
 *
 * dsb(ishst) orders the table store before subsequent walks in the inner
 * shareable domain; isb() synchronizes the local CPU's context.  The
 * barriers here are unconditional (compare swapmm_pmd_populate(), which
 * only issues them for a valid entry).
 */
static inline void swapmm_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

/*
 * Point the P4D entry @p4dp at the PUD table @pud.
 *
 * Fix: the original wrote `return swapmm_set_p4d(...)`.  Returning a void
 * expression from a void function is a C constraint violation (C11
 * §6.8.6.4) and warns under -Wpedantic, so the call is now a plain
 * statement.  Behavior is unchanged.
 *
 * NOTE(review): the table bit used is PUD_TYPE_TABLE even though this is
 * a p4d-level entry — presumably correct with the folded p4d on this
 * configuration; confirm against the arch's __p4d_populate().
 */
static inline void swapmm_p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pud)
{
	phys_addr_t pudp = __pa(pud);
	p4d_t p4d = __p4d(__phys_to_p4d_val(pudp) | PUD_TYPE_TABLE);

	swapmm_set_p4d(p4dp, p4d);
}

/* Clear the P4D entry at @p4dp by installing the all-zero (none) entry. */
static inline void swapmm_p4d_clear(p4d_t *p4dp)
{
	swapmm_set_p4d(p4dp, __p4d(0));
}

/* code copied from `linux/include/asm-generic/pgalloc.h` */
static inline pud_t *swapmm_pud_alloc(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_PGTABLE_USER);
}

/*
 * Allocate a page for a PMD table and run the page-table constructor on
 * it.  Returns the table's kernel virtual address, or NULL if either the
 * allocation or the constructor fails (the page is freed in that case).
 */
static inline pmd_t *swapmm_pmd_alloc(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = alloc_pages(GFP_PGTABLE_USER, 0);

	if (page == NULL)
		return NULL;

	if (pgtable_pmd_page_ctor(page))
		return (pmd_t *)page_address(page);

	/* ctor failed: give the page back rather than leak it */
	__free_pages(page, 0);
	return NULL;
}

/* True when swapmm hot-replace is enabled (defined in the .c module). */
extern bool swapmm_hotreplace_enabled(void);
/* Validate the physical range [start, end); return convention not visible
 * here — see the definition (presumably 0 on success / negative errno).
 */
int check_phys_addr(unsigned long start, unsigned long end);

/*
 * Test whether the given physical address is in the swapmm pin area.
 * The related physical page is aligned to 4K.
 */
bool is_swapmm_pin_page(phys_addr_t paddr);

/*
 * Page protection for swapmm mappings: normal memory, dirty, not-global,
 * access flag set, user-accessible.
 * Encoded value (on the original configuration): 0xe8000000000f53.
 */
#define _PAGE_SWAPMM (PROT_NORMAL | PTE_DIRTY | PTE_NG | PTE_AF | PTE_USER)
#define PAGE_SWAPMM __pgprot(_PAGE_SWAPMM)

/* Build a section (block) PMD entry mapping @phys with protection @prot. */
static inline pmd_t swapmm_pmd_prot(phys_addr_t phys, pgprot_t prot)
{
	return __pmd(phys | pgprot_val(mk_pmd_sect_prot(prot)));
}

/*
 * Point the PMD entry @pmdp at the PTE table @pte.
 *
 * Unlike swapmm_set_p4d(), the dsb/isb pair is issued only when the new
 * entry is valid: publishing an invalid entry needs no ordering against
 * the hardware walker.
 */
static inline void swapmm_pmd_populate(struct mm_struct *mm,
				       pmd_t *pmdp, pte_t *pte)
{
	phys_addr_t ptep = __pa(pte);
	pmd_t pmd = __pmd(__phys_to_pmd_val(ptep) | PMD_TYPE_TABLE);

	WRITE_ONCE(*pmdp, pmd);
	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

/*
 * Non-zero when @pmd is a non-empty entry whose table bit is clear,
 * i.e. a section (huge) mapping rather than a pointer to a PTE table.
 */
static inline int __pmd_huge(pmd_t pmd)
{
	if (!pmd_val(pmd))
		return 0;

	return !(pmd_val(pmd) & PMD_TABLE_BIT);
}

/*
 * Translate a userspace address, mapped with SWAPMMWILLNEED, into the
 * physical address used in kernel space by the product.
 *
 * Warning: this interface is only for Dorado, and only for addresses
 *          mapped with the SWAPMM_MADV_WILLNEED action in the swapmm dev.
 *
 * uaddr: the userspace address mapped with SWAPMMWILLNEED
 * taskmm: the mm struct of the task in whose context the mmap was done;
 *         if NULL, current->mm is used
 * needlock:
 *         0: take no lock, to improve performance; the caller must ensure
 *            there is no concurrent unmap while this function runs
 *         1: take a lock to protect against concurrent access; performance
 *            is not guaranteed
 */
unsigned long get_swapmm_madv_kaddr(unsigned long uaddr, struct mm_struct *taskmm, int needlock);

#else /* CONFIG_EULEROS_SWAPMM_TLB */
/* Stub for !CONFIG_EULEROS_SWAPMM_TLB: no VMA is ever a swapmm TLB page. */
static inline bool is_swapmm_tlb_page(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_EULEROS_SWAPMM_TLB */

#ifndef CONFIG_EULEROS_EPGFAULT
/* Fallbacks when the epgfault feature is compiled out: feature reads as
 * disabled and its fault-signal flag contributes nothing to bitmasks.
 */
# define epgfault_is_enabled()		false
# define VM_FAULT_USER_FAULT_SIG	0
#endif

#endif /* _LINUX_SWAPMMTLB_H */
