// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 * Description: Euler virtual memory management.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/profile.h>
#include <linux/task_exit_notifier.h>
#include <linux/swapmm_tlb.h>

#include <asm/ptrace.h>
#include <asm/tlb.h>

#include "swapmm_tlb_madv.h"

#define G_PAGEBUF_NUM 128

/* Return true if any entry in the given pte table is populated. */
static bool __madv_have_pte_page(pte_t *pte)
{
	pte_t *p = pte;
	pte_t *limit = pte + PTRS_PER_PTE;

	while (p < limit) {
		if (!pte_none(*p))
			return true;
		p++;
	}
	return false;
}

/* Clear every pmd entry covering [addr, end); no-op on an empty range. */
static void __clean_pmds(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	unsigned long next;

	if (addr >= end)
		return;

	for (;;) {
		next = pmd_addr_end(addr, end);
		pmd_clear(pmd);
		addr = next;
		if (addr == end)
			break;
		pmd++;
	}
}

/* Clear every pte in [addr, end) for the current mm. */
static void __clean_ptes(pte_t *pte, unsigned long addr, unsigned long end)
{
	pte_t *p = pte;
	unsigned long va;

	if (addr >= end)
		return;

	for (va = addr; va != end; va += PAGE_SIZE, p++)
		ptep_get_and_clear(current->mm, va, p);
}

/*
 * Undo a partially completed 2M swap-move over the given pmd ranges.
 *
 * Each entry's huge page is read from the "ori" side, re-installed as a
 * section mapping on the "dst" side with PAGE_SWAPMM protections, and the
 * source entry is cleared.  Note the caller in the swap-move path passes
 * its *destination* table as @pmdori and its *origin* table as @pmddst,
 * so this walk transfers the already-moved pages back to where they came
 * from — the parameter names are relative to this rollback, not the move.
 */
static void __rollback_swapmove_pmds(pmd_t *pmdori, pmd_t *pmddst, unsigned long addrori,
					unsigned long addrdst, unsigned long endori,
					unsigned long enddst)
{
	unsigned long nextori, nextdst;
	struct page *pageori;

	/* nothing to roll back on an empty range */
	if ((addrdst >= enddst) || (addrori >= endori))
		return;

	do {
		nextori = pmd_addr_end(addrori, endori);
		nextdst = pmd_addr_end(addrdst, enddst);

		/* take the page from the source entry ... */
		pageori = pmd_page(*pmdori);

		/* ... and re-map it on the other side, then clear the source */
		set_pmd(pmddst, __pmd(page_to_phys(pageori) |
			pgprot_val(mk_pmd_sect_prot(PAGE_SWAPMM))));
		pmd_clear(pmdori);
	} while (pmddst++, pmdori++, addrdst = nextdst, addrori = nextori,
			(addrdst != enddst) && (addrori != endori));
}

/*
 * Undo a partially completed 4K swap-move over the given pte ranges:
 * move each page from the "ori" side back to the "dst" side and clear
 * the source pte.  As with __rollback_swapmove_pmds(), the swap-move
 * caller passes its destination ptes as @pteori and its origin ptes as
 * @ptedst, so the transfer direction here reverses the failed move.
 * Callers must hold the relevant pte lock(s).
 */
static void __rollback_swapmove_ptes(pte_t *pteori, pte_t *ptedst, unsigned long addrori,
					unsigned long addrdst, unsigned long endori, unsigned long enddst)
{
	struct page *pageori;

	/* nothing to roll back on an empty range */
	if ((addrdst >= enddst) || (addrori >= endori))
		return;

	do {
		pageori = pte_page(*pteori);
		set_pte(ptedst, pfn_pte(page_to_pfn(pageori), PAGE_SWAPMM));
		ptep_get_and_clear(current->mm, addrori, pteori);
	} while (ptedst++, pteori++, addrdst += PAGE_SIZE, addrori += PAGE_SIZE,
		 (addrdst != enddst) && (addrori != endori));
}


/*
 * Allocate one page suitable for use as a pagetable and return its
 * kernel virtual address, or NULL on failure (or when the kernel does
 * not use 4K base pages).  @addr is currently unused.
 */
static void *__madv_alloc_page(unsigned long addr)
{
	pgtable_t page;

	/* only 4K base page kernels are supported */
	if (PAGE_SHIFT != 12)
		return NULL;

	/*
	 * Allocate via pte_alloc_one() so the page carries the pagetable
	 * constructor state expected by gup and munmap; a raw page here
	 * would cause multiple panics.
	 */
	page = pte_alloc_one(current->mm);
	return page ? page_to_virt(page) : NULL;
}

/*
 * Populate the pte range [addr, end) under @pmd with the pre-allocated
 * pages carried in @madvopt, allocating the pte table first if needed.
 *
 * Returns 0 on success, -ENOMEM if the pte table cannot be allocated,
 * -EEXIST if any pte in the range is already populated, and -EINVAL if
 * @madvopt runs out of pages; on the error paths the ptes already
 * written into this table are cleared again.
 *
 * Fix: the page index is now validated BEFORE indexing pages[], so an
 * exhausted page array can no longer be read out of bounds (the original
 * checked pos only after the access).
 */
static int __madv_alloc_init_pte(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		pgprot_t prot, struct swapmm_madv_opt *madvopt)
{
	pte_t *pte = NULL;
	pte_t *pte_start = NULL;
	unsigned long addr_start = addr;
	spinlock_t *ptl;

	if (pmd_none(*pmd)) {
		pte = (pte_t *)__madv_alloc_page(addr);
		if (!pte) {
			pr_alert("swapmm: alloc pte pgtable page failed!\n");
			return -ENOMEM;
		}
		ptl = pmd_lock(mm, pmd);
		if (pmd_none(*pmd)) {
			mm_inc_nr_ptes(mm);
			__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		} else {
			/* lost the race: someone else populated the pmd */
			pte_free(mm, virt_to_page(pte));
		}
		spin_unlock(ptl);
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte_start = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			/* undo the ptes already written in this table */
			__clean_ptes(pte_start, addr_start, addr);
			spin_unlock(ptl);
			return -EEXIST;
		}
		/*
		 * Should not happen, but validate the index before using it
		 * so pages[] is never read out of bounds.
		 */
		if (madvopt->pos >= madvopt->num) {
			__clean_ptes(pte_start, addr_start, addr);
			spin_unlock(ptl);
			return -EINVAL;
		}
		/* SWAPMM_ALIGN_4K, map should finished here */
		set_pte(pte, pfn_pte(page_to_pfn(madvopt->pages[madvopt->pos]), prot));
		madvopt->pos++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	spin_unlock(ptl);
	return 0;
}

/*
 * Populate the pmd range [addr, end) under @pud, allocating the pmd
 * table on demand.  For SWAPMM_ALIGN_2M the pre-allocated compound
 * pages in @madvopt are installed directly as section mappings; empty
 * pte tables found in the way are reclaimed first.  For 4K alignment
 * the work is delegated to __madv_alloc_init_pte() per pmd entry.
 *
 * Returns 0 on success, -ENOMEM on table allocation failure, -EEXIST
 * when a conflicting mapping already exists (entries written so far in
 * this table are cleared), or -EINVAL if @madvopt runs out of pages.
 *
 * Fix: in the 2M branch the page index is validated BEFORE indexing
 * pages[], so an exhausted page array can no longer be read out of
 * bounds (the original checked pos only after the access).
 */
static int __madv_alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end,
		pgprot_t prot, struct swapmm_madv_opt *madvopt)
{
	pmd_t *pmd = NULL;
	pmd_t *pmd_start = NULL;
	unsigned long addr_start = addr;
	unsigned long next;
	int err  = 0;
	spinlock_t *ptl;

	if (pud_none(*pud)) {
		pmd = swapmm_pmd_alloc(mm, addr);
		if (!pmd) {
			pr_alert("swapmm: alloc pmd pgtable page failed!\n");
			return -ENOMEM;
		}
		ptl = pud_lock(mm, pud);
		if (pud_none(*pud)) {
			mm_inc_nr_pmds(mm);
			pud_populate(mm, pud, pmd);
		} else {
			/* lost the race: someone else populated the pud */
			pmd_free(mm, pmd);
		}
		spin_unlock(ptl);
	}

	pmd_start = pmd = pmd_offset(pud, addr);
	if (madvopt->align == SWAPMM_ALIGN_2M) {
		do {
			next = pmd_addr_end(addr, end);

			ptl = pmd_lock(mm, pmd);
			if (!pmd_none(*pmd)) {
				if (__pmd_huge(*pmd) ||
				    __madv_have_pte_page((pte_t *)__va(pmd_page_paddr(*pmd)))) {
					/* already exist 2m page or exist 4K pages,
					 * need to return error
					 */
					__clean_pmds(pmd_start, addr_start, addr);
					spin_unlock(ptl);
					return -EEXIST;
				} else {
					/* empty pte table in the way: reclaim it */
					struct page *page = pmd_page(*pmd);

					mm_dec_nr_ptes(current->mm);
					pmd_clear(pmd);
					pte_free(current->mm, page);
				}
			}

			/*
			 * Should not happen, but validate the index before
			 * using it so pages[] is never read out of bounds.
			 */
			if (madvopt->pos >= madvopt->num) {
				__clean_pmds(pmd_start, addr_start, addr);
				spin_unlock(ptl);
				return -EINVAL;
			}
			/* if SWAPMM_ALIGN_2M, map should finished here */
			set_pmd(pmd, __pmd(page_to_phys(madvopt->pages[madvopt->pos])
						| pgprot_val(mk_pmd_sect_prot(prot))));
			spin_unlock(ptl);
			madvopt->pos++;
		} while (pmd++, addr = next, addr != end);
	} else {
		do {
			ptl = pmd_lock(mm, pmd);
			if (!pmd_none(*pmd) && __pmd_huge(*pmd)) {
				spin_unlock(ptl);
				return -EEXIST;
			}
			spin_unlock(ptl);
			next = pmd_addr_end(addr, end);
			err = __madv_alloc_init_pte(mm, pmd, addr, next, prot, madvopt);
			if (err != 0)
				return err;
		} while (pmd++, addr = next, addr != end);
	}
	return 0;
}

/*
 * Walk the pud range [addr, end) under @p4d, allocating the pud table
 * on demand and delegating each pud entry to __madv_alloc_init_pmd().
 * Returns 0 on success or the first error encountered.
 */
static int __madv_alloc_init_pud(struct mm_struct *mm, p4d_t *p4d,
		unsigned long addr, unsigned long end,
		pgprot_t prot, struct swapmm_madv_opt *madvopt)
{
	pud_t *pud;
	unsigned long range_end;
	int ret;

	if (p4d_none(*p4d)) {
		pud_t *new_pud = swapmm_pud_alloc(mm, addr);

		if (!new_pud) {
			pr_alert("swapmm: alloc pud pgtable page failed!\n");
			return -ENOMEM;
		}
		spin_lock(&mm->page_table_lock);
		if (p4d_none(*p4d)) {
			mm_inc_nr_puds(mm);
			swapmm_p4d_populate(mm, p4d, new_pud);
		} else {
			/* lost the race: someone else populated the p4d */
			pud_free(mm, new_pud);
		}
		spin_unlock(&mm->page_table_lock);
	}

	pud = pud_offset(p4d, addr);
	do {
		range_end = pud_addr_end(addr, end);
		ret = __madv_alloc_init_pmd(mm, pud, addr, range_end, prot, madvopt);
		if (ret != 0)
			return ret;
		pud++;
		addr = range_end;
	} while (addr != end);

	return 0;
}

/*
 * Walk the p4d range [addr, end) under @pgd, allocating the p4d table
 * on demand and delegating each p4d entry to __madv_alloc_init_pud().
 * Returns 0 on success or the first error encountered.
 */
static int __madv_alloc_init_p4d(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		pgprot_t prot, struct swapmm_madv_opt *madvopt)
{
	p4d_t *p4d;
	unsigned long range_end;
	int ret;

	if (pgd_none(*pgd)) {
		p4d_t *new_p4d = p4d_alloc_one(mm, addr);

		if (!new_p4d) {
			pr_alert("swapmm: alloc p4d pgtable page failed!\n");
			return -ENOMEM;
		}
		spin_lock(&mm->page_table_lock);
		if (pgd_none(*pgd))
			pgd_populate(mm, pgd, new_p4d);
		else
			p4d_free(mm, new_p4d);
		spin_unlock(&mm->page_table_lock);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		range_end = p4d_addr_end(addr, end);
		ret = __madv_alloc_init_pud(mm, p4d, addr, range_end, prot, madvopt);
		if (ret != 0)
			return ret;
		p4d++;
		addr = range_end;
	} while (addr != end);

	return 0;
}

/*
 * Map the pre-allocated pages in @madvopt over [virt, virt + size),
 * walking from the pgd level down.  Returns 0 on success or the first
 * error from the lower-level walkers.
 */
static int __swapmm_madv_mapping(struct mm_struct *mm, unsigned long virt,
		unsigned long size, struct swapmm_madv_opt *madvopt)
{
	pgd_t *pgd = pgd_offset(mm, virt);
	pgprot_t prot = PAGE_SWAPMM;
	unsigned long addr = virt & PAGE_MASK;
	unsigned long end = addr + PAGE_ALIGN(size + (virt & ~PAGE_MASK));
	unsigned long range_end;
	int ret;

	do {
		/*
		 * One iteration per pgd table descriptor (512GB); a large
		 * range may need more than one loop.
		 */
		range_end = pgd_addr_end(addr, end);
		ret = __madv_alloc_init_p4d(mm, pgd, addr, range_end, prot, madvopt);
		if (ret != 0)
			return ret;
		pgd++;
		addr = range_end;
	} while (addr != end);

	return 0;
}

/* An alignment is acceptable iff it is exactly 4K or 2M. */
static int __swapmm_align_ok(unsigned long align)
{
	switch (align) {
	case SWAPMM_ALIGN_4K:
	case SWAPMM_ALIGN_2M:
		return 1;
	default:
		return 0;
	}
}

/*
 * Release every still-allocated page in @pages[0..num) and NULL out the
 * slots so the array can safely be cleaned again.  @align is unused but
 * kept so existing callers remain valid.
 *
 * Fix: the loop counter is now unsigned long, matching @num (the
 * original used a signed int, which misbehaves for num > INT_MAX).
 */
static void __clean_pages(struct page **pages, unsigned long num, unsigned long align)
{
	unsigned long i;

	for (i = 0; i < num; i++) {
		if (pages[i] != NULL) {
			ClearPageUnevictable(pages[i]);
			/* compound_order() handles both 4K and 2M pages */
			__free_pages(pages[i], compound_order(pages[i]));
			pages[i] = NULL;
		}
	}
}

static int __init_pages(struct page **pages, unsigned long num,
			unsigned long align, unsigned int numa_node)
{
	int i = 0;
	gfp_t flags;

	/* Use __GFP_NORETRY flag to avoid long waiting */
	if (is_reliable || current->flags & PF_RELIABLE)
		flags = GFP_RELIABLE | __GFP_ZERO | __GFP_NORETRY;
	else
		flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_NORETRY;

	for (i = 0; i < num; i++) {
		if (align == SWAPMM_ALIGN_4K)
			pages[i] = alloc_pages_node(numa_node, flags, 0);
		else
			pages[i] = alloc_pages_node(numa_node, flags | __GFP_COMP, PMD_SHIFT - PAGE_SHIFT);
		if (!pages[i])
			return -1;

		/* Set swapmm madv pages to non-LRU but unevictable pages, so that when
		 * it meets CE/UCE, memory mirror process can quickly identify it as
		 * swapmm page. It will not affect other procedures since it only sets
		 * unevictable flag while lru flag is not set.
		 */
		SetPageUnevictable(pages[i]);
	}

	return 0;
}

/*
 * Free every page mapped by the ptes covering [addr, end) under @pmd
 * and clear the entries, holding the pte lock for the whole walk.
 * @align is unused at this level (alignment was validated by the pmd
 * walker above).  Always returns 0.
 */
static int __swapmm_ufree_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
				    unsigned long align)
{
	pte_t *pte;
	struct page *page;
	spinlock_t *ptl;

	ptl = pte_lockptr(current->mm, pmd);
	spin_lock(ptl);
	pte = pte_offset_kernel(pmd, addr);
	do {
		/* skip holes; 'continue' still runs the increments in the while () */
		if (pte_none(*pte) || !pfn_valid(pte_pfn(*pte)))
			continue;

		page = pte_page(*pte);
		ptep_get_and_clear(current->mm, addr, pte);
		ClearPageUnevictable(page);
		__free_page(page);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	spin_unlock(ptl);

	return 0;
}

/*
 * Free the mappings covering [addr, end) under @pud.  2M section
 * mappings are freed directly; table entries are delegated to
 * __swapmm_ufree_pte_range().  Returns 0 on success or -EFAULT when
 * the mapping granularity found does not match @align.
 */
static int __swapmm_ufree_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
				    unsigned long align)
{
	pmd_t *pmd;
	unsigned long next;
	struct page *page;
	spinlock_t *ptl;
	int err = 0;
	bool is_huge;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		ptl = pmd_lock(current->mm, pmd);
		if (pmd_none(*pmd)) {
			/* hole; 'continue' still runs the increments in the while () */
			spin_unlock(ptl);
			continue;
		}

		is_huge = __pmd_huge(*pmd);
		if ((is_huge && align == SWAPMM_ALIGN_4K) ||
		    (!is_huge && align == SWAPMM_ALIGN_2M)) {
			/* if page mmap does not match align, do err */
			spin_unlock(ptl);
			return -EFAULT;
		}

		if (is_huge) {
			/* clear the section mapping, then free the compound page */
			page = pmd_page(*pmd);
			pmd_clear(pmd);
			spin_unlock(ptl);
			ClearPageUnevictable(page);
			__free_pages(page, compound_order(page));
		} else {
			spin_unlock(ptl);
			err = __swapmm_ufree_pte_range(pmd, addr, next, align);
			if (err)
				return err;
		}
	} while (pmd++, addr = next, addr != end);

	return 0;
}

/*
 * Free the mappings covering [addr, end) under @p4d, one pud entry at a
 * time; empty entries are skipped.  Returns 0 on success or the first
 * error from the pmd walker.
 */
static int __swapmm_ufree_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
				    unsigned long align)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long range_end;
	spinlock_t *lock;
	int ret;

	do {
		range_end = pud_addr_end(addr, end);
		lock = pud_lock(current->mm, pud);
		if (pud_none(*pud)) {
			/* hole; 'continue' still runs the increments in the while () */
			spin_unlock(lock);
			continue;
		}
		spin_unlock(lock);
		ret = __swapmm_ufree_pmd_range(pud, addr, range_end, align);
		if (ret)
			return ret;
	} while (pud++, addr = range_end, addr != end);
	return 0;
}

/*
 * Free the mappings covering [addr, end) under @pgd, one p4d entry at a
 * time; empty entries are skipped.  Returns 0 on success or the first
 * error from the pud walker.
 */
static int __swapmm_ufree_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
				    unsigned long align)
{
	struct mm_struct *mm = current->mm;
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long range_end;
	int ret;

	do {
		range_end = p4d_addr_end(addr, end);
		spin_lock(&mm->page_table_lock);
		if (p4d_none(*p4d)) {
			/* hole; 'continue' still runs the increments in the while () */
			spin_unlock(&mm->page_table_lock);
			continue;
		}
		spin_unlock(&mm->page_table_lock);
		ret = __swapmm_ufree_pud_range(p4d, addr, range_end, align);
		if (ret)
			return ret;
	} while (p4d++, addr = range_end, addr != end);
	return 0;
}

static int __swapmm_unmap_free(unsigned long addr, unsigned long end, unsigned long align)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(current->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		spin_lock(&current->mm->page_table_lock);
		if (pgd_none(*pgd)) {
			spin_unlock(&current->mm->page_table_lock);
			continue;
		}
		spin_unlock(&current->mm->page_table_lock);
		err = __swapmm_ufree_p4d_range(pgd, addr, next, align);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Move the 4K pages mapped at [vaddr_ori, end_ori) under @pmdori to
 * [vaddr_dst, end_dst) under @pmddst, allocating the destination pte
 * table on demand.  For each entry the ori pte lock is taken first and
 * the dst lock second (when distinct), the page is re-installed on the
 * dst side and the ori pte is cleared.
 *
 * On inconsistency, pages moved so far are transferred back via
 * __rollback_swapmove_ptes() and an error is returned:
 *   -ENOMEM  destination pte table could not be allocated
 *   -ENXIO   a source pte in the range is empty
 *   -EEXIST  a destination pte is already populated
 */
static int __swapmm_swap_move_pte(pmd_t *pmdori, pmd_t *pmddst, unsigned long vaddr_ori,
		unsigned long vaddr_dst, unsigned long end_ori,
		unsigned long end_dst, unsigned long align)
{
	pte_t *pteori = NULL;
	pte_t *ptedst = NULL;
	pte_t *pteori_start, *ptedst_start;
	unsigned long vaddr_dst_start = vaddr_dst;
	unsigned long vaddr_ori_start = vaddr_ori;
	struct page *pageori;
	spinlock_t *ptlori;
	spinlock_t *ptldst;

	if (pmd_none(*pmddst)) {
		ptedst = (pte_t *)__madv_alloc_page(vaddr_dst);
		if (!ptedst) {
			pr_alert("swapmm: alloc pte dst pgtable page failed!\n");
			return -ENOMEM;
		}
		ptldst = pmd_lock(current->mm, pmddst);
		if (pmd_none(*pmddst)) {
			mm_inc_nr_ptes(current->mm);
			__pmd_populate(pmddst, __pa(ptedst), PMD_TYPE_TABLE);
		} else {
			/* lost the race: someone else populated the pmd */
			pte_free(current->mm, virt_to_page(ptedst));
		}
		spin_unlock(ptldst);
	}

	ptedst_start = ptedst = pte_offset_kernel(pmddst, vaddr_dst);
	pteori_start = pteori = pte_offset_kernel(pmdori, vaddr_ori);
	do {
		ptldst = pte_lockptr(current->mm, pmddst);

		/* get ori page, set to dst pte, and last clear ori pte */
		ptlori = pte_lockptr(current->mm, pmdori);
		spin_lock(ptlori);
		if (pte_none(*pteori)) {
			/* source hole: undo everything moved up to here */
			if (ptldst != ptlori)
				spin_lock(ptldst);
			__rollback_swapmove_ptes(ptedst_start, pteori_start, vaddr_dst_start,
							vaddr_ori_start, vaddr_dst, vaddr_ori);
			if (ptldst != ptlori)
				spin_unlock(ptldst);
			spin_unlock(ptlori);
			return -ENXIO;
		}
		pageori = pte_page(*pteori);

		/* set to dst pte, as dst lock new lock, so should recheck ptedst */
		if (ptldst != ptlori)
			spin_lock(ptldst);
		if (!pte_none(*ptedst)) {
			/* destination occupied: undo everything moved up to here */
			__rollback_swapmove_ptes(ptedst_start, pteori_start, vaddr_dst_start,
							vaddr_ori_start, vaddr_dst, vaddr_ori);
			if (ptldst != ptlori)
				spin_unlock(ptldst);
			spin_unlock(ptlori);
			return -EEXIST;
		}
		set_pte(ptedst, pfn_pte(page_to_pfn(pageori), PAGE_SWAPMM));
		ptep_get_and_clear(current->mm, vaddr_ori, pteori);
		if (ptldst != ptlori)
			spin_unlock(ptldst);
		spin_unlock(ptlori);
	} while (ptedst++, pteori++, vaddr_dst += PAGE_SIZE, vaddr_ori += PAGE_SIZE,
			(vaddr_dst != end_dst) && (vaddr_ori != end_ori));

	return 0;
}


/*
 * Move the mappings at [vaddr_ori, end_ori) under @pudori to
 * [vaddr_dst, end_dst) under @puddst, allocating the destination pmd
 * table on demand.  For SWAPMM_ALIGN_2M whole section entries are moved
 * under the pmd locks (ori first, dst second when distinct); otherwise
 * each pmd entry is delegated to __swapmm_swap_move_pte().
 *
 * On inconsistency in the 2M path, entries moved so far are transferred
 * back via __rollback_swapmove_pmds().  Errors:
 *   -ENOMEM  destination pmd table could not be allocated
 *   -ENXIO   source entry missing or of the wrong granularity
 *   -EEXIST  destination entry already populated
 */
static int __swapmm_swap_move_pmd(pud_t *pudori, pud_t *puddst, unsigned long vaddr_ori,
		unsigned long vaddr_dst, unsigned long end_ori,
		unsigned long end_dst, unsigned long align)
{
	pmd_t *pmdori = NULL;
	pmd_t *pmddst = NULL;
	pmd_t *pmddst_start, *pmdori_start;
	unsigned long next_ori;
	unsigned long next_dst;
	unsigned long vaddr_dst_start = vaddr_dst;
	unsigned long vaddr_ori_start = vaddr_ori;
	struct page *pageori;
	int err = 0;
	spinlock_t *ptlori;
	spinlock_t *ptldst;

	if (pud_none(*puddst)) {
		pmddst = swapmm_pmd_alloc(current->mm, vaddr_dst);
		if (!pmddst) {
			pr_alert("swapmm: alloc pmd dst pgtable page failed!\n");
			return -ENOMEM;
		}
		ptldst = pud_lock(current->mm, puddst);
		if (pud_none(*puddst)) {
			mm_inc_nr_pmds(current->mm);
			pud_populate(current->mm, puddst, pmddst);
		} else {
			/* lost the race: someone else populated the pud */
			pmd_free(current->mm, pmddst);
		}
		spin_unlock(ptldst);
	}

	pmddst_start = pmddst = pmd_offset(puddst, vaddr_dst);
	pmdori_start = pmdori = pmd_offset(pudori, vaddr_ori);
	do {
		next_dst = pmd_addr_end(vaddr_dst, end_dst);
		next_ori = pmd_addr_end(vaddr_ori, end_ori);

		if (align == SWAPMM_ALIGN_2M) {
			ptldst = pmd_lockptr(current->mm, pmddst);
			/* get ori page, set to dst pmd, and last clear ori pmd */
			ptlori = pmd_lock(current->mm, pmdori);
			if (pmd_none(*pmdori) || !__pmd_huge(*pmdori)) {
				/* source missing or not a 2M section: roll back */
				if (ptldst != ptlori)
					spin_lock(ptldst);
				__rollback_swapmove_pmds(pmddst_start, pmdori_start,
							 vaddr_dst_start, vaddr_ori_start,
							 vaddr_dst, vaddr_ori);
				if (ptldst != ptlori)
					spin_unlock(ptldst);
				spin_unlock(ptlori);
				return -ENXIO;
			}
			pageori = pmd_page(*pmdori);

			/* if enable USE_SPLIT_PMD_PTLOCKS, ptldst different from ptlori
			 * if not, ptldst the same as ptlori, is just page_table_lock
			 */
			if (ptldst != ptlori)
				spin_lock(ptldst);
			if (!pmd_none(*pmddst)) {
				if (__pmd_huge(*pmddst) ||
				    __madv_have_pte_page((pte_t *)__va(pmd_page_paddr(*pmddst)))) {
					/* already exist 2m page or exist 4K pages,
					 * need to return error
					 */
					__rollback_swapmove_pmds(pmddst_start, pmdori_start,
								 vaddr_dst_start, vaddr_ori_start,
								 vaddr_dst, vaddr_ori);
					if (ptldst != ptlori)
						spin_unlock(ptldst);
					spin_unlock(ptlori);
					return -EEXIST;
				} else {
					/* empty pte table in the way: reclaim it */
					struct page *page = pmd_page(*pmddst);

					mm_dec_nr_ptes(current->mm);
					pmd_clear(pmddst);
					pte_free(current->mm, page);
				}
			}
			set_pmd(pmddst, __pmd(page_to_phys(pageori) |
				pgprot_val(mk_pmd_sect_prot(PAGE_SWAPMM))));
			pmd_clear(pmdori);
			if (ptldst != ptlori)
				spin_unlock(ptldst);
			spin_unlock(ptlori);
		} else {
			/* 4K path: pre-check granularity on both sides */
			ptlori = pmd_lock(current->mm, pmdori);
			if (pmd_none(*pmdori) || __pmd_huge(*pmdori)) {
				spin_unlock(ptlori);
				return -ENXIO;
			}
			spin_unlock(ptlori);

			ptldst = pmd_lock(current->mm, pmddst);
			if (!pmd_none(*pmddst) && __pmd_huge(*pmddst)) {
				spin_unlock(ptldst);
				return -EEXIST;
			}
			spin_unlock(ptldst);

			err = __swapmm_swap_move_pte(pmdori, pmddst, vaddr_ori, vaddr_dst,
					next_ori, next_dst, align);
			if (err != 0)
				return err;
		}
	} while (pmddst++, pmdori++, vaddr_dst = next_dst, vaddr_ori = next_ori,
			(vaddr_dst != end_dst) && (vaddr_ori != end_ori));
	return 0;
}


/*
 * Move the mappings at [vaddr_ori, end_ori) under @p4dori to
 * [vaddr_dst, end_dst) under @p4ddst, allocating the destination pud
 * table on demand and delegating each populated pud entry to
 * __swapmm_swap_move_pmd().  Empty source entries are skipped.
 * Returns 0 on success, -ENOMEM on table allocation failure, or the
 * first error from the pmd walker.
 */
static int __swapmm_swap_move_pud(p4d_t *p4dori, p4d_t *p4ddst, unsigned long vaddr_ori,
		unsigned long vaddr_dst, unsigned long end_ori,
		unsigned long end_dst, unsigned long align)
{
	pud_t *puddst = NULL;
	pud_t *pudori = NULL;
	unsigned long next_ori;
	unsigned long next_dst;
	int err = 0;
	spinlock_t *ptl;

	if (p4d_none(*p4ddst)) {
		puddst = swapmm_pud_alloc(current->mm, vaddr_dst);
		if (!puddst) {
			pr_alert("swapmm: alloc pud dst pgtable page failed!\n");
			return -ENOMEM;
		}
		spin_lock(&current->mm->page_table_lock);
		if (p4d_none(*p4ddst)) {
			mm_inc_nr_puds(current->mm);
			swapmm_p4d_populate(current->mm, p4ddst, puddst);
		} else {
			/* lost the race: someone else populated the p4d */
			pud_free(current->mm, puddst);
		}
		spin_unlock(&current->mm->page_table_lock);
	}

	puddst = pud_offset(p4ddst, vaddr_dst);
	pudori = pud_offset(p4dori, vaddr_ori);
	do {
		next_dst = pud_addr_end(vaddr_dst, end_dst);
		next_ori = pud_addr_end(vaddr_ori, end_ori);

		ptl = pud_lock(current->mm, pudori);
		if (pud_none(*pudori)) {
			/* hole; 'continue' still runs the increments in the while () */
			spin_unlock(ptl);
			continue;
		}
		spin_unlock(ptl);
		err = __swapmm_swap_move_pmd(pudori, puddst, vaddr_ori, vaddr_dst,
				next_ori, next_dst, align);
		if (err != 0)
			/* why return without free?
			 * because pud_alloc_one just init pgd self and just do one time,
			 * do not free here can be faster next time the vaddr need to mmap,
			 * also, all pagetable memory will be freed when task exit
			 */
			return err;
	} while (puddst++, pudori++, vaddr_dst = next_dst, vaddr_ori = next_ori,
			(vaddr_dst != end_dst) && (vaddr_ori != end_ori));
	return 0;
}

/*
 * Move the mappings at [vaddr_ori, end_ori) under @pgdori to
 * [vaddr_dst, end_dst) under @pgddst, allocating the destination p4d
 * table on demand and delegating each populated p4d entry to
 * __swapmm_swap_move_pud().  Empty source entries are skipped.
 * Returns 0 on success, -ENOMEM on table allocation failure, or the
 * first error from the pud walker.
 *
 * NOTE(review): mm_inc_nr_puds() is bumped here when populating a *pgd*
 * entry, while the willneed path (__madv_alloc_init_p4d) populates a pgd
 * without touching any counter — verify which accounting is intended.
 */
static int __swapmm_swap_move_p4d(pgd_t *pgdori, pgd_t *pgddst, unsigned long vaddr_ori,
		unsigned long vaddr_dst, unsigned long end_ori,
		unsigned long end_dst, unsigned long align)
{
	p4d_t *p4ddst = NULL;
	p4d_t *p4dori = NULL;
	unsigned long next_ori;
	unsigned long next_dst;
	int err = 0;

	if (pgd_none(*pgddst)) {
		p4ddst = p4d_alloc_one(current->mm, vaddr_dst);
		if (!p4ddst) {
			pr_alert("swapmm: alloc p4d dst pgtable page failed!\n");
			return -ENOMEM;
		}
		spin_lock(&current->mm->page_table_lock);
		if (pgd_none(*pgddst)) {
			mm_inc_nr_puds(current->mm);
			pgd_populate(current->mm, pgddst, p4ddst);
		} else {
			/* lost the race: someone else populated the pgd */
			p4d_free(current->mm, p4ddst);
		}
		spin_unlock(&current->mm->page_table_lock);
	}

	p4ddst = p4d_offset(pgddst, vaddr_dst);
	p4dori = p4d_offset(pgdori, vaddr_ori);
	do {
		next_dst = p4d_addr_end(vaddr_dst, end_dst);
		next_ori = p4d_addr_end(vaddr_ori, end_ori);

		spin_lock(&current->mm->page_table_lock);
		if (p4d_none(*p4dori)) {
			/* hole; 'continue' still runs the increments in the while () */
			spin_unlock(&current->mm->page_table_lock);
			continue;
		}
		spin_unlock(&current->mm->page_table_lock);
		err = __swapmm_swap_move_pud(p4dori, p4ddst, vaddr_ori, vaddr_dst,
				next_ori, next_dst, align);
		if (err != 0)
			/* why return without free?
			 * because p4d_alloc_one just init pgd self and just do one time,
			 * do not free here can be faster next time the vaddr need to mmap,
			 * also, all pagetable memory will be freed when task exit
			 */
			return err;
	} while (p4ddst++, p4dori++, vaddr_dst = next_dst, vaddr_ori = next_ori,
			(vaddr_dst != end_dst) && (vaddr_ori != end_ori));
	return 0;
}

/*
 * Top-level walker for SWAPMM_MADV_SWAPMOVE: relocate the mapping at
 * [vaddr_ori, vaddr_ori + len) to [vaddr_dst, vaddr_dst + len) in the
 * current mm, one pgd entry at a time.  Empty source pgd entries are
 * skipped.  Returns 0 on success or the first error from below.
 */
static int __swapmm_swap_move(struct swapmm_madv_swapmove *swapmm_swapmove)
{
	unsigned long vaddr_ori = swapmm_swapmove->vaddr_ori;
	unsigned long vaddr_dst = swapmm_swapmove->vaddr_dst;
	unsigned long end_ori = vaddr_ori + swapmm_swapmove->len;
	unsigned long end_dst = vaddr_dst + swapmm_swapmove->len;
	pgd_t *pgdori = pgd_offset(current->mm, vaddr_ori);
	pgd_t *pgddst = pgd_offset(current->mm, vaddr_dst);
	unsigned long next_ori, next_dst;
	int ret;

	do {
		next_ori = pgd_addr_end(vaddr_ori, end_ori);
		next_dst = pgd_addr_end(vaddr_dst, end_dst);
		/* pgdori should not be none, pgddst maybe null */
		if (pgd_none(*pgdori))
			continue;

		ret = __swapmm_swap_move_p4d(pgdori, pgddst, vaddr_ori, vaddr_dst,
				next_ori, next_dst, swapmm_swapmove->pagealign);
		if (ret != 0)
			return ret;
	} while (pgdori++, pgddst++, vaddr_ori = next_ori, vaddr_dst = next_dst,
			(vaddr_ori != end_ori) && (vaddr_dst != end_dst));
	return 0;
}

/*
 * Look up the mapping for user address @uaddr in @taskmm (or the
 * current mm when @taskmm is NULL) and return the backing page's
 * *physical* address via page_to_phys(), or 0 when the address is not
 * mapped.  Despite the "kaddr" name, the value returned is a physical
 * address, not a kernel virtual address.
 *
 * @needlock: take mmap_write_lock() around the walk when non-zero.
 *
 * NOTE(review): the pte_offset_map() below has no matching pte_unmap(),
 * and the pte is inspected without its pte lock — presumably acceptable
 * on this (arm64, no-highmem) configuration; verify against callers.
 */
unsigned long get_swapmm_madv_kaddr(unsigned long uaddr, struct mm_struct *taskmm,
					int needlock)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long kaddr = 0;
	struct mm_struct *mm = current->mm;

	if (!uaddr)
		return 0;

	if (taskmm)
		mm = taskmm;

	if (unlikely(needlock))
		mmap_write_lock(mm);

	pgd = pgd_offset(mm, uaddr);
	if (pgd_none(*pgd))
		goto no_page;

	p4d = p4d_offset(pgd, uaddr);
	if (p4d_none(*p4d))
		goto no_page;

	pud = pud_offset(p4d, uaddr);
	if (pud_none(*pud))
		goto no_page;

	pmd = pmd_offset(pud, uaddr);
	if (pmd_none(*pmd))
		goto no_page;

	if (__pmd_huge(*pmd)) {
		/* 2M section mapping: physical address of the compound page */
		kaddr = (unsigned long)page_to_phys(pmd_page(*pmd));
	} else {
		pte = pte_offset_map(pmd, uaddr);
		if (!pte_present(*pte) || pte_none(*pte))
			goto no_page;
		kaddr = (unsigned long)page_to_phys(pte_page(*pte));
	}

no_page:
	if (unlikely(needlock))
		mmap_write_unlock(mm);
	return kaddr;
}

/*
 * SWAPMM_MADV_WILLNEED handler: validate the user request, allocate
 * len/pagealign pages (4K or 2M) on the requested NUMA node, and map
 * them into the caller's VM_SWAPMM_TLB_MADV vma.  When
 * SWAPMM_BIT_GET_KADDR is set, the physical address of each page is
 * copied back to the user-supplied buffer.
 *
 * @arg: user pointer to a struct swapmm_madv_ospages.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the page-pointer array is allocated with kcalloc(), which checks
 * num * sizeof(struct page *) for multiplication overflow — num is
 * user-controlled, and the old kmalloc(num * sizeof(...)) could
 * overflow when the GET_KADDR buflen check did not run.
 */
int __swapmm_madv_willneed(unsigned long arg)
{
	unsigned long virt_addr;
	unsigned long size;
	unsigned long align;
	unsigned int numa_node;
	struct page **pages = NULL;
	struct page *pagesbuf[G_PAGEBUF_NUM] = {0};
	unsigned long num = 0;
	int err = 0;
	unsigned long i;
	struct mm_struct *mm = current->mm;
	struct swapmm_madv_ospages swapmm_madv;
	void __user *buf = (void __user *)arg;
	struct vm_area_struct *vma;
	struct swapmm_madv_opt madvopt;

	if (!access_ok(buf, sizeof(struct swapmm_madv_ospages))) {
		pr_warn("swapmm: willneed userspace data can not access.\n");
		return -EFAULT;
	}
	if (copy_from_user(&swapmm_madv, buf, sizeof(struct swapmm_madv_ospages))) {
		pr_warn("swapmm: willneed userspace data can not copy.\n");
		return -EINVAL;
	}

	virt_addr = swapmm_madv.virt_addr;
	size = swapmm_madv.len;
	align = swapmm_madv.pagealign;
	numa_node = swapmm_madv.numa_node;

	err = (!size || !__swapmm_align_ok(align) || (size % align) != 0
			|| (virt_addr % align) != 0 || numa_node >= nr_node_ids);
	if (err) {
		pr_warn("swapmm: willneed parameters invalid, size: %lu, align: %lu, vaddr: 0x%lx, numa: %u\n",
			size, align, virt_addr, numa_node);
		return -EINVAL;
	}

	num = size / align;
	if (swapmm_madv.flag & SWAPMM_BIT_GET_KADDR) {
		if ((swapmm_madv.buf == NULL) ||
		     (swapmm_madv.buflen < (num * sizeof(struct page *)))) {
			pr_warn("swapmm: willneed parameters invalid, buf: 0x%lx, buflen: %u\n",
				(unsigned long)swapmm_madv.buf, swapmm_madv.buflen);
			return -EINVAL;
		}
		if (!access_ok(swapmm_madv.buf, num * sizeof(struct page *))) {
			pr_warn("swapmm: willneed userspace buf can not copy.\n");
			return -EFAULT;
		}
	}

	if (unlikely(num > G_PAGEBUF_NUM)) {
		/* kcalloc zeroes the array and rejects num * size overflow */
		pages = kcalloc(num, sizeof(struct page *), GFP_KERNEL);
		if (pages == NULL) {
			pr_warn("swapmm: willneed kmalloc failed, num %lu\n", num);
			return -ENOMEM;
		}
	} else {
		pages = (struct page **)pagesbuf;
	}

	if (__init_pages(pages, num, align, numa_node, swapmm_madv.flag & SWAPMM_BIT_RELIABLE)) {
		pr_warn("swapmm: willneed alloc_pages failed, num %lu, align: %lu\n", num, align);
		err = -ENOMEM;
		goto err_free;
	}

	madvopt.pages = pages;
	madvopt.align = align;
	madvopt.num = num;
	madvopt.pos = 0;

	mmap_read_lock(mm);
	vma = find_vma(mm, virt_addr);
	if (!vma || !(vma->vm_flags & VM_SWAPMM_TLB) ||
			!(vma->vm_flags & VM_SWAPMM_TLB_MADV) ||
			(vma->vm_end < (virt_addr + size))) {
		mmap_read_unlock(mm);
		pr_warn("swapmm: willneed mapping vm area(%#lx-%#lx) invalid !\n",
			virt_addr, virt_addr + size);
		err = -EINVAL;
		goto err_free;
	}

	err = __swapmm_madv_mapping(mm, virt_addr, size, &madvopt);
	mmap_read_unlock(mm);

	if (swapmm_madv.flag & SWAPMM_BIT_GET_KADDR) {
		/* hand physical addresses back to userspace, reusing the array */
		for (i = 0; i < num; i++)
			pages[i] = (void *)page_to_phys(pages[i]);
		if (copy_to_user(swapmm_madv.buf, pages, num * sizeof(struct page *)))
			pr_warn("swapmm: willneed copy kaddr to user failed\n");
	}

	if (unlikely(num > G_PAGEBUF_NUM))
		kfree(pages);
	return err;
err_free:
	__clean_pages(pages, num, align);
	if (unlikely(num > G_PAGEBUF_NUM))
		kfree(pages);
	return err;
}

/*
 * SWAPMM_MADV_DONTNEED handler: validate the user request, then unmap
 * and free the swapmm pages covering [virt_addr, virt_addr + len) in
 * the caller's VM_SWAPMM_TLB_MADV vma, flushing the TLB afterwards.
 *
 * @arg: user pointer to a struct swapmm_madv_ospages.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the "parameters invalid" pr_warn string literal was split across
 * source lines without continuation, which does not compile; it is now
 * a single literal.
 */
int __swapmm_madv_dontneed(unsigned long arg)
{
	unsigned long virt_addr;
	unsigned long size;
	unsigned long align;
	unsigned long num = 0;
	int err = 0;
	struct mm_struct *mm = current->mm;
	struct swapmm_madv_ospages swapmm_dontneed;
	void __user *buf = (void __user *)arg;
	struct vm_area_struct *vma;

	if (!access_ok(buf, sizeof(struct swapmm_madv_ospages))) {
		pr_warn("swapmm: dontneed userspace data can not access.\n");
		return -EFAULT;
	}
	if (copy_from_user(&swapmm_dontneed, buf, sizeof(struct swapmm_madv_ospages))) {
		pr_warn("swapmm: dontneed userspace data can not copy.\n");
		return -EINVAL;
	}

	virt_addr = swapmm_dontneed.virt_addr;
	size = swapmm_dontneed.len;
	align = swapmm_dontneed.pagealign;

	if (!size || !__swapmm_align_ok(align) || (size % align) != 0
			|| (virt_addr % align) != 0) {
		pr_warn("swapmm: dontneed parameters invalid, size: %lu, align: %lu, virt_addr: 0x%lx\n",
			size, align, virt_addr);
		return -EINVAL;
	}

	num = size / align;

	mmap_read_lock(mm);
	vma = find_vma(mm, virt_addr);
	if (!vma || !(vma->vm_flags & VM_SWAPMM_TLB) ||
			!(vma->vm_flags & VM_SWAPMM_TLB_MADV) ||
			(vma->vm_end < (virt_addr + size))) {
		mmap_read_unlock(mm);
		pr_warn("swapmm: dontneed mapping vm area(%#lx-%#lx) invalid !\n",
			virt_addr, virt_addr + size);
		return -EINVAL;
	}

	err = __swapmm_unmap_free(virt_addr, virt_addr + num * align, align);
	flush_tlb_range(vma, virt_addr, virt_addr + num * align);

	mmap_read_unlock(mm);
	return err;
}

/*
 * SWAPMM_MADV_SWAPMOVE handler: validate the user request (including
 * that the two ranges do not overlap), then relocate the mapping from
 * [vaddr_ori, vaddr_ori + len) to [vaddr_dst, vaddr_dst + len) in the
 * caller's swapmm vmas, flushing both TLB ranges afterwards.
 *
 * @arg: user pointer to a struct swapmm_madv_swapmove.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the "parameters invalid" pr_warn string literal was split across
 * source lines without continuation, which does not compile; it is now
 * a single literal.
 */
int __swapmm_madv_swapmove(unsigned long arg)
{
	unsigned long vaddr_ori;
	unsigned long vaddr_dst;
	unsigned long size;
	unsigned long align;
	unsigned long num = 0;
	int err = 0;
	struct mm_struct *mm = current->mm;
	struct swapmm_madv_swapmove swapmm_swapmove;
	void __user *buf = (void __user *)arg;
	struct vm_area_struct *vma_ori, *vma_dst;

	if (!access_ok(buf, sizeof(struct swapmm_madv_swapmove))) {
		pr_warn("swapmm: swapmove userspace data can not access.\n");
		return -EFAULT;
	}
	if (copy_from_user(&swapmm_swapmove, buf, sizeof(struct swapmm_madv_swapmove))) {
		pr_warn("swapmm: swapmove userspace data can not copy.\n");
		return -EINVAL;
	}

	vaddr_ori = swapmm_swapmove.vaddr_ori;
	vaddr_dst = swapmm_swapmove.vaddr_dst;
	size = swapmm_swapmove.len;
	align = swapmm_swapmove.pagealign;

	if (!size || !__swapmm_align_ok(align) || (size % align) != 0
			|| (vaddr_ori % align) != 0 || (vaddr_dst % align) != 0
			|| ((vaddr_ori <= vaddr_dst) && ((vaddr_ori + size) > vaddr_dst))
			|| ((vaddr_ori >= vaddr_dst) && (vaddr_ori < (vaddr_dst + size)))) {
		pr_warn("swapmm: swapmove parameters invalid, size: %lu, align: %lu, vaddr_ori: 0x%lx, vaddr_dst: 0x%lx\n",
			size, align, vaddr_ori, vaddr_dst);
		return -EINVAL;
	}

	num = size / align;

	mmap_read_lock(mm);
	vma_ori = find_vma(mm, vaddr_ori);
	vma_dst = find_vma(mm, vaddr_dst);
	if (!vma_ori || !vma_dst || !(vma_ori->vm_flags & VM_SWAPMM_TLB) ||
	    !(vma_ori->vm_flags & VM_SWAPMM_TLB_MADV) ||
	    !(vma_dst->vm_flags & VM_SWAPMM_TLB_MADV) ||
	    !(vma_dst->vm_flags & VM_SWAPMM_TLB) || (vma_ori->vm_end < (vaddr_ori + size)) ||
	    (vma_dst->vm_end < (vaddr_dst + size))) {
		mmap_read_unlock(mm);
		pr_warn("swapmm: swapmove mapping vm area(%#lx-%#lx, %#lx-%#lx) invalid !\n",
			vaddr_ori, vaddr_ori + size,
			vaddr_dst, vaddr_dst + size);
		return -EINVAL;
	}

	err = __swapmm_swap_move(&swapmm_swapmove);
	flush_tlb_range(vma_ori, vaddr_ori, vaddr_ori + size);
	flush_tlb_range(vma_dst, vaddr_dst, vaddr_dst + size);

	mmap_read_unlock(mm);
	return err;
}

/*
 * One node in a singly linked list of madvise-owned virtual address
 * ranges, used when computing the non-pinned regions to free at exit.
 */
struct madv_area {
	unsigned long start;	/* inclusive start vaddr */
	unsigned long end;	/* exclusive end vaddr */
	struct madv_area *next;	/* next range, or NULL */
};

/*
 * Three-way ascending comparator over unsigned longs for sort().
 *
 * Fix: the original returned -1 for equal elements, making the
 * comparator inconsistent (cmp(a, b) and cmp(b, a) both negative);
 * return 0 on equality so the ordering is well defined.
 */
static int cmpul(const void *a, const void *b)
{
	unsigned long x = *(const unsigned long *)a;
	unsigned long y = *(const unsigned long *)b;

	if (x < y)
		return -1;
	if (x > y)
		return 1;
	return 0;
}

/**
 * Given two virtual memory regions, find out the difference between them.
 * The difference could be 0, 1, 2 regions. For example,
 * there are 2 diff regions between 0x30000-0x40000 and 0x31000-0x32000:
 * 0x30000-0x31000 and 0x32000-0x40000;
 * there is only 1 diff region between 0x30000-0x40000 and 0x31000-0x45000:
 * 0x30000-0x31000.
 */
/**
 * Given two virtual memory regions, find out the difference between them.
 * The difference could be 0, 1, 2 regions. For example,
 * there are 2 diff regions between 0x30000-0x40000 and 0x31000-0x32000:
 * 0x30000-0x31000 and 0x32000-0x40000;
 * there is only 1 diff region between 0x30000-0x40000 and 0x31000-0x45000:
 * 0x30000-0x31000.
 *
 * @madv_area is updated in place to hold the first diff region; a second
 * diff region, when it exists, is inserted after it.  Returns false when
 * nothing of @madv_area survives the subtraction (the caller then frees
 * the node).
 *
 * NOTE(review): a kmalloc() failure for the second region also returns
 * false, which makes the caller drop the first diff region as if it were
 * fully overlapped — verify this best-effort behavior is intended.
 */
static bool __madv_area_diff(
	struct madv_area *madv_area, struct swapmm_area *swapmm_area)
{
	struct madv_area *next = NULL;
	unsigned long nums[4] = {
		madv_area->start, swapmm_area->virt_start,
		madv_area->end, swapmm_area->virt_end};
	unsigned long start = madv_area->start;
	unsigned long end = madv_area->end;
	bool updated = false;

	/* after sorting, nums[] holds the four boundaries in ascending order */
	sort(nums, 4, sizeof(unsigned long), cmpul, NULL);

	/* Test if these two regions overlap at the beginning.
	 * If no, we need to update madv_area and get the first region.
	 */
	if (nums[0] == start && nums[1] != start) {
		madv_area->start = nums[0];
		madv_area->end = nums[1];
		updated = true;
	}

	/* Test if these two regions overlap at the end.
	 * If the end is from madv_area, we need to update madv_area.
	 * Otherwise, it means they either has no intersection, or the
	 * tail of madv_area belongs to swapmm_area.
	 */
	if (nums[3] == end && nums[2] != end) {
		if (updated) {
			next = kmalloc(sizeof(struct madv_area), GFP_KERNEL);
			if (!next)
				return false;
			next->start = nums[2];
			next->end = nums[3];
			next->next = madv_area->next;
			madv_area->next = next;
		} else {
			madv_area->start = nums[2];
			madv_area->end = nums[3];
			updated = true;
		}
	}

	return updated;
}

/**
 * Find out the difference between one memory region and a list of regions.
 */
/**
 * Find out the difference between one memory region and a list of regions.
 *
 * Subtracts every @swapmm_area entry sharing the first entry's asid from
 * the @madv_area list, splitting or dropping nodes as needed.  Nodes
 * that are entirely covered are freed.  Returns the (possibly new) list
 * head, or NULL when nothing remains.
 */
static struct madv_area *madv_area_diff(
	struct madv_area *madv_area, struct swapmm_area *swapmm_area)
{
	struct madv_area *head = madv_area;
	struct madv_area *prev, *cur, *next;
	bool updated;
	unsigned long asid;

	if (!swapmm_area)
		return head;
	asid = swapmm_area->asid;

	/* only subtract pinned areas belonging to this address space (asid) */
	while (swapmm_area && swapmm_area->asid == asid && head) {
		prev = NULL;
		cur = head;
		while (cur) {
			/* __madv_area_diff() may insert a node after cur; save next first */
			next = cur->next;
			updated = __madv_area_diff(cur, swapmm_area);
			if (!updated) {
				/* cur is fully covered: unlink and free it */
				kfree(cur);
				if (prev)
					prev->next = next;
				else
					head = next;
				cur = next;
			} else {
				prev = cur;
				cur = next;
			}
		}
		swapmm_area = swapmm_area->next;
	}
	return head;
}

/**
 * Clear and free non-pin pages when process exits.
 */
void swapmm_madv_clear_pgtable(struct mm_struct *mm, struct swapmm_area *swapmm_area)
{
	struct vm_area_struct *vma;
	struct madv_area *madv_area, *prev_area;
	int err;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_SWAPMM_TLB_MADV))
			continue;

		madv_area = kmalloc(sizeof(struct madv_area), GFP_KERNEL);
		if (madv_area == NULL)
			break;

		madv_area->start = vma->vm_start;
		madv_area->end = vma->vm_end;
		madv_area->next = NULL;
		madv_area = madv_area_diff(madv_area, swapmm_area);

		while (madv_area) {
			err = __swapmm_unmap_free(madv_area->start, madv_area->end, 0);
			flush_tlb_range(vma, madv_area->start, madv_area->end);
			if (err != 0)
				pr_warn("swapmm: exiting task %d non-pin area(%pK-%pK) free failed %d",
					current->tgid, (void *)madv_area->start,
					(void *)madv_area->end, err);

			prev_area = madv_area;
			madv_area = madv_area->next;
			kfree(prev_area);
		}
	}
	mmap_read_unlock(mm);
}

