// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 * Description: Euler virtual memory management.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/profile.h>
#include <linux/task_exit_notifier.h>
#include <linux/swapmm_tlb.h>
#include <linux/printk.h>

#include <asm/ptrace.h>
#include <asm/tlb.h>

#include "swapmm_tlb_madv.h"
#include "swapmm_tlb_mirror.h"

/* Head of a singly linked list of pinned areas, kept sorted by asid. */
static struct swapmm_area *swapmm_area_head;
/* Protects swapmm_area_head, swapmm_asid_head and the per-node area lists. */
static spinlock_t page_map_swap_lock;
/* Non-zero once task_exit_notifier_block has been registered. */
static int exit_notifier_status;

/* Reserved physical memory regions collected from iomem at module init. */
static struct list_head reversed_head = LIST_HEAD_INIT(reversed_head);

/* Binds a user-supplied address-space id (asid) to a process and its areas. */
struct swapmm_asid {
	int tgid;		/* owner tgid; SWAPMM_PID_LIMIT when unbound */
	int reserve;		/* reserved/padding; not read in this file */
	unsigned long asid;	/* user-chosen address-space id */
	struct swapmm_area *head_area;	/* first pinned area with this asid */
	struct list_head list;	/* link in swapmm_asid_head */
};

static LIST_HEAD(swapmm_asid_head);
/* Sentinel tgid meaning "asid currently has no live owner process". */
#define SWAPMM_PID_LIMIT  0xffffffff
#define RESOURCE_NAME_LEN 512
/* Name of the reserved iomem resource(s) to map from (module parameter). */
static char resource_name[RESOURCE_NAME_LEN] __initdata;
module_param_string(resource_name, resource_name, sizeof(resource_name), 0);
MODULE_PARM_DESC(resource_name, "The reversed physical region name");
module_param_named(max_mirror_page, mem_mirror_max_pages, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(max_mirror_page, "The maximum number of 4K pages to be mirrored");

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
/*
 * Stubbed split-PTE ptlock allocation: report success without allocating.
 * NOTE(review): these are non-static and appear to override the core mm
 * implementations when split ptlocks are allocated separately — confirm
 * this is intentional in the surrounding kernel configuration.
 */
bool ptlock_alloc(struct page *page)
{
	return true;
}

/* Matching no-op free for the stubbed ptlock_alloc() above. */
void ptlock_free(struct page *page)
{
}
#endif
#endif

int check_phys_addr(unsigned long start, unsigned long end)
{
	struct swapmm_reverved_mem *reversed;

	/* prevent overflow */
	if (end <= start)
		return -EINVAL;

	list_for_each_entry(reversed, &reversed_head, list) {
		if (start >= reversed->start && end <=	reversed->end)
			return 0;
	}

	return -EINVAL;
}

/*
 * pgtable alloc function
 * pgtable free located at free_pgtables->...>pte_free
 */
/*
 * Allocate one pte-table page for the current mm and return its kernel
 * virtual address, or NULL on failure. Only supports 4K base pages.
 * Note: the `addr` parameter is not used by this function.
 */
static void *swapmm_alloc_page(unsigned long addr)
{
	pgtable_t pte = NULL;
	/* 4K page frame */
	if (PAGE_SHIFT != 12)
		return NULL;

	/*
	 * add page to kmem cache
	 * used by gup and munmap
	 * otherwise, would cause multiple panics
	 */
	pte = pte_alloc_one(current->mm);
	if (!pte)
		return NULL;
	return page_to_virt(pte);
}

/*
 * Fill the pte level for [addr, end) with a linear mapping starting at `pfn`,
 * allocating and populating the pte table page first when the pmd entry is
 * empty. Returns 0 on success or -ENOMEM.
 */
static int alloc_init_pte(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		unsigned long pfn, pgprot_t prot)
{
	pte_t *pte = NULL;

	if (pmd_none(*pmd)) {
		pte = (pte_t *)swapmm_alloc_page(addr);
		if (!pte) {
			pr_alert("swapmm: alloc pte pgtable page failed!\n");
			return -ENOMEM;
		}
		/* keep mm->pgtables_bytes accounting in sync */
		mm_inc_nr_ptes(mm);
		swapmm_pmd_populate(mm, pmd, pte);
	}

	/* one pte per 4K page, consecutive pfns */
	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}

/*
 * Map [addr, end) at the pmd level. Uses a huge (block) pmd when the
 * virtual range, its boundary and the physical address are all 2M-aligned;
 * otherwise descends into alloc_init_pte(). Allocates the pmd table first
 * if the pud entry is empty. Returns 0 on success or -ENOMEM.
 */
static int alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end,
		phys_addr_t phys, pgprot_t prot)
{
	pmd_t *pmd = NULL;
	unsigned long next;
	int err  = 0;

	if (pud_none(*pud)) {
		pmd = swapmm_pmd_alloc(mm, addr);
		if (!pmd) {
			pr_alert("swapmm: alloc pmd pgtable page failed!\n");
			return -ENOMEM;
		}
		mm_inc_nr_pmds(mm);
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* fully 2M-aligned chunk: install a single block mapping */
		if (((addr | next | phys) & ~PMD_MASK) == 0) {
			pmd_t old_pmd = *pmd;

			set_pmd(pmd, swapmm_pmd_prot(phys, prot));
			/* overwriting a live entry is unexpected; flush TLBs */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				pr_alert("swapmm: if occurs, may be an error! free pte page frame!\n");
			}
		} else {
			err = alloc_init_pte(mm, pmd, addr, next,
				__phys_to_pfn(phys), prot);
			if (err != 0)
				return err;
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Map [addr, end) at the pud level, allocating the pud table first when the
 * p4d entry is absent, then descending into alloc_init_pmd() for each pud
 * span. Returns 0 on success or -ENOMEM.
 */
static int alloc_init_pud(struct mm_struct *mm, p4d_t *p4d,
		unsigned long addr, unsigned long end,
		phys_addr_t phys, pgprot_t prot)
{
	pud_t *pud = NULL;
	unsigned long next;
	int err = 0;

	if (!p4d_present(*p4d)) {
		pud = swapmm_pud_alloc(mm, addr);
		if (!pud) {
			pr_alert("swapmm: alloc pud pgtable page failed!\n");
			return -ENOMEM;
		}
		mm_inc_nr_puds(mm);
		swapmm_p4d_populate(mm, p4d, pud);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		err = alloc_init_pmd(mm, pud, addr, next, phys, prot);
		if (err != 0)
			return err;
		/* advance phys in lock-step with the virtual cursor */
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	return 0;
}

/*
 * Map [addr, end) at the p4d level, allocating and populating the p4d table
 * when the pgd entry is absent, then descending into alloc_init_pud() for
 * each p4d span. Returns 0 on success or -ENOMEM.
 */
static inline int alloc_init_p4d(struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				phys_addr_t phys, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;
	int err;

	if (!pgd_present(*pgd)) {
		p4d = p4d_alloc_one(mm, addr);
		if (!p4d) {
			pr_alert("swapmm: alloc p4d pgtable failed!\n");
			return -ENOMEM;
		}
		pgd_populate(mm, pgd, p4d);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		/*
		 * Bug fix: descend only up to the current p4d boundary
		 * (`next`), not the whole remaining range (`end`); the old
		 * code remapped the full tail on every iteration, letting
		 * `phys` drift out of sync with `addr` on multi-p4d ranges.
		 * This mirrors how alloc_init_pud()/alloc_init_pmd() recurse.
		 */
		err = alloc_init_pud(mm, p4d, addr, next, phys, prot);
		if (err != 0)
			return err;
		phys += next - addr;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/*
 * Create a linear mapping of physical range [start, start+size) at user
 * virtual address `virt` in `mm`, after verifying the physical range lies
 * in a recorded reserved region. Walks the pgd level and delegates to
 * alloc_init_p4d(). Returns 0 on success or a negative errno.
 */
static int swapmm_create_mapping(struct mm_struct *mm, unsigned long virt,
	unsigned long start, unsigned long size)
{
	pgd_t *pgd = NULL;
	pgprot_t prot;
	unsigned long addr, length, end, next;
	phys_addr_t phys;
	int err = 0;

	pgd = pgd_offset(mm, virt);
	prot = PAGE_SWAPMM;
	phys = start & PAGE_MASK;
	addr = virt & PAGE_MASK;
	/* round the length up to cover the partial first/last pages */
	length = PAGE_ALIGN(size + (virt & (~PAGE_MASK)));
	end = addr + length;

	err = check_phys_addr(phys, phys + length);
	if (err != 0) {
		pr_warn("swapmm: bad physical address %pK -- %pK.\n",
			(void *)phys, (void *)(phys+length));
		return err;
	}

	do {
		next = pgd_addr_end(addr, end);

		err = alloc_init_p4d(mm, pgd, addr, next, phys, prot);
		if (err != 0)
			return err;
		/* NOTE(review): the `if (phys)` guard skips advancing a zero
		 * physical base — confirm whether phys == 0 is a real case;
		 * other levels advance unconditionally.
		 */
		if (phys)
			phys += next - addr;
	} while (pgd++, addr = next, addr != end);

	return 0;
}

/*
 * Page-fault handler for swapmm VMAs. For madvise-style VMAs, first
 * re-check the pte under its lock: a concurrent mem-mirror remap may have
 * populated it, in which case the fault is already resolved (NOPAGE).
 * Otherwise hand the fault to userspace via the epgfault signal when
 * enabled, or fail with SIGSEGV.
 */
static vm_fault_t swapmm_fault(struct vm_fault *vmf)
{
	if (pte_none(vmf->orig_pte) && !pmd_none(*vmf->pmd) &&
		     (vmf->vma->vm_flags & VM_SWAPMM_TLB_MADV)) {
		/* It may race against the remap process of mem mirror module
		 * If so, the pte lock is hold by mem mirror, waiting to be released
		 */
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
		if (!pte_none(*vmf->pte)) {
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return VM_FAULT_NOPAGE;
		}
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	}
	if (epgfault_is_enabled()) {
		return VM_FAULT_USER_FAULT_SIG;
	} else {
		pr_err("swapmm: SIGSEGV in %s, epgfault is disabled.", __func__);
		return VM_FAULT_SIGSEGV;
	}
}

/* Report the VMA's display name based on which swapmm flag is set. */
static const char *swapmm_name(struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;

	if (flags & VM_SWAPMM_TLB_MADV)
		return "swapmm_willneed";
	return (flags & VM_SWAPMM_TLB) ? "swapmm_mmap" : NULL;
}

/* VMA callbacks shared by all swapmm mappings. */
static const struct vm_operations_struct swapmm_vm_ops = {
	.fault = swapmm_fault,
	.name = swapmm_name,
};

/*
 * Look up the asid node owned by `tgid` under page_map_swap_lock.
 * Returns the node or NULL; the pointer itself outlives the lock.
 */
static struct swapmm_asid *get_asid_node_by_tgid(int tgid)
{
	struct swapmm_asid *pos;
	struct swapmm_asid *found = NULL;

	spin_lock(&page_map_swap_lock);
	list_for_each_entry(pos, &swapmm_asid_head, list) {
		if (pos->tgid == tgid) {
			found = pos;
			break;
		}
	}
	spin_unlock(&page_map_swap_lock);
	return found;
}

/*
 * Look up the node with matching `asid` under page_map_swap_lock.
 * Returns the node or NULL; the pointer itself outlives the lock.
 */
static struct swapmm_asid *get_asid_node_by_asid(unsigned long asid)
{
	struct swapmm_asid *pos;
	struct swapmm_asid *found = NULL;

	spin_lock(&page_map_swap_lock);
	list_for_each_entry(pos, &swapmm_asid_head, list) {
		if (pos->asid == asid) {
			found = pos;
			break;
		}
	}
	spin_unlock(&page_map_swap_lock);
	return found;
}

/*
 * Detach `tgid` from its asid node by marking the node ownerless
 * (SWAPMM_PID_LIMIT). Returns the node, or NULL when tgid has no binding.
 */
static struct swapmm_asid *unbind_asid(int tgid)
{
	struct swapmm_asid *node = get_asid_node_by_tgid(tgid);

	if (node)
		node->tgid = SWAPMM_PID_LIMIT;
	return node;
}

/*
 * Find a pinned area belonging to `node` that fully contains
 * [start, end]. Scans only the node's run of same-asid areas
 * (the list is sorted by asid). Returns the area or NULL.
 */
static struct swapmm_area *find_swapmm_area(unsigned long start,
	unsigned long end, struct swapmm_asid *node)
{
	struct swapmm_area *area;
	struct swapmm_area *found = NULL;

	spin_lock(&page_map_swap_lock);
	for (area = node->head_area; area && area->asid == node->asid;
	     area = area->next) {
		if (start >= area->virt_start && end <= area->virt_end) {
			found = area;
			break;
		}
	}
	spin_unlock(&page_map_swap_lock);
	return found;
}

/*
 * Scan the pte table under `pmd` for [start, end) and return true when any
 * present pte maps exactly the physical page `paddr`.
 */
static bool is_paddr_in_pmd(pmd_t *pmd, unsigned long start, unsigned long end,
		phys_addr_t paddr)
{
	pte_t *pte = NULL;
	phys_addr_t phys;

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return false;

	pte = pte_offset_kernel(pmd, start);
	do {
		if (pte_none(*pte))
			continue;
		phys = __pte_to_phys(*pte);
		if (phys == paddr)
			return true;
	} while (pte++, start += PAGE_SIZE, start != end);

	return false;
}

/*
 * Scan the pmd level under `pud` for [start, end). Huge pmds are compared
 * against `paddr` rounded down to 2M; table pmds descend into
 * is_paddr_in_pmd(). Returns true when the physical page is found.
 */
static bool is_paddr_in_pud(pud_t *pud, unsigned long start, unsigned long end,
		phys_addr_t paddr)
{
	pmd_t *pmd = NULL;
	/* huge mappings can only match on a 2M-aligned base */
	phys_addr_t paddr_huge = paddr & ~(SWAPMM_ALIGN_2M - 1);
	phys_addr_t phys;
	unsigned long next;

	if (pud_none(*pud) || pud_bad(*pud))
		return false;

	pmd = pmd_offset(pud, start);
	do {
		next = pmd_addr_end(start, end);
		if (__pmd_huge(*pmd)) {
			phys = __pmd_to_phys(*pmd);
			if (phys == paddr_huge)
				return true;
		} else if (is_paddr_in_pmd(pmd, start, next, paddr)) {
			return true;
		}
	} while (pmd++, start = next, start != end);

	return false;
}


/*
 * Walk the pud level under `p4d` for [start, end), delegating to
 * is_paddr_in_pud(). Returns true when `paddr` is mapped in the range.
 */
static bool is_paddr_in_p4d(p4d_t *p4d, unsigned long start, unsigned long end,
		phys_addr_t paddr)
{
	pud_t *pud = NULL;
	unsigned long next;

	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return false;

	pud = pud_offset(p4d, start);
	do {
		next = pud_addr_end(start, end);
		if (is_paddr_in_pud(pud, start, next, paddr))
			return true;
	} while (pud++, start = next, start != end);

	return false;
}

/*
 * Walk the p4d level under `pgd` for [start, end), delegating to
 * is_paddr_in_p4d(). Returns true when `paddr` is mapped in the range.
 */
static bool is_paddr_in_pgd(pgd_t *pgd, unsigned long start, unsigned long end,
		phys_addr_t paddr)
{
	p4d_t *p4d = NULL;
	unsigned long next;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return false;

	p4d = p4d_offset(pgd, start);
	do {
		next = p4d_addr_end(start, end);
		if (is_paddr_in_p4d(p4d, start, next, paddr))
			return true;
	} while (p4d++, start = next, start != end);

	return false;
}

/*
 * Check whether `paddr` is mapped by any pinned area of `node`, walking the
 * saved pgd snapshots (pgd_array) rather than a live mm. Caller must hold
 * page_map_swap_lock (see is_swapmm_pin_page()).
 */
static bool is_paddr_in_node(phys_addr_t paddr, struct swapmm_asid *node)
{
	struct swapmm_area *p = node->head_area;
	unsigned long rc, nr;
	unsigned long start, end, next;
	pgd_t pgd;

	while (p && p->asid == node->asid) {
		start = p->virt_start;
		end = p->virt_end;
		/* number of pgd entries covering the area */
		nr = (p->virt_end - (p->virt_start & PGDIR_MASK) + PGDIR_SIZE - 1) / PGDIR_SIZE;

		for (rc = 0; rc < nr; rc++, start = next) {
			/* reconstruct a pgd from the saved raw value */
			pgd.pgd = p->pgd_array[rc];
			next = pgd_addr_end(start, end);
			if (is_paddr_in_pgd(&pgd, start, next, paddr))
				return true;
		}
		p = p->next;
	}

	return false;
}

/*
 * Exported predicate: is the physical page containing `paddr` mapped by any
 * pinned swapmm area? Takes page_map_swap_lock for the whole scan.
 */
bool is_swapmm_pin_page(phys_addr_t paddr)
{
	struct swapmm_asid *node;

	/* compare at page granularity */
	paddr = paddr & ~(PAGE_SIZE - 1);

	spin_lock(&page_map_swap_lock);
	list_for_each_entry(node, &swapmm_asid_head, list) {
		if (is_paddr_in_node(paddr, node)) {
			spin_unlock(&page_map_swap_lock);
			return true;
		}
	}
	spin_unlock(&page_map_swap_lock);
	return false;
}
EXPORT_SYMBOL(is_swapmm_pin_page);

/*
 * Insert `svma` into the global area list, which is kept sorted by asid.
 * Insertion happens before the first area with an equal-or-greater asid, so
 * `svma` becomes the first area of its asid — which is why the node's
 * head_area is unconditionally repointed at it. Protected by
 * page_map_swap_lock.
 */
static void insert_swapmm_area(struct swapmm_area *svma, struct swapmm_asid *node)
{
	struct swapmm_area *p = swapmm_area_head;
	struct swapmm_area *prev = NULL;

	spin_lock(&page_map_swap_lock);
	if (!swapmm_area_head) {
		swapmm_area_head = svma;
		goto out;
	}

	while (p) {
		if (svma->asid <= p->asid) {
			if (!prev) {
				swapmm_area_head = svma;
				svma->next = p;
			} else {
				prev->next = svma;
				svma->next = p;
			}
			goto out;
		}
		prev = p;
		p = p->next;
	}
	/* largest asid so far: append at the tail */
	prev->next = svma;
out:
	/* update asid node head */
	node->head_area = svma;
	spin_unlock(&page_map_swap_lock);
}

/*
 * Save the live p4d entries covering [start, end) into p->pgd_array and pin
 * the backing table pages (Hotreplace flag + extra refcount) so they survive
 * process exit. Returns the number of entries saved.
 *
 * NOTE(review): `rc` indexes pgd_array compactly (incremented only for
 * present entries), while restore_page_range() and is_paddr_in_node() index
 * by address offset — confirm ranges with holes cannot occur here, or the
 * two indexing schemes diverge.
 */
static int pin_pgd_range(struct mm_struct *mm, unsigned long start,
	unsigned long end, struct swapmm_area *p)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;
	struct page *page;
	int rc = 0;

	pgd = pgd_offset(mm, start);
	do {
		p4d = (p4d_t *)pgd;
		next = pgd_addr_end(start, end);
		if (pgd_none_or_clear_bad(pgd) || p4d_none_or_clear_bad(p4d))
			continue;

		pr_debug("save p4d[%d]\n", rc);
		page = p4d_page(*p4d);
		p->pgd_array[rc] = p4d_val(*p4d);
		rc++;

		/* the page with hot replace flag can be reversed
		 * for remapping when process exit.
		 */
		if (!PageHotreplace(page)) {
			SetPageHotreplace(page);
			page_ref_inc(page);
		}
	} while (pgd++, start = next, start < end);
	return rc;
}

/*
 * store the vma pgd values for remapping to restart process,
 * which can help to restore the process data quickly.
 */
static void pin_pgd_for_swap_vmas(struct mm_struct *mm, struct swapmm_area *head_area)
{
	struct swapmm_area *p = head_area;
	unsigned long asid;

	if (p == NULL)
		return;

	/* pin only this node's run of same-asid areas (list sorted by asid) */
	asid = p->asid;
	spin_lock(&page_map_swap_lock);
	while (p && p->asid == asid) {
		pin_pgd_range(mm, p->virt_start, p->virt_end, p);
		p = p->next;
	}
	spin_unlock(&page_map_swap_lock);
}

/* remapping the pgd values to the restart process page table */
static int restore_page_range(struct mm_struct *mm, unsigned long start,
	unsigned long end, struct swapmm_area *p)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;
	int index;
	int rc = 0;
	unsigned long scan_start = start;

	pgd = pgd_offset(mm, scan_start);
	do {
		p4d = (p4d_t *)pgd;
		next = pgd_addr_end(scan_start, end);
		/* index is the address offset from the area's first pgd slot */
		index = (scan_start - (start & PGDIR_MASK)) / PGDIR_SIZE;
		/* skip slots with no saved value or already-populated p4ds */
		if (!p->pgd_array[index] || !p4d_none(*p4d)) {
			pr_debug("p4d[%d] is none\n", index);
			rc++;
			continue;
		}
		pr_debug("restore p4d[%d]\n", index);
		swapmm_set_p4d(p4d, __p4d(p->pgd_array[index]));
		++rc;
	} while (pgd++, scan_start = next, scan_start < end);

	return rc;
}

/*
 * Allocate and register a pinned area descriptor for [start, end) in the
 * given asid node, snapshotting the current pgd entries into pgd_array so
 * they can be restored later. Returns the new area or NULL on allocation
 * failure.
 */
static struct swapmm_area *create_swapmm_area(struct mm_struct *mm,
	unsigned long start, unsigned long end, struct swapmm_asid *asid_node)
{
	struct swapmm_area *p;
	unsigned long nr, rc;
	pgd_t *pgd;

	p = kmalloc(sizeof(struct swapmm_area), GFP_KERNEL);
	if (!p) {
		pr_err("swapmm: alloc swap mm area(size: %#lx) failed.\n",
			sizeof(struct swapmm_area));
		return NULL;
	}
	/* number of pgd entries covering the range, start rounded down */
	nr = (end - (start & PGDIR_MASK) + PGDIR_SIZE - 1) / PGDIR_SIZE;
	/*
	 * kcalloc instead of kzalloc(nr * size): it checks the nr * size
	 * multiplication for overflow before allocating.
	 */
	p->pgd_array = kcalloc(nr, sizeof(unsigned long), GFP_KERNEL);
	if (!p->pgd_array) {
		kfree(p);
		pr_err("swapmm: alloc pgd array fail(size: %#lx) failed.\n",
				nr * sizeof(unsigned long));
		return NULL;
	}

	/* snapshot the live pgd entries for later restore */
	pgd = pgd_offset(mm, start);
	for (rc = 0; rc < nr; rc++, pgd++)
		p->pgd_array[rc] = pgd_val(*pgd);
	p->virt_start = start;
	p->virt_end = end;
	p->asid = asid_node->asid;
	p->next = NULL;
	insert_swapmm_area(p, asid_node);
	return p;
}

/* Release every area descriptor on the global list (module exit path). */
static void free_swapmm_areas(void)
{
	struct swapmm_area *cur = swapmm_area_head;

	while (cur) {
		struct swapmm_area *victim = cur;

		cur = cur->next;
		kfree(victim->pgd_array);
		victim->pgd_array = NULL;
		kfree(victim);
	}
	swapmm_area_head = NULL;
}

 /* swapmm callback for mmap syscall */
static int swapmm_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err = 0;
	struct mm_struct *mm = current->mm;

	pr_debug("swapmm: user call mmap vma=0x%pK, offset=%#lx, vma size=%#lx, start=0x%pK\n",
		 vma, vma->vm_pgoff, vma->vm_end - vma->vm_start, (void *)vma->vm_start);

	/*
	 * Offset 0 or the special MADV offset selects a lazy (fault-driven)
	 * VMA: no page tables are built here; faults go via swapmm_fault().
	 */
	if (!vma->vm_pgoff || (vma->vm_pgoff == SWAPMM_MMAP_MADV >> PAGE_SHIFT)) {
		vma->vm_flags |= VM_SWAPMM_TLB | VM_DONTCOPY;
		vma->vm_ops = &swapmm_vm_ops;
		if (vma->vm_pgoff == SWAPMM_MMAP_MADV >> PAGE_SHIFT) {
			vma->vm_flags |= VM_SWAPMM_TLB_MADV;
			/* record each swapmm_tlb_madv user for mem mirror module */
			mem_mirror_try_add_task(current);
		}
		return 0;
	}

	/* init the swapmm vma struct */
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
	vma->vm_flags |= VM_SWAPMM_TLB | VM_DONTCOPY;
	vma->vm_ops = &swapmm_vm_ops;

	/* non-zero offset: eagerly map physical pages at (pgoff << PAGE_SHIFT) */
	err = swapmm_create_mapping(mm, vma->vm_start,
		vma->vm_pgoff << PAGE_SHIFT, vma->vm_end - vma->vm_start);
	if (err)
		return err;

	/* NOTE(review): only the first page of the range is flushed here —
	 * confirm whether a full-range flush is needed for multi-page VMAs.
	 */
	flush_tlb_page(vma, vma->vm_start);
	return err;
}

/* help users to mmap physical pages to vma when page fault */
static int swapmm_phy_mapping(unsigned long arg)
{
	unsigned long virt_start, virt_end;
	struct mm_struct *mm = current->mm;
	struct swapmm_phy_area sfa;
	void __user *buf = (void __user *)arg;
	struct vm_area_struct *vma;
	int ret;

	if (!access_ok(buf, sizeof(struct swapmm_phy_area))) {
		pr_warn("swapmm: userspace data can not access.\n");
		return -EFAULT;
	}
	if (copy_from_user(&sfa, buf, sizeof(struct swapmm_phy_area))) {
		pr_warn("swapmm: userspace data can not copy.\n");
		return -EINVAL;
	}

	/* reject zero-size and wrap-around requests */
	virt_start = sfa.virt_addr;
	virt_end = sfa.virt_addr + sfa.size;
	if (virt_end <= virt_start) {
		pr_alert("swapmm: size %#lx is zero or too large(overflow)!\n", sfa.size);
		return -EINVAL;
	}
	mmap_write_lock(mm);
	/* the target range must lie in a single non-madvise swapmm VMA */
	vma = find_vma(mm, virt_start);
	if (!vma || !(vma->vm_flags & VM_SWAPMM_TLB) ||
		(vma->vm_flags & VM_SWAPMM_TLB_MADV) ||
		(vma->vm_end < virt_end)) {
		pr_warn("swapmm: mapping vm area(0x%pK-0x%pK) invalid!\n",
					(void *)virt_start, (void *)virt_end);
		ret = -EINVAL;
		goto out;
	}

	ret = swapmm_create_mapping(mm, sfa.virt_addr, sfa.phy_addr, sfa.size);

out:
	mmap_write_unlock(mm);
	return ret;
}

/*
 * Task-exit notifier: when hot-replace is enabled and the exiting process
 * holds an asid binding, unbind it, pin the pinned areas' page tables so
 * they survive exit, detach mem-mirror state, and clear the madvise
 * pgtable entries.
 */
static int swapmm_task_exit_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct swapmm_asid *node;

	if (!swapmm_hotreplace_enabled())
		return 0;

	node = unbind_asid(current->tgid);
	if (node) {
		pr_info("unbind and pin for %d\n", current->tgid);
		pin_pgd_for_swap_vmas(current->mm, node->head_area);
		mem_mirror_task_exit(current);
		swapmm_madv_clear_pgtable(current->mm, node->head_area);
	}

	return 0;
}

/* Registered lazily on first bind_asid(); see exit_notifier_status. */
static struct notifier_block task_exit_notifier_block = {
	.notifier_call	= swapmm_task_exit_notifier,
};

/* asid only bind once */
/*
 * Bind the caller's tgid to a user-supplied asid. An asid whose owner has
 * exited (tgid == SWAPMM_PID_LIMIT) may be re-bound; otherwise a fresh node
 * is allocated and queued. The first successful bind also registers the
 * task-exit notifier. Returns 0 or a negative errno.
 */
static int bind_asid(unsigned long arg)
{
	struct swapmm_asid *node = NULL;
	unsigned long asid;
	void __user *buf = (void __user *)arg;

	if (copy_from_user(&asid, buf, sizeof(unsigned long))) {
		pr_warn("swapmm: userspace data can not copy.\n");
		return -EINVAL;
	}
	/* has already binded */
	if (get_asid_node_by_tgid(current->tgid))
		return -EINVAL;
	node = get_asid_node_by_asid(asid);
	if (node) {
		if (node->tgid != SWAPMM_PID_LIMIT) {
			pr_warn("asid %lu has been binded\n", asid);
			return -EINVAL;
		}
		/* re-bind an orphaned asid left behind by an exited process */
		node->tgid = current->tgid;
		pr_info("bind asid %lu to %d\n", asid, current->tgid);
		return 0;
	}
	/*
	 * Bug fix: kzalloc instead of kmalloc — the `reserve` field is never
	 * written here and previously stayed uninitialized heap garbage.
	 */
	node = kzalloc(sizeof(struct swapmm_asid), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->tgid = current->tgid;
	node->asid = asid;
	INIT_LIST_HEAD(&node->list);
	node->head_area = NULL;
	spin_lock(&page_map_swap_lock);
	list_add_tail(&node->list, &swapmm_asid_head);
	if (!exit_notifier_status) {
		pr_info("task_exit_notifier_block register ok\n");
		task_exit_notifier_register(&task_exit_notifier_block);
		exit_notifier_status = 1;
	}
	spin_unlock(&page_map_swap_lock);
	pr_info("bind asid %lu to %d\n", asid, current->tgid);
	return 0;
}

/* help users to pin the swapmm vma which can be recovered when process
 * restart
 */
static int swapmm_pin_or_restore_mapping(unsigned long arg, bool pin)
{
	int ret = 0;
	struct swapmm_area *sa;
	struct swapmm_pin_area spa;
	void __user *buf = (void __user *)arg;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct swapmm_asid *asid_node;

	if (!swapmm_hotreplace_enabled()) {
		pr_warn("swapmm: no kernel parameter 'swapmm_pagetable_reuse=1'\n");
		return -EINVAL;
	}

	if (!access_ok(buf, sizeof(struct swapmm_pin_area))) {
		pr_warn("swapmm: can't access userspace data.\n");
		return -EFAULT;
	}
	if (copy_from_user(&spa, buf, sizeof(struct swapmm_pin_area))) {
		pr_warn("swapmm: can't read userspace data.\n");
		return -EINVAL;
	}

	/* normalise the request to page boundaries */
	spa.virt_start &= PAGE_MASK;
	spa.virt_end = PAGE_ALIGN(spa.virt_end);

	if (spa.virt_start == spa.virt_end) {
		pr_warn("swapmm: virt_start is equal to virt_end.\n");
		return -EINVAL;
	}
	/* caller must have bound an asid via SWAPMM_BIND_ASID first */
	asid_node = get_asid_node_by_tgid(current->tgid);
	if (!asid_node) {
		pr_warn("swapmm: current task has been not binded to swapmm.\n");
		return -EFAULT;
	}

	mmap_write_lock(mm);
	vma = find_vma(mm, spa.virt_start);
	if (!vma || !(vma->vm_flags & VM_SWAPMM_TLB)
			|| (vma->vm_end < spa.virt_end)) {
		pr_warn("swapmm: pin mmaping area(%pK-%pK) invalid!\n",
			(void *)spa.virt_start, (void *)spa.virt_end);
		ret = -EINVAL;
		goto finish;
	}

	sa = find_swapmm_area(spa.virt_start, spa.virt_end, asid_node);
	if (pin) {
		/* pinning the same range twice is a silent success */
		if (sa)
			goto finish;
		if (create_swapmm_area(mm, spa.virt_start,
			    spa.virt_end, asid_node) == NULL) {
			pr_warn("swapmm: create swapmm area failed.\n");
			ret = -EFAULT;
			goto finish;
		}
	} else {
		if (!sa) {
			ret = -EFAULT;
			goto finish;
		}
		/* restore the vmas mapped by swapmm tlb */
		restore_page_range(mm, spa.virt_start, spa.virt_end, sa);
		flush_tlb_all();
	}
finish:
	mmap_write_unlock(mm);
	return ret;
}

/*
 * ioctl dispatcher for the swapmm misc device. Validates the magic number,
 * then routes each SWAPMM_* command to its handler. Returns the handler's
 * result or -EINVAL for unknown commands.
 */
static long swapmm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret = 0;

	if (_IOC_TYPE(cmd) != SWAPMM_MAGIC) {
		/*
		 * Bug fix: the original string literal was split across a raw
		 * newline (invalid C) and the second specifier was misspelled
		 * as '#%x'.
		 */
		pr_warn("swapmm: invalid cmd magic number '%#x', should be '%#x'.\n",
			_IOC_TYPE(cmd), SWAPMM_MAGIC);
		return -EINVAL;
	}

	switch (cmd) {
	case SWAPMM_PHY_MAPPING:
		ret = swapmm_phy_mapping(arg);
		break;
	case SWAPMM_PIN_MAPPING:
		ret = swapmm_pin_or_restore_mapping(arg, true);
		break;
	case SWAPMM_RESTORE_MAPPING:
		ret = swapmm_pin_or_restore_mapping(arg, false);
		break;
	case SWAPMM_BIND_ASID:
		ret = bind_asid(arg);
		break;
	case SWAPMM_MADV_WILLNEED:
		ret = __swapmm_madv_willneed(arg);
		break;
	case SWAPMM_MADV_DONTNEED:
		ret = __swapmm_madv_dontneed(arg);
		break;
	case SWAPMM_MADV_SWAPMOVE:
		ret = __swapmm_madv_swapmove(arg);
		break;
	case SWAPMM_MEM_MIRROR_ADD:
		ret = __swapmm_mem_mirror_add(arg);
		break;
	case SWAPMM_MEM_MIRROR_DEL:
		ret = __swapmm_mem_mirror_del(arg);
		break;
	case SWAPMM_MEM_MIRROR_QUERY:
		ret = __swapmm_mem_mirror_query(arg);
		break;
	default:
		pr_warn("swapmm: invalid cmd '%#x'.\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}


/* True for a huge (block) pmd: a non-zero entry without the table bit. */
static int swapmm_pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) != 0 && (pmd_val(pmd) & PMD_TABLE_BIT) == 0;
}

/* True for a huge (block) pud: a non-zero entry without the table bit. */
static int swapmm_pud_huge(pud_t pud)
{
	return pud_val(pud) != 0 && (pud_val(pud) & PUD_TABLE_BIT) == 0;
}

/*
 * Clear every present pte in [addr, end) under the pte lock and queue the
 * corresponding TLB flushes on the gather. Returns the address reached
 * (always `end`). The physical pages themselves are not freed here.
 */
static inline long swapmm_zap_pte_range(struct mmu_gather *tlb,
				       struct vm_area_struct *vma, pmd_t *pmd,
				       unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;

	start_pte = pte_offset_map_lock(tlb->mm, pmd, addr, &ptl);
	pte = start_pte;

	do {
		if (pte_none(*pte))
			continue;

		/*
		 * Bug fix: the original passed the undeclared identifier
		 * `mm`; the owning mm is reachable through the gather state.
		 */
		pte_clear(tlb->mm, addr, pte);
		tlb_remove_tlb_entry(tlb, pte, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_unmap_unlock(start_pte, ptl);

	return addr;
}

/*
 * Clear a huge pmd entry under the pmd lock and queue its TLB flush.
 * The old entry's value is not needed, so the previously unused local
 * `pte` has been dropped.
 */
static inline void swapmm_zap_huge_pmd(struct mmu_gather *tlb,
				       struct vm_area_struct *vma, pmd_t *pmd,
				       unsigned long addr)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	ptep_get_and_clear(tlb->mm, addr, (pte_t *)pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	spin_unlock(ptl);
}

/*
 * Zap [addr, end) at the pmd level: huge pmds are cleared whole, table pmds
 * descend into swapmm_zap_pte_range(). Returns the address reached.
 */
static unsigned long swapmm_zap_pmd_range(struct mmu_gather *tlb,
				       struct vm_area_struct *vma, pud_t *pud,
				       unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

       /* We don't need to free physical pages during releasing process, just
	* set pmd or pte to zero.
	*/
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		/* swapmm not support kernel swap, not need consider swap. */
		if (swapmm_pmd_huge(*pmd)) {
			WARN_ON(next - addr != PMD_SIZE);
			swapmm_zap_huge_pmd(tlb, vma, pmd, addr);
			goto next;
		}

		if (pmd_none(*pmd))
			continue;

		next = swapmm_zap_pte_range(tlb, vma, pmd, addr, next);
next:
		/* long zaps may run a while; be preemption-friendly */
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}

/*
 * Clear a huge pud entry under the pud lock and queue its TLB flush.
 * The old entry's value is not needed, so the previously unused local
 * `pte` has been dropped.
 */
static inline void swapmm_zap_huge_pud(struct mmu_gather *tlb,
			       struct vm_area_struct *vma, pud_t *pud,
			       unsigned long addr)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	ptep_get_and_clear(tlb->mm, addr, (pte_t *)pud);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	spin_unlock(ptl);
}

/*
 * Zap [addr, end) at the pud level: huge puds are cleared whole, table puds
 * descend into swapmm_zap_pmd_range(). Returns the address reached.
 */
static unsigned long swapmm_zap_pud_range(struct mmu_gather *tlb,
				       struct vm_area_struct *vma, p4d_t *p4d,
				       unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		/* swapmm not support kernel swap, not need consider kernel swap scenes. */
		if (swapmm_pud_huge(*pud)) {
			WARN_ON(next - addr != PUD_SIZE);
			swapmm_zap_huge_pud(tlb, vma, pud, addr);
			goto next;
		}

		if (pud_none(*pud))
			continue;

		next = swapmm_zap_pmd_range(tlb, vma, pud, addr, next);
next:
		/* long zaps may run a while; be preemption-friendly */
		cond_resched();
	} while (pud++, addr = next, addr != end);

	return addr;
}

/*
 * Per-VMA unmap hook (wired into swapmm_fops.unmap_page_range). Walks the
 * page tables for [addr, end) and clears entries without freeing the backing
 * physical pages. When hot-replace is enabled, p4d tables whose page carries
 * the Hotreplace flag are detached (cleared at the p4d) instead of zapped,
 * preserving them for a later restore.
 */
static void swapmm_unmap_page_range(struct mmu_gather *tlb,
			    struct vm_area_struct *vma,
			    unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	WARN_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);

	/* don't support p4d, just make building failure here. */
	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS != 4);

	do {
		p4d = (p4d_t *)pgd;
		next = pgd_addr_end(addr, end);
		/* `pgd_none_or_clear_bad()` always return false if
		 * `CONFIG_PGTABLE_LEVELS == 4`, `p4d_none_or_clear_bad` is used
		 * to decide here.
		 */
		if (pgd_none_or_clear_bad(pgd) || p4d_none_or_clear_bad(p4d))
			continue;

		if (swapmm_hotreplace_enabled()) {
			struct page *page = p4d_page(*p4d);

			if (PageHotreplace(page)) {
				swapmm_p4d_clear(p4d);

			       /* The `mm->pgtables_bytes` is a kind of
				* statistical data used by oom-killer and
				* memory cgroup. If we clear pgd directly, the
				* next step `free_pgd_range()` would return
				* instead of steping in `free_p4d_range()`,
				* `free_pud_range()`, and so on, which would
				* result that the count of `mm->pgtables_bytes`
				* is wrong.
				*
				* Then, the following step `check_mm()` will
				* find the non-zero number of
				* `mm->pgtables_bytes`, and pr_alter "BUG:
				* non-zero pgtables_bytes on freeing mm".
				*
				* We minus the count here to balance the total
				* amount `mm->patables_bytes` to avoid the
				* warning. Once the vma was pined, this process
				* is always executed, we don't need to consider
				* the release scene of vma.
				*
				* NOTE(review): no decrement is actually
				* performed in this branch — confirm whether
				* the accounting adjustment described above
				* happens elsewhere (e.g. swapmm_p4d_clear).
				*/
				continue;
			}
		}
		next = swapmm_zap_pud_range(tlb, vma, p4d, addr, next);
	} while (pgd++, addr = next, addr != end);
	tlb_flush(tlb);
}

/* File operations for /dev/swapmm, including the custom unmap hook. */
static const struct file_operations swapmm_fops = {
	.owner	= THIS_MODULE,
	.mmap	= swapmm_mmap,
	.unlocked_ioctl = swapmm_ioctl,
	.compat_ioctl	= swapmm_ioctl,
	.unmap_page_range = swapmm_unmap_page_range,
};

/* Misc character device exposing the swapmm interface as /dev/swapmm. */
static struct miscdevice swapmm_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "swapmm",
	.fops	= &swapmm_fops,
};

static void free_recored_areas(void)
{
	struct swapmm_reverved_mem *cur;

	while (!list_empty(&reversed_head)) {
		cur = list_first_entry(&reversed_head,
				       struct swapmm_reverved_mem, list);
		list_del(&cur->list);
		kfree(cur);
	}
}

/*
 * Scan the top-level iomem resources for entries whose name matches `name`
 * and record each as a reserved region [start, end). Returns the number of
 * regions found, or -ENOMEM (after releasing partial results) when an
 * allocation fails.
 */
static int __init collect_resource(char *name)
{
	struct swapmm_reverved_mem *reversed;
	struct resource *p;
	int found = 0;

	for (p = iomem_resource.child; p != NULL; p = p->sibling) {
		if (p->name == NULL || strcmp(p->name, name) != 0)
			continue;

		reversed = kzalloc(sizeof(*reversed), GFP_KERNEL);
		/*
		 * Bug fix: check the fresh allocation (`reversed`), not the
		 * loop cursor `p`, which is always non-NULL here.
		 */
		if (!reversed)
			goto err;

		reversed->start = p->start;
		/* store as a half-open range [start, end) */
		reversed->end = p->end + 1;
		list_add_tail(&reversed->list, &reversed_head);

		pr_info("swapmm: find area.\n");

		found += 1;
	}

	return found;

err:
	free_recored_areas();
	return -ENOMEM;
}

/*
 * Module init: record the reserved physical regions named by the
 * `resource_name` parameter, register the misc device, and start the
 * mem-mirror subsystem. Returns 0 on success or a negative errno.
 */
static int __init swapmm_mmap_init(void)
{
	int err = 0;

	/*
	 * Bug fix: initialise the lock before registering the device — once
	 * misc_register() succeeds, ioctl/mmap paths that take
	 * page_map_swap_lock become reachable, so initialising it afterwards
	 * left a window where an uninitialised spinlock could be taken.
	 */
	spin_lock_init(&page_map_swap_lock);

	err = collect_resource(resource_name);
	if (err < 0)
		goto out;
	else if (err == 0) {
		err = -EINVAL;
		pr_alert("swapmm: not find any resource match '%s'\n",
			 resource_name);
		goto out;
	}

	err = misc_register(&swapmm_miscdev);
	if (err != 0) {
		pr_alert("swapmm: init failed!\n");
		goto out;
	}
	mem_mirror_init();

	pr_info("swapmm: init success.\n");

out:
	return err;
}

/*
 * Module exit: unregister the task-exit notifier if it was ever installed,
 * tear down the misc device and mem-mirror state, then release all pinned
 * areas and recorded reserved regions.
 */
static void __exit swapmm_mmap_exit(void)
{
	if (exit_notifier_status)
		task_exit_notifier_unregister(&task_exit_notifier_block);

	misc_deregister(&swapmm_miscdev);
	mem_mirror_exit();
	free_swapmm_areas();
	free_recored_areas();

	pr_info("swapmm: exit!\n");
}

/* Module registration and metadata. */
module_init(swapmm_mmap_init);
module_exit(swapmm_mmap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Euler");
MODULE_DESCRIPTION("swapmm mmap");
