// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 * Description: Euler virtual memory management.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/migrate.h>
#include <linux/swapmm_tlb.h>
#include <linux/memory_hotplug.h>
#include <linux/pid.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/ratelimit.h>

#include "swapmm_tlb_mirror.h"

#define MIRROR_RATELIMIT_INTERVAL (300 * HZ)
#define MIRROR_RATELIMIT_BURST 100
static DEFINE_RATELIMIT_STATE(_mirror_rs, MIRROR_RATELIMIT_INTERVAL,
			MIRROR_RATELIMIT_BURST);
/* Rate-limited pr_info for the mirror paths: at most MIRROR_RATELIMIT_BURST
 * messages per MIRROR_RATELIMIT_INTERVAL, so failures cannot flood dmesg.
 * Note: the original definition carried a stray trailing '\' after
 * "while (0)", silently splicing the following line into the macro.
 */
#define MIRROR_INFO(fmt, ...)							\
do {										\
	if (__ratelimit(&_mirror_rs))						\
		pr_info(fmt, ##__VA_ARGS__);					\
} while (0)

/*** start of helper structures and functions ***/
#define HASHTABLE_SIZE 13

/* struct mem_mirror_hwpoison_item - record mirrored physical address
 */
struct mem_mirror_hwpoison_item {
	phys_addr_t phy_addr;
	struct page *page;
	bool is_umm;
	struct hlist_node hlist;
};
static DEFINE_HASHTABLE(mem_mirror_hwpoison_hash, HASHTABLE_SIZE);
static int mem_mirror_hwpoison_hash_size;
static struct kmem_cache *mem_mirror_hwpoison_cache;
static DECLARE_RWSEM(mem_mirror_hwpoison_sem);
/* Upper bound on simultaneously mirrored (hwpoisoned) pages.
 * Use an integer constant: 1e4 is a double literal converted at init time.
 */
unsigned int mem_mirror_max_pages = 10000;

/* struct mem_mirror_registration_item - record registration parameters
 * @map_addr: could be vaddr or phy_addr, depends on map method
 * @mm: if map_addr is phy_addr, this field should be NULL
 * @hwpoison_item_key: referred hwpoison_item coule be released, requires double check
 * @hlist: list node
 */
struct mem_mirror_registration_item {
	phys_addr_t map_addr;
	struct mm_struct *mm;
	phys_addr_t hwpoison_item_key;
	struct hlist_node hlist;
};
static DEFINE_HASHTABLE(mem_mirror_registration_hash, HASHTABLE_SIZE);
static struct kmem_cache *mem_mirror_registration_cache;
static DEFINE_SPINLOCK(mem_mirror_registration_lock);

/* struct mem_mirror_task_item - record swapmm madv users
 */
struct mem_mirror_task_item {
	struct task_struct *group_leader;
	pid_t tgid;
	struct list_head list;
};
static struct list_head mem_mirror_task_list = LIST_HEAD_INIT(mem_mirror_task_list);
static struct kmem_cache *mem_mirror_task_cache;
static DEFINE_SPINLOCK(mem_mirror_task_lock);

/*** hwpoison pages hashtable operations ***/

/* Return true when the number of mirrored pages has reached the configured
 * cap (mem_mirror_max_pages). Caller should hold mem_mirror_hwpoison_sem
 * when the answer must stay stable -- TODO confirm against callers.
 */
static inline bool mem_mirror_is_hashtable_full(void)
{
	return !!(mem_mirror_hwpoison_hash_size >= mem_mirror_max_pages);
}

/* Insert @item keyed by its physical address and bump the element count.
 * Caller must hold mem_mirror_hwpoison_sem for writing.
 */
static inline void mem_mirror_add_hwpoison_item(struct mem_mirror_hwpoison_item *item)
{
	hash_add(mem_mirror_hwpoison_hash, &item->hlist, item->phy_addr);
	mem_mirror_hwpoison_hash_size++;
}

/* Unlink @item and decrement the element count.
 * Caller must hold mem_mirror_hwpoison_sem for writing.
 */
static inline void mem_mirror_del_hwpoison_item(struct mem_mirror_hwpoison_item *item)
{
	hash_del(&item->hlist);
	mem_mirror_hwpoison_hash_size--;
}

/* Allocate a zeroed hwpoison item; returns NULL if the cache was never
 * created (mem_mirror_init failed) or the allocation fails. GFP_ATOMIC
 * because callers may hold spinlocks.
 */
static inline struct mem_mirror_hwpoison_item *alloc_mem_mirror_hwpoison_item(void)
{
	if (!mem_mirror_hwpoison_cache)
		return NULL;
	return kmem_cache_zalloc(mem_mirror_hwpoison_cache, GFP_ATOMIC);
}

/* Release an item previously obtained from alloc_mem_mirror_hwpoison_item(). */
static inline void free_mem_mirror_hwpoison_item(struct mem_mirror_hwpoison_item *item)
{
	kmem_cache_free(mem_mirror_hwpoison_cache, item);
}

/* Allocate, fill and insert a hwpoison record for @paddr.
 * @src_page: the page that was poisoned/offlined.
 * @is_umm:   true when the page came from the UMM madvise path (freed by us
 *            on unmirror) rather than the soft-offline path.
 * Returns 0 on success, -ENOMEM on allocation failure.
 * Caller must hold mem_mirror_hwpoison_sem for writing.
 */
static inline int construct_mem_mirror_hwpoison_item(
	phys_addr_t paddr, struct page *src_page, bool is_umm)
{
	struct mem_mirror_hwpoison_item *hwpoison_item;

	hwpoison_item = alloc_mem_mirror_hwpoison_item();
	if (!hwpoison_item)
		return -ENOMEM;

	hwpoison_item->phy_addr = paddr;
	hwpoison_item->page = src_page;
	hwpoison_item->is_umm = is_umm;
	mem_mirror_add_hwpoison_item(hwpoison_item);
	return 0;
}

/* Undo the mirroring recorded by @item and free it.
 * UMM pages were poisoned locally, so we clear the flag and free the page
 * ourselves; OS pages go back through unpoison_memory().
 * Caller must hold mem_mirror_hwpoison_sem for writing.
 */
static inline void deconstruct_mem_mirror_hwpoison_item(struct mem_mirror_hwpoison_item *item)
{
	if (item->is_umm) {
		/* We only unpoison pages that have been poisoned through this module,
		 * so no need to call num_poisoned_pages_dec.
		 */
		ClearPageHWPoison(item->page);
		__free_page(item->page);
	} else {
		unpoison_memory(__phys_to_pfn(item->phy_addr));
	}
	mem_mirror_del_hwpoison_item(item);
	free_mem_mirror_hwpoison_item(item);
}

/*** hwpoison registration hashtable usage ***/

/* Insert @item keyed by the user-supplied map address.
 * Caller must hold mem_mirror_registration_lock.
 */
static inline void mem_mirror_add_registration_item(struct mem_mirror_registration_item *item)
{
	hash_add(mem_mirror_registration_hash, &item->hlist, item->map_addr);
}

/* Unlink @item. Caller must hold mem_mirror_registration_lock. */
static inline void mem_mirror_del_registration_item(struct mem_mirror_registration_item *item)
{
	hash_del(&item->hlist);
}

/* Allocate a zeroed registration item; NULL if the cache is missing or the
 * GFP_ATOMIC allocation fails (callers may hold spinlocks).
 */
static inline struct mem_mirror_registration_item *alloc_mem_mirror_registration_item(void)
{
	if (!mem_mirror_registration_cache)
		return NULL;
	return kmem_cache_zalloc(mem_mirror_registration_cache, GFP_ATOMIC);
}

/* Release a registration item back to its cache. */
static inline void free_mem_mirror_registration_item(struct mem_mirror_registration_item *item)
{
	kmem_cache_free(mem_mirror_registration_cache, item);
}

/* Allocate, fill and insert a registration record.
 * @map_addr: virtual or physical address the user registered with.
 * @mm: owning mm for virtual registrations, NULL for physical ones.
 * @hwpoison_item_key: paddr key of the associated hwpoison record.
 * Returns 0 on success, -ENOMEM on allocation failure.
 * Caller must hold mem_mirror_registration_lock.
 */
static inline int construct_mem_mirror_registration_item(
	phys_addr_t map_addr, struct mm_struct *mm, phys_addr_t hwpoison_item_key)
{
	struct mem_mirror_registration_item *registration_item;

	registration_item = alloc_mem_mirror_registration_item();
	if (!registration_item)
		return -ENOMEM;

	registration_item->map_addr = map_addr;
	registration_item->mm = mm;
	registration_item->hwpoison_item_key = hwpoison_item_key;
	mem_mirror_add_registration_item(registration_item);
	return 0;
}

/* This should not be called directly except during module exiting.
 * Use `try_deconstruct_mem_mirror_registration_item` instead
 * (which re-checks existence under the lock before freeing).
 */
static inline void deconstruct_mem_mirror_registration_item(struct mem_mirror_registration_item *item)
{
	mem_mirror_del_registration_item(item);
	free_mem_mirror_registration_item(item);
}

/* Locked lookup-and-free: remove the registration matching (@addr, @mm)
 * if it still exists. Safe to call concurrently; losing the race is fine.
 */
static inline void try_deconstruct_mem_mirror_registration_item(phys_addr_t addr, struct mm_struct *mm)
{
	struct mem_mirror_registration_item *registration_item;

	spin_lock(&mem_mirror_registration_lock);
	hash_for_each_possible(mem_mirror_registration_hash, registration_item, hlist, addr)
		/* If not found, it may have been deconstructed by other threads */
		if (registration_item->mm == mm && registration_item->map_addr == addr) {
			deconstruct_mem_mirror_registration_item(registration_item);
			break;
		}
	spin_unlock(&mem_mirror_registration_lock);
}

/* It only return paddr instead of the original item, because
 * A. The related item might be freed before use under multi-thread conditions
 * B. paddr is enough for further process
 *
 * Looks up the registration for (@addr, @mm); if found, verifies the
 * referenced hwpoison record still exists. When the hwpoison record is gone
 * the stale registration is dropped and 0 is returned.
 * Returns the mirrored paddr, or 0 when nothing (valid) is registered.
 * NOTE(review): the registration spinlock is released inside the bucket walk
 * before taking the hwpoison rwsem (to respect lock ordering); the function
 * returns immediately afterwards, so the iterator is never advanced after
 * the unlock.
 */
static inline phys_addr_t get_mem_mirror_registration_paddr(phys_addr_t addr, struct mm_struct *mm)
{
	struct mem_mirror_registration_item *registration_item;
	struct mem_mirror_hwpoison_item *hwpoison_item;
	phys_addr_t paddr = 0;
	bool found = false;

	spin_lock(&mem_mirror_registration_lock);
	hash_for_each_possible(mem_mirror_registration_hash, registration_item, hlist, addr)
		if (registration_item->mm == mm && registration_item->map_addr == addr) {
			paddr = registration_item->hwpoison_item_key;
			spin_unlock(&mem_mirror_registration_lock);

			down_read(&mem_mirror_hwpoison_sem);
			hash_for_each_possible(mem_mirror_hwpoison_hash, hwpoison_item, hlist, paddr) {
				if (hwpoison_item->phy_addr == paddr) {
					found = true;
					break;
				}
			}
			up_read(&mem_mirror_hwpoison_sem);

			if (found)
				return paddr;
			try_deconstruct_mem_mirror_registration_item(addr, mm);
			return 0;
		}
	spin_unlock(&mem_mirror_registration_lock);
	return 0;
}

/*** struct task list usage ***/

/* Append @item to the swapmm task list. Caller must hold mem_mirror_task_lock. */
static inline void mem_mirror_add_task_item(struct mem_mirror_task_item *item)
{
	list_add_tail(&item->list, &mem_mirror_task_list);
}

/* Allocate a zeroed task item; NULL if the cache is missing or GFP_ATOMIC
 * allocation fails (callers hold mem_mirror_task_lock).
 */
static inline struct mem_mirror_task_item *alloc_mem_mirror_task_item(void)
{
	if (!mem_mirror_task_cache)
		return NULL;
	return kmem_cache_zalloc(mem_mirror_task_cache, GFP_ATOMIC);
}

/* Unlink @item from the task list. Caller must hold mem_mirror_task_lock. */
static inline void mem_mirror_del_task_item(struct mem_mirror_task_item *item)
{
	list_del(&item->list);
}

/* Release a task item back to its cache. */
static inline void free_mem_mirror_task_item(struct mem_mirror_task_item *item)
{
	kmem_cache_free(mem_mirror_task_cache, item);
}

/* Return true if @task's thread group is already tracked.
 * Matches on both group_leader pointer and tgid to guard against pid reuse.
 */
static inline bool mem_mirror_task_exist(struct task_struct *task)
{
	// Caller should hold the lock
	struct mem_mirror_task_item *item;

	list_for_each_entry(item, &mem_mirror_task_list, list) {
		if (item->group_leader == task->group_leader && item->tgid == task->tgid)
			return true;
	}
	return false;
}

/* Return true if the recorded tgid still resolves to the same group leader,
 * i.e. the tracked process is still alive (tolerates pid reuse).
 */
static inline bool mem_mirror_task_item_valid(struct mem_mirror_task_item *item)
{
	struct task_struct *task;
	bool valid;

	rcu_read_lock();
	task = pid_task(find_vpid(item->tgid), PIDTYPE_PID);
	valid = !!(task && task->group_leader == item->group_leader);
	rcu_read_unlock();
	return valid;
}
/*** end of helper structures and functions ***/

struct umm_addr_info {
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long vaddr;
};

#define MIRROR_RETRY_COUNT 1

#ifdef CONFIG_EULEROS_SWAPMM_TLB
/* True when @paddr lies in the UMM reserved range (check_phys_addr() returns
 * 0 for reserved addresses); such pages must not be mirrored.
 */
static inline bool mem_mirror_is_paddr_umm_reserved(phys_addr_t paddr)
{
	return check_phys_addr(paddr, paddr + PAGE_SIZE) == 0;
}

/* True when @vma is a swapmm-TLB mapping (either flavor of the flag),
 * i.e. eligible for UMM mirroring.
 */
static inline bool mem_mirror_is_vma_mirrorable_umm(struct vm_area_struct *vma)
{
	if (!vma || !(vma->vm_flags & (VM_SWAPMM_TLB | VM_SWAPMM_TLB_MADV)))
		return false;
	return true;
}

/* Heuristic for a swapmm-madvise'd UMM page: off the LRU yet marked
 * unevictable -- TODO confirm this combination is unique to swapmm pages.
 */
static inline bool mem_mirror_can_be_page_swapmm_madv(struct page *page)
{
	if (!PageLRU(page) && PageUnevictable(page))
		return true;
	return false;
}
#else
/* !CONFIG_EULEROS_SWAPMM_TLB stubs: without swapmm-TLB support no address is
 * UMM-reserved, no VMA is UMM-mirrorable and no page is a swapmm-madv page,
 * which routes everything through the plain OS mirror path.
 */
static inline bool mem_mirror_is_paddr_umm_reserved(phys_addr_t paddr)
{
	return false;
}

static inline bool mem_mirror_is_vma_mirrorable_umm(struct vm_area_struct *vma)
{
	return false;
}

static inline bool mem_mirror_can_be_page_swapmm_madv(struct page *page)
{
	return false;
}
#endif

/* Check whether the page at @paddr may be mirrored.
 * Rejects invalid/offline pfns (-ENXIO), device/reserved/page-table pages
 * (-EIO), huge pages (-EINVAL), already-poisoned pages (-EEXIST) and
 * UMM-reserved ranges (-EACCES). Returns 0 when mirroring is allowed.
 *
 * Fix: the failure message was a single string literal broken across three
 * source lines without continuation, which does not compile; it is now one
 * literal on one line.
 */
static inline int mem_mirror_is_page_mirrorable(phys_addr_t paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	struct page *page = NULL;
	int ret = 0;

	if (!pfn_valid(pfn)) {
		ret = -ENXIO;
		goto out_err;
	}
	page = pfn_to_online_page(pfn);
	if (!page || is_zone_device_page(page) || PageReserved(page) || PageTable(page)) {
		ret = -EIO;
		goto out_err;
	}
	if (PageHuge(page)) {
		ret = -EINVAL;
		goto out_err;
	}
	if (PageHWPoison(page)) {
		ret = -EEXIST;
		goto out_err;
	}
	if (mem_mirror_is_paddr_umm_reserved(paddr)) {
		ret = -EACCES;
		goto out_err;
	}
	return 0;
out_err:
	MIRROR_INFO("swapmm: given paddr is in unsupported list: linear area, UMM reserved area, DMA area, device memory, huge page, offline page. errno: %d\n",
		    ret);
	return ret;
}

/* Check whether the page at @paddr may be unmirrored.
 * Returns 0 when the page is idle (refcount <= 1, unmapped, no mapping),
 * -ENXIO for an invalid or offline pfn, -EBUSY when still in use.
 *
 * Fix: pfn_to_online_page() can return NULL (offline section); the original
 * dereferenced the result unconditionally, crashing on offline pfns.
 */
static inline int mem_mirror_is_page_unmirrorable(phys_addr_t paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	struct page *page = NULL;
	int ret = 0;

	if (!pfn_valid(pfn)) {
		ret = -ENXIO;
		goto out_err;
	}
	page = pfn_to_online_page(pfn);
	if (!page) {
		ret = -ENXIO;
		goto out_err;
	}
	if (page_count(page) > 1 || page_mapped(page) || page_mapping(page)) {
		ret = -EBUSY;
		goto out_err;
	}
	return 0;
out_err:
	MIRROR_INFO("swapmm: given paddr is not unmirrorable errno: %d\n", ret);
	return ret;
}

/* Track @task's thread group as a swapmm user, unless it is already tracked.
 * Allocation failure is silently tolerated: the task simply stays untracked.
 */
void mem_mirror_try_add_task(struct task_struct *task)
{
	struct mem_mirror_task_item *item;

	if (!task)
		return;

	spin_lock(&mem_mirror_task_lock);
	if (mem_mirror_task_exist(task))
		goto out;

	item = alloc_mem_mirror_task_item();
	if (!item)
		goto out;

	item->group_leader = task->group_leader;
	item->tgid = task->tgid;
	mem_mirror_add_task_item(item);
out:
	spin_unlock(&mem_mirror_task_lock);
}

/* mem_mirror_task_exit - Clean up structures related with exiting swapmm task.
 * Drops every registration whose mm matches the exiting task's mm; the
 * corresponding hwpoison records are intentionally kept (the physical pages
 * remain mirrored). Safe iteration via hash_for_each_safe since entries are
 * freed in the loop.
 */
void mem_mirror_task_exit(struct task_struct *task)
{
	struct mem_mirror_registration_item *registration_item;
	struct hlist_node *tmp;
	int i;

	spin_lock(&mem_mirror_registration_lock);
	hash_for_each_safe(mem_mirror_registration_hash, i, tmp, registration_item, hlist) {
		if (registration_item->mm == task->mm)
			deconstruct_mem_mirror_registration_item(registration_item);
	}
	spin_unlock(&mem_mirror_registration_lock);
}

/* Copy the contents of @page into @mirror via temporary atomic kmaps.
 * kunmap order is the reverse of kmap order, as required for kmap_atomic.
 */
static inline void mem_mirror_copy_page(struct page *mirror, struct page *page)
{
	void *src, *dst;

	dst = kmap_atomic(mirror);
	src = kmap_atomic(page);
	copy_page(dst, src);
	kunmap_atomic(src);
	kunmap_atomic(dst);
}

/* Return non-zero if @pmd maps a huge (block) entry rather than a table.
 * NOTE(review): relies on PMD_TABLE_BIT semantics (arm64-style descriptors);
 * not portable to architectures without that bit -- confirm target arch.
 */
static inline int mem_mirror_pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

/* Walk the page tables of @mm and return a mapped pte pointer for @address,
 * or NULL if any level is absent, the pmd is huge/transparent-huge, or the
 * pte is not present.
 *
 * Locking contract: if @ptlp is non-NULL the pte is returned mapped AND
 * locked (caller must pte_unmap_unlock); otherwise it is only mapped
 * (caller must pte_unmap). On the not-present path the pte is unmapped/
 * unlocked here and NULL is returned.
 */
static inline pte_t *mem_mirror_get_pte(struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long address, spinlock_t **ptlp)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd))
		goto out_pte;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		goto out_pte;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		goto out_pte;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || mem_mirror_pmd_huge(*pmd) || pmd_trans_huge(*pmd))
		goto out_pte;

	if (ptlp)
		ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
	else
		ptep = pte_offset_map(pmd, address);

	if (!pte_present(*ptep)) {
		if (ptlp)
			pte_unmap_unlock(ptep, *ptlp);
		else
			pte_unmap(ptep);
		ptep = NULL;
	}

out_pte:
	return ptep;
}

/* Scan every mirrorable UMM VMA of @mm for a pte mapping @paddr.
 * Returns a kzalloc'd umm_addr_info (caller owns and must kfree it) on a
 * match, or NULL when no mapping is found or allocation fails.
 * Runs under mmap_read_lock; GFP_KERNEL is fine under a read lock.
 *
 * Cleanup: dropped the redundant cast of kzalloc(), used sizeof(*ret), and
 * replaced the found-flag double loop exit with a single goto.
 */
static inline struct umm_addr_info *find_vma_umm_addr(
	struct mm_struct *mm, phys_addr_t paddr)
{
	struct umm_addr_info *ret = NULL;
	struct vm_area_struct *vma;
	unsigned long vaddr;
	phys_addr_t phys;
	pte_t *ptep;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!mem_mirror_is_vma_mirrorable_umm(vma))
			continue;

		for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
			ptep = mem_mirror_get_pte(mm, vma, vaddr, NULL);
			if (!ptep)
				continue;

			phys = __pte_to_phys(*ptep);
			pte_unmap(ptep);
			if (phys != paddr)
				continue;

			ret = kzalloc(sizeof(*ret), GFP_KERNEL);
			if (ret) {
				ret->mm = mm;
				ret->vma = vma;
				ret->vaddr = vaddr;
			}
			goto out;
		}
	}
out:
	mmap_read_unlock(mm);
	return ret;
}

/* Find which tracked swapmm task maps @paddr in a UMM VMA.
 * Stale task entries (process gone) are reaped in passing.
 * Returns a kzalloc'd umm_addr_info the caller must kfree, or NULL.
 *
 * NOTE(review): the task lock is dropped around find_vma_umm_addr() (which
 * sleeps), then list iteration continues after relocking. If another thread
 * removes the current or next entry in that window, the iteration walks
 * freed memory -- confirm whether concurrent removal is possible here.
 * NOTE(review): item->group_leader->mm is used without mmget(); assumes the
 * task cannot fully exit while listed -- TODO confirm.
 */
static struct umm_addr_info *get_umm_addr_info(phys_addr_t paddr)
{
	struct umm_addr_info *ret = NULL;
	struct mem_mirror_task_item *item, *next;
	struct mm_struct *mm;

	spin_lock(&mem_mirror_task_lock);
	list_for_each_entry_safe(item, next, &mem_mirror_task_list, list) {
		if (!mem_mirror_task_item_valid(item)) {
			mem_mirror_del_task_item(item);
			free_mem_mirror_task_item(item);
			continue;
		}
		spin_unlock(&mem_mirror_task_lock);

		mm = item->group_leader->mm;
		ret = find_vma_umm_addr(mm, paddr);
		spin_lock(&mem_mirror_task_lock);
		if (ret)
			break;
	}
	spin_unlock(&mem_mirror_task_lock);

	return ret;
}

/**
 * mem_mirror_add_os - Mirror a non-Slab page belonging to OS.
 * @paddr: page-aligned physical address to mirror.
 * @src_page: page struct for @paddr.
 *
 * Soft-offlines the page (contents migrate away, page marked HWPoison) and
 * records it in the hwpoison hashtable. Holds mem_mirror_hwpoison_sem for
 * writing across the whole operation so lookup+insert is atomic.
 *
 * Return 0 on success, return negated errno on failure, return positive on retryable failure.
 */
static int mem_mirror_add_os(phys_addr_t paddr, struct page *src_page)
{
	struct mem_mirror_hwpoison_item *hwpoison_item;
	int ret, retry = 0;

	down_write(&mem_mirror_hwpoison_sem);
	/* hwpoison_item is NULL after the loop when the bucket has no match */
	hash_for_each_possible(mem_mirror_hwpoison_hash, hwpoison_item, hlist, paddr)
		if (hwpoison_item->phy_addr == paddr)
			break;

	if (hwpoison_item && hwpoison_item->phy_addr == paddr) {
		MIRROR_INFO("swapmm: already mirrored at paddr\n");
		ret = -EEXIST;
		goto out_add;
	}

retry:
	ret = mem_mirror_soft_offline_page(src_page, MF_SOFT_OFFLINE);
	if (ret == -EAGAIN) {
		/* EAGAIN means it is racing with other process, resulting the failure of
		 * migrate_pages.
		 * If so, turn the return value to positive so that it can be retryed later.
		 */
		ret = 1;
		if (retry < MIRROR_RETRY_COUNT) {
			retry++;
			goto retry;
		}
		goto out_add;
	} else if (ret < 0)
		goto out_add;

	/* ret becomes -ENOMEM if the bookkeeping allocation fails */
	ret = construct_mem_mirror_hwpoison_item(paddr, src_page, false);
	MIRROR_INFO("swapmm: mirrored 1 OS page. Total mirrored %d pages.\n",
		    mem_mirror_hwpoison_hash_size);

out_add:
	up_write(&mem_mirror_hwpoison_sem);
	return ret;
}

/**
 * mem_mirror_add_umm - Mirror a UMM memflow page belonging to OS.
 * @paddr: page-aligned physical address to mirror.
 * @src_page: page struct for @paddr.
 *
 * Allocates a replacement page, swaps it into the owning task's pte
 * (flush + clear + copy + re-set under the pte lock), then marks the old
 * page HWPoison and records it. Holds mem_mirror_hwpoison_sem for writing
 * throughout.
 *
 * Fixes vs the original: @umm_addr_item (kzalloc'd by get_umm_addr_info())
 * was never freed; the unused local `p` and the dead `flags = GFP_KERNEL`
 * store are removed; `locked` is initialized.
 *
 * Return 0 on success, return negated errno on failure.
 */
static int mem_mirror_add_umm(phys_addr_t paddr, struct page *src_page)
{
	struct page *dst_page;
	struct mem_mirror_hwpoison_item *hwpoison_item;
	struct umm_addr_info *umm_addr_item;
	phys_addr_t phys;
	spinlock_t *ptl = NULL;
	pte_t *ptep;
	pte_t pte;
	gfp_t flags;
	int ret = 0, locked = 0;

	if (src_page == NULL || !mem_mirror_can_be_page_swapmm_madv(src_page) ||
		is_swapmm_pin_page(paddr))
		return -EINVAL;

	down_write(&mem_mirror_hwpoison_sem);
	/* hwpoison_item is NULL after the loop when the bucket has no match */
	hash_for_each_possible(mem_mirror_hwpoison_hash, hwpoison_item, hlist, paddr)
		if (hwpoison_item->phy_addr == paddr)
			break;

	if (hwpoison_item && hwpoison_item->phy_addr == paddr) {
		MIRROR_INFO("swapmm: already mirrored paddr\n");
		ret = -EINVAL;
		goto out_add;
	}

	// Assume there is only one match for physical address
	umm_addr_item = get_umm_addr_info(paddr);
	if (umm_addr_item == NULL) {
		ret = -EINVAL;
		goto out_add;
	}

	if (current->flags & PF_RELIABLE || page_zonenum(src_page) == ZONE_NORMAL)
		flags = GFP_RELIABLE | __GFP_ZERO | __GFP_NORETRY;
	else
		flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_NORETRY;
	dst_page = alloc_pages_node(page_to_nid(src_page), flags, 0);
	if (!dst_page) {
		ret = -ENOMEM;
		goto out_item;
	}

	lock_page(src_page);
	// It may fail because the compound head of dst_page could be the same as src_page
	locked = trylock_page(dst_page);

	mmap_read_lock(umm_addr_item->mm);
	ptep = mem_mirror_get_pte(umm_addr_item->mm, umm_addr_item->vma,
				  umm_addr_item->vaddr, &ptl);
	if (ptep == NULL) {
		ret = -EFAULT;
		__free_page(dst_page);
		goto out_vma;
	}
	phys = __pte_to_phys(*ptep);
	if (phys != paddr) {
		/* Mapping changed under us between lookup and pte lock */
		ret = -EFAULT;
		__free_page(dst_page);
		goto out_pte;
	}

	flush_cache_page(umm_addr_item->vma, umm_addr_item->vaddr, pte_pfn(*ptep));
	pte = ptep_get_and_clear(umm_addr_item->mm, umm_addr_item->vaddr, ptep);
	flush_tlb_range(umm_addr_item->vma, umm_addr_item->vaddr, umm_addr_item->vaddr + PAGE_SIZE);

	mem_mirror_copy_page(dst_page, src_page);
	pte = mk_pte(dst_page, READ_ONCE(umm_addr_item->vma->vm_page_prot));
	set_pte(ptep, pte);

	/* NOTE(review): if this allocation fails the page is still swapped and
	 * poisoned below but untracked -- confirm whether that is acceptable.
	 */
	ret = construct_mem_mirror_hwpoison_item(paddr, src_page, true);

	/* We do not update num_poisoned_pages here since it is not an exported
	 * symbol and UMM mirrored pages are completely managed by this moudle.
	 */
	SetPageHWPoison(src_page);
	ClearPageUnevictable(src_page);
	SetPageUnevictable(dst_page);
	MIRROR_INFO("swapmm: mirrored 1 UMM willdneed page. Total mirrored %d pages.\n",
		    mem_mirror_hwpoison_hash_size);

out_pte:
	pte_unmap_unlock(ptep, ptl);
out_vma:
	mmap_read_unlock(umm_addr_item->mm);
	if (locked)
		unlock_page(dst_page);
	unlock_page(src_page);
out_item:
	kfree(umm_addr_item);
out_add:
	up_write(&mem_mirror_hwpoison_sem);
	return ret;
}

/* Unmirror the page recorded for @paddr: look it up in the hwpoison table
 * under the write rwsem and, if present, unpoison/free it and drop the
 * record. Returns 0 on success, -EINVAL when @paddr is not mirrored.
 */
static int mem_mirror_del(phys_addr_t paddr)
{
	struct mem_mirror_hwpoison_item *hwpoison_item = NULL;
	int ret = 0;

	down_write(&mem_mirror_hwpoison_sem);
	/* hwpoison_item is NULL after the loop when the bucket has no match */
	hash_for_each_possible(mem_mirror_hwpoison_hash, hwpoison_item, hlist, paddr)
		if (hwpoison_item->phy_addr == paddr)
			break;

	if (!hwpoison_item || hwpoison_item->phy_addr != paddr) {
		MIRROR_INFO("swapmm: paddr not mirrored\n");
		ret = -EINVAL;
		goto out_del;
	}

	deconstruct_mem_mirror_hwpoison_item(hwpoison_item);
	MIRROR_INFO("swapmm: unmirrored 1 page. Total mirrored %d pages.\n",
		    mem_mirror_hwpoison_hash_size);

out_del:
	up_write(&mem_mirror_hwpoison_sem);
	return ret;
}

/* Copy up to @len mirrored physical addresses into @user_buf.
 * Returns the number of entries written on success, -EFAULT on an
 * inaccessible user buffer or failed copy, -ENOMEM on allocation failure.
 * A kernel bounce buffer is used so the rwsem is never held across a
 * user-space copy (which may fault and sleep).
 *
 * Cleanup: single-exit error handling (kfree appeared on two paths) and the
 * byte count is computed once in size_t to avoid unsigned-int overflow.
 */
static int mem_mirror_query(phys_addr_t *user_buf, unsigned int len)
{
	size_t bytes = (size_t)len * sizeof(phys_addr_t);
	struct mem_mirror_hwpoison_item *item;
	phys_addr_t *buf;
	unsigned int written = 0;
	int ret;
	int i;

	if (!access_ok(user_buf, bytes)) {
		pr_warn("swapmm: mirror_query user buf fail to access\n");
		return -EFAULT;
	}

	buf = kcalloc(len, sizeof(phys_addr_t), GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	down_read(&mem_mirror_hwpoison_sem);
	hash_for_each(mem_mirror_hwpoison_hash, i, item, hlist) {
		if (written == len)
			break;
		buf[written++] = item->phy_addr;
	}
	up_read(&mem_mirror_hwpoison_sem);

	if (copy_to_user(user_buf, buf, bytes)) {
		pr_warn("swapmm: mirror_query copy mirrored addr to user failed\n");
		ret = -EFAULT;
	} else {
		ret = written;
	}

	kfree(buf);
	return ret;
}

/* ioctl entry: mirror the page named by the user's swapmm_mem_mirror_apply.
 * Virtual registrations resolve the caller's pte to a paddr first; physical
 * registrations use the address directly. On successful mirroring the
 * (addr, mm, paddr) triple is recorded so the page can be unmirrored later.
 * Returns 0 on success, negated errno on failure, positive on a retryable
 * OS-path failure (see mem_mirror_add_os).
 */
int __swapmm_mem_mirror_add(unsigned long arg)
{
	void __user *buf = (void __user *)arg;
	struct mm_struct *mm = NULL;
	struct page *src_page;
	struct swapmm_mem_mirror_apply item;
	struct vm_area_struct *vma;
	pte_t *ptep;
	unsigned long vaddr;
	phys_addr_t paddr = 0;
	int ret = 0;

	if (mem_mirror_is_hashtable_full())
		return -ENOMEM;
	if (!access_ok(buf, sizeof(struct swapmm_mem_mirror_apply)))
		return -EFAULT;
	if (copy_from_user(&item, buf, sizeof(struct swapmm_mem_mirror_apply)))
		return -EINVAL;

	if (item.is_virtual) {
		mm = current->mm;
		/* Non-zero means this vaddr already has a live mirror */
		if (get_mem_mirror_registration_paddr(item.addr, mm) != 0) {
			ret = -EEXIST;
			goto out_add;
		}

		vaddr = item.addr;
		mmap_read_lock(mm);
		vma = find_vma(mm, vaddr);
		if (!vma)
			goto out_vma;

		ptep = mem_mirror_get_pte(mm, vma, vaddr, NULL);
		if (ptep == NULL)
			goto out_vma;
		if (!pte_val(*ptep))
			goto out_pte;

		paddr = __pte_to_phys(*ptep);
		/* Any lookup failure above leaves paddr == 0, caught below */
out_pte:
		pte_unmap(ptep);
out_vma:
		mmap_read_unlock(mm);
	} else
		paddr = item.addr;

	paddr = paddr & ~(PAGE_SIZE - 1);
	if (!paddr) {
		ret = -EINVAL;
		goto out_add;
	}

	ret = mem_mirror_is_page_mirrorable(paddr);
	if (ret < 0)
		goto out_add;

	/* UMM madv pages take the pte-swap path; everything else soft-offlines */
	src_page = phys_to_page(paddr);
	if (mem_mirror_can_be_page_swapmm_madv(src_page))
		ret = mem_mirror_add_umm(paddr, src_page);
	else
		ret = mem_mirror_add_os(paddr, src_page);

out_add:
	if (ret == 0) {
		/* NOTE(review): if this registration alloc fails the page stays
		 * mirrored but unregistered -- confirm intended behavior.
		 */
		spin_lock(&mem_mirror_registration_lock);
		ret = construct_mem_mirror_registration_item(item.addr, mm, paddr);
		spin_unlock(&mem_mirror_registration_lock);
	} else {
		MIRROR_INFO("swapmm: mirror failed errno %d\n", ret);
	}
	return ret;
}

/* ioctl entry: unmirror the page named by the user's swapmm_mem_mirror_apply.
 * Looks up the mirrored paddr via the registration table (needed for virtual
 * registrations, where the live pte now points at the replacement page);
 * unregistered physical addresses are used directly.
 * Returns 0 on success, negated errno on failure.
 *
 * Fixes vs the original: the log message was a string literal broken across
 * two source lines (compile error), and an unregistered virtual address fell
 * through with paddr == 0 instead of failing -- it now returns -EINVAL.
 */
int __swapmm_mem_mirror_del(unsigned long arg)
{
	void __user *buf = (void __user *)arg;
	struct mm_struct *mm = NULL;
	struct swapmm_mem_mirror_apply item;
	phys_addr_t paddr = 0;
	int ret = 0;

	if (!access_ok(buf, sizeof(struct swapmm_mem_mirror_apply)))
		return -EFAULT;
	if (copy_from_user(&item, buf, sizeof(struct swapmm_mem_mirror_apply)))
		return -EINVAL;

	if (item.is_virtual)
		mm = current->mm;

	/* If it was registered with virtual address, now the mapped physical address is
	 * a normal one.
	 * So we need this hash table to record the mirrored one
	 */
	paddr = get_mem_mirror_registration_paddr(item.addr, mm);
	if (paddr) {
		paddr = paddr & ~(PAGE_SIZE - 1);
	} else if (item.is_virtual) {
		MIRROR_INFO("swapmm_tlb_mirror: input virtual address not registered as hwpoison page yet\n");
		return -EINVAL;
	} else {
		paddr = item.addr & ~(PAGE_SIZE - 1);
	}

	ret = mem_mirror_is_page_unmirrorable(paddr);
	if (!ret)
		ret = mem_mirror_del(paddr);

	if (ret == 0)
		try_deconstruct_mem_mirror_registration_item(item.addr, mm);
	else
		MIRROR_INFO("swapmm: unmirror physical address failed errno %d\n", ret);

	return ret;
}

/* ioctl entry: list mirrored physical addresses into the user's buffer and
 * report how many were written via the written_count field.
 * Returns 0 on success, negated errno on failure.
 */
int __swapmm_mem_mirror_query(unsigned long arg)
{
	void __user *buf = (void __user *)arg;
	struct swapmm_mem_mirror_query item;
	int ret;

	if (!access_ok(buf, sizeof(struct swapmm_mem_mirror_query)))
		return -EFAULT;
	if (copy_from_user(&item, buf, sizeof(struct swapmm_mem_mirror_query)))
		return -EINVAL;
	if (!item.buf || !item.buflen || !access_ok(item.buf, item.buflen))
		return -EINVAL;

	/* buflen is in bytes; convert to an entry count */
	ret = mem_mirror_query((phys_addr_t *)item.buf, item.buflen / sizeof(phys_addr_t));
	if (ret < 0)
		return ret;
	if (copy_to_user(&((struct swapmm_mem_mirror_query *)buf)->written_count, &ret, sizeof(ret))) {
		pr_warn("swapmm: copy written count to user failed\n");
		return -EFAULT;
	}

	return 0;
}

/* Create a slab cache named after the struct, naturally aligned, no ctor. */
#define MEM_MIRROR_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct, \
		sizeof(struct __struct), __alignof__(struct __struct), \
		(__flags), NULL)

/* Create the three slab caches this module needs. On partial failure the
 * already-created caches are destroyed and reset to NULL so the allocation
 * helpers degrade gracefully.
 *
 * Cleanup: error labels renamed after what they DO (the old `out_task_cache`
 * destroyed the registration cache); the redundant init_rwsem() is dropped
 * because mem_mirror_hwpoison_sem is statically initialized by
 * DECLARE_RWSEM above.
 */
void __init mem_mirror_init(void)
{
	mem_mirror_hwpoison_cache = MEM_MIRROR_KMEM_CACHE(mem_mirror_hwpoison_item, 0);
	if (!mem_mirror_hwpoison_cache) {
		pr_err("mem_mirror: creating mem_mirror_hwpoison_cache failed\n");
		return;
	}

	mem_mirror_registration_cache = MEM_MIRROR_KMEM_CACHE(mem_mirror_registration_item, 0);
	if (!mem_mirror_registration_cache) {
		pr_err("mem_mirror: creating mem_mirror_registration_cache failed\n");
		goto out_destroy_hwpoison;
	}

	mem_mirror_task_cache = MEM_MIRROR_KMEM_CACHE(mem_mirror_task_item, 0);
	if (!mem_mirror_task_cache) {
		pr_err("mem_mirror: creating mem_mirror_task_cache failed\n");
		goto out_destroy_registration;
	}

	return;

out_destroy_registration:
	kmem_cache_destroy(mem_mirror_registration_cache);
	mem_mirror_registration_cache = NULL;
out_destroy_hwpoison:
	kmem_cache_destroy(mem_mirror_hwpoison_cache);
	mem_mirror_hwpoison_cache = NULL;
}

/* Module teardown: drop all registrations, unpoison/free every mirrored
 * page, reap the task list, then destroy the slab caches.
 * Order matters: registrations reference hwpoison records, so they go first.
 *
 * Cleanup: removed the redundant `if (cache)` guards -- kmem_cache_destroy()
 * is a no-op on NULL.
 */
void __exit mem_mirror_exit(void)
{
	struct mem_mirror_hwpoison_item *hwpoison_item;
	struct mem_mirror_registration_item *registration_item;
	struct mem_mirror_task_item *task_item, *next;
	struct hlist_node *tmp;
	int i;

	spin_lock(&mem_mirror_registration_lock);
	hash_for_each_safe(mem_mirror_registration_hash, i, tmp, registration_item, hlist) {
		deconstruct_mem_mirror_registration_item(registration_item);
	}
	spin_unlock(&mem_mirror_registration_lock);

	down_write(&mem_mirror_hwpoison_sem);
	hash_for_each_safe(mem_mirror_hwpoison_hash, i, tmp, hwpoison_item, hlist) {
		deconstruct_mem_mirror_hwpoison_item(hwpoison_item);
	}
	up_write(&mem_mirror_hwpoison_sem);

	spin_lock(&mem_mirror_task_lock);
	list_for_each_entry_safe(task_item, next, &mem_mirror_task_list, list) {
		mem_mirror_del_task_item(task_item);
		free_mem_mirror_task_item(task_item);
	}
	spin_unlock(&mem_mirror_task_lock);

	kmem_cache_destroy(mem_mirror_hwpoison_cache);
	mem_mirror_hwpoison_cache = NULL;

	kmem_cache_destroy(mem_mirror_registration_cache);
	mem_mirror_registration_cache = NULL;

	kmem_cache_destroy(mem_mirror_task_cache);
	mem_mirror_task_cache = NULL;
}
