// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 * Description: Euler Hybrid Memory Management for Persistent Memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <asm/pgalloc.h>
#include <linux/crc16.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/hpmm.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>

#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/xen/page.h>

#include "hpmm_common.h"
#include "hpmm_persist.h"
#include "hpmm_mgr.h"
#include "hpmm_uce.h"

/* all persist device, protected by p_dev_lock */
static LIST_HEAD(persist_device_head);
static DEFINE_MUTEX(p_dev_lock);

/*
 * device_avail_lock serializes hpmm_persist_device alloc operations.
 * For scenarios where bbu meta areas are concurrently created or
 * restored, this lock ensures that there will be no problems in
 * concurrent scenarios.
 */
static DEFINE_MUTEX(device_avail_lock);

/* Take an additional reference on @dev; caller must ensure @dev is alive. */
static inline void hpmm_persist_device_ref_inc(struct hpmm_persist_device *dev)
{
	atomic_inc(&dev->_refcount);
}

/* Drop a reference on @dev without testing for zero (see dec_and_test). */
static inline void hpmm_persist_device_ref_dec(struct hpmm_persist_device *dev)
{
	atomic_dec(&dev->_refcount);
}

/* Drop a reference on @dev; returns true when the count reaches zero. */
static inline bool hpmm_persist_device_dec_and_test(struct hpmm_persist_device *dev)
{
	return atomic_dec_and_test(&dev->_refcount);
}

/* Return the current reference count of @dev (a snapshot; may be stale). */
static inline int hpmm_persist_device_ref_count(struct hpmm_persist_device *dev)
{
	/* Fixed: the return statement was double-indented (kernel style). */
	return atomic_read(&dev->_refcount);
}

/*
 * Return the display name for a persistent-memory device.
 * NOTE(review): @type is currently ignored — every type maps to the
 * same fixed name.
 */
static const char *hpmm_persist_get_name(int type)
{
	return "hpmm persistent memory";
}

/*
 * Free @nr_pages order-0 pages starting at @pfn back to the buddy
 * allocator, warning if any page still had extra references.
 */
static void hpmm_persist_free_contig_range(unsigned long pfn, unsigned int nr_pages)
{
	unsigned int busy = 0;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);

		/* refcount should be exactly 1 (ours) at this point */
		if (page_count(page) != 1)
			busy++;
		__free_page(page);
	}
	WARN(busy != 0, "%d pages are still in use!\n", busy);
}

/* Link @dev into the global persist device list; caller holds p_dev_lock. */
static void hpmm_persist_device_add(struct hpmm_persist_device *dev)
{
	list_add(&dev->list, &persist_device_head);
}

/* Unlink @dev from the global persist device list; caller holds p_dev_lock. */
static void hpmm_persist_device_del(struct hpmm_persist_device *dev)
{
	list_del(&dev->list);
}

static struct hpmm_persist_device *hpmm_persist_device_lookup(int node)
{
	struct hpmm_persist_device *dev;

	mutex_lock(&p_dev_lock);
	list_for_each_entry(dev, &persist_device_head, list)
		if (dev->node == node) {
			mutex_unlock(&p_dev_lock);
			return dev;
		}

	mutex_unlock(&p_dev_lock);
	return NULL;
}

/* check the persist_device is valid or not */
/*
 * Validate the node in @hpmm_info: it must be a legal node id, online,
 * and a persistent-memory node. Returns true when usable.
 */
static bool check_hpmm_persist_info_valid(struct hpmm_mem_info *hpmm_info)
{
	const int nid = hpmm_info->node;

	if (nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("node num: %d is invalid", nid);
		return false;
	}

	/* must be an online persistent-memory node */
	if (node_online(nid) && is_node_pmem(nid))
		return true;

	pr_warn("node num: %d is invalid, not online or not pmem", nid);
	return false;
}

/* check the metadata area on the corresponding node is valid */
/*
 * Check whether the BBU metadata area on @node has already been reserved.
 * On success, stores the kernel virtual address of the area's first page
 * in @start_addr and returns true.
 */
static bool check_hpmm_persist_node_reserved(int node, unsigned long *start_addr)
{
	unsigned long start_pfn;
	struct zone *zone;
	pg_data_t *pgdat;
	struct page *page;
	int zoneid;

	pgdat = NODE_DATA(node);

	/* Find first usable zone. */
	for (zoneid = 0; zoneid <= MAX_NR_ZONES - 1; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (managed_zone(zone))
			break;
	}
	if (zoneid > MAX_NR_ZONES - 1) {
		/* No zone on this node has managed pages. */
		pr_warn("Unpopulated node %d\n", node);
		return false;
	}

	/*
	 * The meta area, when reserved, occupies the zone's first pages.
	 * If the first page is still in the buddy allocator, the area was
	 * never taken off the node.
	 */
	start_pfn = zone->zone_start_pfn;
	page = pfn_to_page(start_pfn);
	if (!page || PageBuddy(page))
		return false;

	*start_addr = (unsigned long)page_to_virt(page);
	return true;
}

/*
 * Validate the arguments of a create request and stamp the persistent
 * identity fields into @hpmm_dev. Returns 0 or -EINVAL.
 */
static int set_check_hpmm_dev(struct hpmm_device *hpmm_dev, struct hpmm_mem_info *hpmm_info)
{
	if (hpmm_dev == NULL || hpmm_info == NULL) {
		pr_err("hpmm_dev or hpmm_info is invalid");
		return -EINVAL;
	}

	if (!check_hpmm_persist_info_valid(hpmm_info))
		return -EINVAL;

	/* fill in the persistent-memory identity of the device */
	hpmm_dev->hpmm_ops = NULL;
	hpmm_dev->mem_type = HPMM_PERSISTENT_MEMORY;
	hpmm_dev->name = hpmm_persist_get_name(hpmm_info->type);
	hpmm_dev->pid_uuid = hpmm_info->uuid;

	return 0;
}

/*
 * Take a reference on @dev and link it to @hpmm_dev through @hpmm_node.
 * The refcount bump is done under dev->dev_lock; callers serialize the
 * list manipulation via device_avail_lock (and p_dev_lock on the
 * publish path in hpmm_persist_create()).
 */
static void add_dev_to_hpmm_node(struct hpmm_persist_device *dev,
				 struct hpmm_dev_node *hpmm_node,
				 struct hpmm_device *hpmm_dev)
{
	spin_lock(&dev->dev_lock);
	hpmm_persist_device_ref_inc(dev);
	spin_unlock(&dev->dev_lock);
	hpmm_node->dev_node = dev;
	list_add_tail(&hpmm_node->entry, &hpmm_dev->p_dev);
	hpmm_dev->dev_num++;
}

/*
 * hpmm_persist_create()
 * create an hpmm_device to operate on and init the hpmm_mgr_context struct
 * @hpmm_dev: the hpmm_device to be initialized
 * @hpmm_info: device information passed by user mode, contains the pmem node and type
 */
static int hpmm_persist_create(struct hpmm_device *hpmm_dev, struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_persist_device *dev;
	struct page *page;
	struct hpmm_dev_node *hpmm_node;
	unsigned long pfn;
	unsigned long mgr_addr = 0;
	int ret;

	/* Validate arguments and stamp identity fields into hpmm_dev. */
	ret = set_check_hpmm_dev(hpmm_dev, hpmm_info);
	if (ret != 0)
		return ret;

	hpmm_node = kzalloc(sizeof(struct hpmm_dev_node), GFP_KERNEL);
	if (!hpmm_node)
		return -ENOMEM;

	mutex_lock(&device_avail_lock);
	/*
	 * check whether the node has already been added to persist_device_head
	 * if true: link it directly to hpmm_device
	 * if false: we should create the hpmm_persist_device and link it
	 */
	dev = hpmm_persist_device_lookup(hpmm_info->node);
	/*
	 * case 1: the hpmm_persist_device has already been initialized.
	 * we can just get it from the hpmm_persist_device list and be
	 * done with it.
	 */
	if (dev) {
		add_dev_to_hpmm_node(dev, hpmm_node, hpmm_dev);
		mutex_unlock(&device_avail_lock);
		return 0;
	}

	/*
	 * in case this is the resume-bbu-meta-zone process,
	 * make sure that the metadata area on the corresponding
	 * node has been allocated and is valid.
	 */

	dev = kzalloc(sizeof(struct hpmm_persist_device), GFP_KERNEL);
	if (!dev) {
		kfree(hpmm_node);
		mutex_unlock(&device_avail_lock);
		return -ENOMEM;
	}

	dev->node = hpmm_info->node;
	spin_lock_init(&dev->dev_lock);

	/*
	 * case 2: the persist node has been reserved and the mgr context is
	 * valid, meaning the hpmm_persist_device has been initialized
	 * elsewhere (e.g. before a restart); adopt the existing area.
	 */
	if (check_hpmm_persist_node_reserved(dev->node, &mgr_addr) &&
	    check_hpmm_mgr_context_valid(mgr_addr)) {
		dev->mgr_addr = (void *)mgr_addr;
		goto out;
	}

	/* Reserved but with a stale context: free the old area and redo init. */
	mgr_addr = 0;
	if (check_hpmm_persist_node_reserved(dev->node, &mgr_addr) &&
	    !check_hpmm_mgr_context_valid(mgr_addr)) {
		pr_debug("bbu memory is reserved, but context is not valid. node: %d\n", dev->node);
		pr_debug("user need to release all the node use, and redo the init process");
		hpmm_persist_free_contig_range(virt_to_pfn(mgr_addr), BBU_META_NR_PAGES);
	}

	/* isolate pages for the BBU meta area. */
	page = take_pages_off_node(dev->node, BBU_META_NR_PAGES,
					GFP_KERNEL | __GFP_THISNODE);
	if (!page) {
		pr_err("take pages off node failed. node: %d", dev->node);
		goto unlock_out;
	}
	dev->mgr_addr = page_to_virt(page);

	/*
	 * case 3: power-failure recovery. The pages of the power-preserved
	 * memory area have not been isolated, but the metadata of that
	 * area is still valid, so keep it; otherwise initialize a fresh
	 * mgr context in the newly isolated pages.
	 */
	if (!check_hpmm_mgr_context_valid((unsigned long)dev->mgr_addr)) {
		/* hpmm_persist_create normal process */
		if (persistent_mgr_init(dev->mgr_addr, BBU_META_NR_PAGES << PAGE_SHIFT)) {
			dev->mgr_addr = NULL;
			pfn = page_to_pfn(page);
			hpmm_persist_free_contig_range(pfn, BBU_META_NR_PAGES);
			pr_err("persistent mgr init failed");
			goto unlock_out;
		}
	}

out:
	/* Publish the new device and link it to this hpmm_device. */
	mutex_lock(&p_dev_lock);
	hpmm_persist_device_add(dev);
	add_dev_to_hpmm_node(dev, hpmm_node, hpmm_dev);
	mutex_unlock(&p_dev_lock);
	mutex_unlock(&device_avail_lock);

	return 0;

unlock_out:
	mutex_unlock(&device_avail_lock);
	kfree(dev);
	kfree(hpmm_node);
	return -EINVAL;
}

/*
 * Detach every persist device node from @hpmm_dev, dropping one
 * reference per link. A device whose refcount reaches zero is removed
 * from the global list and freed (kfree happens after p_dev_lock is
 * released, matching the original ordering).
 */
static void hpmm_persist_destroy(struct hpmm_device *hpmm_dev)
{
	struct hpmm_dev_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &hpmm_dev->p_dev, entry) {
		struct hpmm_persist_device *pdev = node->dev_node;
		bool last_ref;

		mutex_lock(&p_dev_lock);
		spin_lock(&pdev->dev_lock);
		last_ref = hpmm_persist_device_dec_and_test(pdev);
		spin_unlock(&pdev->dev_lock);
		if (last_ref)
			hpmm_persist_device_del(pdev);
		mutex_unlock(&p_dev_lock);

		if (last_ref)
			kfree(pdev);

		list_del(&node->entry);
		kfree(node);
	}
}

/*
 * hpmm_persist_clear_metadata()
 * clear the pgtables and memory and bbu metadata area in persist device
 * @hpmm_info: device information passed by user mode, contains the flags and pid info
 */

/*
 * Wipe the BBU metadata area of the persist device on hpmm_info->node,
 * but only when this is the last user (refcount == 1). Returns 0 on
 * success (including "other users still hold it"), negative errno on
 * failure.
 *
 * Fixed: previously returned -1 (which user space sees as -EPERM) when
 * the mgr context was invalid; return -EINVAL instead, consistent with
 * the rest of this file.
 */
static int hpmm_persist_clear_metadata(struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_persist_device *dev;
	struct hpmm_mgr_context *mgr;

	if (!check_hpmm_persist_info_valid(hpmm_info))
		return -EINVAL;

	dev = hpmm_persist_device_lookup(hpmm_info->node);
	if (!dev) {
		pr_info("cannot get correct dev, node : %d", hpmm_info->node);
		return -EINVAL;
	}

	mutex_lock(&p_dev_lock);
	spin_lock(&dev->dev_lock);
	if (hpmm_persist_device_ref_count(dev) == 1) {
		mgr = dev->mgr_addr;
		spin_unlock(&dev->dev_lock);
		if (!check_hpmm_mgr_context_valid((unsigned long)mgr)) {
			pr_err("the dev is wrong , node is not init. please do init first\n");
			mutex_unlock(&p_dev_lock);
			return -EINVAL;
		}
		/*
		 * Clear the magic first so a crash mid-memset leaves the
		 * persistent context detectably invalid, then wipe the
		 * whole mgr area.
		 */
		mgr->magic = 0;
		memset(mgr, 0, struct_size(mgr, hpmm_mgr_info, MGR_INFO_NUM));
		mutex_unlock(&p_dev_lock);
		return 0;
	}

	/* Other users still reference this device; leave metadata intact. */
	spin_unlock(&dev->dev_lock);
	mutex_unlock(&p_dev_lock);
	return 0;
}

/*
 * hpmm_persist_clear()
 * clear the pgtables and memory in persist device
 */
/*
 * hpmm_persist_clear()
 * Clear the page tables and memory of every registered persist device;
 * optionally (HPMM_CLEAR_METADATA) also wipe the metadata of the node
 * named in @hpmm_info.
 */
static int hpmm_persist_clear(struct hpmm_device *hpmm_dev, struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_persist_device *pdev;
	const int flags = hpmm_info->flags;

	/* must have at least one flag, and only valid bits */
	if (flags == 0 || (flags & ~HPMM_CLEAR_VALID) != 0) {
		pr_err("hpmm clear flags is invalid");
		return -EINVAL;
	}

	mutex_lock(&p_dev_lock);
	list_for_each_entry(pdev, &persist_device_head, list) {
		if (persist_memory_clear(pdev, flags, hpmm_info->uuid)) {
			mutex_unlock(&p_dev_lock);
			return -EINVAL;
		}
	}
	mutex_unlock(&p_dev_lock);

	/* clear the metadata of the specific bbu node */
	if (flags & HPMM_CLEAR_METADATA)
		return hpmm_persist_clear_metadata(hpmm_info);

	return 0;
}

/*
 * Validate the user-supplied mapping range against the current task's
 * VMAs. On success stores the page count in @nr_pages and the matching
 * VMA in @vma, returning 0; otherwise -EINVAL.
 *
 * Fixed: vma fields (vm_start/vm_end/vm_flags) were previously read
 * after mmap_read_unlock(); all vma accesses in this function now
 * happen under the lock.
 * NOTE(review): callers still dereference *vma after we drop the lock —
 * they rely on the VMA staying stable for the duration of the ioctl;
 * confirm that assumption.
 */
static int check_vma_addr_valid(struct hpmm_mem_info *hpmm_info,
				unsigned long *nr_pages,
				struct vm_area_struct **vma)
{
	struct mm_struct *mm;
	unsigned long va_start;
	unsigned long va_length;
	unsigned long va_end;
	int ret = -EINVAL;

	va_start = hpmm_info->mapping_addr.start_addr;
	va_length = hpmm_info->mapping_addr.length;
	va_end = va_start + va_length;

	/* catches zero length and address-space wrap-around */
	if (va_end <= va_start)
		return -EINVAL;

	rcu_read_lock();
	get_task_struct(current);
	rcu_read_unlock();

	/* pin mm in case it is released during the mapping process */
	mm = get_task_mm(current);
	put_task_struct(current);
	if (!mm) {
		pr_err("hpmm get mm failed");
		return -EINVAL;
	}

	mmap_read_lock(mm);
	*vma = find_vma(mm, va_start);
	if (!(*vma) || !((*vma)->vm_flags & VM_HYBRID_MM))
		goto out_unlock;

	/* the request must cover the VMA exactly */
	if (va_start != (*vma)->vm_start || va_end != (*vma)->vm_end)
		goto out_unlock;

	/* huge-page VMAs require 2M alignment of start and offset */
	if ((*vma)->vm_flags & VM_HUGEPAGE) {
		if ((va_start & (HPAGE_SIZE - 1)) ||
		    ((va_start + hpmm_info->mapping_addr.offset) & (HPAGE_SIZE - 1)))
			goto out_unlock;
	}

	*nr_pages = (va_end - va_start) >> PAGE_SHIFT;
	ret = 0;

out_unlock:
	mmap_read_unlock(mm);
	mmput(mm);
	return ret;
}

/*
 * Validate the mgr context on @dev and compute the pfn window used for
 * the linear mapping: *ret_start_pfn is the fixed base (zone start plus
 * BBU_THP_MAPPING_OFFSET), *ret_offset_pfn the caller-requested offset
 * in pages. Returns 0, or a negative errno on failure.
 */
static int check_get_mapping_pfn(struct hpmm_persist_device *dev,
				 struct hpmm_mem_info *hpmm_info,
				 unsigned long *ret_start_pfn,
				 unsigned long *ret_offset_pfn,
				 unsigned long nr_pages)
{
	struct hpmm_mgr_context *mgr_context;
	pg_data_t *pgdat;
	struct zone *zone;
	int zoneid;
	unsigned long start_pfn, end_pfn, offset_pfn;

	/* the persistent mgr context must have been initialized first */
	mgr_context = (struct hpmm_mgr_context *)(dev->mgr_addr);
	if (!check_hpmm_mgr_context_valid((unsigned long)mgr_context)) {
		pr_err("the dev is wrong , node is not init. please do init first\n");
		return -EINVAL;
	}

	/* alloc page for the thp mapping */
	pgdat = NODE_DATA(dev->node);
	/* Find first usable zone. */
	for (zoneid = 0; zoneid <= MAX_NR_ZONES - 1; zoneid++) {
		zone = &pgdat->node_zones[zoneid];
		if (managed_zone(zone))
			break;
	}
	if (zoneid > MAX_NR_ZONES - 1) {
		pr_warn("Unpopulated node %d\n", dev->node);
		return -EFAULT;
	}

	/* mapping window starts at a fixed offset into the zone */
	start_pfn = zone->zone_start_pfn + BBU_THP_MAPPING_OFFSET;
	end_pfn = zone_end_pfn(zone);
	if (end_pfn - start_pfn < nr_pages) {
		pr_warn("Can't find suitable pfn range for %lu pages\n", nr_pages);
		return -ENOMEM;
	}

	/* the requested offset plus length must stay inside the zone */
	offset_pfn = hpmm_info->mapping_addr.offset >> PAGE_SHIFT;
	if (unlikely(start_pfn + offset_pfn + nr_pages > end_pfn)) {
		pr_err("the offset add length is larger than memory end");
		return -ENOMEM;
	}

	*ret_start_pfn = start_pfn;
	*ret_offset_pfn = offset_pfn;

	return 0;
}

/*
 * Re-establish the user linear mapping for the persist device on
 * hpmm_info->node. If no mapping was ever recorded (linear_mapping_pfn
 * == 0), a fresh mapping is created but -ENOTRECOVERABLE is returned so
 * the caller knows nothing could be recovered.
 *
 * Fixed: the pr_err referenced a nonexistent function name
 * ("hpmm_persist_thp_linear_recover"); the -ENOMEM branch now uses the
 * common goto-out exit like the other error paths (same return value).
 */
static int hpmm_persist_linear_memory_recover(struct hpmm_device *hpmm_dev,
					      struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_persist_device *dev;
	struct vm_area_struct *vma;
	struct hpmm_mgr_context *mgr_context;
	int ret = -EINVAL;
	unsigned long nr_pages;
	struct pgtable_head *pgtable_head;
	unsigned long start_pfn, offset_pfn;

	if (!hpmm_dev || !hpmm_info) {
		pr_err("hpmm_persist_linear_memory_recover hpmm_dev or hpmm_info is invalid\n");
		return -EINVAL;
	}

	if (!check_hpmm_persist_info_valid(hpmm_info))
		return -EINVAL;

	/* get the specific dev */
	dev = hpmm_persist_device_lookup(hpmm_info->node);
	if (!dev) {
		pr_info("the dev is not correct, node : %d is wrong", hpmm_info->node);
		return -EINVAL;
	}

	if (hpmm_info->flags & ~(HPMM_RECOVER_VALID)) {
		pr_err("hpmm recover flags in invalid\n");
		return -EINVAL;
	}

	/* get mm in case mm being released during mapping process */
	if (check_vma_addr_valid(hpmm_info, &nr_pages, &vma) != 0)
		return -EINVAL;

	ret = check_get_mapping_pfn(dev, hpmm_info, &start_pfn, &offset_pfn, nr_pages);
	if (ret != 0)
		goto out;

	mgr_context = (struct hpmm_mgr_context *)(dev->mgr_addr);
	pgtable_head = mgr_context->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head;
	if (pgtable_head->linear_mapping_pfn == 0) {
		/* nothing persisted yet: record and build a fresh mapping */
		pgtable_head->linear_mapping_pfn = start_pfn;
		pgtable_head->linear_mapping_nrpage = nr_pages;

		ret = hpmm_fault_linear_mapping(vma, vma->vm_start, vma->vm_end,
						start_pfn + offset_pfn, false);
		/* mapping succeeded, but there was no state to recover */
		if (ret == 0)
			ret = -ENOTRECOVERABLE;
		goto out;
	}

	/* the persisted mapping must be large enough for this request */
	if (unlikely(pgtable_head->linear_mapping_nrpage < nr_pages)) {
		pr_err("the nr pages is not correct. recover failed\n");
		ret = -ENOMEM;
		goto out;
	}

	/* replay the persisted mapping (recover = true) */
	ret = hpmm_fault_linear_mapping(vma, vma->vm_start, vma->vm_end,
					pgtable_head->linear_mapping_pfn + offset_pfn,
					true);

out:
	return ret;
}

/*
 * hpmm_persist_linear_memory_mapping()
 * alloc huge memory and set pgtable for them
 */
/*
 * hpmm_persist_linear_memory_mapping()
 * Allocate huge memory for the caller's VM_HYBRID_MM VMA and record the
 * mapping in the persistent pgtable head so it can be recovered later.
 *
 * Fixed: the pr_err referenced a different (nonexistent) function name,
 * "hpmm_persist_thp_linear_recover" — a stale copy-paste.
 */
static int hpmm_persist_linear_memory_mapping(struct hpmm_device *hpmm_dev,
					      struct hpmm_mem_info *hpmm_info)
{
	struct hpmm_persist_device *dev;
	struct vm_area_struct *vma;
	int ret = -EINVAL;
	unsigned long nr_pages;
	unsigned long start_pfn, offset_pfn;
	struct hpmm_mgr_context *mgr_context;
	struct pgtable_head *pgtable_head;

	if (!hpmm_dev || !hpmm_info) {
		pr_err("hpmm_persist_linear_memory_mapping hpmm_dev or hpmm_info is invalid\n");
		return -EINVAL;
	}

	if (!check_hpmm_persist_info_valid(hpmm_info))
		return -EINVAL;

	/* get the specific dev */
	dev = hpmm_persist_device_lookup(hpmm_info->node);
	if (!dev) {
		pr_info("the dev is not correct, node : %d is invalid", hpmm_info->node);
		return -EINVAL;
	}

	/* get mm in case mm being released during mapping process */
	if (check_vma_addr_valid(hpmm_info, &nr_pages, &vma) != 0)
		return -EINVAL;

	ret = check_get_mapping_pfn(dev, hpmm_info, &start_pfn, &offset_pfn, nr_pages);
	if (ret != 0)
		return ret;

	/* persist the mapping parameters so a later recover can replay it */
	mgr_context = (struct hpmm_mgr_context *)(dev->mgr_addr);
	pgtable_head = mgr_context->hpmm_mgr_info[HPMM_PGTABLE_HEAD].pgtable_head;
	pgtable_head->linear_mapping_pfn = start_pfn;
	pgtable_head->linear_mapping_nrpage = nr_pages;

	ret = hpmm_fault_linear_mapping(vma,
					vma->vm_start,
					vma->vm_end,
					start_pfn + offset_pfn,
					false);
	return ret;
}

/*
 * hpmm_persist_recover()
 * recover the pgtables and memory in persist device
 */
/*
 * hpmm_persist_recover()
 * Validate the recover flags and delegate to the linear-memory recover
 * implementation.
 */
static int hpmm_persist_recover(struct hpmm_device *hpmm_dev, struct hpmm_mem_info *hpmm_info)
{
	const int flags = hpmm_info->flags;

	/* at least one flag, and only valid bits */
	if (flags == 0 || (flags & ~HPMM_RECOVER_VALID) != 0) {
		pr_err("hpmm recover flags is invalid");
		return -EINVAL;
	}

	return hpmm_persist_linear_memory_recover(hpmm_dev, hpmm_info);
}

/*
 * Scan the PTE range [addr, end) under @pmd for a present PTE mapping
 * @pfn. Returns the matching virtual address, or 0 if not found.
 *
 * Fixed: pte_offset_map() was never paired with pte_unmap(), leaking
 * the temporary kmap on CONFIG_HIGHPTE configurations (and the per-PTE
 * mapping on newer kernels) — including on the early-return path.
 */
unsigned long get_addr_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				unsigned long pfn)
{
	pte_t *start_pte;
	pte_t *pte;
	unsigned long found = 0;

	start_pte = pte_offset_map(pmd, addr);
	pte = start_pte;
	do {
		if (pte_none(*pte))
			continue;
		if (!pte_present(*pte))
			continue;
		if (pte_pfn(*pte) == pfn) {
			found = addr;
			break;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(start_pte);
	return found;
}

/*
 * Scan the PMD range [addr, end) of @vma for a PTE mapping @pfn.
 * Returns the matching virtual address, or 0 if not found.
 *
 * NOTE(review): only pmd_none() is filtered — a huge (leaf) PMD would
 * be descended into as if it were a PTE table. Confirm that THP PMDs
 * cannot appear on this walk path.
 */
unsigned long get_addr_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long pfn)
{
	pmd_t *pmd;
	unsigned long next, va;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		va = get_addr_pte_range(vma, pmd, addr, next, pfn);
		if (va)
			return va;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Scan the PUD range [addr, end) of @vma for a PTE mapping @pfn.
 * Returns the matching virtual address, or 0 if not found.
 */
unsigned long get_addr_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long pfn)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long boundary;
	unsigned long hit;

	do {
		boundary = pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			hit = get_addr_pmd_range(vma, pud, addr, boundary, pfn);
			if (hit != 0)
				return hit;
		}
	} while (pud++, addr = boundary, addr != end);

	return 0;
}

/*
 * Scan the P4D range [addr, end) of @vma for a PTE mapping @pfn.
 * Returns the matching virtual address, or 0 if not found.
 */
unsigned long get_addr_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long pfn)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long boundary;
	unsigned long hit;

	do {
		boundary = p4d_addr_end(addr, end);
		if (!p4d_none(*p4d)) {
			hit = get_addr_pud_range(vma, p4d, addr, boundary, pfn);
			if (hit != 0)
				return hit;
		}
	} while (p4d++, addr = boundary, addr != end);

	return 0;
}

/*
 * Walk the page tables covering @vma looking for a PTE mapping @pfn.
 * Returns the matching virtual address, or 0 if @pfn is not mapped in
 * this VMA.
 */
unsigned long get_addr_vma(struct vm_area_struct *vma, unsigned long pfn)
{
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	unsigned long boundary;
	unsigned long hit;

	do {
		boundary = pgd_addr_end(addr, end);
		if (!pgd_none(*pgd)) {
			hit = get_addr_p4d_range(vma, pgd, addr, boundary, pfn);
			if (hit != 0)
				return hit;
		}
	} while (pgd++, addr = boundary, addr != end);

	return 0;
}

/*
 * Search the current task's VM_HYBRID_MM VMAs for a virtual address
 * mapping @pfn. Returns the address, or 0 if none is found.
 *
 * Fixed: current->mm was dereferenced without a NULL check — a kernel
 * thread (mm == NULL) would have oopsed in mmap_read_lock(). The
 * sibling get_user_page() already makes this check.
 */
unsigned long get_user_addr(unsigned long pfn)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long va = 0;

	/* kernel threads have no user address space to search */
	if (!mm)
		return 0;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_HYBRID_MM))
			continue;
		va = get_addr_vma(vma, pfn);
		/* only report addresses from anon-backed VMAs */
		if (vma->anon_vma && va != 0)
			break;
		cond_resched();
	}
	mmap_read_unlock(mm);
	return va;
}

static int hpmm_persist_query_uce(struct hpmm_uce_records *res)
{
	struct hpmm_persist_device *dev;
	struct hpmm_mgr_context *hpmm_mgr;
	struct uce_record_head *head;
	struct uce_record *record;
	unsigned long va;
	int i;

	res->count = 0;

	mutex_lock(&p_dev_lock);
	list_for_each_entry(dev, &persist_device_head, list) {
		hpmm_mgr = (struct hpmm_mgr_context *)dev->mgr_addr;
		if (!check_hpmm_mgr_context_valid((unsigned long)hpmm_mgr)) {
			pr_err("the dev is wrong , node is not init. please do init first\n");
			mutex_unlock(&p_dev_lock);
			return -1;
		}

		head = hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head;
		for_each_uce_record(head, record, i) {
			if (test_bit(i, head->bitmap)) {
				va = get_user_addr(record->pfn);
				if (!va)
					continue;
				res->records[res->count].va = va;
				res->records[res->count].type = record->type;
				res->count++;
				if (res->count == MAX_UCE_RECORD_NR)
					goto out;
			}
		}
	}
out:
	mutex_unlock(&p_dev_lock);
	return 0;
}

/*
 * Resolve @addr in the current task's address space to its backing
 * struct page by a manual page-table walk. Returns the page, or NULL
 * when @addr is unmapped or invalid. The PTE is inspected under its
 * page-table lock.
 *
 * NOTE(review): find_vma() only guarantees addr < vma->vm_end; an
 * address below vma->vm_start (in a gap) is not rejected here, though
 * the table walk will then find nothing.
 * NOTE(review): huge (leaf) PMDs are not handled explicitly — pmd_bad()
 * is relied on to reject non-table entries; confirm for THP mappings.
 */
static struct page *get_user_page(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = current->mm;

	/* kernel threads have no user address space */
	if (!mm)
		return NULL;

	mmap_read_lock(mm);
	if (!addr || !find_vma(mm, addr))
		goto not_found;

	/* descend pgd -> p4d -> pud -> pmd, bailing out on any hole */
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		goto not_found;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		goto not_found;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		goto not_found;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		goto not_found;
	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (pte_none(*ptep) || !pte_present(*ptep))
		goto ptep_not_found;
	page = pfn_to_page(pte_pfn(*ptep));
	pte_unmap_unlock(ptep, ptl);
	mmap_read_unlock(mm);

	return page;

ptep_not_found:
	pte_unmap_unlock(ptep, ptl);
not_found:
	mmap_read_unlock(mm);
	return NULL;
}

/*
 * Compare a stored UCE record @r1 against a user-supplied record @r2:
 * true when r2->va resolves to a page whose pfn and type both match r1.
 */
static bool cmp_record(struct uce_record *r1, struct uce_record *r2)
{
	struct page *page = get_user_page(r2->va);

	if (!page)
		return false;

	return r1->pfn == page_to_pfn(page) && r1->type == r2->type;
}

/*
 * Clear the UCE records named in @res from the persist devices: for
 * each user-supplied record, find the matching stored record (pfn via
 * the caller's mapping, plus type) and free its slot. Unmatched records
 * are logged and skipped. Returns 0, or -EINVAL on bad input or an
 * uninitialized device.
 *
 * Fixed: returned -1 (seen as -EPERM by user space) on an invalid mgr
 * context; use -EINVAL, consistent with the rest of this file.
 */
static int hpmm_persist_clear_uce(struct hpmm_uce_records *res)
{
	struct hpmm_persist_device *dev;
	struct hpmm_mgr_context *hpmm_mgr;
	struct uce_record_head *head;
	struct uce_record *record;
	int uce_index, j;

	if (!res)
		return -EINVAL;

	if (res->count > MAX_UCE_RECORD_NR) {
		pr_err("the res count is not valid, count should lower than 128");
		return -EINVAL;
	}

	for (uce_index = 0; uce_index < res->count; uce_index++) {
		bool found = false;

		mutex_lock(&p_dev_lock);
		list_for_each_entry(dev, &persist_device_head, list) {
			hpmm_mgr = (struct hpmm_mgr_context *)dev->mgr_addr;
			if (!check_hpmm_mgr_context_valid((unsigned long)hpmm_mgr)) {
				pr_err("the dev is wrong , node is not init. please do init first\n");
				mutex_unlock(&p_dev_lock);
				return -EINVAL;
			}
			head = hpmm_mgr->hpmm_mgr_info[HPMM_UCE_RECORD_HEAD].uce_record_head;
			for_each_uce_record(head, record, j) {
				if (test_bit(j, head->bitmap)) {
					if (cmp_record(record, &res->records[uce_index])) {
						/* wipe the slot and free it in the bitmap */
						found = true;
						record->pfn = 0;
						record->type = 0;
						clear_bit(j, head->bitmap);
						break;
					}
				}
			}
			if (found)
				break;
		}
		mutex_unlock(&p_dev_lock);
		if (!found)
			pr_info("uce record index: %d not found\n", uce_index);
		cond_resched();
	}
	return 0;
}

/* Callback table registered with the hpmm core for persistent memory. */
static struct hpmm_driver hpmm_persist_driver = {
	.type = HPMM_PERSISTENT_MEMORY,
	.name = "hpmm persistent memory",
	.create = hpmm_persist_create,
	.destroy = hpmm_persist_destroy,
	.clear = hpmm_persist_clear,
	.recover = hpmm_persist_recover,
	.linear_memory_mapping = hpmm_persist_linear_memory_mapping,
	.query_uce = hpmm_persist_query_uce,
	.clear_uce = hpmm_persist_clear_uce,
};

/*
 * If @page lies within any device's BBU metadata area, wipe that whole
 * area and return true; otherwise return false.
 */
bool check_and_clear_meta(struct page *page)
{
	struct hpmm_persist_device *dev;
	unsigned long addr = (unsigned long)page_to_virt(page);
	bool cleared = false;

	mutex_lock(&p_dev_lock);
	list_for_each_entry(dev, &persist_device_head, list) {
		unsigned long begin = (unsigned long)dev->mgr_addr;
		unsigned long end = begin + (BBU_META_NR_PAGES << PAGE_SHIFT);

		if (addr >= begin && addr < end) {
			memset((char *)begin, 0, BBU_META_NR_PAGES << PAGE_SHIFT);
			cleared = true;
			break;
		}
	}
	mutex_unlock(&p_dev_lock);

	return cleared;
}

/*
 * Find the persist device registered for @node; returns the device or
 * NULL. This duplicated the static hpmm_persist_device_lookup() walk
 * (same list, same lock, same match condition) — delegate so the
 * locking and list logic lives in one place.
 */
struct hpmm_persist_device *find_hpmm_persist_device(int node)
{
	return hpmm_persist_device_lookup(node);
}

/* Register the persistent-memory driver with the hpmm core and init UCE handling. */
void __init hpmm_persist_register(void)
{
	hpmm_register_memory_driver(&hpmm_persist_driver);
	hpmm_uce_init();
}

/* Tear down UCE handling, then unregister the driver (reverse of register). */
void __exit hpmm_persist_unregister(void)
{
	hpmm_uce_exit();
	hpmm_unregister_memory_driver(&hpmm_persist_driver);
}
