/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: support memory snapshot feature
 * Author: heyuqiang
 * Create: 2024-05-10
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm_types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cma.h>
#include <linux/crc32.h>
#include <linux/hal/cache_ecc_check.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <asm/system_misc.h>
#include <asm/tlb.h>
#include <linux/rtos_mm_types.h>
#include <linux/rtos_mem_snapshot.h>
#include "../../mm/cma.h"
#include <uapi/linux/rtos_mem_snapshot.h>

/* PAGE_SIZE expressed in KiB; used by the meminfo proc output. */
#define PAGE_SIZE_KB (PAGE_SIZE >> 10)

/* Protects g_task_attach_cma_list and the per-mm cma_task[] slots. */
static DEFINE_MUTEX(task_attach_cma_lock);
/* All live mem_snapshot_task attach records, across every task and CMA area. */
static LIST_HEAD(g_task_attach_cma_list);

/* Serializes the link/unlink/release ioctl paths. */
static DEFINE_MUTEX(mem_snapshot_lock);

/*
 * Maps one persist-mem ioctl command code to its handler.
 * Looked up linearly by persist_mem_ioctl().
 */
struct pmem_ioctl_cmd_func {
	unsigned int cmd;
	int (*cmd_func)(struct persist_mem_ioctl_args *ioctl_args);
};

/* Set once at early boot by the "mem_snapshot" kernel parameter. */
static bool g_mem_snapshot;

/* Return true when the memory-snapshot feature was enabled on the cmdline. */
bool mem_snapshot_is_enabled(void)
{
	return g_mem_snapshot;
}

/*
 * early_param handler: the mere presence of "mem_snapshot" on the kernel
 * command line enables the feature; the argument value is ignored.
 */
static int __init setup_mem_snapshot(char *arg)
{
	g_mem_snapshot = true;
	return 0;
}
early_param("mem_snapshot", setup_mem_snapshot);

/*
 * Return the value of bit @offset in @bitmap.
 *
 * The open-coded byte fetch (bitmap_get_value8 on the aligned byte, then a
 * mask on the low three offset bits) is exactly what the standard test_bit()
 * helper does; use it directly.
 */
bool bitmap_get_bitvalue(unsigned long *bitmap, unsigned long offset)
{
	return test_bit(offset, bitmap);
}

/*
 * Validate that [addr, addr + length) is a page-aligned, non-wrapping range
 * lying entirely inside the task's snapshot VMA, and that it does not touch
 * the reserved header area (one header page plus the persisted CMA bitmap)
 * at the start of the mapping.
 *
 * @addr:   user virtual address of the range start
 * @length: range length in bytes
 * @p:      per-task snapshot bookkeeping for this CMA area
 * @cma:    backing CMA area (provides bitmap_size for the header reservation)
 */
static inline bool is_addr_valid(unsigned long addr, unsigned long length,
	struct mem_snapshot_task *p, struct cma *cma)
{
	/* "addr + length <= addr" also rejects wrap-around of the range end */
	if ((addr & ~PAGE_MASK) || (length & ~PAGE_MASK) || addr < p->vm_start || addr > p->vm_end ||
		addr + length <= addr || addr + length <= p->vm_start || addr + length > p->vm_end ||
		addr < p->vm_start + (PAGE_SIZE + cma->bitmap_size))
		return false;
	return true;
}

/*
 * Create the per-task bookkeeping record binding @vma to CMA area @cma_id.
 *
 * Rejects a second attach of the same (vma, cma_id) pair, allocates the
 * record plus a per-task bitmap (one bit per page of the VMA), and publishes
 * the record both in the task's mm and on the global attach list.
 *
 * Returns 0 on success, -EEXIST on duplicate attach, -ENOMEM on allocation
 * failure.
 */
static int add_attach_task(int cma_id, struct vm_area_struct *vma)
{
	struct mem_snapshot_task *p;
	unsigned long size;

	mutex_lock(&task_attach_cma_lock);
	list_for_each_entry(p, &g_task_attach_cma_list, list) {
		if (p->vma == vma && p->cma_id == cma_id) {
			pr_err("%s():Error: Do not repeat mem_snapshot_attach.", __func__);
			mutex_unlock(&task_attach_cma_lock);
			return -EEXIST;
		}
	}

	/*
	 * mem_snapshot_task is bound to the process,
	 * mem_snapshot_task are not repeatedly added to the linked list,
	 */
	p = kzalloc(sizeof(*p), GFP_KERNEL);	/* zeroed: no stale fields */
	if (!p) {
		mutex_unlock(&task_attach_cma_lock);
		return -ENOMEM;
	}

	p->cma_id = cma_id;
	p->pid = current->pid;
	p->vma = vma;
	p->vm_start = vma->vm_start;
	p->vm_end = vma->vm_end;
	/* one bitmap bit per page of the mapping */
	size = PHYS_PFN(p->vm_end - p->vm_start);
	p->phymap_bitmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!p->phymap_bitmap) {
		kfree(p);
		mutex_unlock(&task_attach_cma_lock);
		return -ENOMEM;
	}
	mm_to_rtos_mm(current->mm)->cma_task[cma_id] = p;

	list_add_tail(&p->list, &g_task_attach_cma_list);
	mutex_unlock(&task_attach_cma_lock);
	return 0;
}

/*
 * Drop every per-page reference this task recorded in its phymap bitmap.
 *
 * Walking set-bit runs just to clear each of them leaves an all-zero bitmap,
 * so clear the whole bitmap in a single call instead.
 *
 * @cma is kept in the signature for symmetry with the sibling helpers even
 * though only the task's bitmap is touched here.
 */
static void clear_task_all_page_refcount(struct cma *cma, struct mem_snapshot_task *p)
{
	unsigned long nbits = PHYS_PFN(p->vm_end - p->vm_start);

	bitmap_zero(p->phymap_bitmap, nbits);
}

/*
 * Check whether any attached task still references a page inside
 * [pfn_start, pfn_end) of CMA area @cma_id.
 *
 * Caller must hold task_attach_cma_lock — "_no_lock" means no locking is
 * done inside.
 *
 * Returns 0 when the pfn range is unused by every task, -EBUSY when some
 * task's phymap bitmap still has a bit set inside the range.
 */
static int check_phymap_refcount_no_lock(struct cma *cma, int cma_id,
	unsigned long pfn_start, unsigned long pfn_end)
{
	struct mem_snapshot_task *p;
	unsigned long start, end, next_set_bit;

	/* convert absolute pfns to bit offsets relative to the CMA base */
	start = pfn_start - cma->base_pfn;
	end = pfn_end - cma->base_pfn;

	list_for_each_entry(p, &g_task_attach_cma_list, list) {
		if (p->cma_id != cma_id)
			continue;
		next_set_bit = find_next_bit(p->phymap_bitmap, end, start);
		if (next_set_bit < end) {
			pr_err("%s():error %lx pfn be used!\n",
				__func__, cma->base_pfn + next_set_bit);
			return -EBUSY;
		}
	}
	return 0;
}

/*
 * Tear down the attach record binding @vm_mm to CMA area @cma_id: clear the
 * task's per-page references, unlink the record from the global list, free
 * its bitmap and memory, and clear the mm's slot.
 *
 * Returns 0 on success, -EINVAL when no attach record exists.
 */
static int del_attach_task(int cma_id, struct mm_struct *vm_mm)
{
	struct cma *cma = &cma_areas[cma_id];
	struct mem_snapshot_task *task;
	int ret = -EINVAL;

	mutex_lock(&task_attach_cma_lock);
	task = mm_to_rtos_mm(vm_mm)->cma_task[cma_id];
	if (task) {
		/* clear reference counting of the physical page */
		clear_task_all_page_refcount(cma, task);

		list_del(&task->list);
		bitmap_free(task->phymap_bitmap);
		kfree(task);
		mm_to_rtos_mm(vm_mm)->cma_task[cma_id] = NULL;
		ret = 0;
	}
	mutex_unlock(&task_attach_cma_lock);

	return ret;
}

/*
 * Translate a user virtual address inside the snapshot VMA of CMA area @id
 * into the corresponding physical pfn.
 *
 * Returns 0 on failure (no attach record for the task, or the range fails
 * is_addr_valid()); callers treat 0 as the error marker.
 */
static unsigned long cma_virt_to_pfn(struct cma *cma, int id, unsigned long addr, unsigned long length)
{
	struct mem_snapshot_task *task = mm_to_rtos_mm(current->mm)->cma_task[id];

	if (!task) {
		pr_err("%s(): id[%d] error, not find\n", __func__, id);
		return 0;
	}

	if (!is_addr_valid(addr, length, task, cma)) {
		pr_err("%s():addr error out of cma range\n", __func__);
		return 0;
	}

	return cma->base_pfn + PHYS_PFN(addr - task->vm_start);
}

/*
 * Allocate (or claim) @length bytes of CMA backing for the calling task and
 * mark the backing pages in the task's phymap bitmap.
 *
 * @id:     index into cma_areas[]
 * @addr:   0 to let the CMA allocator pick a free region; otherwise the
 *          exact user virtual address (inside the snapshot VMA) to back
 * @length: byte length; must be non-zero and page aligned
 *
 * Returns the user virtual address of the linked region on success.
 * On failure returns a negative errno cast to unsigned long; the caller
 * (pmem_link) decodes it via IS_ERR()/PTR_ERR().
 */
static unsigned long build_phymap(int id, unsigned long addr, unsigned long length)
{
	struct cma *cma;
	struct mem_snapshot_task *p;
	struct vm_area_struct *vma;
	struct page *page;
	unsigned long count, offset, pfn, output_addr;
	int ret;

	if (length == 0 || (length & ~PAGE_MASK))
		return -EINVAL;

	cma = &cma_areas[id];

	p = mm_to_rtos_mm(current->mm)->cma_task[id];
	if (!p) {
		pr_err("%s():alloc failed, task[%d] cma[%d] not find\n", __func__, current->pid, id);
		return -EINVAL;
	}
	vma = p->vma;
	count = PHYS_PFN(PAGE_ALIGN(length));

	/* No virtual address is specified. The system automatically allocates an available area. */
	if (addr == 0) {
		page = rtos_cma_alloc(cma, count, 0, false);
		if (!page) {
			pr_err("%s(): cma_alloc failed\n", __func__);
			return -EFAULT;
		}
		pfn = page_to_pfn(page);
		/* translate the allocated pfn range back to an offset in the VMA */
		offset = PFN_PHYS(pfn - cma->base_pfn);
		output_addr = vma->vm_start + offset;
	} else {
		if (!is_addr_valid(addr, length, p, cma)) {
			pr_err("%s():addr error out of cma range\n", __func__);
			return -EINVAL;
		}
		offset = addr - p->vm_start;
		pfn = cma->base_pfn + PHYS_PFN(offset);
		/* claim the exact pfn range that backs the requested address */
		ret = cma_alloc_from_pfn(cma, pfn, count, 0, false);
		if (ret != 0) {
			pr_err("%s():cma_alloc_from_pfn alloc %ld pages failed\n", __func__, count);
			return ret;
		}
		output_addr = addr;
	}

	/* record which pages this task now references */
	bitmap_set(p->phymap_bitmap, PHYS_PFN(offset), count);

	return output_addr;
}

/*
 * Undo a build_phymap() link for [addr, addr + length): clear the task's
 * phymap bits and drop any populated PTEs in the range. The pages are not
 * handed back to the CMA allocator here; that happens separately through
 * mem_snapshot_release().
 *
 * Returns 0 on success, -EINVAL on a bad id, address, or length.
 */
static int clear_phymap(int id, unsigned long addr, unsigned long length)
{
	struct mem_snapshot_task *task;
	struct cma *cma;
	unsigned long offset, nr_pages;

	if (addr == 0 || length == 0)
		return -EINVAL;

	cma = &cma_areas[id];
	nr_pages = PHYS_PFN(PAGE_ALIGN(length));

	task = mm_to_rtos_mm(current->mm)->cma_task[id];
	if (!task) {
		pr_err("%s():id[%d] error, not find\n", __func__, id);
		return -EINVAL;
	}

	if (!is_addr_valid(addr, length, task, cma)) {
		pr_err("%s():addr out of range\n", __func__);
		return -EINVAL;
	}

	offset = addr - task->vm_start;
	bitmap_clear(task->phymap_bitmap, PHYS_PFN(offset), nr_pages);

	/* tear down any live page-table entries for the unlinked range */
	zap_page_range(task->vma, addr, PAGE_ALIGN(length));

	return 0;
}

/*
 * Return [addr, addr + length) of CMA area @id to the allocator, but only
 * when no attached task still references a page in the range.
 *
 * Fix: @ret was declared unsigned long while holding negative errnos from
 * int-returning helpers; use int, and collapse the duplicated unlock paths
 * into one exit label.
 *
 * Returns 0 on success, -EINVAL for a bad address, -EBUSY when a page is
 * still referenced, or the error from cma_release_from_count().
 */
static int mem_snapshot_release(int id, unsigned long addr, unsigned long length)
{
	struct cma *cma = &cma_areas[id];
	struct page *page;
	unsigned long pfn;
	unsigned long count;
	int ret;

	count = PHYS_PFN(PAGE_ALIGN(length));
	pfn = cma_virt_to_pfn(cma, id, addr, length);
	if (pfn == 0)
		return -EINVAL;

	page = pfn_to_page(pfn);
	mutex_lock(&task_attach_cma_lock);
	/* refuse to free pages any task still has linked */
	ret = check_phymap_refcount_no_lock(cma, id, pfn, pfn + count);
	if (ret != 0)
		goto out_unlock;

	ret = cma_release_from_count(cma, page, count);
	if (ret != 0)
		pr_err("%s():cma_release_from_count failed\n", __func__);

out_unlock:
	mutex_unlock(&task_attach_cma_lock);
	return ret;
}

/*
 * Record in CMA area @id's persistent block header whether the area should
 * be reset (discarded) on the next reboot.
 *
 * Returns 0 on success, -EINVAL when the area has no block header.
 */
static int cma_mark(int id, bool flag)
{
	struct persist_mem_block *block = cma_areas[id].block;

	if (!block)
		return -EINVAL;

	block->reset_on_reboot = flag;
	return 0;
}

static int get_cma_status(int id, int *state)
{
	struct cma *cma;

	cma = &cma_areas[id];
	if (!cma->block)
		return -EINVAL;
	*state = cma->block->block_valid;

	return 0;
}

/*
 * Ioctl handler: link (allocate/claim) CMA backing for the request and
 * report the resulting user virtual address back in ioctl_args->addr.
 *
 * build_phymap() returns either a valid address or a negative errno encoded
 * in an unsigned long; decode it with IS_ERR_VALUE() instead of round-trip
 * casts through void *.
 */
static int pmem_link(struct persist_mem_ioctl_args *ioctl_args)
{
	unsigned long addr;

	mutex_lock(&mem_snapshot_lock);
	addr = build_phymap(ioctl_args->id, ioctl_args->addr, ioctl_args->length);
	mutex_unlock(&mem_snapshot_lock);
	if (IS_ERR_VALUE(addr)) {
		pr_err("%s():build_phymap failed\n", __func__);
		return (int)addr;
	}
	ioctl_args->addr = addr;
	return 0;
}

/*
 * Ioctl handler: unlink previously linked CMA backing. Thin wrapper that
 * serializes clear_phymap() under the global snapshot lock.
 */
static int pmem_unlink(struct persist_mem_ioctl_args *ioctl_args)
{
	int err;

	mutex_lock(&mem_snapshot_lock);
	err = clear_phymap(ioctl_args->id, ioctl_args->addr, ioctl_args->length);
	mutex_unlock(&mem_snapshot_lock);

	if (err != 0)
		pr_err("%s():clear_phymap failed\n", __func__);
	return err;
}
/*
 * Ioctl handler: give CMA pages back to the allocator, serialized by the
 * global snapshot lock.
 */
static int pmem_release(struct persist_mem_ioctl_args *ioctl_args)
{
	int err;

	mutex_lock(&mem_snapshot_lock);
	err = mem_snapshot_release(ioctl_args->id, ioctl_args->addr,
		ioctl_args->length);
	mutex_unlock(&mem_snapshot_lock);
	return err;
}
static int pmem_get_size(struct persist_mem_ioctl_args *ioctl_args)
{
	ioctl_args->length = cma_areas[ioctl_args->id].count * PAGE_SIZE;
	return 0;
}
/* Ioctl handler: set or clear the area's reset-on-reboot mark. */
static int pmem_reset_on_reboot(struct persist_mem_ioctl_args *ioctl_args)
{
	int err = cma_mark(ioctl_args->id, ioctl_args->mark);

	if (err != 0)
		pr_err("%s():cma_mark failed\n", __func__);
	return err;
}
/*
 * Ioctl handler: detach the calling task from CMA area id and report the
 * detached mapping's start address and length back to user space.
 */
static int pmem_detach(struct persist_mem_ioctl_args *ioctl_args)
{
	struct mem_snapshot_task *task;
	int err;

	task = mm_to_rtos_mm(current->mm)->cma_task[ioctl_args->id];
	if (!task) {
		pr_err("%s(),id[%d] not find\n", __func__, ioctl_args->id);
		return -EINVAL;
	}

	ioctl_args->addr = task->vm_start;
	ioctl_args->length = task->vm_end - task->vm_start;

	err = del_attach_task(ioctl_args->id, current->mm);
	if (err != 0)
		pr_err("%s(),task delete cma[%d] failed\n", __func__, ioctl_args->id);
	return err;
}
/* Ioctl handler: report the persistent block's validity state for this id. */
static int pmem_get_status(struct persist_mem_ioctl_args *ioctl_args)
{
	int *state = &ioctl_args->status;

	return get_cma_status(ioctl_args->id, state);
}

/*
 * Ioctl handler: copy the CMA allocation bitmap into the user buffer at
 * ioctl_args->bitmap. The caller-supplied bitmap_size must match the
 * kernel's exactly.
 */
static int pmem_get_bitmap(struct persist_mem_ioctl_args *ioctl_args)
{
	struct cma *cma = &cma_areas[ioctl_args->id];

	if (!cma->bitmap || !ioctl_args->bitmap)
		return -EINVAL;

	if (ioctl_args->bitmap_size != cma->bitmap_size) {
		pr_err("%s():bitmap_size err, expect be %ld\n", __func__, cma->bitmap_size);
		return -EINVAL;
	}

	/* NOTE(review): copied without cma->lock, so the snapshot may race
	 * with concurrent alloc/release — confirm this is acceptable. */
	if (copy_to_user((void __user *)ioctl_args->bitmap, cma->bitmap, cma->bitmap_size))
		return -EFAULT;

	return 0;
}

/* Dispatch table consulted by persist_mem_ioctl(); one entry per command. */
static struct pmem_ioctl_cmd_func g_pmem_ioctl_cmd_func[] = {
	{PERSIST_MEM_IOC_BUILD_PHYMAP,    pmem_link},
	{PERSIST_MEM_IOC_CLEAR_PHYMAP,    pmem_unlink},
	{PERSIST_MEM_IOC_RELEASE,         pmem_release},
	{PERSIST_MEM_IOC_GET_SIZE,        pmem_get_size},
	{PERSIST_MEM_IOC_RESET_ON_REBOOT, pmem_reset_on_reboot},
	{PERSIST_MEM_IOC_DETACH,          pmem_detach},
	{PERSIST_MEM_IOC_GET_STATUS,      pmem_get_status},
	{PERSIST_MEM_IOC_GET_BITMAP,      pmem_get_bitmap},
};

/*
 * Unlocked/compat ioctl entry point for the persist_mem device.
 *
 * Copies the argument struct in, validates the CMA id, dispatches to the
 * matching handler in g_pmem_ioctl_cmd_func[], and copies updated fields
 * back out. Returns 0 on success or a negative errno (-EFAULT on copy
 * failure, -ENOIOCTLCMD for an unknown command, or the handler's error).
 */
static long persist_mem_ioctl(struct file *file, unsigned int cmd, unsigned long args)
{
	int i;
	int ret;
	struct persist_mem_ioctl_args ioctl_args = {0};

	if (copy_from_user(&ioctl_args, (void __user *)args, sizeof(ioctl_args)))
		return -EFAULT;

	if (ioctl_args.id < 0 || ioctl_args.id >= cma_area_count) {
		pr_err("%s():id[%d] more than cma_area_count[%d].", __func__, ioctl_args.id, cma_area_count);
		return -EINVAL;
	}

	ret = -ENOIOCTLCMD;
	for (i = 0; i < ARRAY_SIZE(g_pmem_ioctl_cmd_func); i++) {
		if (cmd == g_pmem_ioctl_cmd_func[i].cmd) {
			ret = g_pmem_ioctl_cmd_func[i].cmd_func(&ioctl_args);
			break;
		}
	}
	if (ret != 0)
		return ret;

	/*
	 * Fix: copy_to_user() returns the number of bytes NOT copied; that
	 * count must not leak out as a positive ioctl return value. Report
	 * -EFAULT on any partial copy instead.
	 */
	if (copy_to_user((void __user *)args, &ioctl_args, sizeof(ioctl_args)))
		return -EFAULT;
	return 0;
}

/*
 * Page-fault handler for a persist_mem mapping.
 *
 * Resolves the faulting offset to a pfn in the CMA area selected by
 * vma->vm_pgoff, after verifying that:
 *  - the task has an attach record for this area,
 *  - the fault is not in the reserved header (first page + persisted bitmap),
 *  - the task has linked the page via build_phymap() (phymap bit set), and
 *  - the CMA allocator still owns the page (cma bitmap bit set).
 * Any failed check raises SIGBUS (SIGSEGV when the area has no block header
 * or bitmap at all). On success the referenced page is handed to the fault
 * core via vmf->page.
 */
static vm_fault_t persist_mem_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn;
	struct page *page;
	unsigned long offset;
	struct mem_snapshot_task *task;
	struct cma *cma = NULL;
	bool flag;
	int id = vma->vm_pgoff;

	if (id < 0 || id >= cma_area_count) {
		pr_err("%s():id[%d] more than cma_area_count[%d].", __func__, id, cma_area_count);
		return VM_FAULT_SIGBUS;
	}

	task = mm_to_rtos_mm(current->mm)->cma_task[id];
	if (!task) {
		pr_err("%s():sigbus error,vma not find\n", __func__);
		return VM_FAULT_SIGBUS;
	}
	cma = &cma_areas[id];
	/* &cma_areas[id] can never be NULL; the !cma test is purely defensive */
	if (!cma || !cma->block || !cma->bitmap)
		return VM_FAULT_SIGSEGV;

	/* faults into the header page + persisted bitmap are forbidden */
	offset = vmf->address - vma->vm_start;
	if (offset < (PAGE_SIZE + cma->bitmap_size)) {
		pr_err("%s():sigbus error, don't permistion\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	pfn = cma->base_pfn + PHYS_PFN(offset);

	/* the task must have linked this page via build_phymap() first */
	flag = bitmap_get_bitvalue(task->phymap_bitmap, PHYS_PFN(offset));
	if (!flag) {
		pr_err("%s():sigbus error, No pfn[%lx] physical mapping is created\n", __func__, pfn);
		return VM_FAULT_SIGBUS;
	}

	/* check whether the PFN is returned to the buddy system. */
	/* NOTE(review): indexing cma->bitmap by PHYS_PFN(offset) assumes
	 * order_per_bit == 0 (one bitmap bit per page) — confirm for these
	 * areas. */
	spin_lock_irq(&cma->lock);
	flag = bitmap_get_bitvalue(cma->bitmap, PHYS_PFN(offset));
	if (!flag) {
		pr_err("%s():sigbus error,pfn[%lx] has been returned to the buddy system.\n", __func__, pfn);
		spin_unlock_irq(&cma->lock);
		return VM_FAULT_SIGBUS;
	}

	page = pfn_to_page(pfn);
	vmf->page = page;
	get_page(page);	/* reference held for the page inserted by fault core */
	spin_unlock_irq(&cma->lock);
	return 0;
}

/*
 * VMA close hook: tear down the task's attach record for this CMA area.
 * current->mm is NULL inside close, so the mm is taken from the vma itself.
 */
static void persist_mem_vm_close(struct vm_area_struct *area)
{
	int id = area->vm_pgoff;

	if (id >= 0 && id < cma_area_count) {
		/* in vm_close() current->mm=NULL, should use area->vm_mm */
		(void)del_attach_task(id, area->vm_mm);
		return;
	}

	pr_err("%s():id[%d] more than cma_area_count[%d].", __func__, id, cma_area_count);
}

/* Reject any attempt to split a persist_mem VMA: its geometry is fixed. */
static int persist_mem_vm_split(struct vm_area_struct *vma, unsigned long addr)
{
	/* persist_mem vma can't be split. */
	return -EINVAL;
}

/* Reject mremap(): the mapping must stay at its original range. */
static int persist_mem_vm_mremap(struct vm_area_struct *vma)
{
	/* persist_mem vma can't be mremap. */
	return -EINVAL;
}

/* Reject mprotect(): permissions are fixed at mmap time. */
static int persist_mem_vm_mprotect(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, unsigned long newflags)
{
	/* persist_mem vma don't support mprotect. */
	return -EINVAL;
}

/* VMA callbacks: lazy fault-in, attach teardown on close, no reshaping. */
static const struct vm_operations_struct persist_mem_vm_ops = {
	.close = persist_mem_vm_close,
	.fault = persist_mem_vm_fault,
	.split = persist_mem_vm_split,
	.mremap = persist_mem_vm_mremap,
	.mprotect = persist_mem_vm_mprotect,
};

/*
 * mmap handler for the persist_mem device.
 *
 * vma->vm_pgoff selects the CMA area id. The mapping must be MAP_SHARED,
 * carry at least one of read/write/exec, cover exactly the whole CMA area,
 * and a task may map a given area only once. Pages are populated lazily by
 * persist_mem_vm_fault().
 */
static int persist_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	int id = vma->vm_pgoff;
	struct cma *cma;
	int ret;

	/*
	 * VM_READ VM_WRITE VM_EXEC must be set one,
	 * at least when !PROT_NONE. Here not support PROT_NONE
	 * VM_SHARED must be set.
	 */
	if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (id < 0 || id >= cma_area_count) {
		pr_err("%s():id[%d] more than cma_area_count[%d]\n",
			__func__, id, cma_area_count);
		return -EINVAL;
	}
	cma = &cma_areas[id];
	/*
	 * The VMA of the global CMA is added for the first time.
	 * Duplicate mmap cma memory is not allowed.
	 */
	if (mm_to_rtos_mm(current->mm)->cma_task[id] != NULL) {
		pr_err("%s():error, cma[%d] has been mmap", __func__, id);
		return -EINVAL;
	}

	if (size != PFN_PHYS(cma->count)) {
		pr_err("%s():mmap size[%ld] not equal to cma[%d].size=%lld\n",
			__func__, size, id, PFN_PHYS(cma->count));
		return -EINVAL;
	}

	ret = add_attach_task(id, vma);
	if (ret != 0)
		return ret;

	/*
	 * Fix: install vm_ops only after every check and the attach succeed,
	 * so a failed mmap does not leave the ops (and their close callback)
	 * on a vma that was never attached.
	 */
	vma->vm_ops = &persist_mem_vm_ops;

	return 0;
}

/* define file_operations for cma */
static const struct file_operations mem_fops = {
	.owner = THIS_MODULE,
	.mmap = persist_mem_mmap,
	.unlocked_ioctl = persist_mem_ioctl,
	.compat_ioctl = persist_mem_ioctl,	/* NOTE(review): assumes the args struct layout is identical for 32-bit callers — confirm */
};

/* /dev/persist_mem character device; minor number assigned dynamically. */
static struct miscdevice dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "persist_mem",
	.fops = &mem_fops,
};

/*
 * Reset notifier: seal each persistent CMA area before reboot by stamping
 * CRCs into its block header so integrity can be verified afterwards.
 *
 * crc32_bitmap covers the allocation bitmap; crc32_page accumulates a CRC
 * over every data page whose allocation bit is set.
 */
static int mem_snapshot_reboot_notifier(struct notifier_block *nb,
	unsigned long action, void *data)
{
	int id;
	int i;
	struct cma *cma;
	struct persist_mem_block *head;
	unsigned long data_addr;

	for (id = 0; id < cma_area_count; id++) {
		cma = &cma_areas[id];
		head = cma->block;
		if (!cma->block || !cma->bitmap)
			continue;
		head->crc32_bitmap = crc32(0, (const uint8_t *)cma->bitmap, cma->bitmap_size);
		head->crc32_page = 0;
		/* data pages start right after the header page + persisted bitmap */
		data_addr = (uintptr_t)head + (PAGE_SIZE + cma->bitmap_size);
		/* NOTE(review): the loop index starts at CMA_HEADER_PFN_START while
		 * data_addr starts immediately after the header — assumes these two
		 * correspond; confirm against the block layout definition. */
		for (i = CMA_HEADER_PFN_START; i < cma->count; i++) {
			if (bitmap_get_bitvalue(cma->bitmap, i))
				head->crc32_page = crc32(head->crc32_page, (const uint8_t *)data_addr, PAGE_SIZE);
			data_addr += PAGE_SIZE;
		}
	}

	return NOTIFY_DONE;
}

/* Registered with the reset-notifier chain by persist_mem_init(). */
static struct notifier_block cma_reboot_nb = {
	.notifier_call	= mem_snapshot_reboot_notifier,
};

/*
 * Register the persist_mem misc device and the reset notifier that seals
 * CMA areas with CRCs before reboot; undoes the device registration when
 * the notifier cannot be registered.
 *
 * Fix: the first pr_err() lacked a trailing newline, which lets the next
 * log line concatenate onto it.
 */
static int __init persist_mem_init(void)
{
	int ret;

	ret = misc_register(&dev);
	if (ret != 0) {
		pr_err("%s():failed to register misc device\n", __func__);
		return ret;
	}
	ret = register_reset_notifier(&cma_reboot_nb);
	if (ret) {
		pr_err("%s():failed to register reboot notifier\n", __func__);
		misc_deregister(&dev);
		return ret;
	}

	return 0;
}

/* Unregister the reset notifier (best-effort) and remove the misc device. */
static void persist_mem_exit(void)
{
	if (unregister_reset_notifier(&cma_reboot_nb))
		pr_err("%s():failed to unregister reboot notifier\n", __func__);

	misc_deregister(&dev);
}

/*
 * Count the number of free pages in @cma.
 *
 * Each clear bitmap bit represents (1 << order_per_bit) free pages, so
 * instead of walking every zero-run, count the set bits once and derive the
 * free total from the remainder — identical result, simpler scan. Taken
 * under cma->lock for a consistent view of the bitmap.
 */
static unsigned long show_cma_free(struct cma *cma)
{
	unsigned long nbits = cma_bitmap_maxno(cma);
	unsigned long nr_free;

	spin_lock_irq(&cma->lock);
	nr_free = (nbits - bitmap_weight(cma->bitmap, nbits)) << cma->order_per_bit;
	spin_unlock_irq(&cma->lock);
	return nr_free;
}

/*
 * Render /proc/mem_snapshot/meminfo: one line per initialized CMA area with
 * its pfn range plus total/free/used sizes in KB.
 */
static int mem_snapshot_meminfo_show(struct seq_file *seq, void *offset)
{
	struct cma *cma;
	unsigned long nr_free;
	int i;

	seq_puts(seq, "        start_pfn    end_pfn    total     free     used\n");
	for (i = 0; i < cma_area_count; i++) {
		cma = &cma_areas[i];
		if (!cma->bitmap)
			continue;

		nr_free = show_cma_free(cma);
		seq_printf(seq, "cma[%d] 0x%08lx 0x%08lx %8lu %8lu %8lu\n", i, cma->base_pfn,
			cma->base_pfn + cma->count,
			cma->count * PAGE_SIZE_KB, nr_free * PAGE_SIZE_KB,
			(cma->count - nr_free) * PAGE_SIZE_KB);
	}

	return 0;
}

/* proc open hook: bind the seq_file to the meminfo renderer. */
static int mem_snapshot_meminfo_open(struct inode *inode, struct file *file)
{
	return single_open(file, mem_snapshot_meminfo_show, NULL);
}

/* proc_ops for the read-only seq_file /proc/mem_snapshot/meminfo. */
static const struct proc_ops proc_mem_snapshot_meminfo = {
	.proc_open	= mem_snapshot_meminfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* /proc/mem_snapshot directory; NULL when CONFIG_PROC_FS is off or init failed. */
static struct proc_dir_entry *mem_snapshot_entry;

/*
 * Create /proc/mem_snapshot/meminfo (mode 0400) under @parent.
 * Returns 0 on success, -ENOMEM when proc_create() fails.
 */
static int mem_snapshot_proc_init(struct proc_dir_entry *parent)
{
	if (!proc_create("meminfo", (mode_t)0400, parent, &proc_mem_snapshot_meminfo)) {
		pr_err("%s():proc_create mem_snapshot/meminfo failed.\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

/* Remove the /proc/mem_snapshot subtree; proc_remove(NULL) is a no-op. */
static void __exit mem_snapshot_proc_remove(void)
{
	proc_remove(mem_snapshot_entry);
}

/*
 * Create the /proc/mem_snapshot directory and its meminfo file.
 * Compiles to a trivial success when CONFIG_PROC_FS is disabled.
 */
static int __init init_mem_snapshot_proc(void)
{
#ifdef CONFIG_PROC_FS
	mem_snapshot_entry = proc_mkdir("mem_snapshot", NULL);
	if (mem_snapshot_entry == NULL) {
		pr_err("%s():proc_create mem_snapshot/ failed.\n", __func__);
		return -ENOMEM;
	}

	if (mem_snapshot_proc_init(mem_snapshot_entry) != 0) {
		/* roll back the directory if the file could not be created */
		remove_proc_entry("mem_snapshot", NULL);
		return -ENOMEM;
	}
#endif
	return 0;
}

/*
 * Module init: only active when "mem_snapshot" was given on the kernel
 * command line. Brings up the misc device first, then the proc interface,
 * rolling the device back if proc setup fails.
 */
static int __init mem_snapshot_init(void)
{
	int err;

	if (!mem_snapshot_is_enabled())
		return 0;

	err = persist_mem_init();
	if (err != 0)
		return err;

	err = init_mem_snapshot_proc();
	if (err != 0)
		persist_mem_exit();
	return err;
}

/* Module exit: tear down the proc entries and the misc device, if enabled. */
static void __exit mem_snapshot_exit(void)
{
	if (!mem_snapshot_is_enabled())
		return;

	mem_snapshot_proc_remove();
	persist_mem_exit();
}

module_init(mem_snapshot_init);
module_exit(mem_snapshot_exit);
