/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2022.
 * Description: support oom_extend feaure
 * Author: fanglinxu <fanglinxu@huawei.com>
 * Create: 2018-08-25
 */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/nmi.h>
#include <linux/sort.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/vmalloc.h>
#include <linux/pagewalk.h>

#ifdef CONFIG_RTOS_KBOX
#include <linux/set_kbox_region.h>
#endif

#include <linux/rtos_oom_extend.h>

#define PSS_SHIFT	12
#define LONG_SIZE	21
#define MAX_SMAPS_COUNT 50
#define SHOW_ALL_THREAD_SMAPS (-1)
#define DEFAULT_SMAPS_COUNT 5
#define MAX_PAGECACHE_COUNT 50
#define SHOW_ALL_PAGECACHE (-1)
#define DEFAULT_PAGECACHE_COUNT 20
#define DEFAULT_SMAPS_WATERLINE 2048L   /* kB */
#define CHAR_ENABLE	'1'
#define CHAR_DISABLE	'0'
#define OOM_LOCKED (1)
#define OOM_UNLOCKED (0)
#define MAX_LOG_SIZE_KB	(UINT_MAX / 1024)

#define pte_offset_map_trylock(mm, pmd, address, ptlp)	\
({                                                      \
	spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
	pte_t *__pte = pte_offset_map(pmd, address);    \
	*(ptlp) = __ptl;                                \
	if (!spin_trylock(__ptl))			\
		*(ptlp) = NULL;				\
	__pte;                                          \
})

/* How many of the largest page-cache consumers to print (-1 = all). */
long oom_pagecache_count = DEFAULT_PAGECACHE_COUNT;

/* Master switch for the whole oom_extend dump (procfs "enable"). */
static int oom_extend_enable = 1;
/* Non-zero: print the full per-VMA breakdown instead of Size/Pss only. */
static int oom_smaps_detail;
/* How many largest-RSS tasks to dump (SHOW_ALL_THREAD_SMAPS = every task). */
static long oom_smaps_count = DEFAULT_SMAPS_COUNT;
/* Keeps concurrent CPUs out of oom_extend_show(); see atomic_cmpxchg there. */
static atomic_t oom_lock_state = ATOMIC_INIT(OOM_UNLOCKED);

#ifdef CONFIG_RTOS_KBOX
/* Size (kB) and id of the registered kbox region used to persist the dump. */
static unsigned int oom_region_size;
static int oom_region_id = -1;
#endif

/* Per-VMA memory accounting filled in by the page walk (smaps_pte_entry). */
struct oom_mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;	/* scaled by PSS_SHIFT, see smaps_pte_entry() */
};

/* One slot of the "largest RSS" table used by sort_oom_smaps_count_show(). */
struct task_rss {
	struct task_struct *tsk;	/* valid while tasklist_lock is read-held */
	unsigned long rss;
};

/* Scratch table; only touched under tasklist_lock in the dump path. */
static struct task_rss task_rss[MAX_SMAPS_COUNT];

/* NOTE(review): shared scratch for d_path(); appears safe only because
 * oom_extend_show() serializes callers via oom_lock_state — confirm no
 * other path reaches get_path() concurrently.
 */
static char path_buf[PATH_MAX];

/* Resolve @path into the shared path_buf; NULL on d_path() failure. */
static char *get_path(const struct path *path)
{
	char *resolved = d_path(path, path_buf, PATH_MAX);

	return IS_ERR(resolved) ? NULL : resolved;
}

/*
 * Pick a display name for an anonymous (file-less) mapping: the arch name
 * if any, else "[vdso]"/"[heap]"/"[stack]" by address range, else "".
 * @p is unused but kept for interface compatibility with callers.
 */
static void set_path_name(const char **path_p, struct task_struct *p,
	struct vm_area_struct *vma, struct mm_struct *mm)
{
	const char *name = arch_vma_name(vma);

	if (!name) {
		if (!mm)
			name = "[vdso]";
		else if (vma->vm_start <= mm->brk &&
			 vma->vm_end >= mm->start_brk)
			name = "[heap]";
		else if (vma->vm_start <= mm->start_stack &&
			 vma->vm_end >= mm->start_stack)
			name = "[stack]";
		else
			name = "";
	}

	*path_p = name;
}

/* reference: ./fs/proc/task_mmu.c --> show_map_vma */
/*
 * Print one /proc/<pid>/maps-style header line for @vma and report via
 * @is_lib whether the backing file name ends in ".so" (a shared library).
 * Returns 0 on success, -EINVAL if the file path cannot be resolved.
 */
int oom_show_vma_total(struct vm_area_struct *vma, struct task_struct *p, int *is_lib)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	/* Fix: vm_flags is unsigned long; an int would truncate high flag bits. */
	unsigned long flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *path = NULL;

	*is_lib = 0;

	if (file) {
		struct inode *inode = file->f_path.dentry->d_inode;

		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		const char *dot;

		path = get_path(&file->f_path);
		if (!path)
			return -EINVAL;
		/* ".so" suffix after the last dot => shared library */
		dot = strrchr(path, '.');
		if (dot && strcmp(dot, ".so") == 0)
			*is_lib = 1;
	} else {
		set_path_name(&path, p, vma, mm);
	}

	pr_info("\t%pK-%pK %c%c%c%c %08llx %02x:%02x %lu          %s\n",
		(void *)(uintptr_t)start,
		(void *)(uintptr_t)end,
		flags & VM_READ ? 'r' : '-',
		flags & VM_WRITE ? 'w' : '-',
		flags & VM_EXEC ? 'x' : '-',
		flags & VM_MAYSHARE ? 's' : 'p',
		pgoff,
		MAJOR(dev), MINOR(dev), ino,
		path ? path : "");

	return 0;
}

/* reference: ./fs/proc/task_mmu.c --> smaps_pte_entry */
/*
 * Fold one PTE's contribution into the oom_mem_size_stats hanging off
 * walk->private.  @ptent_size is the span the entry covers (PAGE_SIZE
 * for normal PTEs).  Caller holds the PTE page-table lock.
 */
static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		const unsigned long ptent_size, const struct mm_walk *walk)
{
	struct oom_mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);
		/* Real swap entries count as swap; migration entries still
		 * have a backing page and are accounted like resident pages.
		 */
		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	/* Page mapped at an index other than its linear one => nonlinear. */
	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		/* Shared page: split the PSS cost across all mappers. */
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		/* Private page: the full cost belongs to this mapping. */
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

/*
 * walk_page_range() pmd callback: account every PTE in [addr, end).
 * Uses a trylock on the PTE lock so the OOM dump can never deadlock
 * against a task that already holds it; returns -EBUSY in that case.
 */
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct oom_mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte = NULL;
	spinlock_t *ptl = NULL;

	/* THP handle will be blocked by mutex, so just return here. */
	if (pmd_trans_huge(*pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_trylock(vma->vm_mm, pmd, addr, &ptl);
	if (!ptl) {
		/* Fix: the macro still mapped the PTE page; drop that
		 * mapping before bailing out (leaked a kmap slot with
		 * CONFIG_HIGHPTE in the old code).
		 */
		pte_unmap(pte);
		return -EBUSY;
	}

	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

/* Callbacks for walk_page_range(); hugetlb VMAs are skipped by the caller. */
static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry = smaps_pte_range,
	.hugetlb_entry = NULL,
};

/*
 * Account and print one VMA.  VMAs whose resident size is below
 * DEFAULT_SMAPS_WATERLINE kB are silently skipped.  On success the
 * VMA's (PSS_SHIFT-scaled) PSS is returned via @pss_size, and via
 * @share_lib as well when the mapping is a ".so" file.
 * Returns 0, or the walk error (-EBUSY when a PTE lock was contended).
 */
static int oom_show_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	struct task_struct *p, u64 *pss_size, u64 *share_lib)
{
	int is_lib = 0;
	int err = 0;
	struct oom_mem_size_stats mss;

	memset(&mss, 0, sizeof(mss));
	mss.vma = vma;

	/* Hugetlb VMAs are not walked; mss stays zero for them. */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		err = walk_page_range(mm, vma->vm_start, vma->vm_end, &smaps_walk_ops, &mss);

	/* NOTE(review): on a failed walk mss.resident may be partial, so a
	 * contended VMA below the waterline is skipped without reporting err.
	 */
	if ((mss.resident >> 10) < DEFAULT_SMAPS_WATERLINE)
		return 0;

	oom_show_vma_total(vma, p, &is_lib);

	if (err) {
		pr_info("\t\tSize:           %8lu kB\n"
			"\t\ttrylock fail, ignore.\n\n",
			(vma->vm_end - vma->vm_start) >> 10);
		return err;
	}

	if (oom_smaps_detail) {
		/* Full /proc/<pid>/smaps-style breakdown. */
		pr_info("\t\tSize:           %8lu kB\n"
			"\t\tRss:            %8lu kB\n"
			"\t\tPss:            %8lu kB\n"
			"\t\tShared_Clean:   %8lu kB\n"
			"\t\tShared_Dirty:   %8lu kB\n"
			"\t\tPrivate_Clean:  %8lu kB\n"
			"\t\tPrivate_Dirty:  %8lu kB\n"
			"\t\tReferenced:     %8lu kB\n"
			"\t\tAnonymous:      %8lu kB\n"
			"\t\tAnonHugePages:  %8lu kB\n"
			"\t\tSwap:           %8lu kB\n"
			"\t\tKernelPageSize: %8lu kB\n"
			"\t\tMMUPageSize:    %8lu kB\n"
			"\t\tLocked:         %8lu kB\n",
			(vma->vm_end - vma->vm_start) >> 10,
			mss.resident >> 10,
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
			mss.shared_clean  >> 10,
			mss.shared_dirty  >> 10,
			mss.private_clean >> 10,
			mss.private_dirty >> 10,
			mss.referenced >> 10,
			mss.anonymous >> 10,
			mss.anonymous_thp >> 10,
			mss.swap >> 10,
			vma_kernel_pagesize(vma) >> 10,
			vma_mmu_pagesize(vma) >> 10,
			(vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
	} else {
		/* Compact form: size and PSS only. */
		pr_info("\t\tSize:           %8lu kB\n"
			"\t\tPss:            %8lu kB\n",
			(vma->vm_end - vma->vm_start) >> 10,
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)));
	}

	pr_info("\n");
	*pss_size = mss.pss;
	if (is_lib)
		*share_lib = mss.pss;

	return 0;
}

static int task_compare(const void *a, const void *b)
{
	if (((struct task_rss *)a)->rss > ((struct task_rss *)b)->rss)
		return -1;
	else if (((struct task_rss *)a)->rss < ((struct task_rss *)b)->rss)
		return 1;

	return 0;
}

/* sort() swap helper for task_rss entries. */
static void task_swap(void *a, void *b, int size)
{
	struct task_rss *x = a;
	struct task_rss *y = b;
	struct task_rss tmp;

	BUG_ON(size != sizeof(*x));

	tmp = *x;
	*x = *y;
	*y = tmp;
}

/*
 * Print the smaps-style summary for one task and accumulate its PSS
 * totals into *all_user_size / *all_share_lib.
 *
 * task_lock() pins p->mm; mmap_read_trylock() is used so the OOM dump
 * never sleeps on a writer holding mmap_lock — the task is skipped
 * (with a message) when the lock is contended.
 */
static void printk_task_smaps_info(struct task_struct *p, u64 *all_user_size, u64 *all_share_lib)
{
	struct mm_struct *mm = NULL;
	struct vm_area_struct *vma = NULL;
	u64 pss_size;
	u64 share_lib_size;

	task_lock(p);
	mm = p->mm;
	if (mm == NULL) {
		task_unlock(p);
		return;
	}

	/* Fix: convert pages to kB via PAGE_SHIFT instead of the old "* 4",
	 * which silently assumed 4 kB pages.
	 */
	pr_info("  smaps info of task-%s[%d], rss:%lu kB:\n", p->comm, p->pid,
		get_mm_rss(mm) << (PAGE_SHIFT - 10));
	if (!mmap_read_trylock(mm)) {
		pr_info("task[%d] down_read_trylock failed, ignore it.\n", p->pid);
		task_unlock(p);
		return;
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		pss_size = 0;
		share_lib_size = 0;
		oom_show_vma(mm, vma, p, &pss_size, &share_lib_size);
		*all_user_size += pss_size;
		*all_share_lib += share_lib_size;
	}

	mmap_read_unlock(mm);
	task_unlock(p);
}

/* Dump smaps info for every process; caller read-holds tasklist_lock. */
static void oom_show_all_thread_smaps(u64 *all_user_size, u64 *all_share_lib)
{
	struct task_struct *tsk = NULL;

	for_each_process(tsk)
		printk_task_smaps_info(tsk, all_user_size, all_share_lib);
}

/*
 * Dump smaps info for the oom_smaps_count tasks with the largest RSS.
 * Caller read-holds tasklist_lock, which also keeps the task pointers
 * cached in task_rss[] valid for the print loop at the end.
 */
static void sort_oom_smaps_count_show(u64 *all_user_size, u64 *all_share_lib)
{
	struct task_struct *p = NULL;
	struct mm_struct *mm = NULL;
	long count = 0;
	long i;
	unsigned long rss;

	memset(task_rss, 0, sizeof(task_rss));
	for_each_process(p) {
		int changed = 0;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			task_unlock(p);
			continue;
		}
		rss = get_mm_rss(mm);
		if (count < oom_smaps_count) {
			/* Table not full yet: accept every task. */
			task_rss[count].tsk = p;
			task_rss[count++].rss = rss;
			changed = 1;
		} else if (count > 0 && task_rss[count - 1].rss < rss) {
			/* Table full: evict the current smallest entry. */
			task_rss[count - 1].tsk = p;
			task_rss[count - 1].rss = rss;
			changed = 1;
		}
		/* Fix: only re-sort when the table actually changed; the old
		 * code sorted once per process regardless.
		 */
		if (changed)
			sort(task_rss, count, sizeof(struct task_rss),
			     task_compare, task_swap);
		task_unlock(p);
	}

	for (i = 0; i < count; i++) {
		p = task_rss[i].tsk;
		if (!p) {
			/* Can't happen: every stored slot holds a task. */
			BUG_ON(1);
			continue;
		}
		printk_task_smaps_info(p, all_user_size, all_share_lib);
	}
}

/*
 * Entry point for the per-task smaps section of the dump.  Depending on
 * oom_smaps_count this prints nothing (0), every task (-1), or the top-N
 * tasks by RSS.  Accumulated PSS totals are returned via the pointers.
 */
static int oom_show_smap(u64 *all_user_size, u64 *all_share_lib)
{
	if (oom_smaps_count == 0)
		return 0;

	pr_info("*****smap info of all task:*****\n");

	*all_user_size = 0;

	/* Hold tasklist_lock across the whole dump so task pointers stay valid. */
	read_lock(&tasklist_lock);
	if (oom_smaps_count == SHOW_ALL_THREAD_SMAPS)
		oom_show_all_thread_smaps(all_user_size, all_share_lib);
	else if (oom_smaps_count > 0)
		sort_oom_smaps_count_show(all_user_size, all_share_lib);
	else
		BUG_ON(1);	/* writer validation forbids other values */
	read_unlock(&tasklist_lock);

	return 0;
}

/* lib/show_mem.c show_mem */
/* Sum (present - managed) pages over all populated zones of all online
 * nodes, i.e. the pages reserved at boot, into *reserved_pages.
 */
static void get_reserved_pages(unsigned long *reserved_pages)
{
	pg_data_t *pgdat = NULL;
	unsigned long total = 0;

	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *z = &pgdat->node_zones[zoneid];

			if (populated_zone(z))
				total += z->present_pages - zone_managed_pages(z);
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	*reserved_pages = total;
}

int oom_extend_show(void)
{
	u64 user_size = 0;
	u64 share_lib_size = 0;
	unsigned long reserved_pages;
	unsigned long global_user_pages;
#ifdef CONFIG_HUGETLB_PAGE
	struct hstate *h = &default_hstate;
	unsigned long huge_page_shift = 0;
#endif
#ifdef CONFIG_RTOS_KBOX
	int old_region_id = -1;
#endif

	if (!oom_extend_enable)
		return 0;

	/* Avoid multiple cpu entering at the same time. */
	if (atomic_cmpxchg(&oom_lock_state, OOM_UNLOCKED, OOM_LOCKED) != OOM_UNLOCKED)
		return -EBUSY;

	get_reserved_pages(&reserved_pages);

#ifdef CONFIG_RTOS_KBOX
	if (oom_region_id >= 0)
		old_region_id = set_kbox_region(oom_region_id);
#endif
	pr_info("*****************Start oom extend info.*****************\n");
	oom_show_vmalloc_slab();
	show_mount_info();

	/* *****smap info of all task:***** if !oom_smaps_count */
	oom_show_smap(&user_size, &share_lib_size);

	pr_info("********    mem info     *****");

#ifdef CONFIG_HUGETLB_PAGE
	huge_page_shift = huge_page_order(h) + PAGE_SHIFT - 10;
#endif
	global_user_pages = global_node_page_state(NR_ACTIVE_ANON) +
			    global_node_page_state(NR_INACTIVE_ANON) +
			    global_node_page_state(NR_ACTIVE_FILE) +
			    global_node_page_state(NR_INACTIVE_FILE) +
			    global_node_page_state(NR_UNEVICTABLE);

	pr_info("\tTotal:               %8lu kB\n"
		"\tTotal free:          %8lu kB\n"
		"\tUser space:          %8lu kB\n",
		(unsigned long)(totalram_pages() << 2),
		(unsigned long)(global_zone_page_state(NR_FREE_PAGES) << 2),
		(unsigned long)(global_user_pages << 2));

	if (oom_smaps_count == SHOW_ALL_THREAD_SMAPS)
		pr_info("\tUser shared lib:     %8lu kB\n",
			(unsigned long)(share_lib_size >> (10 + PSS_SHIFT)));

	pr_info("\tMlock:               %8lu kB\n"
		"\tKernel space:        %8lu kB\n"
		"\tBootmem reserved:    %8lu kB\n",
		(unsigned long)(global_zone_page_state(NR_MLOCK) << 2),
		(unsigned long)((totalram_pages() -
			global_zone_page_state(NR_FREE_PAGES) -
			global_user_pages) << 2),
		(unsigned long)(reserved_pages << 2));
#ifdef CONFIG_HUGETLB_PAGE
	pr_info("\tHugePages_Total:     %8lu kB\n"
		"\tHugePages_Free:      %8lu kB\n"
		"\tHugePages_Rsvd:      %8lu kB\n"
		"\tHugePages_Surp:      %8lu kB\n"
		"\tHugepagesize:        %8lu kB\n",
		(unsigned long)(h->nr_huge_pages << huge_page_shift),
		(unsigned long)(h->free_huge_pages << huge_page_shift),
		(unsigned long)(h->resv_huge_pages << huge_page_shift),
		(unsigned long)(h->surplus_huge_pages << huge_page_shift),
		(unsigned long)(1UL << huge_page_shift));
#endif
	pr_info("\tkernel_image_info:\n");
	show_kernel_image_info();

	pr_info("\tmodule info:\n");
	lsmod_show();

	if (oom_pagecache_count != 0) {
		pr_info("******     pagecache_info:     ******\n");
		show_pagecache_info(oom_pagecache_count);
	}

	pr_info("*****************End oom extend info.*****************\n");
#ifdef CONFIG_RTOS_KBOX
	if (old_region_id >= 0)
		restore_kbox_region(old_region_id);
#endif

	atomic_set(&oom_lock_state, OOM_UNLOCKED);
	return 0;
}
EXPORT_SYMBOL(oom_extend_show);

/* seq_file show: report the current oom_extend enable flag. */
static int enable_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%d\n", oom_extend_enable);
	return 0;
}

/*
 * procfs write handler for "enable": accepts "0" or "1", with or without
 * a trailing newline.  Requires CAP_SYS_ADMIN.  *ppos is ignored; every
 * write is treated as a full value.
 */
static ssize_t enable_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos)
{
	char c;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Fix: also accept a bare "0"/"1" (len == 1, e.g. echo -n);
	 * the old code required exactly two bytes.
	 */
	if (len < 1 || len > 2)
		return -EINVAL;

	if (copy_from_user(&c, buf, 1))
		return -EFAULT;

	switch (c) {
	case CHAR_ENABLE:
		oom_extend_enable = 1;
		break;
	case CHAR_DISABLE:
		oom_extend_enable = 0;
		break;
	default:
		return -EINVAL;
	}

	return len;
}

/* procfs open: bind the seq_file single-shot show callback. */
static int enable_open(struct inode *inode, struct file *file)
{
	return single_open(file, enable_show, NULL);
}

/* /proc/oom_extend/enable file operations. */
static const struct proc_ops proc_oom_extend_enable = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= enable_open,
	.proc_read	= seq_read,
	.proc_write     = enable_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* set smaps_detail */
/* seq_file show: report whether the detailed per-VMA breakdown is on. */
static int smaps_detail_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%d\n", oom_smaps_detail);
	return 0;
}

/*
 * procfs write handler for "enable_smaps_detail": accepts "0" or "1",
 * with or without a trailing newline.  Requires CAP_SYS_ADMIN.
 */
static ssize_t smaps_detail_write(struct file *file, const char __user *buf,
	size_t len, loff_t *ppos)
{
	char c;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Fix: also accept a bare "0"/"1" (len == 1, e.g. echo -n);
	 * the old code required exactly two bytes.
	 */
	if (len < 1 || len > 2)
		return -EINVAL;

	if (copy_from_user(&c, buf, 1))
		return -EFAULT;

	switch (c) {
	case CHAR_ENABLE:
		oom_smaps_detail = 1;
		break;
	case CHAR_DISABLE:
		oom_smaps_detail = 0;
		break;
	default:
		return -EINVAL;
	}

	return len;
}

/* procfs open: bind the seq_file single-shot show callback. */
static int smaps_detail_open(struct inode *inode, struct file *file)
{
	return single_open(file, smaps_detail_show, NULL);
}

/* /proc/oom_extend/enable_smaps_detail file operations. */
static const struct proc_ops proc_oom_smaps_detail = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= smaps_detail_open,
	.proc_read	= seq_read,
	.proc_write     = smaps_detail_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* seq_file show: report how many top-RSS tasks will be dumped. */
static int smaps_count_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%ld\n", oom_smaps_count);
	return 0;
}

/*
 * procfs write handler for "smaps_count".  Valid values are
 * 0..MAX_SMAPS_COUNT, or SHOW_ALL_THREAD_SMAPS (-1) to dump every task.
 * Requires CAP_SYS_ADMIN.
 */
static ssize_t smaps_count_write(struct file *file, const char __user *buf,
	size_t len, loff_t *ppos)
{
	char c[LONG_SIZE];
	char *pos = NULL;
	long count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (len > LONG_SIZE - 1)
		return -EINVAL;

	memset(c, 0, LONG_SIZE);

	if (copy_from_user(c, buf, len))
		return -EFAULT;

	count = simple_strtol(c, &pos, 0);

	/* No digits consumed at all. */
	if (pos == c)
		return -EINVAL;

	/* Fix: use the named sentinel instead of a bare -1, matching
	 * pagecache_count_write().
	 */
	if ((count > MAX_SMAPS_COUNT || count < 0) && count != SHOW_ALL_THREAD_SMAPS)
		return -EINVAL;

	/* Reject trailing garbage (a single newline is fine). */
	if (*pos != '\0' && *pos != '\n')
		return -EINVAL;

	oom_smaps_count = count;

	return len;
}

/* procfs open: bind the seq_file single-shot show callback. */
static int smaps_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, smaps_count_show, NULL);
}

/* /proc/oom_extend/smaps_count file operations. */
static const struct proc_ops proc_oom_extend_smapscount = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= smaps_count_open,
	.proc_read	= seq_read,
	.proc_write     = smaps_count_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
/* seq_file show: report how many page-cache consumers will be printed. */
static int pagecache_count_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%ld\n", oom_pagecache_count);
	return 0;
}

/*
 * procfs write handler for "pagecache_count".  Valid values are
 * 0..MAX_PAGECACHE_COUNT, or SHOW_ALL_PAGECACHE (-1).  Requires
 * CAP_SYS_ADMIN.
 */
static ssize_t pagecache_count_write(struct file *file, const char __user *buf,
	size_t len, loff_t *ppos)
{
	char kbuf[LONG_SIZE] = { 0 };
	char *end = NULL;
	long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (len > LONG_SIZE - 1)
		return -EINVAL;

	if (copy_from_user(kbuf, buf, len))
		return -EFAULT;

	val = simple_strtol(kbuf, &end, 0);
	if (end == kbuf)
		return -EINVAL;	/* no digits consumed */

	if ((val > MAX_PAGECACHE_COUNT || val < 0) && val != SHOW_ALL_PAGECACHE)
		return -EINVAL;

	if (*end != '\0' && *end != '\n')
		return -EINVAL;	/* trailing garbage */

	oom_pagecache_count = val;

	return len;
}

/* procfs open: bind the seq_file single-shot show callback. */
static int pagecache_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, pagecache_count_show, NULL);
}

/* /proc/oom_extend/pagecache_count file operations. */
static const struct proc_ops proc_pagecache_count = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= pagecache_count_open,
	.proc_read	= seq_read,
	.proc_write     = pagecache_count_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

#ifdef CONFIG_RTOS_KBOX
/* seq_file show: report the configured kbox region size in kB. */
static int oom_region_size_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%u kB\n", oom_region_size);
	return 0;
}

/*
 * procfs write handler for "kbox_region_size": parse a size in kB
 * (4 kB-aligned, at most MAX_LOG_SIZE_KB) and register a kbox region of
 * that size for the dump output.  Requires CAP_SYS_ADMIN.
 */
static ssize_t oom_region_size_write(struct file *file, const char __user *buf,
	size_t len, loff_t *ppos)
{
	char c[LONG_SIZE];
	char *pos = NULL;
	unsigned long size;
	int tmp_id;
	unsigned int kbox_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (len > LONG_SIZE - 1)
		return -EINVAL;

	memset(c, 0, LONG_SIZE);

	if (copy_from_user(c, buf, len))
		return -EFAULT;

	size = simple_strtoul(c, &pos, 0);
	if (pos == c)
		return -EINVAL;

	/* Fix: reject trailing garbage up front, before any logging or
	 * conversion side effects (the old code checked this last).
	 */
	if (*pos != '\0' && *pos != '\n')
		return -EINVAL;

	if (size > MAX_LOG_SIZE_KB)
		return -EINVAL;

	if ((size % 4) != 0) {
		pr_err("Size does not align by 4 kB\n");
		return -EINVAL;
	}

	/* size <= UINT_MAX / 1024, so the kB -> bytes conversion cannot overflow. */
	kbox_size = (unsigned int)size * 1024;

	/* NOTE(review): a previously registered region is not released here,
	 * so repeated writes appear to leak the old region — confirm whether
	 * the kbox API offers an unregister call.
	 */
	tmp_id = kern_kbox_register_region("oom_extend", kbox_size, NULL);
	if (tmp_id >= 0) {
		oom_region_id = tmp_id;
		oom_region_size = (unsigned int)size;
		return len;
	}

	pr_err("kern_kbox_register_region failed, return %d.\n", tmp_id);
	return -EINVAL;
}

/* procfs open: bind the seq_file single-shot show callback. */
static int oom_region_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, oom_region_size_show, NULL);
}

/* /proc/oom_extend/kbox_region_size file operations (CONFIG_RTOS_KBOX). */
static const struct proc_ops proc_oom_region_size = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= oom_region_size_open,
	.proc_read	= seq_read,
	.proc_write     = oom_region_size_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
#endif

static int oom_extend_proc_init(struct proc_dir_entry *parent)
{
	struct proc_dir_entry *p = NULL;
	p = proc_create("enable", (mode_t)0640, parent, &proc_oom_extend_enable);
	if (p == NULL) {
		pr_err("proc_create oom_extend/enable failed.\n");
		return -ENOMEM;
	}

	p = proc_create("enable_smaps_detail", (mode_t)0640, parent, &proc_oom_smaps_detail);
	if (p == NULL) {
		pr_err("proc_create oom_extend/enable_smaps_detail failed.\n");
		return -ENOMEM;
	}

	p = proc_create("smaps_count", (mode_t)0640, parent, &proc_oom_extend_smapscount);
	if (p == NULL) {
		pr_err("proc_create oom_extend/smaps_count failed.\n");
		return -ENOMEM;
	}

	p = proc_create("pagecache_count", (mode_t)0640, parent, &proc_pagecache_count);
	if (p == NULL) {
		pr_err("proc_create oom_extend/pagecache_count failed.\n");
		return -ENOMEM;
	}

#ifdef CONFIG_RTOS_KBOX
	p = proc_create("kbox_region_size", (mode_t)0640, parent, &proc_oom_region_size);
	if (p == NULL) {
		pr_err("proc_create oom_extend/kbox_region_size failed.\n");
		return -ENOMEM;
	}
#endif

	return 0;
}

/*
 * Tear down every /proc/oom_extend entry and the directory itself.
 * NOTE(review): on the partial-init error path some entries may not
 * exist yet and remove_proc_entry() will warn — pre-existing behavior.
 */
static void oom_proc_remove(struct proc_dir_entry *parent)
{
#ifdef CONFIG_RTOS_KBOX
	/* Fix: remove the kbox entry here.  The old code removed
	 * "pagecache_count" under this ifdef, leaking "kbox_region_size"
	 * and — with KBOX disabled — never removing "pagecache_count".
	 */
	remove_proc_entry("kbox_region_size", parent);
#endif
	remove_proc_entry("pagecache_count", parent);
	remove_proc_entry("smaps_count", parent);
	remove_proc_entry("enable_smaps_detail", parent);
	remove_proc_entry("enable", parent);
	remove_proc_entry("oom_extend", NULL);
}

/*
 * Module init: create the /proc/oom_extend directory and its entries.
 * Returns 0 on success (or when CONFIG_PROC_FS is disabled), -ENOMEM on
 * any creation failure.
 */
static int __init init_oom_extend(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *parent = NULL;
	int ret;

	parent = proc_mkdir("oom_extend", NULL);
	if (parent == NULL) {
		pr_err("proc_create oom_extend/ failed.\n");
		return -ENOMEM;
	}

	ret = oom_extend_proc_init(parent);
	if (ret != 0) {
		/* Undo any entries that were created before the failure. */
		oom_proc_remove(parent);
		return ret;
	}
#endif
	/* Fix: the old code fell off the end of a non-void function when
	 * CONFIG_PROC_FS was disabled (no return statement at all).
	 */
	return 0;
}
late_initcall(init_oom_extend);
