// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2023. All rights reserved.
 * Description: add memstat for vmalloc and buddy for modules
 * Author: Lv Ying
 * Create: 2020-04-10
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/memstat.h>
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/nmi.h>
#include <linux/kallsyms.h>
#include <linux/rbtree.h>

/* Priority used when registering the OOM notifier below. */
#define MEMSTAT_OOM_NOTIFY_PRIORITY 111
/* Default and maximum number of stack frames saved per tracked page. */
#define PAGE_TRACE_ENTRIES_DEFAULT 2
#define PAGE_TRACE_ENTRIES_MAX 8
/* Base divisor for deriving the watchdog-touch interval of long walks. */
#define MEMSTAT_COUNT_MAX_NUM 50000000
/* Touch the NMI watchdog every this many iterations of alloc/free loops. */
#define PER_LOOP_FREE_CNT_MAX 10000

/*
 * Print to the seq_file when one is supplied (/proc reads); otherwise fall
 * back to the kernel log (the OOM notifier path passes m == NULL).
 */
#define memstat_print(m, args...)	\
do {					\
	if (m)				\
		seq_printf(m, args);	\
	else				\
		pr_err(args);		\
} while (0)

/* Per-module memory accounting entry, kept in mod_stat_root. */
struct module_stat {
	struct rb_node node;		/* link in mod_stat_root, keyed by mod */
	struct module *mod;		/* the module being tracked */
	char name[MODULE_NAME_LEN];	/* copy of mod->name */
	unsigned int core_size;		/* snapshot of mod->core_layout.size */
	unsigned long vmalloc;		/* bytes charged via vmap_area walk */
	unsigned long buddy;		/* bytes charged via page tracking */
};

/* Red-black tree of per-module statistics, keyed by module pointer. */
static struct rb_root mod_stat_root = RB_ROOT;
/* Non-zero once memstat tracking is fully initialized and active. */
int memstat_enable;
/* Serializes mod_stat_root users and the gray-list walkers below. */
static DEFINE_MUTEX(memstat_lock);
/* Address of the kernel's vmap_area_lock, resolved via kallsyms at init. */
static spinlock_t *memstat_vmap_area_lock;
/* Set by the "memstat_enable" boot parameter. */
static int memstat_setup_enable;

/* One tracked buddy allocation; slots are preallocated into a pool. */
struct page_stat {
	struct rb_node node;		/* link in pg_stat_root, keyed by pfn */
	struct list_head list;		/* link in available_pg_stat_list pool */
	struct list_head gray_list;	/* temporary link for snapshot walks */
	unsigned long pfn;		/* page frame number of the allocation */
	unsigned int size;		/* allocation size; 0 means slot unused */
	unsigned int nr_entries;	/* valid entries in trace_entries[] */
	unsigned long *trace_entries;	/* saved allocator stack trace */
};

/* Pool of unused page_stat slots, refilled as tracked pages are freed. */
static LIST_HEAD(available_pg_stat_list);
/* Protects pg_stat_root and available_pg_stat_list. */
static DEFINE_RWLOCK(pg_stat_lock);
/* Live tracked allocations, keyed by pfn. */
static struct rb_root pg_stat_root = RB_ROOT;
/* Scratch list holding a snapshot of pg_stat_root for lock-free walks. */
static LIST_HEAD(pg_stat_gray_list);
/* Slab cache backing struct page_stat allocations. */
static struct kmem_cache *page_stat_cache;

/* Count of page hooks currently executing (drained before teardown). */
static atomic_t buddy_stat_flag = ATOMIC_INIT(0);
/* Non-zero while free_module_stat() is tearing state down. */
static atomic_t free_stat_flag = ATOMIC_INIT(0);
/* Guards against concurrent writers of /proc/memstat/enable. */
static atomic_t memstat_enable_flag = ATOMIC_INIT(0);
/* Number of modules currently tracked in mod_stat_root. */
static int memstat_module_count;

/* Stack depth recorded per allocation (1..PAGE_TRACE_ENTRIES_MAX). */
static int memstat_trace_entries_nr;
/* Module name selected through /proc/memstat/module_filter. */
static char memstat_module_filter[MODULE_NAME_LEN];
/* /proc/memstat directory handle. */
static struct proc_dir_entry *memstat_proc_dir;
static int init_proc_and_notifier(void);
static void remove_proc_and_notifier(void);

/*
 * memstat_page_caller - record a buddy allocation for @pfn.
 * @pfn:  page frame number of the allocated page
 * @size: allocation size in bytes
 *
 * Inserts (or refreshes) a page_stat node keyed by pfn and captures the
 * allocator's stack trace.  buddy_stat_flag / free_stat_flag form a
 * handshake with free_module_stat(): recording is skipped once teardown
 * has started, and teardown waits for in-flight recorders to drain.
 */
void memstat_page_caller(unsigned long pfn, unsigned int size)
{
	struct page_stat *pg_stat = NULL;
	struct rb_node **p, *parent;
	unsigned long flags;

	atomic_inc(&buddy_stat_flag);
	wmb(); /* publish buddy_stat_flag before testing free_stat_flag */
	if (likely(!atomic_read(&free_stat_flag))) {
		write_lock_irqsave(&pg_stat_lock, flags);
		parent = NULL;
		p = &pg_stat_root.rb_node;
		/* Standard rb-tree descent keyed by pfn. */
		while (*p) {
			parent = *p;
			pg_stat = rb_entry(*p, struct page_stat, node);
			if (pfn < pg_stat->pfn)
				p = &(*p)->rb_left;
			else if (pfn > pg_stat->pfn)
				p = &(*p)->rb_right;
			else
				goto found;
		}

		/* No free tracking slot: silently drop this event. */
		if (list_empty(&available_pg_stat_list)) {
			write_unlock_irqrestore(&pg_stat_lock, flags);
			goto out;
		}

		pg_stat = list_first_entry(&available_pg_stat_list,
						struct page_stat, list);
		list_del(&pg_stat->list);
		rb_link_node(&pg_stat->node, parent, p);
		rb_insert_color(&pg_stat->node, &pg_stat_root);
found:
		pg_stat->pfn = pfn;
		/* Skip 2 frames so the trace starts at the real allocator. */
		pg_stat->nr_entries = stack_trace_save(&pg_stat->trace_entries[0],
				memstat_trace_entries_nr, 2);
		pg_stat->size = size;
		write_unlock_irqrestore(&pg_stat_lock, flags);
	}
out:
	atomic_dec(&buddy_stat_flag);
}

/*
 * memstat_page_free - drop tracking for pages released back to the buddy.
 * @page:  first page of the freed block
 * @order: buddy order; 2^order pages are freed
 *
 * Looks up each freed pfn in the tracking tree and, when found, clears the
 * slot and recycles it onto the available pool.  Uses the same
 * buddy_stat_flag / free_stat_flag handshake as memstat_page_caller().
 */
void memstat_page_free(struct page *page, unsigned int order)
{
	struct page_stat *pg_stat;
	struct rb_node **p;
	unsigned long flags;
	int i;
	unsigned long pfn;

	atomic_inc(&buddy_stat_flag);
	wmb(); /* publish buddy_stat_flag before testing free_stat_flag */
	if (likely(!atomic_read(&free_stat_flag))) {
		for (i = 0; i < (1<<order); i++) {
			pfn = page_to_pfn(page + i);
			write_lock_irqsave(&pg_stat_lock, flags);
			p = &pg_stat_root.rb_node;
			while (*p) {
				pg_stat = rb_entry(*p, struct page_stat, node);
				if (pfn < pg_stat->pfn)
					p = &(*p)->rb_left;
				else if (pfn > pg_stat->pfn)
					p = &(*p)->rb_right;
				else
					goto found;
			}
			/* pfn was never tracked (e.g. slot pool was empty). */
			goto unlock;
found:
			pg_stat->size = 0;
			pg_stat->nr_entries = 0;
			pg_stat->pfn = 0;
			list_add_tail(&pg_stat->list, &available_pg_stat_list);
			rb_erase(&pg_stat->node, &pg_stat_root);
unlock:
			write_unlock_irqrestore(&pg_stat_lock, flags);
		}
	}
	atomic_dec(&buddy_stat_flag);
}

/*
 * memstat_add_statinfo - insert a tracking entry for a newly loaded module.
 * @mod: module to start tracking (keyed by its pointer value).
 *
 * Caller must hold memstat_lock.  Returns 0 on success, -EINVAL for a NULL
 * module, -EFAULT if the module is already tracked, -ENOMEM on allocation
 * failure.
 */
static int memstat_add_statinfo(struct module *mod)
{
	struct module_stat *entry;
	struct rb_node **p, *parent;

	if (!mod) {
		pr_err("[memstat]: %s failed, mod = NULL.\n", __func__);
		return -EINVAL;
	}

	/* Find the insertion point, keyed by the module pointer value. */
	p = &mod_stat_root.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		entry = rb_entry(*p, struct module_stat, node);
		if ((unsigned long)mod < (unsigned long)entry->mod) {
			p = &(*p)->rb_left;
		} else if ((unsigned long)mod > (unsigned long)entry->mod) {
			p = &(*p)->rb_right;
		} else {
			pr_err("[memstat]: %s failed, mod has been exist.\n", __func__);
			return -EFAULT;
		}
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->mod = mod;
	entry->core_size = mod->core_layout.size;
	strncpy(entry->name, mod->name, MODULE_NAME_LEN);
	/* strncpy() does not terminate when the source fills the buffer. */
	entry->name[MODULE_NAME_LEN - 1] = '\0';
	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &mod_stat_root);
	memstat_module_count++;
	return 0;
}

/*
 * memstat_get_statinfo - look up the tracking entry for @mod.
 *
 * Binary search of mod_stat_root keyed by the module pointer value.
 * Returns the entry, or NULL when the module is not tracked.
 */
struct module_stat *memstat_get_statinfo(struct module *mod)
{
	struct rb_node *n = mod_stat_root.rb_node;

	while (n) {
		struct module_stat *stat =
			rb_entry(n, struct module_stat, node);

		if ((unsigned long)mod < (unsigned long)stat->mod)
			n = n->rb_left;
		else if ((unsigned long)mod > (unsigned long)stat->mod)
			n = n->rb_right;
		else
			return stat;
	}
	return NULL;
}

/*
 * memstat_get_statinfo_byname - find a tracked module by its name.
 *
 * Linear in-order scan of mod_stat_root (the tree is keyed by pointer, not
 * name).  Returns the entry, or NULL when no tracked module matches.
 */
struct module_stat *memstat_get_statinfo_byname(char *name)
{
	struct rb_node *node = rb_first(&mod_stat_root);

	while (node) {
		struct module_stat *stat =
			rb_entry(node, struct module_stat, node);

		if (strcmp(stat->name, name) == 0)
			return stat;
		node = rb_next(node);
	}
	return NULL;
}

static bool check_module_address(unsigned long *trace_entries,
			int nr_entries, struct module *mod)
{
	int i;

	if (!mod)
		return false;

	for (i = 0; i < nr_entries; i++) {
		if (within_module_core(trace_entries[i], mod)
				|| within_module_init(trace_entries[i], mod))
			return true;
	}
	return false;
}

/*
 * get_module_by_address - resolve the first trace frame owned by a module.
 *
 * Returns the module owning the earliest matching address in the saved
 * trace, or NULL when no frame belongs to any module.
 */
static struct module *get_module_by_address(unsigned long *trace_entries, int nr_entries)
{
	struct module *owner = NULL;
	int idx;

	for (idx = 0; idx < nr_entries && owner == NULL; idx++)
		owner = __module_address(trace_entries[idx]);

	return owner;
}

/*
 * count_page - attribute every live tracked page to its owning module.
 *
 * Snapshots the page rb-tree onto the gray list under the read lock, then
 * walks the snapshot without the lock (so it may sleep), charging each
 * allocation to the module owning an address in its saved stack trace.
 * Caller must hold memstat_lock, which serializes gray-list users.
 * Always returns 0.
 */
static int count_page(void)
{
	struct rb_node *n;
	struct page_stat *pg_stat, *next;
	struct module *module;
	struct module_stat *entry;
	unsigned long flags;

	read_lock_irqsave(&pg_stat_lock, flags);
	for (n = rb_first(&pg_stat_root); n != NULL; n = rb_next(n)) {
		pg_stat = rb_entry(n, struct page_stat, node);
		list_add(&pg_stat->gray_list, &pg_stat_gray_list);
	}
	read_unlock_irqrestore(&pg_stat_lock, flags);

	list_for_each_entry_safe(pg_stat, next, &pg_stat_gray_list, gray_list) {
		list_del(&pg_stat->gray_list);
		if (pg_stat->size == 0) /* freed since the snapshot */
			continue;
		module = get_module_by_address(pg_stat->trace_entries,
					pg_stat->nr_entries);
		if (module) {
			entry = memstat_get_statinfo(module);
			if (entry)
				entry->buddy += pg_stat->size;
			else
				pr_err("[memstat]: Race condition between %s and unload module with unfreed page\n",
					__func__);
		}
		cond_resched();
	}
	return 0;
}

/*
 * count_module_page - accumulate tracked buddy usage for one module.
 * @mod_stat: accounting entry whose ->buddy field is accumulated into.
 *
 * Same snapshot-then-walk scheme as count_page(), but only charges pages
 * whose saved stack trace hits @mod_stat->mod.  Caller must hold
 * memstat_lock.  Returns 0, or -EINVAL for a NULL @mod_stat.
 */
static int count_module_page(struct module_stat *mod_stat)
{
	struct page_stat *pg_stat, *next;
	struct rb_node *n;
	unsigned long flags;

	if (!mod_stat) {
		pr_err("[memstat]: %s failed, mod_stat = NULL.\n", __func__);
		return -EINVAL;
	}

	read_lock_irqsave(&pg_stat_lock, flags);
	for (n = rb_first(&pg_stat_root); n != NULL; n = rb_next(n)) {
		pg_stat = rb_entry(n, struct page_stat, node);
		list_add(&pg_stat->gray_list, &pg_stat_gray_list);
	}
	read_unlock_irqrestore(&pg_stat_lock, flags);

	list_for_each_entry_safe(pg_stat, next, &pg_stat_gray_list, gray_list) {
		list_del(&pg_stat->gray_list);
		if (pg_stat->size == 0) /* freed since the snapshot */
			continue;
		if (check_module_address(pg_stat->trace_entries,
				pg_stat->nr_entries, mod_stat->mod))
			mod_stat->buddy += pg_stat->size;
		cond_resched();
	}
	return 0;
}

/*
 * check_and_remove_module_memstat - final accounting for an unloading module.
 * @mod: the module leaving (MODULE_STATE_GOING).
 *
 * Recomputes the module's vmalloc usage (walking vmap_area_list under the
 * kernel's vmap_area_lock resolved via kallsyms) and its buddy usage, logs
 * a leak report if anything is still outstanding, then removes and frees
 * the tracking entry.  Caller must hold memstat_lock.
 */
static int check_and_remove_module_memstat(struct module *mod)
{
	struct vmap_area *va;
	struct module_stat *mod_stat;
	unsigned long vm_count = 0;

	if (!mod) {
		pr_err("[memstat]: %s failed, mod = NULL.\n", __func__);
		return -EINVAL;
	}

	mod_stat = memstat_get_statinfo(mod);
	if (!mod_stat) {
		pr_err("[memstat]: %s failed, mod_stat = NULL.\n", __func__);
		return -EFAULT;
	}

	mod_stat->vmalloc = 0;
	mod_stat->buddy = 0;

	/* Without the vmap lock we cannot walk vmap_area_list safely. */
	if (!memstat_vmap_area_lock)
		goto page_count;

	spin_lock(memstat_vmap_area_lock);
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		unsigned long caller = 0;
		unsigned int nr_pages = 0;

		if (!va->vm)
			continue;

		caller = (unsigned long)va->vm->caller;
		nr_pages = va->vm->nr_pages;

		if (caller == 0 || nr_pages == 0)
			continue;

		/* Charge the area if its creator lives inside this module. */
		if (within_module_core(caller, mod) || within_module_init(caller, mod))
			mod_stat->vmalloc += nr_pages * PAGE_SIZE;
		vm_count++;
		if (unlikely(vm_count % memstat_get_max_count_num() == 0))
			touch_nmi_watchdog();
	}
	spin_unlock(memstat_vmap_area_lock);

page_count:
	count_module_page(mod_stat);
	if (mod_stat->vmalloc > 0 || mod_stat->buddy > 0)
		pr_err("[memstat]: module [%s] exit with memory use, module start [%pK], end [%pK], buddy [%ld], vmalloc [%ld]\n",
				mod->name, mod->core_layout.base,
				mod->core_layout.base + mod->core_layout.size,
				mod_stat->buddy, mod_stat->vmalloc);
	rb_erase(&mod_stat->node, &mod_stat_root);
	kfree(mod_stat);
	memstat_module_count--;
	return 0;
}

/*
 * memstat_module_notify - module state-change hook.
 *
 * Starts tracking on MODULE_STATE_COMING and finalizes/removes tracking on
 * MODULE_STATE_GOING; other states are ignored.  Returns -1 when memstat
 * is disabled, otherwise the handler's result (0 on success).
 */
int memstat_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	if (!memstat_enable)
		return -1;

	mutex_lock(&memstat_lock);
	switch (val) {
	case MODULE_STATE_COMING:
		ret = memstat_add_statinfo(mod);
		break;
	case MODULE_STATE_GOING:
		ret = check_and_remove_module_memstat(mod);
		break;
	default:
		break;
	}
	mutex_unlock(&memstat_lock);

	return ret;
}

/* Module load/unload notifier driving per-module tracking. */
struct notifier_block memstat_module_nb = {
	.notifier_call = memstat_module_notify,
	.priority = 0,
};

/*
 * show - recompute and print per-module vmalloc and buddy usage.
 * @m: seq_file for /proc output, or NULL to route output to the kernel log.
 *
 * Resets every module's counters, walks vmap_area_list (under the kernel's
 * vmap_area_lock resolved via kallsyms) charging each area to the module
 * owning its caller address, then charges buddy pages via count_page().
 * Caller must hold memstat_lock.
 */
static int show(struct seq_file *m)
{
	struct module_stat *entry;
	struct vmap_area *va;
	struct module *module;
	unsigned long vm_count = 0;
	struct rb_node *n;

	/* Start every module from zero; counters are recomputed below. */
	for (n = rb_first(&mod_stat_root); n != NULL; n = rb_next(n)) {
		entry = rb_entry(n, struct module_stat, node);
		entry->vmalloc = 0;
		entry->buddy = 0;
	}

	/* Without the vmap lock we cannot walk vmap_area_list safely. */
	if (!memstat_vmap_area_lock)
		goto page_count;

	spin_lock(memstat_vmap_area_lock);
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		unsigned long caller = 0;
		unsigned int nr_pages = 0;

		if (!va->vm)
			continue;

		caller = (unsigned long)va->vm->caller;
		nr_pages = va->vm->nr_pages;

		if (caller == 0 || nr_pages == 0)
			continue;

		module = __module_address(caller);
		if (module) {
			entry = memstat_get_statinfo(module);
			if (entry)
				entry->vmalloc += nr_pages * PAGE_SIZE;
		}
		vm_count++;
		/* Long walks: poke the NMI watchdog periodically. */
		if (unlikely(vm_count % memstat_get_max_count_num() == 0))
			touch_nmi_watchdog();
	}
	spin_unlock(memstat_vmap_area_lock);

	cond_resched();

page_count:
	count_page();

	memstat_print(m, "module\t\t\t\tbuddy\t\tvmalloc\t\tcore_size\n");
	for (n = rb_first(&mod_stat_root); n != NULL; n = rb_next(n)) {
		entry = rb_entry(n, struct module_stat, node);
		memstat_print(m, "%30s\t%8ld\t%8ld\t%8d\n",
				entry->name, entry->buddy,
				entry->vmalloc, entry->core_size);
	}
	memstat_print(m, "\n");
	return 0;
}

/*
 * memstat_proc_show - /proc/memstat/statinfo read handler.
 *
 * Returns -1 when memstat is disabled and -EBUSY when another user holds
 * memstat_lock; otherwise prints the report and returns 0.
 */
static int memstat_proc_show(struct seq_file *m, void *s)
{
	if (!memstat_enable)
		return -1;

	if (mutex_trylock(&memstat_lock)) {
		show(m);
		mutex_unlock(&memstat_lock);
		return 0;
	}

	return -EBUSY;
}

/* seq_file open boilerplate for /proc/memstat/statinfo. */
static int proc_memstat_open(struct inode *inode, struct file *file)
{
	return single_open(file, memstat_proc_show, PDE_DATA(inode));
}

/*
 * show_in_oom - OOM notifier callback: dump the memstat report.
 *
 * Passing NULL to show() makes memstat_print() route all output through
 * pr_err() instead of a seq_file.
 */
static int show_in_oom(struct notifier_block *nb,
		unsigned long action, void *data)
{
	if (!memstat_enable)
		return -1;

	mutex_lock(&memstat_lock);
	show(NULL);
	mutex_unlock(&memstat_lock);

	return 0;
}

/* OOM notifier: print the per-module report when the system runs dry. */
static struct notifier_block show_in_oom_notifier = {
	.notifier_call = show_in_oom,
	.next = NULL,
	.priority = MEMSTAT_OOM_NOTIFY_PRIORITY,
};

/* File operations for /proc/memstat/statinfo (read-only). */
static const struct proc_ops proc_memstat_file_operations = {
	.proc_open		= proc_memstat_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release		= single_release,
};

/*
 * free_module_stat - tear down all tracking state.
 *
 * Raises free_stat_flag so the page hooks become no-ops, waits for every
 * in-flight hook to drain (buddy_stat_flag reaches zero), then frees all
 * page_stat slots (tree-resident and pooled), destroys the slab cache and
 * empties the module tree.  touch_nmi_watchdog() keeps the watchdog quiet
 * while freeing a potentially huge number of slots.
 */
static void free_module_stat(void)
{
	int loop;
	struct module_stat *entry;
	struct page_stat *pg_stat, *next;
	unsigned long flags;

	mutex_lock(&memstat_lock);
	atomic_inc(&free_stat_flag);
	wmb(); /* publish free_stat_flag before polling buddy_stat_flag */
	while (atomic_read(&buddy_stat_flag))
		cond_resched();

	write_lock_irqsave(&pg_stat_lock, flags);
	loop = 0;
	/* Free every slot still tracking a live allocation. */
	while (!RB_EMPTY_ROOT(&pg_stat_root)) {
		pg_stat = rb_entry(pg_stat_root.rb_node,
				struct page_stat, node);
		rb_erase(&pg_stat->node, &pg_stat_root);
		kfree(pg_stat->trace_entries);
		kmem_cache_free(page_stat_cache, pg_stat);
		loop++;
		if ((loop % PER_LOOP_FREE_CNT_MAX) == 0)
			touch_nmi_watchdog();
	}

	/* Free the unused pool. */
	list_for_each_entry_safe(pg_stat, next,
			&available_pg_stat_list, list) {
		list_del(&pg_stat->list);
		kfree(pg_stat->trace_entries);
		kmem_cache_free(page_stat_cache, pg_stat);
		loop++;
		if ((loop % PER_LOOP_FREE_CNT_MAX) == 0)
			touch_nmi_watchdog();
	}

	write_unlock_irqrestore(&pg_stat_lock, flags);

	kmem_cache_destroy(page_stat_cache);
	page_stat_cache = NULL;

	while (!RB_EMPTY_ROOT(&mod_stat_root)) {
		entry = rb_entry(mod_stat_root.rb_node,
				struct module_stat, node);
		rb_erase(&entry->node, &mod_stat_root);
		kfree(entry);
	}

	atomic_dec(&free_stat_flag);
	memstat_module_count = 0;
	mutex_unlock(&memstat_lock);
}

/*
 * alloc_for_page_stat - preallocate the page_stat slot pool.
 *
 * Creates the slab cache and allocates one page_stat (plus its trace
 * buffer) per currently free page so the tracking hooks never allocate in
 * atomic context.  On any failure all partial allocations are rolled back.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int alloc_for_page_stat(void)
{
	unsigned long i;	/* matches nr_pages; int could overflow */
	int loop;
	struct sysinfo val;
	unsigned long nr_pages;
	struct page_stat *pg_stat, *next;
	unsigned long alloc_size = 0;
	unsigned long flags;

	page_stat_cache = KMEM_CACHE(page_stat, 0);
	if (!page_stat_cache) {
		pr_err("[memstat]: create page_stat_cache failed.\n");
		return -ENOMEM;
	}

	si_meminfo(&val);
	nr_pages = val.freeram;

	for (i = 0; i < nr_pages; i++) {
		pg_stat = kmem_cache_alloc(page_stat_cache, GFP_KERNEL);
		if (!pg_stat)
			goto alloc_failed;

		alloc_size += sizeof(struct page_stat);
		pg_stat->trace_entries =
			kmalloc_array(memstat_trace_entries_nr,
					sizeof(unsigned long), GFP_KERNEL);
		if (!pg_stat->trace_entries) {
			kmem_cache_free(page_stat_cache, pg_stat);
			goto alloc_failed;
		}

		alloc_size += sizeof(unsigned long) * memstat_trace_entries_nr;
		write_lock_irqsave(&pg_stat_lock, flags);
		list_add(&pg_stat->list, &available_pg_stat_list);
		write_unlock_irqrestore(&pg_stat_lock, flags);
		if ((i % PER_LOOP_FREE_CNT_MAX) == 0)
			touch_nmi_watchdog();
	}

	/* alloc_size is unsigned long: print with %lu, not %ld. */
	pr_info("[memstat]: %s, size=%lu, nr_pages=%lu\n", __func__, alloc_size, nr_pages);
	return 0;

alloc_failed:
	write_lock_irqsave(&pg_stat_lock, flags);
	loop = 0;
	list_for_each_entry_safe(pg_stat, next, &available_pg_stat_list, list) {
		list_del(&pg_stat->list);
		kfree(pg_stat->trace_entries);
		kmem_cache_free(page_stat_cache, pg_stat);
		loop++;
		if ((loop % PER_LOOP_FREE_CNT_MAX) == 0)
			touch_nmi_watchdog();
	}
	write_unlock_irqrestore(&pg_stat_lock, flags);
	pr_info("[memstat]: %s failed, nr_pages=%lu\n", __func__, nr_pages);
	return -ENOMEM;
}

/*
 * alloc_module_stat - build tracking state for already-loaded modules.
 *
 * Preallocates the page_stat pool, then registers a module_stat entry for
 * every module on the list, holding module_mutex to keep the list stable
 * and memstat_lock around each tree insertion.
 * NOTE(review): mem_modules is defined elsewhere — presumably the kernel's
 * module list head; confirm against the companion patch.
 */
static int alloc_module_stat(void)
{
	struct module *mod;
	int ret = 0;

	ret = alloc_for_page_stat();
	if (ret)
		return ret;

	mutex_lock(&module_mutex);
	list_for_each_entry(mod, mem_modules, list) {
		mutex_lock(&memstat_lock);
		ret = memstat_add_statinfo(mod);
		mutex_unlock(&memstat_lock);
		if (ret)
			break;
	}
	mutex_unlock(&module_mutex);
	return ret;
}

/* Report the current enable state via /proc/memstat/enable. */
static int memstat_enable_proc_show(struct seq_file *m, void *s)
{
	memstat_print(m, "%d\n", memstat_enable);
	return 0;
}

/* seq_file open boilerplate for /proc/memstat/enable. */
static int proc_memstat_enable_open(struct inode *inode, struct file *file)
{
	return single_open(file, memstat_enable_proc_show, PDE_DATA(inode));
}

/*
 * proc_memstat_enable_write - toggle memstat tracking from user space.
 *
 * Writing non-zero creates the stat/filter/trace proc files, registers the
 * notifiers and allocates tracking state; writing zero tears it all down.
 * memstat_enable_flag serializes concurrent writers.
 *
 * Returns @cnt on success or a negative errno.
 */
static ssize_t proc_memstat_enable_write(struct file *file, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long val;
	ssize_t ret = 0;	/* ssize_t so "ret = cnt" cannot truncate */

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret) {
		pr_err("[memstat]: parse input parameter for memstat enable failed.\n");
		return ret;
	}

	/* Only one writer may flip the state at a time. */
	if (atomic_cmpxchg(&memstat_enable_flag, 0, 1))
		return -EPERM;

	val = !!val;
	if (memstat_enable ^ val) {	/* state actually changes */
		if (!val) {
			memstat_enable = 0;
			ret = cnt;
			goto memstat_disable;
		} else {
			ret = init_proc_and_notifier();
			if (ret)
				goto out;

			ret = alloc_module_stat();
			if (ret)
				goto memstat_disable;
			memstat_enable = 1;
		}
	}
	atomic_set(&memstat_enable_flag, 0);
	return cnt;

memstat_disable:
	remove_proc_and_notifier();
	free_module_stat();
	memset(memstat_module_filter, 0, MODULE_NAME_LEN);
out:
	atomic_set(&memstat_enable_flag, 0);
	return ret;
}

/* File operations for /proc/memstat/enable (read/write). */
static const struct proc_ops proc_memstat_enable_file_operations = {
	.proc_open		= proc_memstat_enable_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release		= single_release,
	.proc_write		= proc_memstat_enable_write,
};

/* Report the configured stack depth via /proc/memstat/trace_entries. */
static int memstat_trace_entries_proc_show(struct seq_file *m, void *s)
{
	memstat_print(m, "%d\n", memstat_trace_entries_nr);
	return 0;
}

/* seq_file open boilerplate for /proc/memstat/trace_entries. */
static int proc_memstat_trace_entries_open(struct inode *inode, struct file *file)
{
	return single_open(file, memstat_trace_entries_proc_show, PDE_DATA(inode));
}

/*
 * proc_memstat_trace_entries_write - set saved stack depth per allocation.
 *
 * Only allowed while tracking is disabled, because the per-page trace
 * buffers are sized from this value when tracking is enabled.
 */
static ssize_t proc_memstat_trace_entries_write(struct file *file, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	int ret;
	unsigned long val;

	if (memstat_enable)
		return -1;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret) {
		pr_err("[memstat]: parse input parameter for trace_entries failed.\n");
		return ret;
	}

	/* val is unsigned, so "val <= 0" could only mean "val == 0". */
	if (val == 0 || val > PAGE_TRACE_ENTRIES_MAX) {
		pr_err("[memstat]: set memstat_trace_entries failed, should be 1 to 8.\n");
		return -EINVAL;
	}

	memstat_trace_entries_nr = val;
	return cnt;
}

/* File operations for /proc/memstat/trace_entries (read/write). */
static const struct proc_ops proc_memstat_trace_entries_file_operations = {
	.proc_open		= proc_memstat_trace_entries_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release		= single_release,
	.proc_write		= proc_memstat_trace_entries_write,
};

/*
 * memstat_module_filter_proc_show - print the configured module filter.
 *
 * Appends a newline only when a filter is actually set.  Returns -EBUSY
 * when memstat_lock is contended.
 */
static int memstat_module_filter_proc_show(struct seq_file *m, void *s)
{
	int ret = -EBUSY;

	if (mutex_trylock(&memstat_lock)) {
		memstat_print(m, "%s", memstat_module_filter);
		if (memstat_module_filter[0] != '\0')
			memstat_print(m, "\n");
		mutex_unlock(&memstat_lock);
		ret = 0;
	}

	return ret;
}

/* seq_file open boilerplate for /proc/memstat/module_filter. */
static int proc_memstat_module_filter_open(struct inode *inode, struct file *file)
{
	return single_open(file, memstat_module_filter_proc_show, PDE_DATA(inode));
}

/*
 * proc_memstat_module_filter_write - select the module for module_trace.
 *
 * Accepts a module name (an empty write clears the filter); the name must
 * match a currently tracked module.  Returns @cnt on success or a negative
 * errno.
 */
static ssize_t proc_memstat_module_filter_write(struct file *file, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char name[MODULE_NAME_LEN];
	struct module_stat *stat = NULL;

	if (cnt >= MODULE_NAME_LEN || cnt == 0)
		return -EINVAL;

	/* copy_from_user() returns bytes NOT copied, not an errno. */
	if (copy_from_user(name, ubuf, cnt)) {
		pr_err("[memstat]: copy from user for module_filter failed.\n");
		return -EFAULT;
	}
	name[cnt - 1] = '\0';	/* overwrite last byte (usually '\n') */

	if (!mutex_trylock(&memstat_lock))
		return -EBUSY;

	if (strlen(name) == 0) {
		memset(memstat_module_filter, 0, MODULE_NAME_LEN);
		mutex_unlock(&memstat_lock);
		return cnt;
	}

	stat = memstat_get_statinfo_byname(name);
	if (!stat) {
		pr_err("[memstat]: no such module[%s], please check input.\n", name);
		mutex_unlock(&memstat_lock);
		return -EINVAL;
	}
	strncpy(memstat_module_filter, name, MODULE_NAME_LEN);
	mutex_unlock(&memstat_lock);
	return cnt;
}

/* File operations for /proc/memstat/module_filter (read/write). */
static const struct proc_ops proc_memstat_module_filter_file_operations = {
	.proc_open		= proc_memstat_module_filter_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release		= single_release,
	.proc_write		= proc_memstat_module_filter_write,
};

/*
 * memstat_module_trace_proc_show - dump tracked pages for the filtered module.
 *
 * Requires module_filter to be set.  Snapshots the page tree onto the gray
 * list under the read lock, then walks the snapshot (may sleep) printing
 * each page owned by the selected module together with its saved stack
 * trace.  memstat_lock serializes gray-list users.
 */
static int memstat_module_trace_proc_show(struct seq_file *m, void *s)
{
	struct page_stat *pg_stat, *next;
	struct page *page;
	struct module_stat *mod_stat;
	unsigned long flags;
	struct rb_node *n;
	unsigned int i;	/* unsigned: compared against nr_entries */

	if (!mutex_trylock(&memstat_lock))
		return -EBUSY;

	if (!strlen(memstat_module_filter)) {
		pr_err("[memstat]: module_filter is empty, please set module_filter.\n");
		mutex_unlock(&memstat_lock);
		return -EINVAL;
	}

	mod_stat = memstat_get_statinfo_byname(memstat_module_filter);
	if (!mod_stat) {
		pr_err("[memstat]: get module_stat for module[%s] failed.\n",
				memstat_module_filter);
		mutex_unlock(&memstat_lock);
		return -EINVAL;
	}

	read_lock_irqsave(&pg_stat_lock, flags);
	for (n = rb_first(&pg_stat_root); n != NULL; n = rb_next(n)) {
		pg_stat = rb_entry(n, struct page_stat, node);
		list_add(&pg_stat->gray_list, &pg_stat_gray_list);
	}
	read_unlock_irqrestore(&pg_stat_lock, flags);

	list_for_each_entry_safe(pg_stat, next, &pg_stat_gray_list, gray_list) {
		list_del(&pg_stat->gray_list);
		if (pg_stat->size == 0)
			continue;
		if (!check_module_address(pg_stat->trace_entries,
				pg_stat->nr_entries, mod_stat->mod))
			continue;
		page = pfn_to_page(pg_stat->pfn);
		/* pfn is unsigned long: print with %lu, not %ld. */
		memstat_print(m, "page[%pK], pfn[%lu], size[%u], module[%s]\n",
			page, pg_stat->pfn,
			pg_stat->size, mod_stat->name);
		for (i = 0; i < pg_stat->nr_entries; i++) {
			memstat_print(m, "[<%pK>] %pS\n",
				(void *)pg_stat->trace_entries[i],
				(void *)pg_stat->trace_entries[i]);
		}
		cond_resched();
	}

	mutex_unlock(&memstat_lock);
	return 0;
}

/* seq_file open boilerplate for /proc/memstat/module_trace. */
static int proc_memstat_module_trace_open(struct inode *inode, struct file *file)
{
	return single_open(file, memstat_module_trace_proc_show, PDE_DATA(inode));
}

/* File operations for /proc/memstat/module_trace (read-only). */
static const struct proc_ops proc_memstat_module_trace_file_operations = {
	.proc_open		= proc_memstat_module_trace_open,
	.proc_read		= seq_read,
	.proc_lseek		= seq_lseek,
	.proc_release		= single_release,
};

/*
 * init_proc_and_notifier - create the tracking proc files and notifiers.
 *
 * Creates statinfo/module_filter/module_trace under /proc/memstat and
 * registers the OOM and module notifiers.  On any failure the goto chain
 * unwinds everything created so far.  Returns 0 or a negative errno.
 */
static int init_proc_and_notifier(void)
{
	int ret;

	if (!proc_create("statinfo", 0400, memstat_proc_dir,
				&proc_memstat_file_operations)) {
		pr_err("[memstat]: create /proc/memstat/statinfo failed.\n");
		return -ENOMEM;
	}

	if (!proc_create("module_filter", 0600, memstat_proc_dir,
			&proc_memstat_module_filter_file_operations)) {
		pr_err("[memstat]: create /proc/memstat/module_filter failed.\n");
		ret = -ENOMEM;
		goto proc_filter_failed;
	}

	if (!proc_create("module_trace", 0400, memstat_proc_dir,
			&proc_memstat_module_trace_file_operations)) {
		pr_err("[memstat]: create /proc/memstat/module_trace failed.\n");
		ret = -ENOMEM;
		goto proc_trace_failed;
	}

	ret = register_oom_notifier(&show_in_oom_notifier);
	if (ret) {
		pr_err("[memstat]: register oom notifier failed.\n");
		goto oom_register_failed;
	}

	ret = register_module_notifier(&memstat_module_nb);
	if (ret) {
		pr_err("[memstat]: register module notifier failed.\n");
		goto module_register_failed;
	}
	return 0;

	/* Unwind in reverse order of creation. */
module_register_failed:
	unregister_oom_notifier(&show_in_oom_notifier);
oom_register_failed:
	remove_proc_entry("module_trace", memstat_proc_dir);
proc_trace_failed:
	remove_proc_entry("module_filter", memstat_proc_dir);
proc_filter_failed:
	remove_proc_entry("statinfo", memstat_proc_dir);
	return ret;
}

/*
 * remove_proc_and_notifier - undo init_proc_and_notifier().
 *
 * Removes the three tracking proc files, then unregisters both notifiers.
 */
static void remove_proc_and_notifier(void)
{
	remove_proc_entry("statinfo", memstat_proc_dir);
	remove_proc_entry("module_filter", memstat_proc_dir);
	remove_proc_entry("module_trace", memstat_proc_dir);
	unregister_oom_notifier(&show_in_oom_notifier);
	unregister_module_notifier(&memstat_module_nb);
}

/*
 * "memstat_enable" boot parameter: start tracking from memstat_init().
 * __setup() handlers return 1 once the option has been consumed, so it is
 * not passed on to init as an environment/argument string.
 */
static int __init setup_memstat_enable(char *str)
{
	memstat_setup_enable = 1;
	return 1;
}
__setup("memstat_enable", setup_memstat_enable);

/*
 * "memstat_trace_entries=N" boot parameter: stack depth saved per page.
 * Invalid values are ignored (the default applies).  Always returns 1 so
 * the consumed option is not passed on to init.
 */
static int __init setup_memstat_trace_entries_set(char *str)
{
	unsigned long trace_entries_nr;

	if (kstrtoul(str, 0, &trace_entries_nr)) {
		pr_info("[memstat]: convert memstat_trace_entries failed.\n");
		return 1;
	}

	/* unsigned value: "<= 0" could only ever mean "== 0". */
	if (trace_entries_nr == 0 || trace_entries_nr > PAGE_TRACE_ENTRIES_MAX) {
		pr_info("[memstat]: set trace entries failed, use default.\n");
		return 1;
	}
	memstat_trace_entries_nr = trace_entries_nr;
	return 1;
}
__setup("memstat_trace_entries=", setup_memstat_trace_entries_set);

/*
 * memstat_init - create /proc/memstat and optionally start tracking.
 *
 * vmap_area_lock is not exported, so its address is resolved through
 * kallsyms; without it the vmalloc accounting is skipped but buddy
 * tracking still works.  When the "memstat_enable" boot parameter was not
 * given, only the enable/trace_entries control files are created and
 * tracking is started later from proc_memstat_enable_write(); otherwise
 * tracking is brought up immediately.
 */
static int __init memstat_init(void)
{
	int ret = 0;

	pr_info("[memstat]starting init memstat for modules\n");

	memstat_vmap_area_lock = (spinlock_t *)kallsyms_lookup_name("vmap_area_lock");
	if (!memstat_vmap_area_lock)
		pr_err("[memstat]: get vmap_area_lock for memstat failed.\n");

	if (!memstat_trace_entries_nr)
		memstat_trace_entries_nr = PAGE_TRACE_ENTRIES_DEFAULT;

	memstat_proc_dir = proc_mkdir_mode("memstat", 0550, NULL);
	if (!memstat_proc_dir) {
		pr_err("[memstat]: create /proc/memstat dir failed.\n");
		return -ENOMEM;
	}

	/* Deferred mode: expose only the control files. */
	if (!memstat_setup_enable) {
		if (!proc_create("enable", 0600, memstat_proc_dir,
					&proc_memstat_enable_file_operations)) {
			pr_err("[memstat]: create /proc/memstat/enable failed.\n");
			ret = -ENOMEM;
			goto remove_proc_dir;
		}
		if (!proc_create("trace_entries", 0600, memstat_proc_dir,
					&proc_memstat_trace_entries_file_operations)) {
			pr_err("[memstat]: create /proc/memstat/trace_entries failed.\n");
			remove_proc_entry("enable", memstat_proc_dir);
			ret = -ENOMEM;
			goto remove_proc_dir;
		}
		return 0;
	}

	ret = init_proc_and_notifier();
	if (ret)
		goto remove_proc_dir;

	ret = alloc_module_stat();
	if (ret) {
		remove_proc_and_notifier();
		free_module_stat();
		goto remove_proc_dir;
	}

	memstat_enable = 1;
	return 0;

remove_proc_dir:
	proc_remove(memstat_proc_dir);
	return ret;
}
module_init(memstat_init)

/*
 * memstat_get_max_count_num - watchdog-touch interval for long list walks.
 *
 * Scales MEMSTAT_COUNT_MAX_NUM down by the number of tracked modules; the
 * full constant is used while no modules are tracked.
 */
unsigned long memstat_get_max_count_num(void)
{
	int nr_mods = memstat_module_count;

	return (nr_mods > 0) ? MEMSTAT_COUNT_MAX_NUM / nr_mods
			     : MEMSTAT_COUNT_MAX_NUM;
}
