// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Author: Huawei OS Kernel Lab
 * Create: Tue Oct 12 15:30:27 2021
 */

#include <asm/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <trace/hooks/liblinux.h>

/* Categories of memory allocations tracked per module. */
enum mod_alloc_type {
	ALLOC_TYPE_SLAB = 0,
	ALLOC_TYPE_GFP,
	ALLOC_TYPE_VMALLOC,
	ALLOC_TYPE_NR	/* number of types; also bounds the policy table */
};

/* Red-black tree of allocation records plus its protecting lock. */
struct mod_rb_tree {
	struct rb_root root;
	spinlock_t lock;
};

/* Tree for page (GFP) allocations; lock initialised in _dfx_enable_modstat(). */
static struct mod_rb_tree gfp_tree = {
	.root = RB_ROOT,
};

/* Tree for slab and vmalloc allocations. */
static struct mod_rb_tree stat_tree = {
	.root = RB_ROOT,
};

#define MEMSTAT_MODULE_NAME 15U	/* buffer size for the stored module name */
#define MEMSTAT_SLAB_CACHE_NAME 16U	/* buffer size for the slab cache name */
/* Key for one tracked allocation: start address and length. */
struct memstat_key {
	void *addr;
	size_t size;
};

/* Call-site information captured when the allocation was made. */
struct memstat_caller {
	unsigned long endstack;	/* PC of the immediate caller frame */
	unsigned long topstack;	/* PC nearest the top of the call chain */
	union {
		unsigned long keystack;	/* extra PC (gfp/vmalloc records) */
		char slab_cache_name[MEMSTAT_SLAB_CACHE_NAME];	/* slab records */
	};
	char mem_type;	/* one of enum mod_alloc_type */
	char mod_name[MEMSTAT_MODULE_NAME];
};

/* One live allocation record; linked into both an rb tree and a usage list. */
struct mod_alloc_stat {
	struct mod_rb_tree *tree;	/* tree this record lives in */
	struct rb_node node;
	struct module_mem_usage *usage;	/* owning module's usage record */
	struct list_head list;	/* entry in usage->stat_list */
	struct memstat_key mem_node;
	struct memstat_caller caller;
};

/* Per-module bookkeeping; mod_mem_usage below doubles as the list head. */
struct module_mem_usage {
	struct list_head list;
	struct module *mod;	/* set to NULL once the module is unloaded */
	bool alive;		/* false after MODULE_STATE_GOING */
	struct list_head stat_list;	/* records charged to this module */
	spinlock_t stat_splock;	/* protects stat_list */
};

static DEFINE_RWLOCK(mod_mem_usage_rwlock);
/* Head of the usage list; also the bucket for builtin (non-module) allocations. */
static struct module_mem_usage mod_mem_usage;

/*
 * Record the number of applied stats, which is convenient
 * for calculating the occupied memory size.
 * Use the same lock as rb tree.
 */
static atomic_t alloc_stat_num = ATOMIC_INIT(0);

static atomic_t alloc_usage_num = ATOMIC_INIT(0);

/* Module parameter defined elsewhere; gates the whole feature. */
extern int memstat_enable;

/*
 * Module notifier: create a usage record when a module loads and retire
 * it when the module unloads.  A record that still owns allocation stats
 * (non-empty stat_list) is kept as a leak record instead of being freed.
 * Always returns NOTIFY_DONE.
 */
static int
__mod_mem_usage_cb(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct module *mod = (struct module *)ptr;
	struct module_mem_usage *usage = NULL;
	struct module_mem_usage *tmp = NULL;

	switch (event) {
	case MODULE_STATE_COMING:
		usage = (struct module_mem_usage *)liblinux_pal_malloc(sizeof(*usage));
		if (usage != NULL) {
			usage->mod = mod;
			usage->alive = true;
			INIT_LIST_HEAD(&usage->stat_list);
			spin_lock_init(&usage->stat_splock);
			write_lock(&mod_mem_usage_rwlock);
			list_add_tail(&usage->list, &mod_mem_usage.list);
			write_unlock(&mod_mem_usage_rwlock);
			atomic_inc(&alloc_usage_num);
		}
		break;
	case MODULE_STATE_GOING:
		/* kill core symbol */
		write_lock(&mod_mem_usage_rwlock);
		list_for_each_entry_safe(usage, tmp, &mod_mem_usage.list, list) {
			if (usage->mod != mod)
				continue;
			/*
			 * Handle the match inside the loop so nothing
			 * dereferences `usage` after an unmatched walk (the
			 * previous code read usage->mod post-loop, which then
			 * aliased the list-head container).
			 */
			if (list_empty(&usage->stat_list)) {
				list_del(&usage->list);
				liblinux_pal_free(usage);
				atomic_dec(&alloc_usage_num);
			} else {
				/*
				 * stat_list not empty: keep the record so the
				 * leaked allocations remain reported.
				 */
				usage->mod = NULL;
				usage->alive = false;
			}
			break;
		}
		write_unlock(&mod_mem_usage_rwlock);
		break;
	default:
		/* nothing to do */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block mod_mem_usage_nb = {
	.notifier_call = __mod_mem_usage_cb,
};

/*
 * Count how many bytes of [addr, addr + size) are backed by resident
 * physical pages, as reported by liblinux_pal_mincore().
 * Returns the resident byte count, or a negative errno on failure.
 */
static long __mod_vmalloc_pmem_stat(void *addr, size_t size)
{
	int i;
	int ret;
	size_t count = 0;
	size_t tmp_size = 0;
	size_t remain_size = size;
	unsigned char *vec = NULL;
	/* addr must be page aligned */
	void *align_addr = (void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE);
	size_t head = addr - align_addr;	/* unaligned prefix inside page 0 */
	size_t align_size = size + head;
	int page_num = (align_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	vec = (unsigned char *)liblinux_pal_malloc(page_num * sizeof(unsigned char));
	if (vec == NULL) {
		pr_err("alloc vec failed\n");
		return -ENOMEM;
	}

	memset(vec, 0, page_num);
	ret = liblinux_pal_mincore(align_addr, align_size, vec);
	if (ret < 0) {
		pr_err("failed to obtain the addr mapping information, ret=%d\n", ret);
		liblinux_pal_free(vec);
		return ret;
	}

	for (i = 0; i < page_num; i++) {
		/* bytes of the request that fall inside page i */
		if ((i == 0) && (head != 0))
			tmp_size = min_t(size_t, remain_size, PAGE_SIZE - head);
		else
			tmp_size = min_t(size_t, remain_size, PAGE_SIZE);
		if (vec[i] == 1)
			count += tmp_size;
		/*
		 * Consume the range for every page, resident or not.  The old
		 * code decremented only for resident pages, so a non-resident
		 * page made a later partial tail page count as a full page.
		 */
		remain_size -= tmp_size;
	}

	liblinux_pal_free(vec);

	return count;
}

/* Accumulated per-module totals, one field per allocation class. */
struct mod_stat_res {
	unsigned long slab_stat;
	unsigned long gfp_stat;
	unsigned long vmalloc_vmem_stat;	/* virtual size of vmalloc areas */
	unsigned long vmalloc_pmem_stat;	/* resident physical bytes */
};

/*
 * Add one allocation record to the running totals in @res.
 * For vmalloc records the resident physical size is probed as well.
 */
static
void __mod_one_stat(struct mod_alloc_stat *stat, struct mod_stat_res *res)
{
	/* long, not int: __mod_vmalloc_pmem_stat() returns long and an
	 * int would truncate counts above 2GB. */
	long ret;

	switch (stat->caller.mem_type) {
	case ALLOC_TYPE_SLAB:
		res->slab_stat += stat->mem_node.size;
		break;
	case ALLOC_TYPE_GFP:
		res->gfp_stat += stat->mem_node.size;
		break;
	case ALLOC_TYPE_VMALLOC:
		ret = __mod_vmalloc_pmem_stat(stat->mem_node.addr, stat->mem_node.size);
		if (ret < 0)
			pr_err("pmem_stat failed, pmem_size statistics for vmalloc\n");
		else
			res->vmalloc_pmem_stat += (unsigned long)ret;

		res->vmalloc_vmem_stat += stat->mem_node.size;
		break;
	default:
		/* do nothing */
		break;
	}
}

static void __mod_stat(struct module_mem_usage *usage, struct mod_stat_res *res)
{
	struct mod_alloc_stat *stat = NULL;

	spin_lock(&usage->stat_splock);
	list_for_each_entry(stat, &usage->stat_list, list)
		__mod_one_stat(stat, res);

	spin_unlock(&usage->stat_splock);
}

/*
 * procfs node for module memstat
 */
static void __show_stat(struct seq_file *m, const char *name,
			struct module_mem_usage *usage)
{
	struct mod_stat_res res = { 0 };

	__mod_stat(usage, &res);
	seq_printf(m, "%-20s %15lu %15lu %15lu %15lu\n",
		   name, res.slab_stat, res.gfp_stat,
		   res.vmalloc_vmem_stat, res.vmalloc_pmem_stat);
}

/*
 * seq_file start: print the header and the builtin row on the first
 * iteration, then hand out list positions.  Takes mod_mem_usage_rwlock
 * for the whole traversal; released in m_stop().
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	read_lock(&mod_mem_usage_rwlock);
	if (*pos == 0) {
		/* atomic_read() * sizeof() promotes to size_t: print with
		 * %zu (the old %ld mismatched the argument type). */
		seq_printf(m, "ldk_memstat malloc size:\t%zu\n\n",
			   atomic_read(&alloc_stat_num) * sizeof(struct mod_alloc_stat)
			   + atomic_read(&alloc_usage_num) * sizeof(struct module_mem_usage));

		seq_printf(m, "%-20s %15s %15s %15s %15s\n",
			   "mod_name", "slab", "gfp", "vmalloc-vmem",
			   "vmalloc-pmem");
		__show_stat(m, "<builtin>", &mod_mem_usage);
	}
	return seq_list_start(&mod_mem_usage.list, *pos);
}

/* seq_file next: advance to the next usage entry. */
static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &mod_mem_usage.list, pos);
}

/* seq_file stop: drops the read lock taken in m_start(). */
static void m_stop(struct seq_file *m, void *p)
{
	read_unlock(&mod_mem_usage_rwlock);
}

/* seq_file show: one row per module; unloaded leakers print "<unload>". */
static int m_show(struct seq_file *m, void *p)
{
	struct module_mem_usage *usage = list_entry(p, struct module_mem_usage, list);
	const char *label = usage->alive ? usage->mod->name : "<unload>";

	__show_stat(m, label, usage);
	return 0;
}

/* Summary view: one row per tracked module. */
static const struct seq_operations mod_slab_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

/* What the proc node shows; selected through mod_stat_write(). */
enum memstat_show_type {
	MEMSTAT_SHOW_TOTAL = 0,	/* per-module summary table */
	MEMSTAT_SHOW_GFP,	/* every record in gfp_tree */
	MEMSTAT_SHOW_OTHER	/* every record in stat_tree (slab/vmalloc) */
};
static enum memstat_show_type memstat_detail = MEMSTAT_SHOW_TOTAL;

/*
 * seq_file start for the detail views: take the lock of the selected rb
 * tree and walk to the *pos-th record.  The lock is released in
 * m_one_stat_stop().
 * NOTE(review): the lock to take is chosen from memstat_detail, which a
 * concurrent mod_stat_write() could change before stop() runs, making
 * stop() unlock the other tree's lock — confirm writers are excluded
 * while the file is open.
 */
static void *m_one_stat_start(struct seq_file *m, loff_t *pos)
{
	struct rb_node *n;
	int num = *pos;	/* loff_t narrowed to int; fine for realistic record counts */
	struct rb_root *root_node;

	if (*pos == 0) {
		seq_printf(m, "%-16s\t %8s\t %4s\t %16s\t %32s\t %32s\t %16s\n",
		"addr", "size", "type", "mod_name",
		"end_stack", "top_stack", "slab_name");
	}

	if (memstat_detail == MEMSTAT_SHOW_GFP) {
		root_node = &gfp_tree.root;
		spin_lock(&gfp_tree.lock);
	} else {
		root_node = &stat_tree.root;
		spin_lock(&stat_tree.lock);
	}

	/* linear scan to the requested position */
	for (n = rb_first(root_node); n != NULL; n = rb_next(n)) {
		if (num-- == 0)
			return n;
	}

	return NULL;
}

/* seq_file next: step to the in-order successor in the rb tree. */
static void *m_one_stat_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct rb_node *n;

	n = rb_next(p);
	++(*pos);
	return n;
}

/*
 * seq_file stop: release whichever tree lock m_one_stat_start() took.
 * NOTE(review): relies on memstat_detail being unchanged since start().
 */
static void m_one_stat_stop(struct seq_file *m, void *p)
{
	if (memstat_detail == MEMSTAT_SHOW_GFP)
		spin_unlock(&gfp_tree.lock);
	else
		spin_unlock(&stat_tree.lock);
}

/* Non-zero while an allocation hook is in flight (see __mod_alloc_hook). */
static atomic_t alloc_num = ATOMIC_INIT(0);

/*
 * Render a stack address into @name: symbolize it when no allocation
 * hook is running, otherwise fall back to the raw hex value
 * (presumably because symbolization could recurse into the tracked
 * allocators — TODO confirm).
 */
static void __switch_stack_to_name(unsigned long stack, char *name)
{
	if (atomic_read(&alloc_num) == 0)
		sprint_symbol_no_offset(name, stack);
	else
		sprintf(name, "0x%lx", stack);
}

/* seq_file show for the detail views: one line per allocation record. */
static int m_one_stat_show(struct seq_file *m, void *p)
{
	struct mod_alloc_stat *stat = rb_entry(p, struct mod_alloc_stat, node);
	char end_name[KSYM_SYMBOL_LEN] = {0};
	char top_name[KSYM_SYMBOL_LEN] = {0};
	char key_name[KSYM_SYMBOL_LEN] = {0};

	if (stat == NULL) {
		pr_err("module usage is NULL\n");
		return -EINVAL;
	}

	__switch_stack_to_name(stat->caller.endstack, end_name);
	__switch_stack_to_name(stat->caller.topstack, top_name);
	/* slab records carry a cache name in the union instead of a key PC */
	if (stat->caller.mem_type != ALLOC_TYPE_SLAB)
		__switch_stack_to_name(stat->caller.keystack, key_name);
	else
		strncpy(key_name, stat->caller.slab_cache_name, MEMSTAT_SLAB_CACHE_NAME);

	seq_printf(m, "%-16lx\t %8lu\t %4d\t %16s\t %32s\t %32s\t %32s\n",
			(unsigned long)stat->mem_node.addr, stat->mem_node.size,
			stat->caller.mem_type, stat->caller.mod_name,
			end_name, top_name, key_name);

	return 0;
}

/* Detail view: one line per allocation record of the selected tree. */
static const struct seq_operations mod_one_stat_op = {
	.start	= m_one_stat_start,
	.next	= m_one_stat_next,
	.stop	= m_one_stat_stop,
	.show	= m_one_stat_show
};

/* Busy flag values for the proc node: only one open at a time. */
enum mod_status_type {
	RUN_STATUS = 0,
	WAIT_STATUS = 1,
};

static int memstat_status = WAIT_STATUS;
/* NOTE(review): not static, so the symbol is global — confirm no other
 * translation unit uses it before narrowing the linkage. */
DEFINE_SPINLOCK(status_lock);

/* Mark the proc node free for the next opener. */
static void set_memstat_wait(void)
{
	spin_lock(&status_lock);
	memstat_status = WAIT_STATUS;
	spin_unlock(&status_lock);
}

/*
 * Atomically test-and-set the busy flag.
 * Returns the previous status: WAIT_STATUS means the caller now owns
 * the node, RUN_STATUS means someone else already has it open.
 */
static int update_memstat_status(void)
{
	int status;

	spin_lock(&status_lock);
	status = memstat_status;
	if (memstat_status == WAIT_STATUS)
		memstat_status = RUN_STATUS;

	spin_unlock(&status_lock);
	return status;
}

/*
 * Open /proc/mod_memstat.  The RUN/WAIT status acts as a single-reader
 * busy flag.  On seq_open() failure the flag must be returned to WAIT,
 * otherwise the node would report -EBUSY forever (the old code leaked
 * the RUN state on that path).
 */
static int mod_slab_open(struct inode *inode, struct file *file)
{
	int ret;

	if (update_memstat_status() == RUN_STATUS)
		return -EBUSY;

	if (memstat_detail == MEMSTAT_SHOW_TOTAL)
		ret = seq_open(file, &mod_slab_op);
	else
		ret = seq_open(file, &mod_one_stat_op);

	if (ret != 0)
		set_memstat_wait();

	return ret;
}

/*
 * Control interface: "detail=gfp" / "detail=slab" / "detail=off" select
 * what the next open of the proc node shows.
 * Returns @size on success, -EINVAL or -EFAULT on bad input.
 */
static ssize_t mod_stat_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	ssize_t len;

	/* a zero-size write used to index buf[-1]; reject it instead */
	if (size == 0)
		return -EINVAL;

	len = strncpy_from_user(buf, user_buf, min(size, sizeof(buf) - 1));
	if (len < 0) {
		pr_err("input is err\n");
		return -EFAULT;
	}
	buf[len] = '\0';
	/* strip one trailing newline from echo-style writers; the old code
	 * chopped the last byte unconditionally, breaking newline-less input */
	if (len > 0 && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	if (strcmp(buf, "detail=gfp") == 0)
		memstat_detail = MEMSTAT_SHOW_GFP;
	else if (strcmp(buf, "detail=slab") == 0)
		memstat_detail = MEMSTAT_SHOW_OTHER;
	else if (strcmp(buf, "detail=off") == 0)
		memstat_detail = MEMSTAT_SHOW_TOTAL;
	else {
		pr_warn("input parameter not support\n");
		return -EINVAL;
	}

	return size;
}

/* Release: clear the busy flag, then free the seq_file state. */
static int mod_stat_release(struct inode *inode, struct file *file)
{
	set_memstat_wait();
	return seq_release(inode, file);
}

/* /proc/mod_memstat file operations. */
static const struct proc_ops proc_mod_slab_operations = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= mod_slab_open,
	.proc_read	= seq_read,
	.proc_write	= mod_stat_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= mod_stat_release,
};

/* Result of walking the current stack: caller PCs and the owning module. */
struct mod_stack {
	unsigned long endstack;	/* first module-text PC encountered */
	unsigned long topstack;	/* last module-text PC encountered */
	struct module *mod;	/* module owning topstack; NULL if builtin */
};
/*
 * alloc hook
 */
/*
 * Stack-walk callback: remember the first and last PCs that fall inside
 * any module's text.  Once a topstack is set, the infrastructure modules
 * "of", "transfs" and "devhost" are not allowed to override it, so the
 * allocation is charged to the real caller module.  Both paths return
 * true so the walk always continues.
 */
static bool __walkfn(void *d, unsigned long pc)
{
	struct mod_stack *stack_info = (struct mod_stack *)d;
	struct module *mod = NULL;

	/* __module_text_address() must be called with preemption disabled */
	preempt_disable();
	mod = __module_text_address(pc);
	preempt_enable();

	if (mod != NULL) {
		if (stack_info->endstack == 0)
			stack_info->endstack = pc;

		if ((stack_info->topstack != 0) &&
			((strcmp(mod->name, "of") == 0 ||
			strcmp(mod->name, "transfs") == 0 ||
			strcmp(mod->name, "devhost") == 0)))
			return true;

		stack_info->mod = mod;
		stack_info->topstack = pc;
	}
	return true;
}

/*
 * Walk the current call stack (starting above the hook frames) and fill
 * @stack_info with the calling module, if any.
 * NOTE(review): __builtin_frame_address/__builtin_return_address with a
 * non-zero level are compiler/ABI dependent; confirmed working only on
 * the target toolchain.
 */
static void __resolve_stack_mod(struct mod_stack *stack_info)
{
	struct stackframe frame;

	start_backtrace(&frame,
			/* skip hook */
			(unsigned long)__builtin_frame_address(2),
			(unsigned long)__builtin_return_address(1));

	walk_stackframe(current, &frame, __walkfn, stack_info);
}

/*
 * Insert @stat into @rb_tree (ordered by non-overlapping address range)
 * and append it to the owner's stat_list.  Caller holds rb_tree->lock.
 * If the new range overlaps an existing record the insert is abandoned
 * and @stat is FREED — callers must not touch @stat afterwards.
 */
static void __mod_alloc_insert_nolock(struct mod_rb_tree *rb_tree,
				      struct module_mem_usage *usage,
				      struct mod_alloc_stat *stat)
{
	struct mod_alloc_stat *parent = NULL;
	struct rb_node **link = NULL;
	struct rb_node *rb_parent = NULL;

	link = &rb_tree->root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct mod_alloc_stat, node);
		if (stat->mem_node.addr + stat->mem_node.size <= parent->mem_node.addr) {
			link = &parent->node.rb_left;
		} else if (parent->mem_node.addr + parent->mem_node.size <= stat->mem_node.addr) {
			link = &parent->node.rb_right;
		} else {
			/* overlapping range: drop the record */
			pr_debug("Can't insert mod stat into the stat search tree\n");
			liblinux_pal_free(stat);
			return;
		}
	}
	rb_link_node(&stat->node, rb_parent, link);
	rb_insert_color(&stat->node, &rb_tree->root);

	spin_lock(&usage->stat_splock);
	list_add_tail(&stat->list, &usage->stat_list);
	spin_unlock(&usage->stat_splock);

	atomic_inc(&alloc_stat_num);
}

/* Locked wrapper around __mod_alloc_insert_nolock(). */
static void __mod_alloc_insert(struct mod_rb_tree *rb_tree,
			       struct module_mem_usage *usage,
			       struct mod_alloc_stat *stat)
{
	spin_lock(&rb_tree->lock);
	__mod_alloc_insert_nolock(rb_tree, usage, stat);
	spin_unlock(&rb_tree->lock);
}

#ifdef CONFIG_STACKTRACE
#define MEMSTAT_MAX_TRACE 32	/* max stack entries captured per allocation */
/* Per-allocation-type policy for picking caller PCs out of a raw trace. */
typedef void (*save_stack)(struct memstat_caller *caller, const void *data,
					unsigned long *trace, int trace_len);

/*
 * Slab policy: @data carries the slab cache name; the caller PC sits at
 * fixed depth 3 (slab_alloc_node -> kmem_cache_alloc -> caller).
 */
static void __save_slab_stack(struct memstat_caller *caller, const void *data,
					unsigned long *trace, int trace_len)
{
	const char *cache_name = (const char *)data;

	if (cache_name != NULL) {
		/* bounded copy with forced termination */
		strncpy(caller->slab_cache_name, cache_name, MEMSTAT_SLAB_CACHE_NAME);
		caller->slab_cache_name[MEMSTAT_SLAB_CACHE_NAME - 1] = '\0';
	}
	caller->endstack = trace[3]; // 3 in slab type is caller api
}

/*
 * GFP policy: fixed depths — __alloc_pages_pool(1)... ->
 * allocate_slab(4) -> caller(5).
 */
static void __save_gfp_stack(struct memstat_caller *caller, const void *data,
					unsigned long *trace, int trace_len)
{
	caller->keystack = trace[4]; // 4 is alloc page api
	caller->endstack = trace[5]; // 5 is caller api
}

/*
 * Vmalloc policy: @data is the PC recorded by __vmalloc_node_range; the
 * frame right above it in the trace is the real caller.
 */
static void __save_vmalloc_stack(struct memstat_caller *caller, const void *data,
					unsigned long *trace, int trace_len)
{
	unsigned long mark = (unsigned long)data;
	int i;

	if (data == NULL)
		return;

	/* scan the whole trace: the last occurrence wins, as before */
	for (i = 0; i + 1 < trace_len; i++) {
		if (trace[i] == mark) {
			caller->keystack = trace[i];
			caller->endstack = trace[i + 1];
		}
	}
}

/*
 * Capture a stack trace and fill @caller from it.
 * The end_stack is picked by a per-type policy; the top_stack prefers
 * the module frame found by the earlier stack walk, falling back to the
 * middle entry of the raw trace.
 */
static void __save_stack_trace(struct memstat_caller *caller, enum mod_alloc_type type,
				     struct mod_stack *mod_stack_info, const void *data)
{
	unsigned long trace[MEMSTAT_MAX_TRACE] = {0};
	unsigned long depth;

	static save_stack endstack_policy[ALLOC_TYPE_NR] = {
		[ALLOC_TYPE_SLAB] = __save_slab_stack,
		[ALLOC_TYPE_GFP] = __save_gfp_stack,
		[ALLOC_TYPE_VMALLOC] = __save_vmalloc_stack,
	};

	depth = stack_trace_save(trace, MEMSTAT_MAX_TRACE, 6);  /* 6 is skip stat hook */
	endstack_policy[type](caller, data, trace, depth);

	if (mod_stack_info->topstack != 0)
		caller->topstack = mod_stack_info->topstack;
	else
		caller->topstack = trace[depth / 2];
}
#endif

static void memstat_save_caller_info(struct memstat_caller *caller, enum mod_alloc_type type,
				     struct mod_stack *mod_stack_info, const void *data)
{
	caller->mem_type = type;
	if (mod_stack_info->mod != NULL) {
		strncpy(caller->mod_name, mod_stack_info->mod->name, MEMSTAT_MODULE_NAME);
		caller->mod_name[MEMSTAT_MODULE_NAME - 1] = '\0';
	} else {
		strncpy(caller->mod_name, "builtin", MEMSTAT_MODULE_NAME);
	}
#ifdef CONFIG_STACKTRACE
	__save_stack_trace(caller, type, mod_stack_info, data);
#endif
}

/*
 * Allocate and zero a new allocation record for [addr, addr + size),
 * owned by @usage.  Returns NULL on allocation failure.
 */
static struct mod_alloc_stat *__mod_stat_alloc(struct module_mem_usage *usage,
					       enum mod_alloc_type type,
					       void *addr, size_t size)
{
	struct mod_alloc_stat *stat =
		(struct mod_alloc_stat *)liblinux_pal_malloc(sizeof(*stat));

	if (stat == NULL) {
		pr_err("alloc mod_alloc_stat failed\n");
		return NULL;
	}
	memset(stat, 0, sizeof(*stat));

	stat->usage = usage;
	stat->mem_node.addr = addr;
	stat->mem_node.size = size;
	INIT_LIST_HEAD(&stat->list);
	/* gfp records live in their own tree; everything else shares stat_tree */
	stat->tree = (type == ALLOC_TYPE_GFP) ? &gfp_tree : &stat_tree;

	return stat;
}

/*
 * Create a record for a fresh allocation, fill in the caller info, and
 * insert it into the tree matching its type.
 */
static void mod_alloc_insert(struct module_mem_usage *usage,
			     enum mod_alloc_type type,
			     void *addr, size_t size,
			     const void *data,
			     struct mod_stack *stack_info)
{
	struct mod_alloc_stat *stat = __mod_stat_alloc(usage, type, addr, size);

	if (stat == NULL)
		return;

	memstat_save_caller_info(&stat->caller, type, stack_info, data);
	/* __mod_stat_alloc already selected the tree for this type */
	__mod_alloc_insert(stat->tree, usage, stat);
}

/*
 * Find the record whose range fully contains [addr, addr + size).
 * Returns NULL when nothing matches, or when the request only partially
 * overlaps a record (logged at debug level).  Caller holds the tree lock.
 */
static struct mod_alloc_stat *__lookup_stat(struct rb_root *root, void *addr, size_t size)
{
	struct rb_node *rb = root->rb_node;
	struct mod_alloc_stat *stat = NULL;

	while (rb) {
		stat = rb_entry(rb, struct mod_alloc_stat, node);
		if (addr + size <= stat->mem_node.addr) {
			rb = stat->node.rb_left;
		} else if (stat->mem_node.addr + stat->mem_node.size <= addr) {
			rb = stat->node.rb_right;
		} else if (stat->mem_node.addr <= addr && addr + size <=
				stat->mem_node.addr + stat->mem_node.size) {
			return stat;
		} else {
			/* partial overlap: treated as not found */
			pr_debug("found stat failed\n");
			break;
		}
	}
	return NULL;
}

/*
 * Account a free of [addr, addr + size): remove the matching record and,
 * for partial frees, reinsert the surviving head and/or tail pieces.
 */
static void __mod_free_remove(struct mod_rb_tree *rb_tree, void *addr, size_t size)
{
	struct mod_alloc_stat *stat = NULL;
	struct mod_alloc_stat *new_stat = NULL;
	size_t new_size = 0;

	spin_lock(&rb_tree->lock);
	stat = __lookup_stat(&rb_tree->root, addr, size);
	if (stat == NULL) {
		pr_debug("stat is not exist\n");
		goto out;
	}

	rb_erase(&stat->node, &rb_tree->root);
	spin_lock(&stat->usage->stat_splock);
	list_del(&stat->list);
	spin_unlock(&stat->usage->stat_splock);

	if (stat->mem_node.addr == addr && stat->mem_node.size == size) {
		/* exact match: drop the whole record */
		liblinux_pal_free(stat);
		atomic_dec(&alloc_stat_num);
	} else if (stat->mem_node.addr == addr && stat->mem_node.size > size) {
		/* freed a prefix: keep the tail */
		stat->mem_node.addr = addr + size;
		stat->mem_node.size -= size;
		__mod_alloc_insert_nolock(stat->tree, stat->usage, stat);
	} else if (stat->mem_node.addr < addr &&
			stat->mem_node.addr + stat->mem_node.size == addr + size) {
		/* freed a suffix: keep the head */
		stat->mem_node.size -= size;
		__mod_alloc_insert_nolock(stat->tree, stat->usage, stat);
	} else if (stat->mem_node.addr < addr && stat->mem_node.size > size) {
		/*
		 * Freed a middle slice: shrink @stat to the head and create
		 * a new record for the tail.  Snapshot the fields first —
		 * __mod_alloc_insert_nolock() frees its argument on overlap,
		 * so @stat must not be read after reinsertion.  (The old
		 * code also dereferenced new_stat before its NULL check.)
		 */
		struct mod_rb_tree *tree = stat->tree;
		struct module_mem_usage *usage = stat->usage;
		struct memstat_caller caller = stat->caller;

		new_size = stat->mem_node.size - (addr + size - stat->mem_node.addr);
		stat->mem_node.size = addr - stat->mem_node.addr;
		__mod_alloc_insert_nolock(tree, usage, stat);

		new_stat = __mod_stat_alloc(usage, caller.mem_type, addr + size,
					    new_size);
		if (new_stat == NULL) {
			pr_err("remove stat error\n");
			goto out;
		}
		new_stat->caller = caller;
		__mod_alloc_insert_nolock(tree, usage, new_stat);
	} else {
		pr_err("addr or size is illegal\n");
	}
out:
	spin_unlock(&rb_tree->lock);
}

/* Route the free to the tree holding records of @type. */
static void mod_free_remove(enum mod_alloc_type type, void *addr, size_t size)
{
	struct mod_rb_tree *tree =
		(type == ALLOC_TYPE_GFP) ? &gfp_tree : &stat_tree;

	__mod_free_remove(tree, addr, size);
}

/*
 * Common allocation hook: resolve the calling module from the stack and
 * record the allocation under its usage entry.  When no entry matches
 * (builtin caller, mod == NULL), the loop runs off the end and `usage`
 * resolves to the list-head container &mod_mem_usage — the builtin
 * bucket.  Intentional, but subtle.  alloc_num is raised around the
 * hook; __switch_stack_to_name() checks it to skip symbolization.
 * NOTE(review): `usage` is used after the read lock is dropped; a
 * concurrent module unload could free it — confirm the hook and the
 * module notifier cannot race here.
 */
static void __mod_alloc_hook(enum mod_alloc_type type, void *addr, size_t size,
			     const void *data)
{
	struct module_mem_usage *usage = NULL;
	struct mod_stack mod_stack = {0};

	atomic_inc(&alloc_num);
	__resolve_stack_mod(&mod_stack);
	read_lock(&mod_mem_usage_rwlock);
	list_for_each_entry(usage, &mod_mem_usage.list, list) {
		if (usage->alive && usage->mod == mod_stack.mod)
			break;
	}
	read_unlock(&mod_mem_usage_rwlock);

	mod_alloc_insert(usage, type, addr, size, data, &mod_stack);
	atomic_dec(&alloc_num);
}

/* Per-event thin wrappers connecting the liblinux hooks to the tracker. */
static void __slab_alloc_hook(void *addr, size_t size, const void *data)
{
	__mod_alloc_hook(ALLOC_TYPE_SLAB, addr, size, data);
}
static void __slab_free_hook(void *addr, size_t size, const void *data)
{
	mod_free_remove(ALLOC_TYPE_SLAB, addr, size);
}

static void __page_alloc_hook(void *addr, size_t size, const void *data)
{
	__mod_alloc_hook(ALLOC_TYPE_GFP, addr, size, data);
}
static void __page_free_hook(void *addr, size_t size, const void *data)
{
	mod_free_remove(ALLOC_TYPE_GFP, addr, size);
}

static void __vmalloc_alloc_hook(void *addr, size_t size, const void *data)
{
	__mod_alloc_hook(ALLOC_TYPE_VMALLOC, addr, size, data);
}
static void __vmalloc_free_hook(void *addr, size_t size, const void *data)
{
	mod_free_remove(ALLOC_TYPE_VMALLOC, addr, size);
}

/* Dispatch table indexed by liblinux DFX event id. */
typedef void (*memstat_hook_t)(void *addr, size_t size, const void *data);
static memstat_hook_t __liblinux_mem_hooks[] = {
	[LIBLINUX_DFX_PAGE_ALLOC] = __page_alloc_hook,
	[LIBLINUX_DFX_PAGE_FREE] = __page_free_hook,
	[LIBLINUX_DFX_SLAB_ALLOC] = __slab_alloc_hook,
	[LIBLINUX_DFX_SLAB_FREE] = __slab_free_hook,
	[LIBLINUX_DFX_VMALLOC_ALLOC] = __vmalloc_alloc_hook,
	[LIBLINUX_DFX_VMALLOC_FREE] = __vmalloc_free_hook,
};

/* Entry point registered with liblinux: bounds-check the event, dispatch. */
static void __liblinux_mod_memstat_hook(int ev, void *addr, unsigned long size,
					const void *data)
{
	if (ev <= (int)LIBLINUX_DFX_MEM_MIN || ev >= (int)LIBLINUX_DFX_MEM_MAX)
		return;

	if (__liblinux_mem_hooks[ev] != NULL)
		__liblinux_mem_hooks[ev](addr, size, data);
}

/*
 * Init: set up the builtin usage record and the tree locks, register the
 * module notifier and the liblinux memory hooks, and create the
 * /proc/mod_memstat node.  Gated by memstat_enable; always returns 0
 * (failures are logged but do not abort module load).
 */
static __init int _dfx_enable_modstat(void)
{
	int ret;

	if (!memstat_enable)
		return 0;

	INIT_LIST_HEAD(&mod_mem_usage.list);
	INIT_LIST_HEAD(&mod_mem_usage.stat_list);
	spin_lock_init(&mod_mem_usage.stat_splock);
	mod_mem_usage.alive = true;
	spin_lock_init(&gfp_tree.lock);
	spin_lock_init(&stat_tree.lock);

	/* add self to module track list */
	__mod_mem_usage_cb(&mod_mem_usage_nb, MODULE_STATE_COMING, THIS_MODULE);

	/* register module notifier for futher module syminfo */
	ret = register_module_notifier(&mod_mem_usage_nb);
	if (ret < 0)
		pr_warn("register module mem usage support failed, ret=%d\n", ret);

	liblinux_dfx_mem_hook(__liblinux_mod_memstat_hook);
	liblinux_dfx_vmalloc_hook(__liblinux_mod_memstat_hook);

	/* create procfs node */
	if (proc_create("mod_memstat", 0644, NULL, &proc_mod_slab_operations) == NULL)
		pr_warn("add procfs node for mod_memstat failed\n");

	return 0;
}

/* Move a node onto a local deletion list (freed after all locks drop). */
#define ADD_DEL_LIST(head, node) list_add_tail(node, head)

/* Unlink and free every entry of @type linked through @member on @head. */
#define CLEAR_DEL_LIST(head, type, member)				\
	do {								\
		struct list_head *pos;					\
		struct list_head *n;					\
		type *tmp;						\
		list_for_each_safe(pos, n, head) {			\
			tmp = list_entry(pos, type, member);		\
			list_del(pos);					\
			liblinux_pal_free(tmp);				\
		}							\
	} while (0)

/*
 * Exit: detach hooks and notifier, remove the proc node, then tear down
 * every record.  Nodes are first moved onto local deletion lists while
 * all locks (both tree locks, the usage rwlock, the per-usage stat
 * locks) are held, and only freed after every lock is dropped.
 */
void __exit _dfx_exit_modstat(void)
{
	struct module_mem_usage *usage = NULL;
	struct module_mem_usage *p = NULL;
	struct mod_alloc_stat *stat = NULL;
	struct mod_alloc_stat *tmp = NULL;
	struct list_head del_stat_list;
	struct list_head del_usage_list;

	if (!memstat_enable)
		return;

	liblinux_dfx_mem_hook(NULL);
	liblinux_dfx_vmalloc_hook(NULL);

	unregister_module_notifier(&mod_mem_usage_nb);
	remove_proc_entry("mod_memstat", NULL);

	INIT_LIST_HEAD(&del_stat_list);
	INIT_LIST_HEAD(&del_usage_list);
	/* lock order: gfp tree, stat tree, usage rwlock, per-usage stat lock */
	spin_lock(&gfp_tree.lock);
	spin_lock(&stat_tree.lock);
	write_lock(&mod_mem_usage_rwlock);
	list_for_each_entry_safe(usage, p, &mod_mem_usage.list, list) {
		spin_lock(&usage->stat_splock);
		list_for_each_entry_safe(stat, tmp, &usage->stat_list, list) {
			rb_erase(&stat->node, &stat->tree->root);
			list_del(&stat->list);
			ADD_DEL_LIST(&del_stat_list, &stat->list);
		}
		list_del(&usage->list);
		ADD_DEL_LIST(&del_usage_list, &usage->list);
		spin_unlock(&usage->stat_splock);
		atomic_dec(&alloc_usage_num);
	}
	write_unlock(&mod_mem_usage_rwlock);

	/* builtin bucket is not a list member; drain it separately */
	spin_lock(&mod_mem_usage.stat_splock);
	list_for_each_entry_safe(stat, tmp, &mod_mem_usage.stat_list, list) {
		rb_erase(&stat->node, &stat->tree->root);
		list_del(&stat->list);
		ADD_DEL_LIST(&del_stat_list, &stat->list);
	}
	spin_unlock(&mod_mem_usage.stat_splock);
	spin_unlock(&stat_tree.lock);
	spin_unlock(&gfp_tree.lock);
	CLEAR_DEL_LIST(&del_stat_list, struct mod_alloc_stat, list);
	CLEAR_DEL_LIST(&del_usage_list, struct module_mem_usage, list);
}

/*
 * NOTE(review): memstat_enable is declared `extern int` above but
 * registered here with type `uint` — confirm the defining translation
 * unit uses an unsigned type, otherwise the param type should be `int`.
 */
module_param(memstat_enable, uint, 0644);
MODULE_PARM_DESC(memstat_enable, "enable memstat");

module_init(_dfx_enable_modstat);
module_exit(_dfx_exit_modstat);

MODULE_LICENSE("GPL");
