#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/debugfs.h>
#include <linux/kernel_stat.h>
#include <linux/task_work.h>
#include <asm/hardirq.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include "include/mm/slab.h"
#include <linux/slub_def.h>
#include <generated/uapi/linux/version.h>
#include "version.h"
#include "data_aware.h"
#include "hook.h"
#include "syms.h"

#ifdef CONFIG_ARM64
#define cpu_smt_mask(cpu)       cpumask_of(cpu)
#endif

extern unsigned int nr_cpu_ids;
/* Global module state, controlled through sysctl knobs. */
unsigned int sysctl_module_disable = 0;// set once at unload time; permanently disables all hooks
unsigned int sysctl_module_enable = 0; // master enable switch, toggled via sysctl
unsigned int sysctl_module_debug = 0;  // debug mode; value 1 suppresses stat collection (see module_is_enable)
unsigned int sysctl_module_data = 0;   // selects clearing data vs. reading stats -- TODO confirm exact semantics
unsigned int sysctl_module_print = 0;  // print struct info for debug
unsigned int sysctl_module_block_enable = 0;  //enable stat block for Throughput
struct func_latency sysctl_data[TRACK_SYSCALL_NUM];
/* Baseline snapshots subtracted from cumulative kernel counters. */
static unsigned long irq_stat_data[4];
static unsigned long pagefault_stat_data;
static unsigned long access_vm_stat_time; // fuzzy stat, for access is fewer by proc/cmdline etc.
static unsigned long access_vm_stat_num; // fuzzy stat, for access is fewer by proc/cmdline etc.
char sysctl_module_process_comm[NAME_MAX] = "0"; // comm filter; "0" means trace all processes

/*
 * Per-cpu / per-node / per-syscall counter arrays. These are pointers,
 * presumably allocated elsewhere at init -- several hooks NULL-check a
 * subset of them before dereferencing.
 */
static unsigned long *stat_cpu_idle;
static unsigned long *stat_cpu_num;
static unsigned long *stat_node_num;
static unsigned long **stat_sys_num;
static unsigned long **stat_sys_time;
static unsigned long **stat_sys_time_block;
static unsigned long *stat_func_num;
static unsigned long *stat_func_time;
static unsigned long *stat_func_time_block;
static unsigned long *stat_pagefault_num;
static unsigned long *stat_pagefault_time;
static unsigned long *stat_pagefault_time_block;
static unsigned long *stat_allocpage_num;
static unsigned long *stat_allocpage_time_block;
static unsigned long *stat_slub_alloc_num;
static unsigned long *stat_slub_alloc_size;
static unsigned long *stat_slub_alloc_time_block;
static unsigned long *stat_bio_num;
static unsigned long *stat_bio_size;
static unsigned long *stat_submit_bio_time;
static unsigned long *stat_submit_bio_time_block;
static unsigned long *stat_dispatch_time_block;
static unsigned long *stat_end_bio_time;
static unsigned long *stat_end_bio_time_block;
static unsigned long *stat_bio_time_block;
static unsigned long *stat_bio_disk_num;
static unsigned long *stat_bio_disk_blocknum;
static int *nr_running_per_node;
static int *idle_cpu_stat;
static char *idle_cpu_stat_char;
/* Scratch buffers used when aggregating per-cpu syscall stats. */
static unsigned long *stat_sys_num_tmp;
static long *stat_sys_time_tmp;
static long *stat_sys_time_tmp_block;
static struct func_latency *data_ptr;

/* Accessors for the per-task scratch word (current->numa_faults_locality[2]):
 * low 3 bits hold event-type flags, the rest holds the syscall nr << 3. */
void set_sys_nr(unsigned long nr);
unsigned long get_sys_nr(void);
void set_func_type(int pos);
unsigned long get_func_type(void);
void clr_func_type(int pos);

/*
 * Decide whether statistics collection is currently active.
 * Disabled when the module is marked for unload, not enabled, or in
 * debug mode 1. When a process-comm filter is configured (anything but
 * the default "0"), only tasks whose comm appears in the filter string
 * are tracked.
 */
bool module_is_enable(void)
{
	bool filtered;

	if (sysctl_module_disable || !sysctl_module_enable ||
	    sysctl_module_debug == 1)
		return false;

	/* "0" means no per-process filter is configured. */
	filtered = strcmp(sysctl_module_process_comm, "0") != 0;
	if (filtered && !strstr(sysctl_module_process_comm, current->comm))
		return false;

	return true;
}

/*
 * Permanently disable all hooks prior to module unload; there is no
 * way to re-enable once this is set (sysctl writes are refused too).
 */
void unload_disable_module(void)
{
	sysctl_module_disable = 1;
}
/*
 * Check whether a logical CPU, belonging to the same core as the
 * current CPU, is idle.
 */
/*
 * Return 1 when @p is @rq's idle task and the runqueue is truly quiescent
 * (nothing runnable and no pending remote wakeups), else 0.
 */
int test_idle_cpu(struct rq *rq, struct task_struct *p)
{
	/* Only the idle task, with an empty runqueue, counts as idle. */
	if (p != rq->idle || rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
#ifdef TK5
	/* A pending remote wakeup means the CPU is about to become busy. */
	if (rq->ttwu_pending)
		return 0;
#else
	if (!llist_empty(&rq->wake_list))
		return 0;
#endif
#endif

	return 1;
}
/*
 * stat scheduler
 */
/*
 * Hook wrapper for pick_next_task(): delegate to the real scheduler, then
 * sample whether this CPU's SMT siblings are idle and bump the per-cpu and
 * per-node pick counters.
 */
struct task_struct * stat_pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags_stat *rf)
{
	int cpu = smp_processor_id(), target, idle = 0;
	struct task_struct *p;

	enter_hook_system();

	p = pick_next_task(rq, prev, rf);

	if (module_is_enable() && stat_cpu_idle && stat_node_num) {
		/* NOTE(review): test_idle_cpu() is handed this CPU's rq for every
		 * SMT sibling; presumably the sibling's own rq was intended --
		 * confirm. stat_cpu_num is also dereferenced without the NULL
		 * check applied to the other two arrays. */
		for_each_cpu(target, cpu_smt_mask(cpu)){
			if (target == cpu)
				continue;
			idle += test_idle_cpu(rq, p);
		}
		stat_cpu_idle[cpu] += idle;
		stat_cpu_num[cpu]++;
		/* Folds node ids into two buckets -- assumes a 2-node layout; TODO confirm. */
		stat_node_num[cpu_to_node(cpu) % 2]++;
	}
	exit_hook_system();

	return p;
}
/* __schedule/prepare_task_switch */
/* support > 5.4.203: has sched_rqm_switch*/
/*
 * Hook for sched_rqm_switch (kernels > 5.4.203): when a tracked task is
 * switched out, fold the elapsed on-CPU interval into the per-task scratch
 * timestamps so the eventual syscall/function/page-fault exit hook can
 * separate on-CPU time from blocked time.
 */
void stat_sched_rqm_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	unsigned long nr = get_sys_nr();
	unsigned long type = 0;

	enter_hook();

	if (!module_is_enable())
		goto out;

	if (sysctl_module_block_enable)
		goto out;

	/*
	 * Fetch the tracked-event type up front: the not_stat path below
	 * consumes it, and reading it only after the swapper goto (as the
	 * old code did) left it uninitialized -- undefined behavior.
	 */
	type = get_func_type();

	/* The idle/swapper threads are never accounted. */
	if (strstr(current->comm, "swapper"))
		goto not_stat;

	/* Syscall in flight: fold the on-CPU interval into slot [1]. */
	if ((type & STAT_SYSCALL_TYPE) != 0 && nr < NR_syscalls && current->numa_faults_locality[0] > 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];
	/* Tracked function in flight: fold the interval into slot [0]. */
	if ((type & STAT_FUNC_TYPE) != 0 && nr < NR_syscalls && current->numa_faults_locality[1] > 0) {
		current->numa_faults_locality[0] += sched_clock() - current->numa_faults_locality[1];
		save_sched_out();
	}
	if ((type & STAT_PAGEFAULT_TYPE) != 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];

not_stat:
	/* Reset the outgoing task's start-of-interval timestamp. */
	if ((type & STAT_FUNC_TYPE) == 0)
		prev->numa_faults_locality[1] = 0;
	else
		prev->numa_faults_locality[0] = 0;

out:
	sched_rqm_switch(rq, prev, next);

	exit_hook();
}

/* support 5.4.119: has no sched_rqm_switch*/
/*
 * Hook for rcu_note_context_switch (5.4.119 kernels without
 * sched_rqm_switch): same accounting as stat_sched_rqm_switch(), but the
 * outgoing task is 'current' here rather than an explicit prev pointer.
 */
void stat_rcu_note_context_switch(bool preempt)
{
	int cpu = smp_processor_id();
	unsigned long nr = get_sys_nr();
	unsigned long type = 0;

	enter_hook();

	if (!module_is_enable())
		goto out;

	if (sysctl_module_block_enable)
		goto out;

	/*
	 * Fetch the type before the swapper goto: the not_stat path below
	 * consumes it, and the old placement left it uninitialized on that
	 * path -- undefined behavior.
	 */
	type = get_func_type();

	if (strstr(current->comm, "swapper"))
		goto not_stat;

	if ((type & STAT_SYSCALL_TYPE) != 0 && nr < NR_syscalls && current->numa_faults_locality[0] > 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];
	if ((type & STAT_FUNC_TYPE) != 0 && nr < NR_syscalls && current->numa_faults_locality[1] > 0) {
		/* NOTE(review): stat_func_time is dereferenced without a NULL
		 * check here -- confirm allocation always precedes enable. */
		stat_func_time[cpu] += sched_clock() - current->numa_faults_locality[1];
		save_sched_out();
	}
	if ((type & STAT_PAGEFAULT_TYPE) != 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];

not_stat:
	if ((type & STAT_FUNC_TYPE) == 0)
		current->numa_faults_locality[1] = 0;
	else
		current->numa_faults_locality[0] = 0;

out:
	rcu_note_context_switch(preempt);

	exit_hook();
}
/* psi_task_switch for tk5: when prev leaves, account its elapsed time; when
 * next arrives, record its start timestamp -- needed because tk5 cannot hook
 * finish_task_switch.
 */
#ifdef TK5
/*
 * Hook for __psi_task_switch (TK5 only): folds the outgoing task's elapsed
 * interval into its scratch timestamps and stamps the incoming task's
 * start-of-interval, then delegates to the real __psi_task_switch.
 */
void stat_psi_task_switch(struct task_struct *prev, struct task_struct *next,
              bool sleep)
{
	int cpu = smp_processor_id();
	unsigned long nr = get_sys_nr();
	unsigned long type;

	enter_hook();

	if (!module_is_enable())
		goto out;

	if (sysctl_module_block_enable)
		goto out;

	type = get_func_type();
	/* Syscall in flight: accumulate the on-CPU interval into slot [1]. */
	if ((type & STAT_SYSCALL_TYPE) != 0  && nr < NR_syscalls && current->numa_faults_locality[0] > 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];
	/* Tracked function in flight: charge the interval to stat_func_time. */
	if ((type & STAT_FUNC_TYPE) != 0 && nr < NR_syscalls && current->numa_faults_locality[1] > 0) {
		stat_func_time[cpu] += sched_clock()  - current->numa_faults_locality[1];
		save_sched_out();
	}
	if ((type & STAT_PAGEFAULT_TYPE) != 0)
		current->numa_faults_locality[1] += sched_clock() - current->numa_faults_locality[0];

	/* Stamp the incoming task's resume time; which slot is used depends
	 * on whether a tracked function is in flight for it. */
	if ((type & STAT_FUNC_TYPE) == 0) {
		next->numa_faults_locality[0] = sched_clock();
		current->numa_faults_locality[1] = 0;
	}
	else {
		next->numa_faults_locality[1] = sched_clock();
		current->numa_faults_locality[0] = 0;
	}

out:
	__psi_task_switch(prev, next, sleep);

	exit_hook();
}
#endif
/*
 * Hook for finish_task_switch (non-TK5 path): after the real switch
 * completes, stamp the incoming task's resume timestamp so later hooks can
 * subtract blocked time.
 * NOTE(review): unlike the other hooks this one does not check
 * module_is_enable(), presumably so resume timestamps stay fresh even while
 * collection is paused -- confirm.
 */
struct rq *stat_finish_task_switch(struct task_struct *prev)
{
	unsigned long type;
	struct rq *rq;

	enter_hook();

	rq = finish_task_switch(prev);

	if (sysctl_module_block_enable)
		goto out;

	type = get_func_type();
	/* Slot [0] is the resume stamp unless a tracked function is in
	 * flight, in which case slot [1] carries it. */
	if ((type & STAT_FUNC_TYPE) == 0) {
		current->numa_faults_locality[0] = sched_clock();
		save_sched_in();
	}
	else
		current->numa_faults_locality[1] = sched_clock();

out:
	exit_hook();

	return rq;
}
/*
 * stat which inode mem is alloced from
 */
/* Per-node counters of pages handed out by the allocator hook. */
static unsigned long *stat_mem_node = NULL;
/*
 * Hook for __alloc_pages_nodemask: forward the allocation, account its
 * latency per-cpu, and record which NUMA node the pages came from.
 */
struct page *stat_alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
                                                         nodemask_t *nodemask)
{
	unsigned long time = 0;
	struct page *page;

	enter_hook_system();
	time = sched_clock();

	page = __alloc_pages_nodemask(gfp_mask, order, preferred_nid, nodemask);

	/*
	 * Guard the per-cpu arrays like the other hooks do (cf. the
	 * stat_pagefault_num check): the hook can fire before the stat
	 * buffers are allocated.
	 */
	if (module_is_enable() && stat_allocpage_num && stat_allocpage_time_block) {
		int nid;
		int cpu = smp_processor_id();

		stat_allocpage_time_block[cpu] += sched_clock() - time;
		stat_allocpage_num[cpu]++;
		if (!page)
			goto out;
		nid = page_to_nid(page);
		if (nid >= 0 && nid < nr_node_ids && stat_mem_node)
			stat_mem_node[nid] += (1UL << order); /* 1UL: avoid int shift overflow */
	}
out:
	exit_hook_system();

	return page;
}
/*
 * Hook for __kmalloc: forward the allocation and account requested size,
 * call count and wall-clock latency per-cpu.
 */
void *stat__kmalloc(size_t size, gfp_t flags)
{
	unsigned long start;
	void *obj;

	enter_hook_system();
	start = sched_clock();

	obj = test__kmalloc(size, flags);

	if (module_is_enable()) {
		int this_cpu = smp_processor_id();

		stat_slub_alloc_num[this_cpu]++;
		stat_slub_alloc_size[this_cpu] += size;
		stat_slub_alloc_time_block[this_cpu] += sched_clock() - start;
	}

	exit_hook_system();
	return obj;
}
/*
 * Hook for __kmalloc_node: same accounting as stat__kmalloc(), with the
 * caller's preferred NUMA node forwarded untouched.
 */
void *stat__kmalloc_node(size_t size, gfp_t flags, int node)
{
	unsigned long start;
	void *obj;

	enter_hook_system();
	start = sched_clock();

	obj = test__kmalloc_node(size, flags, node);

	if (module_is_enable()) {
		int this_cpu = smp_processor_id();

		stat_slub_alloc_num[this_cpu]++;
		stat_slub_alloc_size[this_cpu] += size;
		stat_slub_alloc_time_block[this_cpu] += sched_clock() - start;
	}

	exit_hook_system();
	return obj;
}
/*
 * Hook for kmem_cache_alloc: account the cache's object size, call count
 * and wall-clock latency per-cpu.
 */
void *stat_kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	unsigned long start;
	void *obj;

	enter_hook_system();
	start = sched_clock();

	obj = test_kmem_cache_alloc(s, gfpflags);

	if (module_is_enable() && s) {
		int this_cpu = smp_processor_id();

		stat_slub_alloc_num[this_cpu]++;
		stat_slub_alloc_size[this_cpu] += s->object_size;
		stat_slub_alloc_time_block[this_cpu] += sched_clock() - start;
	}

	exit_hook_system();
	return obj;
}

blk_qc_t stat_submit_bio(struct bio *bio)
{
	blk_qc_t ret;
	unsigned long time;

	enter_hook_system();

	if (module_is_enable() && bio) {
		current->numa_faults_locality[1] = 0;
		time = sched_clock();
		current->numa_faults_locality[0] = time;

	}

	ret = test_submit_bio(bio);

	if (module_is_enable() && bio) {
		int cpu = smp_processor_id();
		struct bio *bio_tmp;
#ifndef TK3
		bio->kabi_reserved1 = 0x55aa;
		bio->kabi_reserved2 = sched_clock();
		for (bio_tmp = bio; bio_tmp; bio_tmp = bio_tmp->bi_next) {
			stat_bio_size[smp_processor_id()] += bio_tmp->bi_iter.bi_size;
			bio_tmp->kabi_reserved1 = 0x55aa;
			bio_tmp->kabi_reserved2 = bio->kabi_reserved2;
		}

		stat_submit_bio_time[cpu] += bio->kabi_reserved2 - current->numa_faults_locality[0] + current->numa_faults_locality[1];
		stat_submit_bio_time_block[cpu] += bio->kabi_reserved2 - time;
#endif
	}

	exit_hook_system();

	return ret;
}
//for tk5
#ifdef TK5
/*
 * Hook for blk_mq_dispatch_rq_list (TK5): for every stamped bio on the
 * dispatch list, charge the time since submission to the dispatch-latency
 * counter, then re-stamp the chain so bio_endio measures dispatch->done.
 * NOTE(review): unlike the tk4 variant below, this one takes no
 * enter/exit_hook_system() reference -- confirm this is intentional.
 */
bool stat_blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
       unsigned int nr_budgets)
{
    bool ret;
    unsigned long time;
    struct request *rq;
    struct bio *bio, *bio_tmp;

    if (module_is_enable()) {
	time = sched_clock();
	list_for_each_entry(rq, list, queuelist) {
	    int cpu = smp_processor_id();
	    if (!rq->bio)
	      continue;

	    /* Only account bios stamped by stat_submit_bio (magic 0x55aa). */
	    bio = rq->bio;
	    if (bio->kabi_reserved2 > 0 && time > bio->kabi_reserved2 && (bio->kabi_reserved1 == 0x55aa))
		  stat_dispatch_time_block[cpu] += time - bio->kabi_reserved2;

	    /* Restart the clock for the dispatch->completion stage. */
	    for (bio_tmp = rq->bio; bio_tmp; bio_tmp = bio_tmp->bi_next)
		   bio_tmp->kabi_reserved2 = sched_clock();
	}
    }
    ret = test_blk_mq_dispatch_rq_list(hctx, list, nr_budgets);

    return ret;
}
//for tk4
#else
/*
 * Hook for blk_mq_get_driver_tag (tk4): for a stamped request, charge the
 * submit->dispatch interval to the dispatch-latency counter, then re-stamp
 * the bio chain so bio_endio measures dispatch->completion.
 */
bool stat_blk_mq_get_driver_tag(struct request *rq)
{
	bool ret;
	unsigned long time;
	struct bio *bio, *bio_tmp;

	enter_hook_system();

	if (module_is_enable() && rq && rq->bio) {
		/* The old code also declared an uninitialized outer 'cpu'
		 * that this declaration shadowed; the duplicate is gone. */
		int cpu = smp_processor_id();

		time = sched_clock();

		/* Only account bios stamped by stat_submit_bio (magic 0x55aa). */
		bio = rq->bio;
		if (bio->kabi_reserved2 > 0 && time > bio->kabi_reserved2 &&
		    bio->kabi_reserved1 == 0x55aa)
			stat_dispatch_time_block[cpu] += time - bio->kabi_reserved2;

		/* Restart the clock for the dispatch->completion stage. */
		for (bio_tmp = rq->bio; bio_tmp; bio_tmp = bio_tmp->bi_next)
			bio_tmp->kabi_reserved2 = sched_clock();
	}

	ret = test_blk_mq_get_driver_tag(rq);

	exit_hook_system();

	return ret;
}
#endif

/*
 * Search slots [start, start + total) of the per-disk tables for @partno.
 * Returns the matching slot if present, otherwise the first free slot
 * (blocknum == 0) in the range, otherwise -1 when the range is full.
 */
int find_index(int start, int total, int partno)
{
	int free_slot = -1;
	int i;

	for (i = start; i < start + total; i++) {
		if (stat_bio_disk_blocknum[i] == partno)
			return i;
		if (free_slot == -1 && stat_bio_disk_blocknum[i] == 0)
			free_slot = i;
	}

	return free_slot;
}
void stat_bio_endio(struct bio *bio)
{
	unsigned long time;

	enter_hook_system();

	if (module_is_enable() && bio) {
		int cpu = smp_processor_id(), num, index;
		int major, partno, first_minor;

		time = sched_clock();
#ifndef TK3
		if (bio->kabi_reserved2 > 0 && time > bio->kabi_reserved2 && (bio->kabi_reserved1 == 0x55aa)) {
			stat_bio_num[cpu]++;
			stat_bio_time_block[cpu] += time - bio->kabi_reserved2;
#ifdef TK5
			if (!bio->bi_bdev || !bio->bi_bdev->bd_disk)
				return;
			major = bio->bi_bdev->bd_disk->major;
			partno = bio->bi_bdev->bd_partno;
			first_minor = bio->bi_bdev->bd_disk->first_minor;
#else
			major = bio->bi_disk->major;
			partno = bio->bi_partno;
			first_minor = bio->bi_disk->first_minor;
#endif
			switch (major) {
			case DISK_SDA:
				index = find_index(0, DISK_SDA_NUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_HDD:
				index = find_index(DISK_SDA_NUM, DISK_HDD_SUBNUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_HDD1:
				index = find_index(DISK_SDA_NUM + DISK_HDD_SUBNUM, DISK_HDD_SUBNUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_HDD2:
				index = find_index(DISK_SDA_NUM + 2 * DISK_HDD_SUBNUM, DISK_HDD_SUBNUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_HDD3:
				index = find_index(DISK_SDA_NUM + 3 * DISK_HDD_SUBNUM, DISK_HDD_SUBNUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_DM:
				index = find_index(DISK_SDA_NUM + DISK_HDD_NUM, DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_VD:
				index = find_index(DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM, DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM + DISK_VD_NUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			case DISK_NVME:
				index = find_index(DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM + DISK_VD_NUM, DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM + DISK_VD_NUM + DISK_NVME_NUM, partno);
				if (index < 0)
				  break;
				stat_bio_disk_num[index]++;
				stat_bio_disk_blocknum[index] = partno;
				break;
			default:
				break;
			}

			bio->kabi_reserved1 = 0;
			bio->kabi_reserved2 = 0;
		}
		time = sched_clock();
		current->numa_faults_locality[1] = 0;
		current->numa_faults_locality[0] = time;
#endif
	}

	test_bio_endio(bio);

	if (module_is_enable()) {
		stat_end_bio_time[smp_processor_id()] += sched_clock() - current->numa_faults_locality[0] + current->numa_faults_locality[1];
		stat_end_bio_time_block[smp_processor_id()] += sched_clock() - time;
	}

	exit_hook_system();
}
#ifdef TK5
/* page fault for tk5*/
/*
 * Hook for handle_mm_fault (TK5): stamp the fault start in the task's
 * scratch timestamps, run the real fault handler, then account per-cpu
 * fault count, on-CPU latency and wall-clock latency.
 * NOTE(review): the type flag and timestamps are set before the
 * module_is_enable() check -- presumably so the scheduler hooks can fold
 * blocked time in even for the first enabled fault; confirm.
 */
vm_fault_t stat_handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
                unsigned int flags, struct pt_regs *regs)
{
	vm_fault_t ret;
	unsigned long time;

	enter_hook_system();

	current->numa_faults_locality[1] = 0;

	set_func_type(STAT_PAGEFAULT_TYPE);
	time = sched_clock();
	current->numa_faults_locality[0] = time;

	ret = handle_mm_fault(vma, address, flags, regs);

	if (module_is_enable() && stat_pagefault_num) {
		int cpu = smp_processor_id();
		stat_pagefault_num[cpu]++;
		/* On-CPU time: wall time minus intervals accumulated in [1]. */
		stat_pagefault_time[cpu] += sched_clock() - current->numa_faults_locality[0] + current->numa_faults_locality[1];
		stat_pagefault_time_block[cpu] += sched_clock() - time;
	}
	/* Clear the whole scratch word (type flags and syscall nr). */
	current->numa_faults_locality[2] = 0;

	exit_hook_system();

	return ret;
}
#endif
/* page fault for tk4*/
/*
 * Hook for do_page_fault (tk4): same accounting scheme as the TK5
 * handle_mm_fault hook above -- stamp start, run the real handler, then
 * record per-cpu count, on-CPU latency and wall-clock latency.
 */
void stat_do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	unsigned long time;

	enter_hook_system();

	current->numa_faults_locality[1] = 0;

	set_func_type(STAT_PAGEFAULT_TYPE);
	time = sched_clock();
	current->numa_faults_locality[0] = time;

	do_page_fault(regs, error_code, address);

	if (module_is_enable() && stat_pagefault_num) {
		int cpu = smp_processor_id();
		stat_pagefault_num[cpu]++;
		/* On-CPU time: wall time minus intervals accumulated in [1]. */
		stat_pagefault_time[cpu] += sched_clock() - current->numa_faults_locality[0] + current->numa_faults_locality[1];
		stat_pagefault_time_block[cpu] += sched_clock() - time;
	}
	/* Clear the whole scratch word (type flags and syscall nr). */
	current->numa_faults_locality[2] = 0;

	exit_hook_system();
}

/*
 * Hook for access_remote_vm (/proc/$pid/cmdline, mem, environ, ...):
 * forward the access and keep a fuzzy global count/latency -- these
 * counters are deliberately unsynchronized since the call is rare.
 */
int stat_access_remote_vm(struct mm_struct *mm, unsigned long addr,
         void *buf, int len, unsigned int gup_flags)
{
	unsigned long start = sched_clock();
	int nread;

	nread = test_access_remote_vm(mm, addr, buf, len, gup_flags);

	if (module_is_enable()) {
		access_vm_stat_num++;
		access_vm_stat_time += sched_clock() - start;
	}

	return nread;
}

/*
 * stat syscall
 */
/*
 * Stash the current syscall number in the task's scratch word; the low 3
 * bits are reserved for the event-type flags, hence the shift.
 */
void set_sys_nr(unsigned long nr)
{
	current->numa_faults_locality[2] = nr << 3;
}
/* Recover the syscall number stored by set_sys_nr(). */
unsigned long get_sys_nr(void)
{
	return current->numa_faults_locality[2] >> 3;
}
/* Set an event-type flag bit (STAT_*_TYPE) in the task's scratch word. */
void set_func_type(int pos)
{
	current->numa_faults_locality[2] |= pos;
}
/* Return the low 3 event-type flag bits of the task's scratch word. */
unsigned long get_func_type(void)
{
	return current->numa_faults_locality[2] & 0x07;
}
/*
 * Clear an event-type flag bit in the task's scratch word.
 * The old implementation used XOR, which *sets* the bit when it was not
 * already set (e.g. on an unbalanced enter/exit); mask it off instead so
 * the operation is a true, idempotent clear.
 */
void clr_func_type(int pos)
{
	current->numa_faults_locality[2] &= ~pos;
}
/*
 * Syscall entry probe: flag a syscall in flight, record the on-CPU start
 * stamp in [0], clear the blocked-time accumulator [1], and keep the wall
 * start in numa_pages_migrated for the block-latency calculation.
 */
void stat_stat_syscall_enter(struct kret_data *data)
{
	set_func_type(STAT_SYSCALL_TYPE);
	current->numa_faults_locality[0] = sched_clock();
	current->numa_faults_locality[1] = 0;
	current->numa_pages_migrated = sched_clock();

	return;
}
/*
 * Syscall exit probe: account per-cpu call count, on-CPU latency
 * (wall time minus the scheduled-out intervals folded into [1]) and
 * wall-clock latency for syscall @nr, then reset the scratch state.
 */
void stat_stat_syscall_exit(unsigned long nr, struct kret_data *data)
{
	int cpu = smp_processor_id();
	unsigned long nr_tmp = nr, time2;

	time2 = sched_clock();
	if (module_is_enable() && nr_tmp < NR_syscalls) {
		/* Clamp under speculation before indexing the stat tables. */
		nr_tmp = array_index_nospec(nr, NR_syscalls);
		if (nr_tmp < NR_syscalls) {
			stat_sys_num[cpu][nr_tmp]++;
			stat_sys_time[cpu][nr_tmp] += time2 - current->numa_faults_locality[0] + current->numa_faults_locality[1];
			stat_sys_time_block[cpu][nr_tmp] += time2 - current->numa_pages_migrated;
		}
		current->numa_faults_locality[0] = 0;
		current->numa_faults_locality[2] = 0;
	}

	return;
}
/*
 * Tracked-function entry probe. total_numa_faults acts as a recursion
 * depth counter: only the outermost entry records timestamps.
 * For function tracking, [1] holds the on-CPU start stamp and [0]
 * accumulates scheduled-out corrections (the reverse of syscall tracking).
 */
void stat_func_enter(struct kret_data *data)
{
	current->total_numa_faults++;
	if (current->total_numa_faults > 1)
	  return;

	set_func_type(STAT_FUNC_TYPE);
	current->numa_pages_migrated = sched_clock();
	current->numa_faults_locality[1] = sched_clock();
	current->numa_faults_locality[0] = 0;

	return;
}
/*
 * Tracked-function exit probe: only the outermost exit (depth back to 0)
 * accounts per-cpu count, on-CPU latency and wall-clock latency, after a
 * sanity check that the timestamps are monotonically ordered.
 */
void stat_func_exit(struct kret_data *data)
{
	int cpu = smp_processor_id();
	unsigned long time2;

	time2 = sched_clock();

	current->total_numa_faults--;
	if (current->total_numa_faults > 0)
	  return;

	if (module_is_enable() && stat_sys_num) {
		/* Guard against clock skew / missed-enter races. */
		if (time2 >= current->numa_faults_locality[1] && current->numa_faults_locality[1] >= current->numa_pages_migrated) {
			stat_func_num[cpu]++;
			stat_func_time[cpu] += time2 - current->numa_faults_locality[1] + current->numa_faults_locality[0];
			stat_func_time_block[cpu] += time2 - current->numa_pages_migrated;
		}
		clr_func_type(STAT_FUNC_TYPE);
		current->numa_faults_locality[1] = 0;
	}

	return;
}

/*
 * Fill *data with the full latency report: the PRINT_SYSCALL_NUM syscalls
 * with the highest call counts, followed by fixed rows (access_remote_vm,
 * irq, page fault, slab, per-node page allocation, per-node runqueue
 * occupancy, block-layer stages) and the per-disk completion table.
 * NOTE(review): *data must hold at least PRINT_SYSCALL_NUM + the extra
 * rows + DISK_TOTAL_NUM entries -- confirm against data_ptr's allocation.
 */
void sys_get_latency(struct func_latency **data)
{
	int i, j, k, cpu, busy = 0;
	/* syscall_max[j] = { syscall nr, call count } of the j-th busiest. */
	unsigned long syscall_max[PRINT_SYSCALL_NUM][2] = {0};
	unsigned long func_num = 0, func_time = 0, func_block_time = 0, alloc_page_block_time = 0;
	unsigned long bio_block_time = 0, bio_num = 0, allloc_page_num = 0, bio_size = 0;
	unsigned long slub_block_time = 0, slub_num = 0, slub_size = 0;
	unsigned long submit_bio_time = 0, end_bio_time = 0, submit_bio_time_block = 0, end_bio_time_block = 0;
	unsigned long dispatch_bio_time_block = 0;
	unsigned long irq_time = 0, softirq_time = 0, irq_num = 0, softirq_num = 0, pagefault_hit = 0;
	int nid, idle_cur = 0, idle_smt = 0, target;
	size_t len_tmp = sizeof(struct func_latency); /* NOTE(review): unused */

	if (!data)
		return;

	/* Aggregate per-cpu syscall stats and keep a sorted (by count)
	 * top-PRINT_SYSCALL_NUM list via insertion into syscall_max. */
	memset(stat_sys_num_tmp, 0, 3 * NR_syscalls * sizeof(unsigned long));
	for(i = 0; i < NR_syscalls; i++) {
		for_each_possible_cpu(cpu) {
			stat_sys_num_tmp[i] += stat_sys_num[cpu][i];
			stat_sys_time_tmp[i] += stat_sys_time[cpu][i];
			stat_sys_time_tmp_block[i] += stat_sys_time_block[cpu][i];
		}
		for (j = 0; j < PRINT_SYSCALL_NUM; j++) {
			if (stat_sys_num_tmp[i] > syscall_max[j][1]) {
				/* Shift lower-ranked entries down one slot. */
				for (k = PRINT_SYSCALL_NUM - 1; k > j; k--) {
					syscall_max[k][0] = syscall_max[k - 1][0];
					syscall_max[k][1] = syscall_max[k - 1][1];
				}
				syscall_max[j][0] = i;
				syscall_max[j][1] = stat_sys_num_tmp[i];
				break;
			}
		}
	}

	/* Sum all remaining per-cpu counters in one pass. */
	for_each_possible_cpu(cpu) {
		struct kernel_cpustat *kcs = &kcpustat_cpu(cpu);
#ifdef CONFIG_VM_EVENT_COUNTERS
		struct vm_event_state *vm_stat = &per_cpu(vm_event_states, cpu);
#endif
		func_num += stat_pagefault_num[cpu];
		func_time += stat_pagefault_time[cpu];
		func_block_time += stat_pagefault_time_block[cpu];
		allloc_page_num += stat_allocpage_num[cpu];
		alloc_page_block_time += stat_allocpage_time_block[cpu];
		slub_size += stat_slub_alloc_size[cpu];
		slub_block_time += stat_slub_alloc_time_block[cpu];
		slub_num += stat_slub_alloc_num[cpu];
		bio_num += stat_bio_num[cpu];
		bio_size += stat_bio_size[cpu];
		bio_block_time += stat_bio_time_block[cpu];
		submit_bio_time += stat_submit_bio_time[cpu];
		submit_bio_time_block += stat_submit_bio_time_block[cpu];
		dispatch_bio_time_block += stat_dispatch_time_block[cpu];
		end_bio_time += stat_end_bio_time[cpu];
		end_bio_time_block += stat_end_bio_time_block[cpu];
		irq_time +=  kcs->cpustat[CPUTIME_IRQ];
		softirq_time +=  kcs->cpustat[CPUTIME_SOFTIRQ];
#ifdef CONFIG_X86_64
		irq_num +=  kstat_cpu_irqs_sum(cpu) + stat_arch_irq_stat_cpu(cpu);
#endif

		for (i = 0; i < NR_SOFTIRQS; i++)
			softirq_num += kstat_softirqs_cpu(i, cpu);
#ifdef CONFIG_VM_EVENT_COUNTERS
		pagefault_hit += vm_stat->event[PGMAJFAULT];
#endif
	}
#ifdef CONFIG_X86_64
	irq_num += stat_arch_irq_stat();
#endif

	/* Top syscall rows: average latencies plus resolved symbol name. */
	for (i = 0; i < PRINT_SYSCALL_NUM; i++) {
		(*data)[i].nr = syscall_max[i][0];
		(*data)[i].num = syscall_max[i][1];
		(*data)[i].latency = 0;
		(*data)[i].block_latency = 0;
		if (syscall_max[i][1]) {
			(*data)[i].latency = stat_sys_time_tmp[(*data)[i].nr] / syscall_max[i][1];
			(*data)[i].block_latency = stat_sys_time_tmp_block[(*data)[i].nr] / syscall_max[i][1];
			/* On-CPU latency can never exceed wall latency; clamp. */
			if ((*data)[i].latency > (*data)[i].block_latency)
				(*data)[i].latency = (*data)[i].block_latency;
		}
#ifdef TK5
		sprintf((*data)[i].func, "%pS", stat_sys_call_table[syscall_max[i][0]]);
#else
		sprintf((*data)[i].func, "%pF", stat_sys_call_table[syscall_max[i][0]]);
#endif
	}

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 0 */
	(*data)[i].nr = 0;
	(*data)[i].num = access_vm_stat_num;
	if (access_vm_stat_num) {
		(*data)[i].latency = 0;
		(*data)[i].block_latency = access_vm_stat_time / access_vm_stat_num;
	}
	sprintf((*data)[i++].func, "access vm stat, for /proc/$pid/cmdline,/proc/$pid/mem, /proc/$pid/environ etc.");

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 1 -- deltas vs. the snapshot
	 * kept in irq_stat_data[]. */
	(*data)[i].nr = irq_num - irq_stat_data[2];
	(*data)[i].num = softirq_num - irq_stat_data[3];
	if (irq_num) {
		(*data)[i].latency = irq_time - irq_stat_data[0];
		(*data)[i].block_latency = softirq_time - irq_stat_data[1];
	}
	sprintf((*data)[i++].func, "irq stat");


	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 2 */
	(*data)[i].nr = func_num;
	(*data)[i].num = pagefault_hit - pagefault_stat_data;
	if (func_num) {
		(*data)[i].latency = func_time / func_num;
		(*data)[i].block_latency = func_block_time / func_num;
	}
	strcpy((*data)[i++].func, "do_page_fault");

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 3 */
	(*data)[i].nr = slub_num;
	(*data)[i].num = slub_size >> 10;
	if (slub_num) {
		(*data)[i].latency = 0;
		(*data)[i].block_latency = slub_block_time / slub_num;
	}
	strcpy((*data)[i++].func, "slub:__kmalloc/__kmalloc_node/kmemcachealloc");

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 4 , +5 -- at most two nodes.
	 * NOTE(review): stat_mem_node is dereferenced here without the NULL
	 * check the allocation hook applies -- confirm ordering. */
	j = 0;
	for_each_node(nid) {
		(*data)[i].nr = nid;
		(*data)[i].num = (stat_mem_node[nid] * PAGE_SIZE) >> 10;
		(*data)[i].latency = allloc_page_num;
		if (allloc_page_num)
			(*data)[i].block_latency = alloc_page_block_time / allloc_page_num;
		sprintf((*data)[i].func, "alloc pages, total nodes:%d", nr_node_ids);
		i++;
		if (j++ >= 1)
			break;
	}
	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 5 -- keep row layout fixed on
	 * single-node systems by skipping the second node slot. */
	if (j == 1)
		i++;

	/* Recompute per-node runqueue occupancy and SMT idleness. */
	memset(nr_running_per_node, 0, nr_node_ids * sizeof(int));
	memset(idle_cpu_stat, 0, nr_cpu_ids * sizeof(int));
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		idle_smt = 0;
		idle_cur = 0;
		for_each_cpu(target, cpu_smt_mask(cpu)){
			if (idle_cpu_stat[target] == 0)
#ifdef TK5
				nr_running_per_node[nid] += per_cpu(*runqueues, target).nr_running;
#else
				nr_running_per_node[nid] += cpu_rq(target)->nr_running;
#endif
			if (target == cpu)
				idle_cur = idle_cpu(target);
			else
				idle_smt += idle_cpu(target);
		}
		/* 1 = fully busy core, 2 = partially idle, 3 = fully idle. */
		if (!idle_cur && !idle_smt) {
			idle_cpu_stat[cpu] = 1;
			busy++;
		}
		if (idle_cur || idle_smt)
			idle_cpu_stat[cpu] = 2;
		if (idle_cur && idle_smt)
				idle_cpu_stat[cpu] = 3;
	}
	j = 0;
	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 6, +7 -- at most two nodes. */
	for_each_node(nid) {
		(*data)[i].nr = nid;
		(*data)[i].num = nr_running_per_node[nid];
		(*data)[i].latency = busy;
		(*data)[i].block_latency = 0;
		strcpy((*data)[i].func, "rq running per node");
		i++;
		if (++j > 1)
			break;
	}

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 7 -- keep row layout fixed. */
	if (j == 1)
		i++;

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 8 */
	(*data)[i].nr = 0x00;
	(*data)[i].num = bio_num;
	if (bio_num) {
		(*data)[i].latency = submit_bio_time / bio_num;
		(*data)[i].block_latency = submit_bio_time_block / bio_num;
	}
	strcpy((*data)[i++].func, "submit_bio");

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 9 */
	(*data)[i].nr = 0x00;
	(*data)[i].num = bio_num;
	if (bio_num) {
		(*data)[i].latency = end_bio_time / bio_num;
		(*data)[i].block_latency = end_bio_time_block / bio_num;
	}
	strcpy((*data)[i++].func, "bio_endio");

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 10 */
	(*data)[i].nr = 0x00;
	(*data)[i].num = bio_num;
	if (bio_num) {
		(*data)[i].latency = 0;
		(*data)[i].block_latency = dispatch_bio_time_block / bio_num;
	}
	sprintf((*data)[i++].func, "submit_bio after -> pre blk_mq_dispatch_rq_list,  bytes:%ld", bio_size);

	/* PRINT_MORE_NUM:PRINT_SYSCALL_NUM + 11 */
	(*data)[i].nr = 0x00;
	(*data)[i].num = bio_num;
	if (bio_num) {
		(*data)[i].latency = 0;
		(*data)[i].block_latency = bio_block_time / bio_num;
	}
	sprintf((*data)[i++].func, "pre blk_mq_dispatch_rq_list -> pre bio_endio,     bytes:%ld", bio_size);


	/* DISK_TOTAL_NUM :0 ~ DISK_TOTAL_NUM -- the slot ranges mirror the
	 * layout used by find_index()/stat_bio_endio(). */
	for(j = 0; j < DISK_TOTAL_NUM; j++) {
		if (j < DISK_SDA_NUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "sdx stat:%d:%d", DISK_SDA, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + DISK_HDD_SUBNUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "hdd stat:%d:%d", DISK_HDD, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + 2 * DISK_HDD_SUBNUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "hdd stat:%d:%d", DISK_HDD1, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + 3 * DISK_HDD_SUBNUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "hdd stat:%d:%d", DISK_HDD2, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + 4 * DISK_HDD_SUBNUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "hdd stat:%d:%d", DISK_HDD3, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "dm stat:%d:%d", DISK_DM, stat_bio_disk_blocknum[j]);
		}
		else if (j < DISK_SDA_NUM + DISK_HDD_NUM + DISK_DM_NUM + DISK_VD_NUM && stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "vdx stat:%d:%d", DISK_VD, stat_bio_disk_blocknum[j]);
		}
		else if (stat_bio_disk_num[j] > 0) {
			(*data)[j+i].nr = 0x00;
			(*data)[j+i].num = stat_bio_disk_num[j];
			sprintf((*data)[j+i].func, "nvme stat:%d:%d", DISK_NVME, stat_bio_disk_blocknum[j]);
		}
	}

	return;
}

/*
 * First-pass report: pick the PRINT_SYSCALL_NUM - 1 syscalls with the
 * highest average latency (compared by cross-multiplication to avoid
 * division) and fill in their per-call latencies, plus a final
 * do_page_fault row.
 */
void sys_get_latency_first(struct func_latency (*data)[PRINT_SYSCALL_NUM])
{
	int i, j, k, cpu;
	/* [0]=nr, [1]=total time, [2]=total block time, [3]=call count */
	unsigned long syscall_max[PRINT_SYSCALL_NUM][4] = {0};
	unsigned long func_num = 0, func_time = 0, func_block_time = 0;

	if (!data)
		return;

	memset(stat_sys_num_tmp, 0, 3 * NR_syscalls * sizeof(unsigned long));
	for (i = 0; i < NR_syscalls; i++) {
		for_each_possible_cpu(cpu) {
			stat_sys_num_tmp[i] += stat_sys_num[cpu][i];
			stat_sys_time_tmp[i] += stat_sys_time[cpu][i];
			stat_sys_time_tmp_block[i] += stat_sys_time_block[cpu][i];
		}
		for (j = 0; j < PRINT_SYSCALL_NUM; j++) {
			/* avg(i) > avg(slot j), compared without dividing. */
			if (stat_sys_time_tmp[i] * stat_sys_num_tmp[syscall_max[j][0]] > syscall_max[j][1] * stat_sys_num_tmp[i]) {
				for (k = PRINT_SYSCALL_NUM - 1; k > j; k--) {
					syscall_max[k][0] = syscall_max[k - 1][0];
					syscall_max[k][1] = syscall_max[k - 1][1];
					syscall_max[k][2] = syscall_max[k - 1][2];
					syscall_max[k][3] = syscall_max[k - 1][3];
				}
				syscall_max[j][0] = i;
				syscall_max[j][1] = stat_sys_time_tmp[i];
				syscall_max[j][2] = stat_sys_time_tmp_block[i];
				syscall_max[j][3] = stat_sys_num_tmp[i];
				break;
			}
		}
	}

	for_each_possible_cpu(cpu) {
		func_num += stat_pagefault_num[cpu];
		func_time += stat_pagefault_time[cpu];
		func_block_time += stat_pagefault_time_block[cpu];
	}

	for (i = 0; i < PRINT_SYSCALL_NUM - 1; i++) {
		(*data)[i].nr = syscall_max[i][0];
		/*
		 * Report the call count ([3]); the old code stored the block
		 * time ([2]) here, which made block_latency always compute
		 * to 1 and latency divide time by time.
		 */
		(*data)[i].num = syscall_max[i][3];
		(*data)[i].latency = 0;
		(*data)[i].block_latency = 0;
		if ((*data)[i].num) {
			(*data)[i].latency = syscall_max[i][1] / (*data)[i].num;
			(*data)[i].block_latency = syscall_max[i][2] / (*data)[i].num;
		}
		/* %pS on tk5, matching sys_get_latency(). */
#ifdef TK5
		sprintf((*data)[i].func, "%pS", stat_sys_call_table[syscall_max[i][0]]);
#else
		sprintf((*data)[i].func, "%pF", stat_sys_call_table[syscall_max[i][0]]);
#endif
	}

	/* Final row: page-fault summary. */
	(*data)[i].nr = 0x00;
	(*data)[i].num = func_num;
	if (func_num) {
		(*data)[i].latency = func_time / func_num;
		(*data)[i].block_latency = func_block_time / func_num;
	}
	strcpy((*data)[i].func, "do_page_fault");
}
/**********************sysctl***************************/
/*
 * sysctl handler for the module enable knobs: writes are admin-only, and
 * everything is refused once the module has been marked for unload.
 */
int sysctl_numa_enable_handler(struct ctl_table *table, int write,
#ifdef TK5
		void *buffer, size_t *lenp, loff_t *ppos)
#else
		void __user *buffer, size_t *lenp, loff_t *ppos)
#endif
{
	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sysctl_module_disable)
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}

/*
 * sysctl handler that exports the latency report: regenerate the table via
 * sys_get_latency() and copy up to the caller-provided length (capped at
 * the full table size) into the user buffer.
 */
int sysctl_get_data_handler(struct ctl_table *table, int write,
#ifdef TK5
		void *buffer, size_t *lenp, loff_t *ppos)
#else
		void __user *buffer, size_t *lenp, loff_t *ppos)
#endif
{
	size_t max = TRACK_SYSCALL_NUM * sizeof(struct func_latency);
	size_t len = *lenp > max ? max : *lenp;

	sys_get_latency(&data_ptr);
#ifdef TK5
	/* On tk5 the sysctl buffer is a kernel pointer. */
	memcpy(buffer, data_ptr, len);
#else
	/* The old code ignored the copy_to_user() result. */
	if (copy_to_user(buffer, (void *)data_ptr, len))
		return -EFAULT;
#endif
	return 0;
}

/*
 * Read handler: report per-cpu aggregated counters for the hooked
 * function(s) as an array of struct func_latency records.  Record 0 is
 * the primary hooked function; if the caller's buffer holds more than one
 * record, per-function totals follow.
 */
int sysctl_get_func_handler(struct ctl_table *table, int write,
#ifdef TK5
		void *buffer, size_t *lenp, loff_t *ppos)
#else
		void __user *buffer, size_t *lenp, loff_t *ppos)
#endif
{
	int cpu, i, j;
	unsigned long func_num = 0, func_time = 0, func_block_time = 0;
	struct func_latency data[HOOK_FUNC_NUM];
	size_t len = sizeof(data);

	if (*lenp < len)
		len = *lenp;
	stat_total_time();

	memset(data, 0, sizeof(data));
	/* Sum the hooked function's counters across all possible CPUs. */
	for_each_possible_cpu(cpu) {
		func_num += stat_func_num[cpu];
		func_time += stat_func_time[cpu];
		func_block_time += stat_func_time_block[cpu];
	}
	data[0].nr = 0x00;
	data[0].num = func_num;
	if (func_num) {
		data[0].latency = func_time / func_num;
		data[0].block_latency = func_block_time / func_num;
	}
	strcpy(data[0].func, func_name_new);

	/*
	 * BUG FIX: j is derived from the user-controlled *lenp and was used
	 * unclamped to index data[HOOK_FUNC_NUM] (and the stat_func_total_*
	 * arrays), allowing a stack buffer overflow.  Clamp it to the array
	 * size.
	 */
	j = *lenp / sizeof(struct func_latency);
	if (j > HOOK_FUNC_NUM)
		j = HOOK_FUNC_NUM;
	if (j > 1) {
		strcpy(data[0].func, get_one_func_name(0));
		for (i = 1; i < j; i++) {
			if (stat_func_total_num[i] == 0)
				continue;
			data[i].num = stat_func_total_num[i];
			data[i].latency = stat_func_total_time[i];
			strcpy(data[i].func, get_one_func_name(i));
		}
	}
#ifdef TK5
	memcpy(buffer, &data[0], len);
#else
	/* BUG FIX: copy_to_user() result was ignored; report faults. */
	if (copy_to_user(buffer, &data[0], len))
		return -EFAULT;
#endif
	return 0;
}

/*
 * Write handler: when sysctl_module_debug is set to 1, zero all collected
 * statistics and recapture the current irq/softirq/pagefault counters as
 * the new baseline for subsequent delta reporting.
 */
int sysctl_clear_data_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, cpu, i;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (sysctl_module_debug == 1) {
		memset(stat_sys_num[0], 0, 3 * nr_cpu_ids * NR_syscalls * sizeof(unsigned long));
		memset(stat_cpu_idle, 0, 21 * nr_cpu_ids * sizeof(unsigned long));
		/*
		 * BUG FIX: the size argument was
		 * sizeof(irq_stat_data)/sizeof(unsigned long) — the element
		 * COUNT (4 bytes), not the byte size — so only the first few
		 * bytes of the array were cleared.
		 */
		memset(irq_stat_data, 0, sizeof(irq_stat_data));
		/*
		 * BUG FIX: pagefault_stat_data is re-accumulated in the loop
		 * below but was never reset first (unlike irq_stat_data), so
		 * every clear inflated the baseline by the previous value.
		 */
		pagefault_stat_data = 0;
		for_each_possible_cpu(cpu) {
			struct kernel_cpustat *kcs = &kcpustat_cpu(cpu);
#ifdef CONFIG_VM_EVENT_COUNTERS
			struct vm_event_state *vm_stat = &per_cpu(vm_event_states, cpu);
#endif
			irq_stat_data[0] +=  kcs->cpustat[CPUTIME_IRQ];
			irq_stat_data[1] +=  kcs->cpustat[CPUTIME_SOFTIRQ];
#ifdef CONFIG_X86_64
			irq_stat_data[2] +=  kstat_cpu_irqs_sum(cpu) + stat_arch_irq_stat_cpu(cpu);
#endif
			for (i = 0; i < NR_SOFTIRQS; i++)
				irq_stat_data[3] += kstat_softirqs_cpu(i, cpu);
#ifdef CONFIG_VM_EVENT_COUNTERS
			pagefault_stat_data += vm_stat->event[PGMAJFAULT];
#endif
		}
#ifdef CONFIG_X86_64
		irq_stat_data[2] += stat_arch_irq_stat();
#endif
		/* NOTE(review): element size mirrors the allocation in
		 * data_init(); see the sizing note there. */
		memset(stat_mem_node, 0, 2 * nr_node_ids * sizeof(int));
		memset(stat_bio_disk_num, 0, 2 * DISK_TOTAL_NUM * sizeof(unsigned long));
		memset(data_ptr, 0, TRACK_SYSCALL_NUM * sizeof(struct func_latency));
		memset(stat_one_func_time, 0, stat_one_func_size);
		access_vm_stat_time = 0;
		access_vm_stat_num = 0;
	}

	return ret;
}

/*
 * Allocate and zero all statistics buffers, capture the initial
 * irq/softirq/pagefault baseline, and prepare the ftrace work items.
 * Returns 0 on success or -ENOMEM; on failure all partial allocations
 * are released via the goto-unwind chain.
 */
int data_init(void)
{
	int cpu, i;

	/*
	 * Pointer table: [0..nr) = num, [nr..2nr) = time,
	 * [2nr..3nr) = blocked time.  Sized with sizeof(*stat_sys_num)
	 * (pointer size) rather than sizeof(unsigned long); the two happen
	 * to match on Linux ABIs, but the old form was the wrong type.
	 */
	stat_sys_num = (unsigned long **)vmalloc(3 * nr_cpu_ids * sizeof(*stat_sys_num));
	if (stat_sys_num == NULL)
		return -ENOMEM;

	stat_sys_time = stat_sys_num + nr_cpu_ids;
	stat_sys_time_block = stat_sys_num + 2 * nr_cpu_ids;

	/* One flat backing array shared by all three per-cpu tables. */
	stat_sys_num[0] = (unsigned long *)vmalloc(3 * nr_cpu_ids * NR_syscalls * sizeof(unsigned long));
	if (stat_sys_num[0] == NULL)
		goto err_stat_sys_num_0;

	stat_sys_time[0] = stat_sys_num[0] + nr_cpu_ids * NR_syscalls;
	stat_sys_time_block[0] = stat_sys_num[0] + 2 * nr_cpu_ids * NR_syscalls;
	/*
	 * Capture the current counters as a baseline (irq_stat_data and
	 * pagefault_stat_data are zero-initialized statics) and carve the
	 * per-cpu slices out of the flat backing array.
	 */
	for_each_possible_cpu(cpu) {
		struct kernel_cpustat *kcs = &kcpustat_cpu(cpu);
#ifdef CONFIG_VM_EVENT_COUNTERS
		struct vm_event_state *vm_stat = &per_cpu(vm_event_states, cpu);
#endif
		irq_stat_data[0] +=  kcs->cpustat[CPUTIME_IRQ];
		irq_stat_data[1] +=  kcs->cpustat[CPUTIME_SOFTIRQ];
#ifdef CONFIG_X86_64
		irq_stat_data[2] +=  kstat_cpu_irqs_sum(cpu) + stat_arch_irq_stat_cpu(cpu);
#endif
		for (i = 0; i < NR_SOFTIRQS; i++)
			irq_stat_data[3] += kstat_softirqs_cpu(i, cpu);

#ifdef CONFIG_VM_EVENT_COUNTERS
		pagefault_stat_data += vm_stat->event[PGMAJFAULT];
#endif

		if (cpu == 0)
			continue;
		stat_sys_num[cpu] = stat_sys_num[cpu - 1] + NR_syscalls;
		stat_sys_time[cpu] = stat_sys_time[cpu - 1] + NR_syscalls;
		stat_sys_time_block[cpu] = stat_sys_time_block[cpu - 1] + NR_syscalls;
	}
#ifdef CONFIG_X86_64
	irq_stat_data[2] += stat_arch_irq_stat();
#endif

	/* 21 per-cpu unsigned long arrays carved out of one allocation. */
	stat_cpu_idle = (unsigned long *)vmalloc(21 * nr_cpu_ids * sizeof(unsigned long));
	if (stat_cpu_idle == NULL)
		goto err_stat_cpu_idle;

	stat_cpu_num = stat_cpu_idle + nr_cpu_ids;
	stat_func_num = stat_cpu_idle + 2 * nr_cpu_ids;
	stat_func_time = stat_cpu_idle + 3 * nr_cpu_ids;
	stat_pagefault_num = stat_cpu_idle + 4 * nr_cpu_ids;
	stat_pagefault_time = stat_cpu_idle + 5 * nr_cpu_ids;
	stat_pagefault_time_block = stat_cpu_idle + 6 * nr_cpu_ids;
	stat_allocpage_num = stat_cpu_idle + 7 * nr_cpu_ids;
	stat_allocpage_time_block = stat_cpu_idle + 8 * nr_cpu_ids;
	stat_bio_time_block = stat_cpu_idle + 9 * nr_cpu_ids;
	stat_bio_num = stat_cpu_idle + 10 * nr_cpu_ids;
	stat_bio_size = stat_cpu_idle + 11 * nr_cpu_ids;
	stat_func_time_block = stat_cpu_idle + 12 * nr_cpu_ids;
	stat_submit_bio_time = stat_cpu_idle + 13 * nr_cpu_ids;
	stat_submit_bio_time_block = stat_cpu_idle + 14 * nr_cpu_ids;
	stat_end_bio_time = stat_cpu_idle + 15 * nr_cpu_ids;
	stat_end_bio_time_block = stat_cpu_idle + 16 * nr_cpu_ids;
	stat_dispatch_time_block = stat_cpu_idle + 17 * nr_cpu_ids;
	stat_slub_alloc_size = stat_cpu_idle + 18 * nr_cpu_ids;
	stat_slub_alloc_num = stat_cpu_idle + 19 * nr_cpu_ids;
	stat_slub_alloc_time_block = stat_cpu_idle + 20 * nr_cpu_ids;

	/* NOTE(review): element types of these two are declared elsewhere —
	 * confirm sizeof(int) matches their actual element size. */
	nr_running_per_node = vmalloc(nr_node_ids * sizeof(int));
	if (nr_running_per_node == NULL)
		goto err_nr_running_per_node;

	idle_cpu_stat = vmalloc(nr_cpu_ids * sizeof(int));
	if (idle_cpu_stat == NULL)
		goto err_dle_cpu_stat;

	idle_cpu_stat_char = vmalloc(nr_cpu_ids * sizeof(int));
	if (idle_cpu_stat_char == NULL)
		goto err_idle_cpu_stat_char;

	/*
	 * BUG FIX: previously vmalloc(2 * nr_node_ids * sizeof(int)), but
	 * stat_node_num (an unsigned long *) is carved out of this buffer
	 * with pointer arithmetic below, so the memory is accessed as
	 * unsigned long elements — on 64-bit the old size under-allocated
	 * by half, overflowing the buffer.
	 */
	stat_mem_node = vmalloc(2 * nr_node_ids * sizeof(unsigned long));
	if (stat_mem_node == NULL)
		goto err_stat_mem_node;
	stat_node_num = stat_mem_node + nr_node_ids;

	stat_sys_num_tmp = (unsigned long *)vmalloc(3 * NR_syscalls * sizeof(unsigned long));
	if (stat_sys_num_tmp == NULL)
		goto err_stat_sys_num_tmp;
	stat_sys_time_tmp = stat_sys_num_tmp + NR_syscalls;
	stat_sys_time_tmp_block = stat_sys_time_tmp + NR_syscalls;

	stat_bio_disk_num = (unsigned long *)vmalloc(2 * DISK_TOTAL_NUM * sizeof(unsigned long));
	if (stat_bio_disk_num == NULL)
		goto err_stat_bio_disk_num;

	stat_bio_disk_blocknum = stat_bio_disk_num + DISK_TOTAL_NUM;
	/* kmalloc returns void *; the cast was unnecessary in C. */
	data_ptr = kmalloc(TRACK_SYSCALL_NUM * sizeof(struct func_latency), GFP_KERNEL);
	if (data_ptr == NULL)
		goto err_data_ptr;

	memset(stat_sys_num[0], 0, 3 * nr_cpu_ids * NR_syscalls * sizeof(unsigned long));
	memset(stat_cpu_idle, 0, 21 * nr_cpu_ids * sizeof(unsigned long));
	memset(stat_mem_node, 0, 2 * nr_node_ids * sizeof(unsigned long));
	memset(stat_bio_disk_num, 0, 2 * DISK_TOTAL_NUM * sizeof(unsigned long));
	memset(data_ptr, 0, TRACK_SYSCALL_NUM * sizeof(struct func_latency));

	INIT_WORK(&ftrace_work, ftrace_unhook_work_fn);
	INIT_WORK(&ftrace_work_init, ftrace_hook_work_fn);
	return 0;
err_data_ptr:
	vfree(stat_bio_disk_num);
err_stat_bio_disk_num:
	vfree(stat_sys_num_tmp);
err_stat_sys_num_tmp:
	vfree(stat_mem_node);
err_stat_mem_node:
	vfree(idle_cpu_stat_char);
err_idle_cpu_stat_char:
	vfree(idle_cpu_stat);
err_dle_cpu_stat:
	vfree(nr_running_per_node);
err_nr_running_per_node:
	vfree(stat_cpu_idle);
err_stat_cpu_idle:
	vfree(stat_sys_num[0]);
err_stat_sys_num_0:
	vfree(stat_sys_num);
	return -ENOMEM;
}

/*
 * Release every buffer allocated by data_init().  The flat backing array
 * stat_sys_num[0] must be freed before the pointer table that holds it.
 */
int data_exit(void)
{
	vfree(stat_sys_num[0]);
	vfree(stat_sys_num);
	vfree(stat_cpu_idle);
	vfree(nr_running_per_node);
	vfree(idle_cpu_stat);
	vfree(idle_cpu_stat_char);
	vfree(stat_mem_node);
	vfree(stat_bio_disk_num);
	kfree(data_ptr);
	vfree(stat_sys_num_tmp);
	/* vfree(NULL) is a no-op, so the old NULL guard was redundant. */
	vfree(stat_one_func_time);

	return 0;
}

