/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2018-2019.
 * Description: top+ feature, account for various states much more accurately
 * Author: duyanlin <duyanlin@huawei.com>
 * Create: 2018-08-30
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/msa.h>
#ifdef CONFIG_RTOS_MICROSTATE_ACCT
#include <linux/irq.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/syscalls.h>
#include <linux/kernel_stat.h>
#include "sched/sched.h"
#include <linux/uaccess.h>
#include <trace/rtos_tracepoint.h>
#include <linux/cputime_msa.h>
#include <trace/events/sched.h>
#include <linux/task_struct_extend.h>
#ifdef CONFIG_RTOS_TINYSLEEP
#include <linux/rtos/tinysleep.h>
#endif

/*
 * Track time spend in interrupt handlers.
 * One instance exists per CPU (see DEFINE_PER_CPU(msa_irq) below).
 */
struct msa_irq {
	msa_time_t times;		/* cumulative time spent in irq handlers */
	msa_time_t last_entered;	/* timestamp of most recent irq entry */
	msa_time_t last_exited;		/* timestamp of most recent irq exit */
	int nested;			/* irq nesting depth; not updated in this file -- TODO confirm users */
};

/* Serializes msa_enable()/msa_disable() and msa_account_enabled updates. */
DEFINE_MUTEX(msa_mutex);

static struct proc_dir_entry *msa_proc_root;		/* /proc/msa directory */
static struct proc_dir_entry *msa_proc_account_enable;	/* /proc/msa/msa_account_enable file */
static int msa_account_enabled;				/* 1 while tracepoint probes are registered */
#define MSA_PROC_ROOT          "msa"
#define MSA_ACCOUNT_ENABLE     "msa_account_enable"
#define MSA_ACCOUNT_KBUF_LEN   2
struct kmem_cache *microstates_struct_cachep;
struct microstates microstate_init_task;

/*
 * Time spent in interrupt handlers
 */
DEFINE_PER_CPU(struct msa_irq[1], msa_irq);

#ifdef CONFIG_RTOS_MICROSTATE_ACCT

/*
 * msa_user_time: charge @msatime of user-mode execution for task @p
 * to this CPU's cpustat (NICE, USER, or IDLE-for-tinysleep bucket).
 *
 * Fix: the per-cpu cpustat pointer was previously fetched before
 * preempt_disable(); the task could migrate between the fetch and the
 * update and charge another CPU's statistics. Fetch it only once
 * preemption is off.
 */
void msa_user_time(struct task_struct *p, msatime_t msatime)
{
	u64 *cpustat;
	msatime64_t tmp;

	preempt_disable();
	/* Safe to resolve the per-cpu pointer now that migration is off. */
	cpustat = msa_kstat_this_cpu->cpustat;

	/* Add user time to cpustat. */
	tmp = msatime_to_msatime64(msatime);
#ifdef CONFIG_RTOS_TINYSLEEP
	if (tinysleep_test_and_clear(0))
		cpustat[CPUTIME_IDLE] = msatime64_add(cpustat[CPUTIME_IDLE], tmp);
	else if (task_nice(p) > 0)
#else
	if (task_nice(p) > 0)
#endif
		cpustat[CPUTIME_NICE] = msatime64_add(cpustat[CPUTIME_NICE], tmp);
	else
		cpustat[CPUTIME_USER] = msatime64_add(cpustat[CPUTIME_USER], tmp);
	preempt_enable();

	/* Account for user time used */
	acct_update_integrals(p);
}

/*
 * msa_system_time: charge @msatime of kernel-mode execution for task @p
 * to this CPU's cpustat (SYSTEM, or IOWAIT/IDLE when @p is the idle task).
 *
 * Fix: both the per-cpu cpustat pointer and this_rq() were previously
 * evaluated before preempt_disable(); a migration in between would make
 * the function read another CPU's runqueue and charge the wrong CPU's
 * statistics. Resolve both only after preemption is disabled.
 */
void msa_system_time(struct task_struct *p, msatime_t msatime)
{
	u64 *cpustat;
	struct rq *rq;
	msatime64_t tmp;

	preempt_disable();
	/* Safe to resolve per-cpu data now that migration is off. */
	cpustat = msa_kstat_this_cpu->cpustat;
	rq = this_rq();

	/* Add system time to cpustat. */
	tmp = msatime_to_msatime64(msatime);

#ifdef CONFIG_RTOS_TINYSLEEP
	if (tinysleep_test_and_clear(0))
		cpustat[CPUTIME_IDLE] = msatime64_add(cpustat[CPUTIME_IDLE], tmp);
	else if (p != rq->idle)
#else
	if (p != rq->idle)
#endif
		cpustat[CPUTIME_SYSTEM] = msatime64_add(cpustat[CPUTIME_SYSTEM], tmp);
	else if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] = msatime64_add(cpustat[CPUTIME_IOWAIT], tmp);
	else
		cpustat[CPUTIME_IDLE] = msatime64_add(cpustat[CPUTIME_IDLE], tmp);
	preempt_enable();

	/* Account for system time used */
	acct_update_integrals(p);
}
#endif

/*
 * msa_get_account_enabled: report whether microstate accounting is
 * currently enabled (non-zero) or disabled (zero).
 */
int msa_get_account_enabled(void)
{
	int enabled = msa_account_enabled;

	return enabled;
}

/*
 * msa_switch_state: stamp a context switch into both tasks' microstate
 * records at time @now. The outgoing task moves to @next_state (its old
 * on-cpu state is remembered in next_state); the incoming task resumes
 * its recorded next_state, coerced to MSA_ONCPU_SYS unless it was
 * explicitly MSA_ONCPU_USER.
 */
static void msa_switch_state(struct microstates *prev_msp,
		struct microstates *next_msp, msa_time_t now,
		enum msa_thread_state next_state)
{
	enum msa_thread_state resumed;

	/* Outgoing task: remember where it was, record where it goes. */
	prev_msp->next_state = prev_msp->cur_state;
	prev_msp->cur_state = next_state;
	prev_msp->last_change = now;

	/* Incoming task: anything other than user mode resumes as SYS. */
	resumed = next_msp->next_state;
	if (resumed != MSA_ONCPU_USER)
		resumed = MSA_ONCPU_SYS;
	next_msp->last_change = now;
	next_msp->cur_state = resumed;
	next_msp->next_state = MSA_UNKNOWN;
}

/*
 * msa_task_switch_handler: Update microstate timers when switching from
 * one task to another.
 *
 * @prev, @next:  The prev task is coming off the processor;
 *                the new task is about to run on the processor.
 *
 * Update the times in both prev and next.  It may be necessary to infer the
 * next state for each task.
 *
 * Registered as the sched_switch tracepoint probe (see
 * msa_register_tracepoint()); runs in scheduler context.
 */
static void msa_task_switch_handler(void *ignore, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	struct rtos_task_struct *rtos_task_prev = task_to_rtos_task(prev);
	struct rtos_task_struct *rtos_task_next = task_to_rtos_task(next);
	struct microstates *prev_msp = rtos_task_prev->microstates;
	struct microstates *next_msp = rtos_task_next->microstates;
	msa_time_t now;
	enum msa_thread_state next_state;
	unsigned long flags;
	long prev_state;

	local_irq_save(flags);

	/* Close out the current-state interval for both tasks at 'now'. */
	MSA_NOW(now);
	next_msp->timers[next_msp->cur_state] += now - next_msp->last_change;
	prev_msp->timers[prev_msp->cur_state] += now - prev_msp->last_change;

	/*
	 * NOTE(review): prev's on-cpu interval is always charged as system
	 * time here, even if prev_msp->cur_state was MSA_ONCPU_USER --
	 * confirm this is intentional (user time may be charged elsewhere).
	 */
	msa_system_time(prev, (now - prev_msp->last_change));

	/*
	 * Update states, state is sort of a bitmask, except that
	 * TASK_RUNNING is 0.
	 *
	 * We must load prev->state once (task_struct::state is volatile)
	 * like __schedule does.
	 * other locations such as try_to_wake_up can change ->state underneath us.
	 */
	prev_state = prev->state;
	if ((prev_state == TASK_RUNNING) || (prev_state & TASK_WAKING)) {
		next_state = MSA_ONRUNQUEUE;
	} else if (prev_state & TASK_INTERRUPTIBLE) {
		next_state = MSA_INTERRUPTIBLE_SLEEP;
#ifdef CONFIG_RTOS_RTPC_RPC
	} else if (prev_state & (TASK_UNINTERRUPTIBLE | TASK_RTPC_UNWAKEABLE)) {
#else
	} else if (prev_state & TASK_UNINTERRUPTIBLE) {
#endif
		next_state = MSA_UNINTERRUPTIBLE_SLEEP;
	} else if (prev_state & (TASK_STOPPED | TASK_TRACED | TASK_PARKED)) {
		next_state = MSA_STOPPED;
	} else if (prev_state & (TASK_DEAD | EXIT_DEAD | EXIT_ZOMBIE)) {
		next_state = MSA_ZOMBIE;
	} else {
		pr_warn("msa: Setting UNKNOWN state from %ld\n", prev_state);
		WARN_ON(1);
		next_state = MSA_UNKNOWN;
	}

	/*
	 * special states: a pre-announced sleep reason (paging/futex/poll,
	 * set via msa_next_state()) overrides the generic sleep state, but
	 * only if the task really is going to sleep.
	 */
	switch (prev_msp->next_state) {
	case MSA_PAGING_SLEEP:
	case MSA_FUTEX_SLEEP:
	case MSA_POLL_SLEEP:
		if ((prev_state & TASK_INTERRUPTIBLE) ||
		    (prev_state & TASK_UNINTERRUPTIBLE))
			next_state = prev_msp->next_state;
		break;
	default:
		break;
	}

	msa_switch_state(prev_msp, next_msp, now, next_state);
	local_irq_restore(flags);
}

/*
 * msa_init:  Initialise the struct microstates in a new task
 * @p: pointer to the struct task_struct to be initialised
 *
 * This function is called from copy_process().
 * It initialises the microstate timers to zero, and sets the
 * current state to MSA_UNINTERRUPTIBLE_SLEEP.
 */
void msa_init(struct task_struct *p)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;

	memset(msp, 0, sizeof(*msp));
	MSA_NOW(msp->last_change);
	msp->cur_state = MSA_UNINTERRUPTIBLE_SLEEP;
	msp->next_state = MSA_ONCPU_SYS;
}

/*
 * helper_msa_set_timer: close the current microstate interval and enter
 * a new one.
 * @msp:        the struct microstates to update
 * @next_state: the state being changed to
 *
 * Adds the elapsed time to the timer of the current state, stamps the
 * change time, switches cur_state to @next_state, and returns the
 * elapsed interval. Runs with local interrupts disabled to keep the
 * read-modify-write of the timers atomic on this CPU.
 */
static msa_time_t helper_msa_set_timer(struct microstates *msp, int next_state)
{
	msa_time_t stamp, elapsed;
	unsigned long irqflags;

	local_irq_save(irqflags);
	MSA_NOW(stamp);
	elapsed = stamp - msp->last_change;
	msp->timers[msp->cur_state] += elapsed;
	msp->cur_state = next_state;
	msp->last_change = stamp;
	local_irq_restore(irqflags);

	return elapsed;
}

/*
 * msa_next_state:  Get the next status of microstates.
 * @p: pointer to the task that has just changed state.
 * @next_state: the state being changed to.
 *
 * This function is called, e.g., from enqueue_task(), when an
 * next state need to change.
 */
void msa_next_state(struct task_struct *p, int state)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);

	rtos_task->microstates->next_state = state;
}

/*
 * msa_set_timer:  Time stamp an explicit state change.
 * @p: pointer to the task that has just changed state.
 * @next_state: the state being changed to.
 *
 * This function is called, e.g., from __activate_task(), when an
 * immediate state change happens.
 */
void msa_set_timer(struct task_struct *p, int next_state)
{
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;
	msa_time_t delta;

	/*
	 * NOTE(review): @next_state is ignored -- the state is forced to
	 * MSA_ONCPU_SYS, yet the closed interval is charged as USER time.
	 * Confirm whether the parameter should be passed through and
	 * whether msa_user_time() is the right accounting call here.
	 */
	delta = helper_msa_set_timer(msp, MSA_ONCPU_SYS);

	msa_user_time(p, delta);
}

/*
 * msa_finish_irq: end processing for an interrupt.
 * @irq: the interrupt that was just serviced.
 *
 * Update the time spent handling irq, then update the current task's
 * state to MSA_ONCPU_USER or MSA_ONCPU_SYS.
 *
 * This MUST be called instead of irq_exit() whenever msa_start_irq()
 * was called for a given irq.  irq_exit() is implied by this function.
 *
 * See the notes in msa_start_irq() for info about irq_id
 */
void msa_irq_exit(int irq_id, int is_going_to_user)
{
	struct task_struct *p = current;
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;
	u64 *cpustat = msa_kstat_this_cpu->cpustat;
	msa_time_t now, delta;
	struct msa_irq *mip;
	int nested;

	/* Accounting off: still perform the implied irq_exit() (x86 does
	 * its own irq_exit elsewhere) and bail out. */
	if (msa_account_enabled == 0) {
#ifndef CONFIG_X86
		irq_exit();
#endif
		return;
	}

	mip = get_cpu_var(msa_irq);
	/* nested == 0 means we are leaving the outermost hard irq. */
	nested = hardirq_count() - HARDIRQ_OFFSET;
	/* NOTE(review): %lx assumes both arguments are unsigned long --
	 * confirm hardirq_count()'s return type matches on this platform. */
	WARN_ONCE(nested < 0, "hardirq_count(%lx) is less than HARDIRQ_OFFSET(%lx)\n", hardirq_count(), HARDIRQ_OFFSET);

	/* Charge the hard-irq interval; only the outermost level goes to
	 * CPUTIME_IRQ to avoid double counting nested irqs. */
	MSA_NOW(now);
	delta = now - mip->last_entered;
	mip->times += delta;
	if (!nested)
		cpustat[CPUTIME_IRQ] = msatime64_add(cpustat[CPUTIME_IRQ], delta);

#ifndef CONFIG_X86
	irq_exit();
#endif

	if (nested <= 0) {
		msa_time_t before = now;

		/* Time spent inside irq_exit() (softirq processing) is
		 * charged to CPUTIME_SOFTIRQ. */
		MSA_NOW(now);
		delta = now - before;
		cpustat[CPUTIME_SOFTIRQ] = msatime64_add(cpustat[CPUTIME_SOFTIRQ], delta);
		msp->timers[msp->cur_state] += now - msp->last_change;

		/* Resume the interrupted task in user or kernel mode. */
		msp->last_change = now;
		if (is_going_to_user)
			msp->cur_state = MSA_ONCPU_USER;
		else
			msp->cur_state = MSA_ONCPU_SYS;
	}

	put_cpu_var(msa_irq);
}

/*
 * msa_simple_irq_account:  Handler of simple hard irq
 * which without "irq_enter()" and "irq_exit()";
 * @irq_id: irq number
 * @regs: irq register value.
 */
void msa_simple_irq_account(unsigned int irq_id, struct pt_regs *regs)
{
	struct task_struct *p = current;
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;
	msa_time_t now;
#ifdef CONFIG_RTOS_TINYSLEEP
	u64 *cpustat = msa_kstat_this_cpu->cpustat;
#endif

	if (msa_account_enabled == 0)
		return;

	MSA_NOW(now);
	/* account msa only for no irq nested */
	if (hardirq_count() <= HARDIRQ_OFFSET) {
		msa_time_t delta = now - msp->last_change;

		/* Close the interval that was interrupted ... */
		msp->timers[msp->cur_state] += delta;
		msp->last_change = now;

		/* ... and charge it to user, idle (tinysleep), or system. */
		if (msp->cur_state == MSA_ONCPU_USER)
			msa_user_time(p, delta);
#ifdef CONFIG_RTOS_TINYSLEEP
		else if (tinysleep_test_and_clear(1)) /* in irq account */
			cpustat[CPUTIME_IDLE] = msatime64_add(cpustat[CPUTIME_IDLE], delta);
#endif
		else
			msa_system_time(p, delta);

		/* Resume state depends on which mode the irq interrupted. */
#ifdef CONFIG_X86
		if (regs->cs != __KERNEL_CS)
#else
		if (user_mode(regs))    /* is going to user */
#endif
			msp->cur_state = MSA_ONCPU_USER;
		else
			msp->cur_state = MSA_ONCPU_SYS;
	}
}

/*
 * msa_irq_start_handler: mark the start of an interrupt handler.
 * @irq: irq number being handled.
 *
 * Update the current task state to MSA_INTERRUPTED, and start
 * accumulating time to the interrupt handler for irq.
 *
 * Note that the irq_id does not have to be the actual irq, just some way
 * to uniquely identify the interrupt source that is less than NR_IRQ.
 * x86 uses the vector, for instance, since the IRQ numbers don't map
 * to all the relevant interrupt sources.
 *
 * Registered as the msa_irq_start tracepoint probe.
 */
static void msa_irq_start_handler(void *ignore, unsigned int irq_id, struct pt_regs *regs, void *pfunc)
{
	struct task_struct *p = current;
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;
	msa_time_t now;
	int nested;
#ifdef CONFIG_RTOS_TINYSLEEP
	u64 *cpustat = msa_kstat_this_cpu->cpustat;
#endif

	if (msa_account_enabled == 0)
		return;

	/* we're in an interrupt handler... no possibility of preemption */
	MSA_NOW(now);

	/* nested == 0 means this is the outermost hard irq. */
	nested = hardirq_count() - HARDIRQ_OFFSET;
	/* NOTE(review): %lx assumes both arguments are unsigned long --
	 * confirm hardirq_count()'s return type matches on this platform. */
	WARN_ONCE(nested < 0, "hardirq_count(%lx) is less than HARDIRQ_OFFSET(%lx)\n", hardirq_count(), HARDIRQ_OFFSET);

	/* Stamp irq entry; msa_irq_exit() measures from this timestamp. */
	this_cpu_ptr(msa_irq)->last_entered = now;

	if (nested <= 0) {
		msa_time_t delta = now - msp->last_change;

		/* Close the interrupted task's current interval ... */
		msp->timers[msp->cur_state] += delta;

		msp->last_change = now;
		/* ... and charge it to user, idle (tinysleep), or system. */
		if (msp->cur_state == MSA_ONCPU_USER)
			msa_user_time(p, delta);
#ifdef CONFIG_RTOS_TINYSLEEP
		else if (tinysleep_test_and_clear(1)) /* in irq account */
			cpustat[CPUTIME_IDLE] = msatime64_add(cpustat[CPUTIME_IDLE], delta);
#endif
		else
			msa_system_time(p, delta);
		if (msp->cur_state == MSA_ONCPU_USER
				|| msp->cur_state == MSA_ONCPU_SYS) {
			/* Remember how to resume after the irq, but don't
			 * clobber a pre-announced sleep reason. */
			switch (msp->next_state) {
			case MSA_PAGING_SLEEP:
			case MSA_FUTEX_SLEEP:
			case MSA_POLL_SLEEP:
				break;
			default:
				msp->next_state = msp->cur_state;
				break;
			}
			msp->cur_state = MSA_INTERRUPTED;
		}
	}
}

/*
 * msa_sched_process_free_handler:  Accumulate child times into parent, after zombie is over.
 * @this: pointer to task that is now a zombie
 *
 * Called from release_task(). (Note: it may be better to call this
 * from wait_zombie())
 *
 * Registered as the sched_release_task_msa tracepoint probe.
 */
static void msa_sched_process_free_handler(void *ignore, struct task_struct *this)
{
	enum msa_thread_state s;
	struct rtos_task_struct *rtos_task_this = task_to_rtos_task(this);
	struct rtos_task_struct *rtos_task_parent = task_to_rtos_task(this->parent);
	msa_time_t *pmsp = rtos_task_parent->microstates->child_timers;
	struct microstates *msp = rtos_task_this->microstates;
	msa_time_t *msc = msp->timers;
	msa_time_t *msgc = msp->child_timers;
	struct task_struct *leader = this->group_leader;

	/*
	 * NOTE(review): the accumulation runs only for a non-leader thread
	 * whose group has become empty -- confirm the leader's own times
	 * are folded into the parent elsewhere.
	 */
	if (leader != this && thread_group_empty(leader)) {
		/*
		 * State could be MSA_ZOMBIE (if parent is interested)
		 * or something else (if the parent isn't interested)
		 */
		/* Close the final interval so timers[] is up to date. */
		(void)helper_msa_set_timer(msp, msp->cur_state);

		/* Fold own + grandchild times into the parent's child_timers. */
		for (s = 0; s < MSA_NR_STATES; s++)
			*pmsp++ += *msc++ + *msgc++;
	}
}

/*
 * msa_sched_process_exit_handler
 * Fold an exiting non-leader thread's microstate timers into its group
 * leader and zero them, so per-process totals don't exceed 100% CPU.
 * Registered as the sched_process_exit tracepoint probe; tasklist_lock
 * is held for reading to keep the group leader stable.
 */
static void msa_sched_process_exit_handler(void *ignore, struct task_struct *tsk)
{
	struct rtos_task_struct *child, *leader;
	int state;

	read_lock(&tasklist_lock);
	if (!thread_group_leader(tsk)) {
		child = task_to_rtos_task(tsk);
		leader = task_to_rtos_task(tsk->group_leader);
		for (state = 0; state < MSA_NR_STATES; state++) {
			leader->microstates->timers[state] +=
				child->microstates->timers[state];
			child->microstates->timers[state] = 0;
		}
	}
	read_unlock(&tasklist_lock);
}

#ifdef CONFIG_RTOS_MICROSTATE_ACCT_ON_SYSTEM_CALL
/* Thin asm-linkage shim: fire the space_switch tracepoint for @state.
 * Called from the syscall entry/exit path when accounting on syscalls
 * is configured. */
asmlinkage void msa_space_switch(enum msa_thread_state state)
{
	trace_space_switch(state);
}
#endif

/*
 * msa_space_switch_handler:  Accumulate
 * timer for syscall's entering and exiting
 * @state:  mark if syscall enter or exit
 *
 * Registered as the space_switch tracepoint probe. Closes the current
 * user/kernel interval and charges it to the opposite bucket: entering
 * user mode means the elapsed time was system time, and vice versa.
 */
void notrace msa_space_switch_handler(void *ignore, enum msa_thread_state state)
{
	unsigned long flags;
	struct task_struct *p = current;
	struct rtos_task_struct *rtos_task = task_to_rtos_task(p);
	struct microstates *msp = rtos_task->microstates;
	msa_time_t delta, now;

	local_irq_save(flags);
	/* No transition: nothing to account. */
	if (msp->cur_state == state) {
		local_irq_restore(flags);
		return;
	}

	MSA_NOW(now);
	delta = now - msp->last_change;
	msp->timers[msp->cur_state] += delta;
	msp->last_change = now;
	msp->cur_state = state;
	local_irq_restore(flags);
	if (delta) {
		/* The elapsed interval belongs to the mode we just left. */
		if (state == MSA_ONCPU_USER)
			msa_system_time(p, delta);
		else if (state == MSA_ONCPU_SYS)
			msa_user_time(p, delta);
		else
			pr_warn("Msa space switch handlers invalid state(%d)!\n", state);
	}
}

#ifdef CONFIG_X86
/*
 * Just account timers on page fault from user space: if the faulting
 * context was user mode (cs != kernel cs), treat it as a switch back
 * to MSA_ONCPU_USER. No-op while accounting is disabled.
 */
void notrace msa_error_exit(void)
{
	struct pt_regs *regs;

	if (msa_account_enabled == 0)
		return;

	regs = task_pt_regs(current);
	if (regs->cs != __KERNEL_CS)
		msa_space_switch_handler(NULL, MSA_ONCPU_USER);
}
#endif /* CONFIG_X86 */

#ifdef CONFIG_PROC_FS

/*
 * msa_register_tracepoint: attach all microstate-accounting tracepoint
 * probes (sched_switch, optional space_switch, msa_irq_start,
 * sched_release_task_msa, sched_process_exit).
 *
 * Returns 0 on success; on any failure, unwinds every probe registered
 * so far (goto-chain in reverse order) and returns the error.
 *
 * Fix: the failure message for sched_release_task_msa misspelled the
 * tracepoint name as "sched_release_task_mas".
 */
static int msa_register_tracepoint(void)
{
	int ret;

	ret = register_trace_sched_switch(msa_task_switch_handler, NULL);
	if (ret) {
		pr_warn("wakeup trace: Couldn't activate tracepoint probe to sched_switch.\n");
		return ret;
	}

#ifdef CONFIG_RTOS_MICROSTATE_ACCT_ON_SYSTEM_CALL
	ret = register_trace_space_switch(msa_space_switch_handler, NULL);
	if (ret) {
		pr_warn("wakeup trace: Couldn't activate tracepoint probe to space_switch.\n");
		goto fail_deprobe_sched_switch;
	}
#endif

	ret = register_trace_msa_irq_start(msa_irq_start_handler, NULL);
	if (ret) {
		pr_warn("wakeup trace: Couldn't activate tracepoint probe to irq_start.\n");
		goto fail_deprobe_space_switch;
	}

	ret = register_trace_sched_release_task_msa(msa_sched_process_free_handler, NULL);
	if (ret) {
		pr_warn("wakeup trace: Couldn't activate tracepoint probe to sched_release_task_msa.\n");
		goto fail_deprobe_irq_start;
	}

	ret = register_trace_sched_process_exit(msa_sched_process_exit_handler, NULL);
	if (ret) {
		pr_warn("wakeup trace: Couldn't activate tracepoint probe to sched_process_exit.\n");
		goto fail_deprobe_sched_release_task_msa;
	}
	return 0;

fail_deprobe_sched_release_task_msa:
	unregister_trace_sched_release_task_msa(msa_sched_process_free_handler, NULL);
fail_deprobe_irq_start:
	unregister_trace_msa_irq_start(msa_irq_start_handler, NULL);
fail_deprobe_space_switch:
#ifdef CONFIG_RTOS_MICROSTATE_ACCT_ON_SYSTEM_CALL
	unregister_trace_space_switch(msa_space_switch_handler, NULL);
fail_deprobe_sched_switch:
#endif
	unregister_trace_sched_switch(msa_task_switch_handler, NULL);
	return ret;
}

/*
 * msa_enable: turn microstate accounting on by registering all
 * tracepoint probes. Returns 0 on success, -EAGAIN if already enabled,
 * or the registration error. Serialized by msa_mutex.
 */
int msa_enable(void)
{
	int err;

	mutex_lock(&msa_mutex);
	if (msa_account_enabled == 1) {
		pr_info("msa has already enabled!\n");
		err = -EAGAIN;
	} else {
		err = msa_register_tracepoint();
		if (err == 0)
			msa_account_enabled = 1;
	}
	mutex_unlock(&msa_mutex);

	return err;
}
fs_initcall(msa_enable);

/*
 * msa_disable: turn microstate accounting off by unregistering every
 * tracepoint probe, then wait for in-flight probes to finish before
 * clearing the enabled flag. Serialized by msa_mutex; a no-op when
 * already disabled.
 */
void msa_disable(void)
{
	mutex_lock(&msa_mutex);
	if (msa_account_enabled != 0) {
		unregister_trace_sched_switch(msa_task_switch_handler, NULL);
#ifdef CONFIG_RTOS_MICROSTATE_ACCT_ON_SYSTEM_CALL
		unregister_trace_space_switch(msa_space_switch_handler, NULL);
#endif
		unregister_trace_msa_irq_start(msa_irq_start_handler, NULL);
		unregister_trace_sched_release_task_msa(msa_sched_process_free_handler, NULL);
		unregister_trace_sched_process_exit(msa_sched_process_exit_handler, NULL);
		tracepoint_synchronize_unregister();
		msa_account_enabled = 0;
	} else {
		pr_info("msa has already disabled!\n");
	}
	mutex_unlock(&msa_mutex);
}

/*
 * msa_account_enable_write: /proc/msa/msa_account_enable write handler.
 * First byte '0' disables accounting, '1' enables it; anything else is
 * -EINVAL. Returns @count on acceptance.
 *
 * Fix: @count is size_t (unsigned), so the old "count <= 0" check had a
 * dead "< 0" half that trips -Wtype-limits; use an explicit == 0 test.
 */
static ssize_t msa_account_enable_write(
	struct file *filp, const char __user *buffer, size_t count, loff_t *ppos)
{
	char kbuf[MSA_ACCOUNT_KBUF_LEN];
	size_t len = MSA_ACCOUNT_KBUF_LEN;

	if (count == 0)
		return -EINVAL;
	/* Copy at most the kbuf capacity; only kbuf[0] is inspected. */
	len = len < count ? len : count;

	if (copy_from_user(kbuf, buffer, len)) {
		pr_warn("msa copy data fail or no data!\n");
		return -EFAULT;
	}

	switch (kbuf[0]) {
	case '0':
		msa_disable();
		break;
	case '1':
		/* NOTE(review): msa_enable() can fail but the error is not
		 * propagated to the writer -- confirm this is intentional. */
		msa_enable();
		break;
	default:
		return -EINVAL;
	}
	return count;
}

/*
 * msa_account_enable_read: /proc/msa/msa_account_enable read handler.
 * Reports the current enable flag as a single digit plus newline.
 */
static ssize_t msa_account_enable_read(struct file *file,
	char __user *buffer, size_t buflen, loff_t *fpos)
{
	char out[MSA_ACCOUNT_KBUF_LEN];

	out[0] = '0' + msa_account_enabled;
	out[1] = '\n';

	return simple_read_from_buffer(buffer, buflen, fpos, out, sizeof(out));
}

/* File operations for /proc/msa/msa_account_enable.
 * NOTE(review): could likely be static const -- confirm no external users. */
struct proc_ops msa_account_enable_operations = {
	.proc_write = msa_account_enable_write,
	.proc_read  = msa_account_enable_read,
};

/*
 * msa_proc_init: create /proc/msa and /proc/msa/msa_account_enable.
 *
 * Fix: return -ENOMEM (kernel convention) instead of a bare -1 on
 * failure, and drop the goto-to-plain-return label for the first
 * failure path.
 */
static int __init msa_proc_init(void)
{
	msa_proc_root = proc_mkdir(MSA_PROC_ROOT, NULL);
	if (msa_proc_root == NULL) {
		pr_warn("Proc entry creation for MSA_PROC_ROOT failed\n");
		return -ENOMEM;
	}

	msa_proc_account_enable = proc_create_data(MSA_ACCOUNT_ENABLE, (mode_t)0640, msa_proc_root,
							&msa_account_enable_operations, NULL);
	if (msa_proc_account_enable == NULL) {
		pr_warn("Proc entry creation for MSA_ACCOUNT_ENABLE failed\n");
		/* Undo the directory so a failed init leaves no stale entry. */
		remove_proc_entry(MSA_PROC_ROOT, NULL);
		return -ENOMEM;
	}

	return 0;
}
fs_initcall(msa_proc_init);

#endif /* CONFIG_PROC_FS */

#endif /* CONFIG_RTOS_MICROSTATE_ACCT */
