// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 * Description: perf sched tools
 * Author: Huawei Technologies Co., Ltd
 * Create: 2022-05-31
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/tracepoint.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <trace/events/sched.h>
#include <trace/events/irq.h>
#include <asm/irq_regs.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
#include <linux/task_struct_extend.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/irq_work.h>
#include <linux/hashtable.h>
#include <linux/perf_schedtime.h>
#include <linux/task_struct_extend.h>
#include "perf-tools.h"
#include "../../kernel/sched/sched.h"
#define MAX_TRACE_ENTRIES		10
#define DEFAULT_TRACE_SWITCH_RECORDS	100
#define MAX_TRACE_SWITCH_RECORDS	1000
#define DEFAULT_TRACE_SCHED_RECORDS	20
#define MAX_TRACE_SCHED_RECORDS		100
/* 50 ms in local_clock() nanoseconds; parenthesized so the macro expands
 * safely inside any surrounding expression (e.g. division, modulo). */
#define DFX_PERF_DEFAULT_THRESHOLD	(50 * 1000 * 1000)

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)  "[perf sched tools] " fmt

/* Status bitmask written through the sched_ctrl file:
 * bit 0 - enable tracing
 * bit 1 - clear collected data
 * bit 2 - save stack traces of delayed tasks
 * bit 3 - save per-task runtime info
 */
enum dfx_perf_sched_status_t {
	SCHED_DISABLE = 0x0,
	SCHED_ENABLE = 0x1,
	SCHED_CLEAN = 0x2,
	SCHED_STACK_SAVE = 0x4,
	SCHED_RUNTIME_SAVE = 0x8,
	SCHED_MAX_STATUS = 0xf
};
static enum dfx_perf_sched_status_t dfx_perf_sched_status;

/* Each low bit enables tracing of the matching softirq vec;
 * bit 14 (HW_IRQ_TRACE_MASK_BIT) enables hardware irq tracing and
 * bit 15 (ARCH_TIMER_MASK_BIT) additionally includes the arch timer irq.
 * Default 0x407D: hwirq tracing on, TIMER/SCHED/HRTIMER/RCU softirqs and the
 * arch_timer hwirq excluded. */
static unsigned int irq_mask = 0x407D;
/* Floor applied to user-supplied thresholds (see sched_threshold_store). */
u64 min_schedtime_threshold = 1000000;
#define HW_IRQ_TRACE_MASK_BIT 0x4000
#define ARCH_TIMER_MASK_BIT 0x8000

/* One tracepoint to attach: probe callback plus the resolved tracepoint. */
struct tracepoint_entry {
	void *probe;
	const char *name;
	struct tracepoint *tp;
};

/* One captured "delayed wakeup" event plus the switch ring leading up to it. */
struct sched_record_t {
	u64 delay;		/* wakeup-to-run latency that triggered this record */
	u64 avg_delay;		/* running average delay of the delayed task */
	u32 scount;		/* switch count folded into avg_delay */
	int cpu_id;		/* CPU the record was captured on */
	struct lite_bandwidth_info b_info;	/* CFS bandwidth snapshot */
	struct switch_record *cur_switch;	/* most recently written ring slot */
	unsigned int nr_entries;		/* valid frames in entries[] */
	unsigned long entries[MAX_TRACE_ENTRIES];	/* saved stack of the delayed task */
	int flag;
	long e2e_id;
	atomic64_t nr_switchs;	/* total slots handed out; ring index = value % max_trace_switch_records */
	struct switch_record *switchs;	/* switch ring buffer */
};

/* Per-cpu state: ring of sched records plus the fifo feeding poll() readers. */
struct per_cpu_stack_trace {
	unsigned int nr_records;	/* index of the in-progress (working) record */
	bool is_full;			/* record ring has wrapped at least once */
	struct sched_record_t *records;
	u64 sched_threshold;		/* per-cpu wakeup-delay threshold */
	struct kfifo sched_data_fifo;	/* pointers to completed records */
};

/* pid of the task currently poll()ing sched data; 0 means no reader. */
static int poll_task_pid = 0;
static DEFINE_MUTEX(dfx_perf_sched_lock);
/* Set when the ring sizes change so pollers know to refresh their copies. */
static int record_size_changed = 0;

static struct irq_work	sched_data_work;
static DECLARE_WAIT_QUEUE_HEAD(sched_data_waitq);
#define DFX_PERF_SCHED_DEFAULT_HT_SIZE 100
static struct hlist_head dfx_perf_ht[DFX_PERF_SCHED_DEFAULT_HT_SIZE];

/* e2e tracking entry kept in dfx_perf_ht. */
struct lite_perf_sched_param_inner {
	long e2e_id;
	u64 start_time;
	u64 timeout;
	struct hlist_node node;
};
#ifdef CONFIG_CFS_BANDWIDTH
/* Snapshot the CFS bandwidth throttling counters of @p's task group. */
static void collect_cpu_stat(struct task_struct *p, struct lite_bandwidth_info *b_info)
{
	struct cfs_bandwidth *cfs_b = &p->se.cfs_rq->tg->cfs_bandwidth;

	b_info->nr_periods = cfs_b->nr_periods;
	b_info->nr_throttled = cfs_b->nr_throttled;
	b_info->throttled_time = cfs_b->throttled_time;
}
#else
/* No CFS bandwidth accounting in this config: report all-zero counters. */
static void collect_cpu_stat(struct task_struct *p, struct lite_bandwidth_info *b_info)
{
	b_info->nr_periods = 0;
	b_info->nr_throttled = 0;
	b_info->throttled_time = 0;
}
#endif

/* Ring sizes, configurable via the sched_record_size file while disabled. */
static u32 max_trace_sched_records = DEFAULT_TRACE_SCHED_RECORDS;
static u32 max_trace_switch_records = DEFAULT_TRACE_SWITCH_RECORDS;
/* Timestamp of the last enable; wakeups older than this are ignored. */
static u64 sched_record_start_time = 0;
static bool dfx_perf_sched_stack_save = false;
static bool dfx_perf_sched_runtime_save = false;

/* Top-level module state: tracepoint table plus the per-cpu buffers. */
struct dfx_perf_sched_info {
	struct tracepoint_entry tp_entries[9];
	unsigned int tp_initalized;
	struct per_cpu_stack_trace __percpu *stack_trace;
};

/* Capture the kernel stack of @next into the working sched record.
 * No-op unless stack saving was requested through the ctrl interface. */
static inline void starck_trace_save(struct sched_record_t *cpu_sched_record, struct task_struct *next)
{
	if (!dfx_perf_sched_stack_save)
		return;

	cpu_sched_record->nr_entries = stack_trace_save_tsk(next, cpu_sched_record->entries,
							    MAX_TRACE_ENTRIES, 0);
}

/* Reset a sched record so it can start accumulating a fresh switch trace. */
static void init_sched_record(struct sched_record_t *record)
{
	record->nr_entries = 0;
	atomic64_set(&record->nr_switchs, 0);
	record->cur_switch = record->switchs;
}

/*
 * Hand a completed record to the poll()-based reader by queueing its pointer
 * in the per-cpu fifo and scheduling the irq_work that wakes the waiter.
 * Only active while a reader is registered (poll_task_pid != 0).
 */
static void put_one_record_into_kfifo(struct sched_record_t *cpu_sched_record, struct kfifo *sched_data_fifo)
{
	int ret;
	struct sched_record_t *cpu_sched_record_ = cpu_sched_record;
	/* Drop the oldest entry when the fifo is full so the newest record fits.
	 * If several records are produced at once, only one can be queued here.
	 * On kfifo_out failure the poll feature is disabled; a kfifo_in failure
	 * is silently skipped.
	 * */
	if (poll_task_pid != 0) {
		if (kfifo_is_full(sched_data_fifo)) {
			struct sched_record_t *cpu_sched_record_tmp;
			ret = kfifo_out(sched_data_fifo, &cpu_sched_record_tmp, sizeof(struct sched_record_t *));
			if (!ret) {
				poll_task_pid = 0;
				return;
			}
		}

		ret = kfifo_in(sched_data_fifo, &cpu_sched_record_, sizeof(struct sched_record_t *));
		if (ret != sizeof(struct sched_record_t *)) {
			return;
		}

		cpu_sched_record->flag = DFX_SCHED_FLAG_NORMAL;
		irq_work_queue(&sched_data_work);
	}
}

/*
 * Reserve the next slot in the record's switch ring (wrapping at
 * max_trace_switch_records) and return it.  The counter is bumped atomically
 * so concurrent probes never hand out the same slot.
 */
static struct switch_record *fetch_srecord_add(struct sched_record_t *cpu_sched_record)
{
	unsigned long cpu_nr_switchs_raw = atomic64_fetch_add(1, &cpu_sched_record->nr_switchs);
	unsigned long cpu_nr_switchs = cpu_nr_switchs_raw % max_trace_switch_records;
	struct switch_record *srecord = &cpu_sched_record->switchs[cpu_nr_switchs];

	/* NOTE(review): presumably orders the counter bump against the caller's
	 * slot writes for the show-side readers - confirm the pairing barrier. */
	smp_wmb();

	return srecord;
}

/*
 * sched_switch tracepoint probe: logs every context switch into the current
 * CPU's switch ring and, when @next's wakeup-to-run delay exceeds its
 * threshold, finalizes the working record (delay stats, optional stack,
 * CFS bandwidth snapshot) and rotates to a fresh one.
 */
static void probe_sched_switch(void *priv, bool preempt,
			       struct task_struct *prev,
			       struct task_struct *next)
{
	u64 now = local_clock();
	struct per_cpu_stack_trace __percpu *stack_trace = priv;
	struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace);
	unsigned int cpu_nr_records = cpu_stack_trace->nr_records;
	struct sched_record_t *cpu_sched_record = &cpu_stack_trace->records[cpu_nr_records];

	struct vendor_task *vendor_info = task_to_vendor_task(next);
	struct task_sched_record *task_sched_record = &vendor_info->sched_record;

	struct vendor_task *vendor_info_prev = task_to_vendor_task(prev);
	struct task_sched_record *task_sched_record_prev = &vendor_info_prev->sched_record;

	struct switch_record *srecord;

	u64 sched_threshold_;

	/* A per-task threshold, when set, overrides the per-cpu one. */
	if (vendor_info->sched_threshold != 0) {
		sched_threshold_ = vendor_info->sched_threshold;
	} else {
		sched_threshold_ = cpu_stack_trace->sched_threshold;
	}

	/* prev is being scheduled out: 'now' becomes its wakeup baseline. */
	task_sched_record_prev->wakeup_at = now;
	task_sched_record_prev->is_used = false;
	task_sched_record_prev->type = TYPE_SWITCH;
	task_sched_record_prev->prev_state = task_state_index(prev);

	/* Replay next's still-pending wakeup/migrate event into the ring first. */
	if (!(task_sched_record->is_used) && (task_sched_record->type != 0)) {
		srecord = fetch_srecord_add(cpu_sched_record);

		srecord->type = task_sched_record->type;
		memcpy(srecord->next_comm, next->comm, TASK_COMM_LEN);
		srecord->next_pid = next->pid;
		srecord->next_prio = next->prio;
		srecord->next_tgid = next->tgid;
		srecord->timestamp = task_sched_record->wakeup_at;
		srecord->cpu = task_sched_record->wakeup_cpu;
		srecord->dest_cpu = task_sched_record->cpu;
		srecord->prev_pid = task_sched_record->wakeup_pid;
		memcpy(srecord->prev_comm, task_sched_record->wakeup_comm, TASK_COMM_LEN);
	}

	/* Log this context switch itself. */
	srecord = fetch_srecord_add(cpu_sched_record);

	srecord->cpu = smp_processor_id();
	srecord->type = TYPE_SWITCH;
	memcpy(srecord->prev_comm, prev->comm, TASK_COMM_LEN);
	srecord->prev_pid = prev->pid;
	srecord->prev_prio = prev->prio;
	srecord->prev_tgid = prev->tgid;
	memcpy(srecord->next_comm, next->comm, TASK_COMM_LEN);
	srecord->next_pid = next->pid;
	srecord->next_prio = next->prio;
	srecord->next_tgid = next->tgid;
	srecord->timestamp = now;
	srecord->prev_state = task_state_index(prev);
	if (dfx_perf_sched_runtime_save)
		srecord->runtime = task_sched_record_prev->runtime;

	cpu_sched_record->cur_switch = srecord;

	/* Only wakeups newer than the last enable and not yet consumed count. */
	if ((task_sched_record->wakeup_at > sched_record_start_time) && (!task_sched_record->is_used) &&
			(task_sched_record->prev_state == TASK_RUNNING)) {
		u64 delay = now - task_sched_record->wakeup_at;
		task_sched_record->is_used = true;
		/* Incremental running average over the task's switch count. */
		if (delay > task_sched_record->avg_delay) {
			task_sched_record->avg_delay = task_sched_record->avg_delay +
				(delay - task_sched_record->avg_delay) / (1 + task_sched_record->switchs);
		} else {
			task_sched_record->avg_delay = task_sched_record->avg_delay -
				(task_sched_record->avg_delay - delay) / (1 + task_sched_record->switchs);
		}
		task_sched_record->switchs ++;
		if ((sched_threshold_ > 0) && (delay > sched_threshold_) && (next->pid != 0)) {
			/* Rotate to a fresh working record; this one is complete. */
			cpu_stack_trace->nr_records = (cpu_stack_trace->nr_records + 1) % max_trace_sched_records;
			init_sched_record(&cpu_stack_trace->records[cpu_stack_trace->nr_records]);
			if ((!cpu_stack_trace->is_full) && (cpu_stack_trace->nr_records == 0)) {
				cpu_stack_trace->is_full = true;
			}
			smp_wmb();

			cpu_sched_record->cpu_id = smp_processor_id();
			cpu_sched_record->delay = delay;
			cpu_sched_record->avg_delay = task_sched_record->avg_delay;
			cpu_sched_record->scount = task_sched_record->switchs;
			if (dfx_perf_sched_stack_save) {
				starck_trace_save(cpu_sched_record, next);
			}
			collect_cpu_stat(next, &cpu_sched_record->b_info);
			put_one_record_into_kfifo(cpu_sched_record, &cpu_stack_trace->sched_data_fifo);
		}
	}
}

/* Shared body of the wakeup probes: stamp the task's per-task record with
 * the wakeup time, waker identity and target CPU. */
static void probe_sched_wakeup_template(void *priv, struct task_struct *p, u8 type)
{
	struct vendor_task *vtask = task_to_vendor_task(p);
	struct task_sched_record *rec = &vtask->sched_record;
	u64 ts = local_clock();

	/* A brand-new task starts with clean delay statistics. */
	if (type == TYPE_WAKEUP_NEW) {
		rec->switchs = 0;
		rec->runtime = 0;
		rec->avg_delay = 0;
	}

	rec->wakeup_at = ts;
	rec->is_used = false;
	rec->cpu = task_cpu(p);
	rec->type = type;
	rec->wakeup_pid = current->pid;
	rec->wakeup_cpu = smp_processor_id();
	memcpy(rec->wakeup_comm, current->comm, TASK_COMM_LEN);
}

/* Wakeup tracepoint thunks sharing probe_sched_wakeup_template. */
static void probe_sched_wakeup(void *priv, struct task_struct *p)
{
	probe_sched_wakeup_template(priv, p, TYPE_WAKEUP);
}

static void probe_sched_wakeup_new(void *priv, struct task_struct *p)
{
	probe_sched_wakeup_template(priv, p, TYPE_WAKEUP_NEW);
}

/*
 * sched_migrate_task tracepoint probe: logs the migration into the switch
 * ring and, if the migrating task has an unconsumed wakeup whose delay
 * already exceeds the threshold, finalizes the working record here (the
 * task may never reach sched_switch on this CPU).
 */
static void probe_sched_migrate_task(void *priv, struct task_struct *p, int dest_cpu)
{
	u64 now = local_clock();
	int running_cpu = task_cpu(p);
	struct per_cpu_stack_trace __percpu *stack_trace = priv;
	struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace);

	unsigned int cpu_nr_records = cpu_stack_trace->nr_records;
	struct sched_record_t *cpu_sched_record = &cpu_stack_trace->records[cpu_nr_records];

	struct vendor_task *vendor_info = task_to_vendor_task(p);
	struct task_sched_record *task_sched_record = &vendor_info->sched_record;

	struct switch_record *srecord;

	u64 sched_threshold_;

	/* A per-task threshold, when set, overrides the per-cpu one. */
	if (vendor_info->sched_threshold != 0) {
		sched_threshold_ = vendor_info->sched_threshold;
	} else {
		sched_threshold_ = cpu_stack_trace->sched_threshold;
	}

	srecord = fetch_srecord_add(cpu_sched_record);

	srecord->type = TYPE_MIGRATE;
	memcpy(srecord->next_comm, p->comm, TASK_COMM_LEN);
	srecord->next_pid = p->pid;
	srecord->next_prio = p->prio;
	srecord->next_tgid = p->tgid;
	srecord->timestamp = now;
	srecord->cpu = smp_processor_id();
	srecord->origin_cpu = running_cpu;
	srecord->dest_cpu = dest_cpu;
	srecord->prev_pid = current->pid;
	memcpy(srecord->prev_comm, current->comm, TASK_COMM_LEN);

	cpu_sched_record->cur_switch = srecord;

	/* Only wakeups newer than the last enable and not yet consumed count. */
	if ((task_sched_record->wakeup_at > sched_record_start_time) && (!task_sched_record->is_used) &&
			(task_sched_record->prev_state == TASK_RUNNING)) {
		u64 delay = now - task_sched_record->wakeup_at;
		if ((sched_threshold_ > 0) && (delay > sched_threshold_) && (p->pid != 0)) {
			/* Rotate to a fresh working record; this one is complete. */
			cpu_stack_trace->nr_records = (cpu_stack_trace->nr_records + 1) % max_trace_sched_records;
			init_sched_record(&cpu_stack_trace->records[cpu_stack_trace->nr_records]);
			if ((!cpu_stack_trace->is_full) && (cpu_stack_trace->nr_records == 0)) {
				cpu_stack_trace->is_full = true;
			}
			smp_wmb();

			cpu_sched_record->cpu_id = smp_processor_id();
			cpu_sched_record->delay = delay;
			cpu_sched_record->avg_delay = task_sched_record->avg_delay;
			cpu_sched_record->scount = task_sched_record->switchs;
			if (dfx_perf_sched_stack_save) {
				starck_trace_save(cpu_sched_record, p);
			}
			collect_cpu_stat(p, &cpu_sched_record->b_info);
			put_one_record_into_kfifo(cpu_sched_record, &cpu_stack_trace->sched_data_fifo);
		}
	}
}

/* sched_stat_runtime probe: accumulate the task's runtime while runtime
 * saving is enabled, otherwise keep the counter zeroed. */
static void probe_sched_stat_runtime(void *priv, struct task_struct *p, u64 runtime, u64 vruntime)
{
	struct task_sched_record *rec = &task_to_vendor_task(p)->sched_record;

	if (!dfx_perf_sched_runtime_save) {
		rec->runtime = 0;
		return;
	}

	rec->runtime += runtime;
}

/* Fill the common fields of an irq/softirq switch_record; the action name is
 * truncated to the comm-sized field and always NUL terminated. */
static inline void fill_irq_srecord(struct switch_record *srecord, u8 type, u64 now, const char *action_name)
{
	size_t name_len;

	if (action_name == NULL)
		return;

	name_len = min(strlen(action_name) + 1, (size_t)TASK_COMM_LEN);

	srecord->type = type;
	srecord->timestamp = now;
	srecord->cpu = smp_processor_id();
	srecord->prev_pid = current->pid;
	memcpy(srecord->prev_comm, current->comm, TASK_COMM_LEN);
	memcpy(srecord->action_name, action_name, name_len);
	srecord->action_name[name_len - 1] = '\0';
}

/*
 * Return the next free slot of the current CPU's working record, or NULL
 * when the per-cpu ring buffers have not been allocated yet.
 */
static inline struct switch_record *get_srecord(void *priv)
{
	struct per_cpu_stack_trace __percpu *stack_trace = priv;
	struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace);
	struct sched_record_t *cpu_sched_record;

	/*
	 * Guard against probes firing before the buffers exist: the old check
	 * tested &records[nr_records], which is non-NULL for any non-zero
	 * index even when records itself is NULL.  Test the array pointer.
	 */
	if (cpu_stack_trace->records == NULL)
		return NULL;

	cpu_sched_record = &cpu_stack_trace->records[cpu_stack_trace->nr_records];
	cpu_sched_record->flag = DFX_SCHED_FLAG_DEFAULT;

	return fetch_srecord_add(cpu_sched_record);
}

/* Shared body of the softirq probes: record the event only for vectors
 * enabled in irq_mask. */
static void probe_softirq_template(void *priv, unsigned int vec_nr, u8 type)
{
	u64 now;
	struct switch_record *srecord;

	if (!((1U << vec_nr) & irq_mask))
		return;

	now = local_clock();
	srecord = get_srecord(priv);
	if (srecord == NULL)
		return;

	srecord->vec_nr = vec_nr;
	fill_irq_srecord(srecord, type, now, softirq_to_name[vec_nr]);
}

/* softirq entry/exit tracepoint thunks sharing probe_softirq_template. */
static void probe_softirq_entry(void *priv, unsigned int vec_nr)
{
	probe_softirq_template(priv, vec_nr, TYPE_SOFTIRQ_ENTRY);
}
static void probe_softirq_exit(void *priv, unsigned int vec_nr)
{
	probe_softirq_template(priv, vec_nr, TYPE_SOFTIRQ_EXIT);
}

/* irq_handler_entry probe: record hardware irq entry when hwirq tracing is
 * enabled; arch_timer interrupts need their own mask bit. */
static void probe_irq_handler_entry(void *priv, int irq, struct irqaction *action)
{
	u64 now;
	struct switch_record *srecord;

	if (!(HW_IRQ_TRACE_MASK_BIT & irq_mask))
		return;
	if (!(ARCH_TIMER_MASK_BIT & irq_mask) && strcmp(action->name, "arch_timer") == 0)
		return;

	now = local_clock();
	srecord = get_srecord(priv);
	if (srecord == NULL)
		return;

	srecord->irq = irq;
	fill_irq_srecord(srecord, TYPE_HANDLER_ENTRY, now, action->name);
}

/* irq_handler_exit probe: same filtering as the entry probe, additionally
 * storing the handler's return value. */
static void probe_irq_handler_exit(void *priv, int irq, struct irqaction *action, int ret)
{
	u64 now;
	struct switch_record *srecord;

	if (!(HW_IRQ_TRACE_MASK_BIT & irq_mask))
		return;
	if (!(ARCH_TIMER_MASK_BIT & irq_mask) && strcmp(action->name, "arch_timer") == 0)
		return;

	now = local_clock();
	srecord = get_srecord(priv);
	if (srecord == NULL)
		return;

	srecord->irq = irq;
	srecord->irq_ret = ret;
	fill_irq_srecord(srecord, TYPE_HANDLER_EXIT, now, action->name);
}

/* Tracepoint/probe table.
 * NOTE(review): the .tp members are left NULL here and are presumably
 * resolved elsewhere before trace_nosched_register_tp() runs - confirm
 * the lookup path that fills tp_entries[i].tp. */
static struct dfx_perf_sched_info perf_sched_info = {
	.tp_entries = {
		[0] = {
			.name	= "sched_switch",
			.probe	= probe_sched_switch,
		},
		[1] = {
			.name	= "sched_wakeup",
			.probe	= probe_sched_wakeup,
		},
		[2] = {
			.name	= "sched_wakeup_new",
			.probe	= probe_sched_wakeup_new,
		},
		[3] = {
			.name	= "sched_migrate_task",
			.probe	= probe_sched_migrate_task,
		},
		[4] = {
			.name   = "softirq_entry",
			.probe  = probe_softirq_entry,
		},
		[5] = {
			.name   = "softirq_exit",
			.probe  = probe_softirq_exit,
		},
		[6] = {
			.name   = "irq_handler_entry",
			.probe  = probe_irq_handler_entry,
		},
		[7] = {
			.name   = "irq_handler_exit",
			.probe  = probe_irq_handler_exit,
		},
		[8] = {
			.name	= "sched_stat_runtime",
			.probe	= probe_sched_stat_runtime,
		},

	},
	.tp_initalized = 0,
};

static int init_percpu_ringbuffer(void)
{
	int cpu;
	int i;
	int ret;
	struct dfx_perf_sched_info *info = &perf_sched_info;
	struct per_cpu_stack_trace __percpu *stack_trace = info->stack_trace;

	pr_info("[perf-sched] init percpu ringbuffers record size: %u(one for working record), switch size: %u\n",
			max_trace_sched_records, max_trace_switch_records);

	for_each_online_cpu(cpu) {
		struct per_cpu_stack_trace *cpu_stack_trace = per_cpu_ptr(stack_trace, cpu);
		cpu_stack_trace->nr_records = 0;
		cpu_stack_trace->records =
			(struct sched_record_t *)kzalloc(sizeof(struct sched_record_t) * max_trace_sched_records, GFP_KERNEL);
		if (cpu_stack_trace->records == NULL)
			return -ENOMEM;

		for (i = 0; i < max_trace_sched_records; ++i) {
			cpu_stack_trace->records[i].flag = DFX_SCHED_FLAG_DEFAULT;
			cpu_stack_trace->records[i].e2e_id = 0;
			atomic64_set(&cpu_stack_trace->records[i].nr_switchs, 0);
			cpu_stack_trace->records[i].switchs =
				(struct switch_record *)kzalloc(sizeof(struct switch_record) * max_trace_switch_records, GFP_KERNEL);
			if (cpu_stack_trace->records[i].switchs == NULL)
				return -ENOMEM;
		}

		ret = kfifo_alloc(&cpu_stack_trace->sched_data_fifo, sizeof(struct sched_record_t *) *
				rounddown_pow_of_two(max_trace_sched_records), GFP_KERNEL);
		if (ret) {
			return -ENOMEM;
		}
	}
	return 0;
}

static void free_percpu_ringbuffer(void)
{
	int cpu;
	int i;
	struct dfx_perf_sched_info *info = &perf_sched_info;
	struct per_cpu_stack_trace __percpu *stack_trace = info->stack_trace;

	for_each_online_cpu(cpu) {
		struct per_cpu_stack_trace *cpu_stack_trace = per_cpu_ptr(stack_trace, cpu);

		for (i = 0; i < max_trace_sched_records; ++i) {
			if (cpu_stack_trace->records[i].switchs) {
				kfree(cpu_stack_trace->records[i].switchs);
				cpu_stack_trace->records[i].switchs = NULL;
			}
		}

		if (cpu_stack_trace->records) {
			kfree(cpu_stack_trace->records);
			cpu_stack_trace->records = NULL;
		}

		kfifo_free(&cpu_stack_trace->sched_data_fifo);
	}
}

/* Report the current control status bitmask (enum dfx_perf_sched_status_t). */
static int sched_ctrl_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%x\n", dfx_perf_sched_status);
	return 0;
}

/*
 * Attach every probe in tp_entries to its tracepoint.  On the first real
 * failure all previously registered probes are rolled back so the tracer
 * is never left half-attached (-EEXIST is tolerated as already-attached).
 * NOTE(review): entry->tp must be resolved before this runs - confirm the
 * code path that fills tp_entries[i].tp.
 */
static int trace_nosched_register_tp(void)
{
	int i;
	struct dfx_perf_sched_info *info = &perf_sched_info;

	for (i = 0; i < ARRAY_SIZE(info->tp_entries); i++) {
		int ret;
		struct tracepoint_entry *entry = info->tp_entries + i;

		ret = tracepoint_probe_register(entry->tp, entry->probe, info->stack_trace);
		if (ret && ret != -EEXIST) {
			pr_err("[perf-sched] sched trace: can not activate tracepoint "
			       "probe to %s with error code: %d\n", entry->name, ret);
			/* unwind the probes registered so far */
			while (i--) {
				entry = info->tp_entries + i;
				tracepoint_probe_unregister(entry->tp, entry->probe, info->stack_trace);
			}
			return ret;
		}
	}

	return 0;
}

static int trace_nosched_unregister_tp(void)
{
	int i;
	struct dfx_perf_sched_info *info = &perf_sched_info;

	for (i = 0; i < ARRAY_SIZE(info->tp_entries); i++) {
		int ret;

		ret = tracepoint_probe_unregister(info->tp_entries[i].tp,
				info->tp_entries[i].probe, info->stack_trace);
		if (ret && ret != -ENOENT) {
			pr_err("[perf-sched] sched trace: can not inactivate tracepoint "
			       "probe to %s with error code: %d\n", info->tp_entries[i].name, ret);
			return ret;
		}
	}

	return 0;
}

static void stack_trace_clear(struct per_cpu_stack_trace *cpu_stack_trace)
{
	int i;

	cpu_stack_trace->nr_records = 0;
	cpu_stack_trace->is_full = false;

	for (i = 0; i < max_trace_sched_records; ++i) {
		if (cpu_stack_trace->records) {
			init_sched_record(&cpu_stack_trace->records[i]);
		}
	}
}

/* on_each_cpu() callback: reset the calling CPU's ring state. */
static void each_stack_trace_clear(void *priv)
{
	struct per_cpu_stack_trace __percpu *stack_trace = priv;
	struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace);

	stack_trace_clear(cpu_stack_trace);
}

/*
 * Write handler for the sched_ctrl file.  Parses a status bitmask (see
 * enum dfx_perf_sched_status_t) and enables/disables/cleans the tracer.
 * Buffer lifecycle and tracepoint (un)registration are serialized by
 * dfx_perf_sched_lock.
 */
static ssize_t sched_ctrl_store(void *priv, const char __user *buf, size_t count)
{
	char cmd[DFX_PERF_CMD_SIZE] = {0};
	u32 stat;
	int ret = -EFAULT;

	/* '>=' keeps one byte free so cmd stays NUL terminated for kstrtouint
	 * (the old '>' allowed count == DFX_PERF_CMD_SIZE, overwriting the
	 * terminator and letting kstrtouint read past the buffer). */
	if (count >= DFX_PERF_CMD_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_from_user(cmd, buf, count))
		goto out;

	ret = kstrtouint(cmd, 0, &stat);
	if (ret)
		goto out;

	if (stat > SCHED_MAX_STATUS) {
		ret = -EINVAL;
		goto out;
	}

	ret = count;

	mutex_lock(&dfx_perf_sched_lock);
	if (stat == SCHED_DISABLE) {
		/* tear down only if tracing was actually on */
		if (dfx_perf_sched_status & SCHED_ENABLE) {
			trace_nosched_unregister_tp();
			on_each_cpu(each_stack_trace_clear, priv, true);
			free_percpu_ringbuffer();
		}
		dfx_perf_sched_status = SCHED_DISABLE;
		goto out_unlock;
	}

	if ((stat & SCHED_CLEAN) && (dfx_perf_sched_status & SCHED_ENABLE))
		on_each_cpu(each_stack_trace_clear, priv, true);

	if ((stat & SCHED_ENABLE) && !(dfx_perf_sched_status & SCHED_ENABLE)) {
		int err = init_percpu_ringbuffer();

		if (err == 0) {
			sched_record_start_time = local_clock();
			/* a failed registration must not leave tracing half-on */
			err = trace_nosched_register_tp();
		}
		if (err != 0) {
			on_each_cpu(each_stack_trace_clear, priv, true);
			free_percpu_ringbuffer();
			dfx_perf_sched_status = SCHED_DISABLE;
			ret = err;
			goto out_unlock;
		}
	}

	dfx_perf_sched_stack_save = (stat & SCHED_STACK_SAVE) ? true : false;
	dfx_perf_sched_runtime_save = (stat & SCHED_RUNTIME_SAVE) ? true : false;
	/* keep the enable bit sticky while tracing remains active */
	if (dfx_perf_sched_status & SCHED_ENABLE)
		dfx_perf_sched_status = stat | 0x1;
	else
		dfx_perf_sched_status = stat;

out_unlock:
	mutex_unlock(&dfx_perf_sched_lock);
out:
	return ret;
}

DEFINE_PROC_ATTRIBUTE_RW(sched_ctrl);

/* Print every online CPU's wakeup-delay threshold on one line. */
static int sched_threshold_show(struct seq_file *m, void *v)
{
	int cpu;
	struct per_cpu_stack_trace *cpu_stack_trace;

	for_each_online_cpu(cpu) {
		cpu_stack_trace = per_cpu_ptr(perf_sched_info.stack_trace, cpu);
		seq_printf(m, "%llu ", cpu_stack_trace->sched_threshold);
	}
	seq_printf(m, "\n");
	return 0;
}

/* Install a new threshold on one CPU and drop records gathered under the
 * old one, so every record reflects a single threshold. */
static void sched_threshold_set_per_cpu(int cpu, u64 thres, void *priv)
{
	struct per_cpu_stack_trace __percpu *stack_trace = priv;
	struct per_cpu_stack_trace *target = per_cpu_ptr(stack_trace, cpu);

	target->sched_threshold = thres;
	stack_trace_clear(target);
}

/*
 * Write handler for per-cpu thresholds.  Accepts "<threshold>" (applied to
 * all online CPUs) or "<cpu>:<threshold>".  Non-zero thresholds below
 * min_schedtime_threshold are rejected.
 */
static ssize_t sched_threshold_store(void *priv, const char __user *buf, size_t count)
{
	char *cmd, *cmd_free, *token = NULL;
	u64 user_param[2];
	int ret = -EFAULT;

	/* bound the user-controlled allocation to a sane command size */
	if (count == 0 || count > DFX_PERF_CMD_SIZE)
		return -EINVAL;

	mutex_lock(&dfx_perf_sched_lock);
	cmd = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd) {
		mutex_unlock(&dfx_perf_sched_lock);
		return -ENOMEM;
	}
	cmd_free = cmd;

	if (copy_from_user(cmd, buf, count))
		goto out;

	token = strsep(&cmd, ":");
	if (!token) {
		ret = -EINVAL;
		goto out;
	}

	ret = kstrtou64(token, 0, &(user_param[0]));
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	if (cmd == NULL) {
		/* no ':' separator: one threshold for every online CPU */
		int cpu;

		if (user_param[0] != 0 && user_param[0] < min_schedtime_threshold) {
			ret = -EINVAL;
			goto out;
		}

		for_each_online_cpu(cpu) {
			sched_threshold_set_per_cpu(cpu, user_param[0], priv);
		}
	} else {
		token = strsep(&cmd, ":");
		if (!token || cmd != NULL) {
			ret = -EINVAL;
			goto out;
		}

		ret = kstrtou64(token, 0, &(user_param[1]));
		if (ret) {
			ret = -EINVAL;
			goto out;
		}

		/* validate the CPU id range before probing the online mask */
		if (user_param[0] >= nr_cpu_ids || !cpu_online(user_param[0])) {
			ret = -EINVAL;
			goto out;
		}

		if (user_param[1] != 0 && user_param[1] < min_schedtime_threshold) {
			ret = -EINVAL;
			goto out;
		}

		sched_threshold_set_per_cpu(user_param[0], user_param[1], priv);
	}

	ret = count;
out:
	kfree(cmd_free);
	mutex_unlock(&dfx_perf_sched_lock);
	return ret;
}
DEFINE_PROC_ATTRIBUTE_RW(sched_threshold);

/* Report the global lower bound applied to user-supplied thresholds. */
static int sched_min_threshold_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%llu\n", min_schedtime_threshold);
	return 0;
}

/*
 * Write handler for the global minimum-threshold value used to validate
 * sched_threshold writes.
 */
static ssize_t sched_min_threshold_store(void *priv, const char __user *buf, size_t count)
{
	char *cmd;
	u64 thres;
	int ret = -EFAULT;

	/* bound the user-controlled allocation to a sane command size */
	if (count == 0 || count > DFX_PERF_CMD_SIZE)
		return -EINVAL;

	cmd = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	if (copy_from_user(cmd, buf, count))
		goto out;

	ret = kstrtou64(cmd, 0, &thres);
	if (ret)
		goto out;

	min_schedtime_threshold = thres;
	ret = count;
out:
	kfree(cmd);
	return ret;
}
DEFINE_PROC_ATTRIBUTE_RW(sched_min_threshold);

/* Report configured ring sizes; the extra working record is hidden from
 * the user (it was added transparently by the store handler). */
static int sched_record_size_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%u,%u\n", max_trace_sched_records - 1, max_trace_switch_records);
	return 0;
}

/*
 * Write handler for "records,switches" ring sizes.  Only allowed while
 * tracing is disabled; the new sizes take effect on the next enable.
 */
static ssize_t sched_record_size_store(void *priv, const char __user *buf, size_t count)
{
	char *token, *o_free, *o = NULL;
	int i;
	int user_param[2];
	int ret = -EINVAL;

	mutex_lock(&dfx_perf_sched_lock);
	if (dfx_perf_sched_status & SCHED_ENABLE) {
		ret = -EBUSY;
		goto out;
	}

	/* bound the user-controlled allocation to a sane command size */
	if (count == 0 || count > DFX_PERF_CMD_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	o = kzalloc(count + 1, GFP_KERNEL);
	if (!o) {
		ret = -ENOMEM;
		goto out;
	}
	o_free = o;

	if (copy_from_user(o, buf, count)) {
		ret = -EFAULT;
		goto freeout;
	}

	for (i = 0; i < 2; i++) {
		token = strsep(&o, ",");
		if (!token) {
			ret = -EINVAL;
			goto freeout;
		}

		ret = kstrtoint(token, 10, &(user_param[i]));
		if (ret)
			goto freeout;
	}

	/* reject trailing garbage after the second number */
	if (o != NULL) {
		ret = -EINVAL;
		goto freeout;
	}

	if ((user_param[0] <= 0) || (user_param[0] > MAX_TRACE_SCHED_RECORDS)
		|| (user_param[1] <= 0) || (user_param[1] > MAX_TRACE_SWITCH_RECORDS)) {
		ret = -EINVAL;
		goto freeout;
	}

	/* one extra slot for the always-in-progress working record */
	max_trace_sched_records = user_param[0] + 1;
	max_trace_switch_records = user_param[1];
	record_size_changed = 1;
	ret = count;

freeout:
	kfree(o_free);
out:
	mutex_unlock(&dfx_perf_sched_lock);
	return ret;
}
DEFINE_PROC_ATTRIBUTE_RW(sched_record_size);

/* Route formatted output either to the seq_file (procfs read) or, when no
 * seq_file is supplied, to the kernel log. */
static void perf_print(struct seq_file *m, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (m)
		seq_vprintf(m, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

/*
 * Render one switch_record as trace-style text.  The output format mirrors
 * the corresponding ftrace events (sched_switch, sched_wakeup, ...).
 */
static void print_trace(struct seq_file *m, struct switch_record *srecord)
{
	switch (srecord->type) {
		case TYPE_SWITCH:
			/* runtime is only captured when runtime saving is enabled */
			if (dfx_perf_sched_runtime_save)
				perf_print(m, "sched_switch: prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%c runtime=%llu "
						"==> next_comm=%s next_pid=%d next_prio=%d {tgid: %d ==> %d}\n",
						srecord->prev_comm, srecord->prev_pid,
						srecord->prev_prio, task_index_to_char(srecord->prev_state), srecord->runtime, srecord->next_comm,
						srecord->next_pid, srecord->next_prio, srecord->prev_tgid, srecord->next_tgid);
			else
				perf_print(m, "sched_switch: prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%c "
						"==> next_comm=%s next_pid=%d next_prio=%d {tgid: %d ==> %d}\n",
						srecord->prev_comm, srecord->prev_pid,
						srecord->prev_prio, task_index_to_char(srecord->prev_state), srecord->next_comm,
						srecord->next_pid, srecord->next_prio, srecord->prev_tgid, srecord->next_tgid);
			break;
		case TYPE_WAKEUP:
			perf_print(m, "sched_wakeup: comm=%s pid=%d prio=%d target_cpu=%03d {tgid: %d}\n",
					srecord->next_comm, srecord->next_pid,
					srecord->next_prio, srecord->dest_cpu, srecord->next_tgid);
			break;
		case TYPE_WAKEUP_NEW:
			perf_print(m, "sched_wakeup_new: comm=%s pid=%d prio=%d target_cpu=%03d {tgid: %d}\n",
					srecord->next_comm, srecord->next_pid,
					srecord->next_prio, srecord->dest_cpu, srecord->next_tgid);
			break;
		case TYPE_MIGRATE:
			perf_print(m, "sched_migrate_task: comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d {tgid: %d}\n",
					srecord->next_comm, srecord->next_pid,
					srecord->next_prio, srecord->origin_cpu, srecord->dest_cpu, srecord->next_tgid);
			break;
		case TYPE_SOFTIRQ_ENTRY:
			perf_print(m, "softirq_entry: vec=%u [action=%s]\n", srecord->vec_nr, srecord->action_name);
			break;
		case TYPE_SOFTIRQ_EXIT:
			perf_print(m, "softirq_exit: vec=%u [action=%s]\n", srecord->vec_nr, srecord->action_name);
			break;
		case TYPE_HANDLER_ENTRY:
			perf_print(m, "irq_handler_entry: irq=%d name=%s\n", srecord->irq, srecord->action_name);
			break;
		case TYPE_HANDLER_EXIT:
			perf_print(m, "irq_handler_exit: irq=%d ret=%s\n", srecord->irq, srecord->irq_ret ? "handled" : "unhandled");
			break;
		default:
			/* unknown record type: emit nothing */
			return;
	};
}

/* Callback invoked for each completed record when dumping a view. */
typedef void (*sched_show_func_t)(struct seq_file *m, int cpu, unsigned int record_idx, struct sched_record_t *cpu_sched_record);

/*
 * Shared iteration for every show handler: walk each online CPU's record
 * ring in chronological order (skipping the in-progress working record once
 * the ring has wrapped) and pass every completed record to @func.
 */
static int sched_show_template(struct seq_file *m, void *v, sched_show_func_t func, bool print_record_n)
{
	int cpu;
	int ret = 0;
	struct per_cpu_stack_trace __percpu *stack_trace = m->private;

	mutex_lock(&dfx_perf_sched_lock);

	if (!(dfx_perf_sched_status & SCHED_ENABLE))
		goto out;

	for_each_online_cpu(cpu) {
		unsigned int i;
		struct per_cpu_stack_trace *cpu_stack_trace = per_cpu_ptr(stack_trace, cpu);

		unsigned int record_n, start;
		/* when record is full, dont print the working record */
		if (cpu_stack_trace->is_full) {
			record_n = max_trace_sched_records - 1;
			start = cpu_stack_trace->nr_records + 1;
		} else {
			record_n = cpu_stack_trace->nr_records;
			start = 0;
		}

		if (print_record_n)
			seq_printf(m, "# CPU:%d,	%u records found\n", cpu, record_n);

		for (i = 0; i < record_n; ++i) {
			struct sched_record_t *cpu_sched_record;
			unsigned int cur = (start + i) % max_trace_sched_records;
			cpu_sched_record = &cpu_stack_trace->records[cur];

			/* a record with no switch slot was never completed */
			if (cpu_sched_record->cur_switch == NULL) {
				ret = -EFAULT;
				pr_err("cur_switch is NULL\n");
				goto out;
			}

			func(m, cpu, i, cpu_sched_record);
		}
	}
out:
	mutex_unlock(&dfx_perf_sched_lock);
	return ret;
}

/*
 * Emit one record for the sched_log view: a header with the delayed task's
 * stats, its CFS bandwidth snapshot, then the switch ring oldest-first.
 */
static void sched_log_show_func(struct seq_file *m, int cpu, unsigned int record_idx, struct sched_record_t *cpu_sched_record)
{
	int j;
	/*
	 * Snapshot the switch counter once so the whole ring is walked from a
	 * single consistent offset; the old code re-read the atomic on every
	 * iteration, so a concurrently-bumped counter could skip/duplicate
	 * slots mid-walk.
	 */
	unsigned long nr_switchs = atomic64_read(&cpu_sched_record->nr_switchs);

	seq_printf(m, "# CPU:%d    RECORD:%d    comm:%s[%d]{tgid[%d]}    delay=%llu    delay_at:%llu    avg_delay:%llu    switchs:%u\n",
			cpu, record_idx, cpu_sched_record->cur_switch->next_comm, cpu_sched_record->cur_switch->next_pid,
			cpu_sched_record->cur_switch->next_tgid, cpu_sched_record->delay, cpu_sched_record->cur_switch->timestamp,
			cpu_sched_record->avg_delay, cpu_sched_record->scount);
	seq_printf(m, "# nr_periods:%d, nr_throttled:%d, throttled_time:%llu\n",
			cpu_sched_record->b_info.nr_periods, cpu_sched_record->b_info.nr_throttled, cpu_sched_record->b_info.throttled_time);
	for (j = 0; j < max_trace_switch_records; ++j) {
		unsigned int cur = (nr_switchs + j) % max_trace_switch_records;
		struct switch_record *srecord = &cpu_sched_record->switchs[cur];

		/* a zero timestamp marks a slot that was never written */
		if (srecord->timestamp != 0) {
			unsigned long secs = srecord->timestamp / NSEC_PER_SEC;
			unsigned long usec = (srecord->timestamp % NSEC_PER_SEC) / NSEC_PER_USEC;

			seq_printf(m, " %15s-%-8d[%03d] %lu.%06lu: ", srecord->prev_comm, srecord->prev_pid, srecord->cpu, secs, usec);
			print_trace(m, srecord);
		}
	}
}

/* seq_file show for the full log view: dump rings with per-record headers. */
static int sched_log_show(struct seq_file *m, void *v)
{
	return sched_show_template(m, v, sched_log_show_func, true);
}
DEFINE_PROC_ATTRIBUTE_RO(sched_log);

/*
 * Emit one summary line for the table view, resolving the record's tgid to
 * the thread-group leader's comm.
 */
static void sched_table_show_func(struct seq_file *m, int cpu, unsigned int record_idx, struct sched_record_t *cpu_sched_record)
{
	struct pid *kpid;
	struct task_struct *task;

	kpid = find_get_pid(cpu_sched_record->cur_switch->next_tgid);

	/*
	 * The pid reference does not pin the task_struct: pid_task() and any
	 * use of the returned task are only safe under RCU.
	 */
	rcu_read_lock();
	task = pid_task(kpid, PIDTYPE_PID);
	if (task)
		seq_printf(m, "%3d    %15s    %8d    %15s    %8d    %20llu    %20llu    %20llu    %u\n",
			cpu, cpu_sched_record->cur_switch->next_comm,
			cpu_sched_record->cur_switch->next_pid,
			task->comm, cpu_sched_record->cur_switch->next_tgid,
			cpu_sched_record->delay, cpu_sched_record->cur_switch->timestamp,
			cpu_sched_record->avg_delay, cpu_sched_record->scount);
	rcu_read_unlock();

	put_pid(kpid);
}

/* seq_file show handler for /proc .../sched_table: header row + records. */
static int sched_table_show(struct seq_file *m, void *v)
{
	/* Constant header string: seq_puts avoids format parsing (checkpatch idiom). */
	seq_puts(m, "CPU               comm         pid             tgcomm        tgid                   delay"
			"                delay_at               avg_delay    switchs\n");
	return sched_show_template(m, v, sched_table_show_func, false);
}
DEFINE_PROC_ATTRIBUTE_RO(sched_table);

/* Print the saved stack of @entry, one symbolized frame per line. */
static inline void seq_print_stack_trace(struct seq_file *m,
					 struct sched_record_t *entry)
{
	unsigned int idx = 0;

	while (idx < entry->nr_entries) {
		/* "%*c" with width 5 indents each frame by five columns. */
		seq_printf(m, "%*c%pS\n", 5, ' ', (void *)entry->entries[idx]);
		idx++;
	}
}

static void sched_stacks_show_func(struct seq_file *m, int cpu, unsigned int record_idx, struct sched_record_t *cpu_sched_record)
{
	seq_printf(m, "CPU:%d    RECORD:%d    COMM:%s[%d]{tgid[%d]}   DELAY:%llu    DELAY_AT:%llu\n",
			cpu, record_idx, cpu_sched_record->cur_switch->next_comm,
			cpu_sched_record->cur_switch->next_pid, cpu_sched_record->cur_switch->next_tgid,
			cpu_sched_record->delay, cpu_sched_record->cur_switch->timestamp);
	seq_print_stack_trace(m, cpu_sched_record);
}

/*
 * seq_file show handler for /proc .../sched_stacks; delegates to the
 * common template with sched_stacks_show_func as the per-record printer.
 */
static int sched_stacks_show(struct seq_file *m, void *v)
{
	return sched_show_template(m, v, sched_stacks_show_func, false);
}
DEFINE_PROC_ATTRIBUTE_RO(sched_stacks);

static bool is_has_data(void)
{
	int cpu;
	struct dfx_perf_sched_info *info = &perf_sched_info;
	struct per_cpu_stack_trace __percpu *stack_trace = info->stack_trace;

	for_each_online_cpu(cpu) {
		struct per_cpu_stack_trace *cpu_stack_trace = per_cpu_ptr(stack_trace, cpu);
		if (!kfifo_is_empty(&cpu_stack_trace->sched_data_fifo))
			return true;
	}

	return false;
}

/* open(): no per-open state needed; all state is global/per-CPU. */
static int sched_data_open(struct inode *inode, struct file *file)
{
	return 0;
}

/*
 * Poll handler for /proc .../sched_data.
 * Remembers the polling task's pid (used elsewhere as the dump consumer),
 * then reports POLLIN when any per-CPU FIFO has pending records.
 */
static unsigned int sched_data_poll(struct file *file, poll_table *wait)
{
	unsigned int events = 0;

	poll_task_pid = current->pid;
	poll_wait(file, &sched_data_waitq, wait);

	if (is_has_data())
		events = POLLIN;

	/*
	 * A record-size change invalidates the reader's buffer layout:
	 * raise POLLERR exactly once so userspace re-sizes its copy.
	 */
	if (record_size_changed == 1) {
		events = POLLERR;
		record_size_changed = 0;
	}

	return events;
}

/*
 * ioctl: toggle userspace trace dumping.  ENABLE records the caller's pid
 * as the dump consumer; DISABLE clears it.  Silently returns 0 while the
 * tracer itself is disabled.
 */
static long sched_data_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	mutex_lock(&dfx_perf_sched_lock);

	if (!(dfx_perf_sched_status & SCHED_ENABLE))
		goto out;

	switch (cmd) {
	case DFX_SCHED_IOCTL_ENABLE:
		pr_info("enable sched trace dumping\n");
		poll_task_pid = current->pid;
		break;
	case DFX_SCHED_IOCTL_DISABLE:
		pr_info("disable sched trace dumping\n");
		poll_task_pid = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	mutex_unlock(&dfx_perf_sched_lock);
	return ret;
}

/*
 * Read one queued sched record and copy it to userspace.
 *
 * Layout written to @userbuf:
 *   [struct sched_delay_info][struct switch_record * max_trace_switch_records]
 * Returns buf_size on success, a negative errno otherwise.
 *
 * NOTE(review): the kfifo entry is consumed and e2e_id cleared before the
 * record->flag validity check, so a stale record is dropped rather than
 * re-queued — presumably intentional; confirm with the producer side.
 */
static ssize_t sched_data_read(struct file * file, char __user * userbuf, size_t count, loff_t * off)
{
	int ret = -1;
	struct sched_delay_info delay_info;
	struct sched_record_t *cpu_sched_record = NULL;
	unsigned long nr_switchs;
	/* Exact size userspace must provide: header + the full switch ring. */
	unsigned long buf_size = sizeof(struct sched_delay_info) + sizeof(struct switch_record) * max_trace_switch_records;
	int cpu;
	struct dfx_perf_sched_info *info = &perf_sched_info;
	struct per_cpu_stack_trace __percpu *stack_trace = info->stack_trace;

	/* Cheap unlocked pre-check; the locked kfifo scan below re-verifies. */
	if (!is_has_data()) {
		ret = -EINVAL;
		pr_warn("kfifo is empty, skip it\n");
		return ret;
	}

	mutex_lock(&dfx_perf_sched_lock);

	if (!(dfx_perf_sched_status & SCHED_ENABLE)) {
		ret = -EAGAIN;
		pr_warn("lite perf is disable\n");
		goto out;
	}

	if(count < buf_size) {
		ret = -EINVAL;
		pr_err("sched data read error, buffer size small then expect\n");
		goto out;
	}

	/* Pop the first pending record pointer found across online CPUs. */
	for_each_online_cpu(cpu) {
		struct per_cpu_stack_trace *cpu_stack_trace = per_cpu_ptr(stack_trace, cpu);
		if (!kfifo_is_empty(&cpu_stack_trace->sched_data_fifo)) {
			/* kfifo_out() returns bytes copied; 0 means a race emptied the fifo. */
			ret = kfifo_out(&cpu_stack_trace->sched_data_fifo, &cpu_sched_record, sizeof(struct sched_record_t *));
			if (!ret) {
				ret = -EIO;
				pr_err("kfifo out failed\n");
				goto out;
			}
			break;
		}
	}

	if (IS_ERR_OR_NULL(cpu_sched_record) || (cpu_sched_record->switchs == NULL)) {
		ret = -EFAULT;
		pr_warn("record is not accessable, 0x%lx\n", (unsigned long)cpu_sched_record);
		goto out;
	}

	if (cpu_sched_record->cur_switch == NULL) {
		ret = -EFAULT;
		pr_err("cur_switch is NULL\n");
		goto out;
	}

	nr_switchs = atomic64_read(&cpu_sched_record->nr_switchs);

	/* Snapshot the record header into the userspace-facing struct. */
	delay_info.cpuid = cpu_sched_record->cpu_id;
	delay_info.pid = cpu_sched_record->cur_switch->next_pid;
	delay_info.tgid = cpu_sched_record->cur_switch->next_tgid;
	delay_info.delay = cpu_sched_record->delay;
	delay_info.avg_delay = cpu_sched_record->avg_delay;
	delay_info.timestamp = cpu_sched_record->cur_switch->timestamp;
	delay_info.scount = cpu_sched_record->scount;
	/* Ring write position, so userspace can order the switch records. */
	delay_info.nr_switchs = nr_switchs % max_trace_switch_records;
	memcpy(delay_info.comm, cpu_sched_record->cur_switch->next_comm, TASK_COMM_LEN);
	delay_info.b_info.nr_periods = cpu_sched_record->b_info.nr_periods;
	delay_info.b_info.nr_throttled = cpu_sched_record->b_info.nr_throttled;
	delay_info.b_info.throttled_time = cpu_sched_record->b_info.throttled_time;
	delay_info.e2e_id = cpu_sched_record->e2e_id;
	/* e2e_id is a consume-once token: clear it after snapshotting. */
	cpu_sched_record->e2e_id = 0;

	/* Flags below DFX_SCHED_FLAG_NORMAL mark a record already handed out. */
	if (cpu_sched_record->flag < DFX_SCHED_FLAG_NORMAL) {
		ret = -EAGAIN;
		pr_warn("record has already used, skip it\n");
		goto out;
	}

	/* NOTE(review): copy_to_user failure conventionally maps to -EFAULT, not -EPERM. */
	ret = copy_to_user(userbuf + sizeof(struct sched_delay_info), cpu_sched_record->switchs, sizeof(struct switch_record) * max_trace_switch_records);
	if (ret) {
		ret = -EPERM;
		pr_err("copy switchs error\n");
		goto out;
	}

	delay_info.flag = cpu_sched_record->flag;

	/* Header goes last so userspace sees a consistent flag value. */
	ret = copy_to_user(userbuf, &delay_info, sizeof(struct sched_delay_info));
	if (ret) {
		ret = -EPERM;
		pr_err("copy delay info error\n");
		goto out;
	}

	/* Mark the record reusable by the trace producer. */
	cpu_sched_record->flag = DFX_SCHED_FLAG_DEFAULT;
	ret = buf_size;
out:
	mutex_unlock(&dfx_perf_sched_lock);
	return ret;
}

/* release(): nothing to tear down; mirrors the stateless open(). */
static int sched_data_close(struct inode * inode, struct file * file)
{
	return 0;
}

/* proc_ops for /proc .../sched_data: read/poll/ioctl based record dumping. */
static const struct proc_ops sched_data_ops = {
	.proc_open	= sched_data_open,
	.proc_read	= sched_data_read,
	.proc_poll	= sched_data_poll,
	.proc_release	= sched_data_close,
	.proc_ioctl	= sched_data_ioctl,
};

/* True once tracepoint_lookup() has resolved every entry in tp_entries. */
static inline bool is_tracepoint_lookup_success(struct dfx_perf_sched_info *info)
{
	return info->tp_initalized == ARRAY_SIZE(info->tp_entries);
}

/*
 * for_each_kernel_tracepoint() callback: match this tracepoint's name
 * against the unresolved entries in dfx_perf_sched_info and cache the
 * tracepoint pointer.  Bails out early once all entries are resolved.
 */
static void __init tracepoint_lookup(struct tracepoint *tp, void *priv)
{
	struct dfx_perf_sched_info *info = priv;
	int idx;

	if (is_tracepoint_lookup_success(info))
		return;

	for (idx = 0; idx < ARRAY_SIZE(info->tp_entries); idx++) {
		/* Only fill entries that are named and not yet resolved. */
		if (!info->tp_entries[idx].tp && info->tp_entries[idx].name &&
		    !strcmp(tp->name, info->tp_entries[idx].name)) {
			info->tp_entries[idx].tp = tp;
			info->tp_initalized++;
		}
	}
}

/* irq_work callback: wake readers sleeping/polling on sched_data_waitq. */
static void wake_up_sched_fifo(struct irq_work *work)
{
	wake_up_interruptible(&sched_data_waitq);
}

/* Runs on every CPU at init time: seed the per-CPU delay threshold. */
static void __init each_init_stacktrace(void *priv)
{
	struct per_cpu_stack_trace __percpu *traces = priv;

	this_cpu_ptr(traces)->sched_threshold = DFX_PERF_DEFAULT_THRESHOLD;
}

int __init init_perf_sched(struct proc_dir_entry *perf_dir)
{
	struct dfx_perf_sched_info *info = &perf_sched_info;
	for_each_kernel_tracepoint(tracepoint_lookup, info);

	if (!is_tracepoint_lookup_success(info))
		return -ENODEV;
	info->stack_trace = alloc_percpu(struct per_cpu_stack_trace);
	if (!info->stack_trace)
		return -ENOMEM;
	init_irq_work(&sched_data_work, wake_up_sched_fifo);
	on_each_cpu(each_init_stacktrace, info->stack_trace, true);
	hash_init(dfx_perf_ht);
	if (!proc_create_data("sched_ctrl", 0600, perf_dir, &sched_ctrl_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_threshold", 0600, perf_dir, &sched_threshold_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_min_threshold", 0600, perf_dir, &sched_min_threshold_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_record_size", 0600, perf_dir, &sched_record_size_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_log", 0400, perf_dir, &sched_log_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_table", 0400, perf_dir, &sched_table_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_stacks", 0400, perf_dir, &sched_stacks_ops, info->stack_trace))
		goto free_buf;
	if (!proc_create_data("sched_data", 0400, perf_dir, &sched_data_ops, info->stack_trace))
		goto free_buf;
	return 0;
free_buf:
	free_percpu(info->stack_trace);
	return -ENOMEM;
}

/*
 * Module teardown: if tracing is still enabled, detach the tracepoints,
 * clear per-CPU trace state and release the ring buffers, then free the
 * per-CPU stack_trace area.
 */
void __exit exit_perf_sched(void)
{
	struct dfx_perf_sched_info *info = &perf_sched_info;
	if (dfx_perf_sched_status & SCHED_ENABLE) {
		trace_nosched_unregister_tp();
		on_each_cpu(each_stack_trace_clear, info->stack_trace, true);
		free_percpu_ringbuffer();
	}
	free_percpu(info->stack_trace);
}
