/*
 *  runlog.c
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <mach/pmu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/threads.h>
#include <linux/dma-mapping.h>

#ifndef CONFIG_KALLSYMS
#error RUNLOG depend on CONFIG_KALLSYMS
#endif

#define TRACE_MAGIC			0xa579
#define TRACE_TYPE_SCHED	0
#define TRACE_TYPE_FUNC		1
#define TRACE_TYPE_IRQ		2
#define TRACE_TYPE_USR		3
#define TRACE_TYPE_MAX		3

#define TRACE_ACTION_ENTER	1
#define TRACE_ACTION_EXIT	0

/*
 * Header placed at the start of each runlog memory region.
 * __packed so the on-disk/off-target layout is stable for external parsers.
 */
struct trace_head
{
	int   unused; //for kgd test...
	int   pos;    //current pos...
	int   end;    // byte offset where the buffer last wrapped (0 = never wrapped)
	int   size;   // size of the log region, in bytes (see init_trace_head())
	int   cpu;    // owning cpu id in percpu mode; cpu count in shared mode
	int   total_cpu;       // num_possible_cpus() at init time
	int   sys_time_offset; // pts minus printk/local_clock time at init, in us
	char  name[64-28];     // pads the header out to 64 bytes
} __packed;

/* Payload for TRACE_TYPE_SCHED: context switch pid_pre -> pid. */
struct trace_sched
{
	unsigned short pid_pre;
	unsigned short pid;
	char name[16];   // comm of the incoming task, NUL-terminated by the writer
} __packed;
/* Payload for TRACE_TYPE_FUNC: function enter/exit marker. */
struct trace_func
{
	void *f;        // traced function address
	short line;
	short action;   // TRACE_ACTION_ENTER / TRACE_ACTION_EXIT
} __packed;
/* Payload for TRACE_TYPE_IRQ: irq handler enter/exit. */
struct trace_irq
{
	short irq_no;
	short action;
} __packed;
/* Payload for TRACE_TYPE_USR: short caller-supplied description. */
struct trace_usr
{
	// void *f;
	char action;
	char des[15];   // NUL-terminated by add_trace_node_usr()
} __packed;

/*
 * Common node header preceding every payload in the log buffer.
 * A 44-bit PTS timestamp is split across pts_hi (12 bits) and pts_lo (32 bits).
 */
struct trace_node
{
	unsigned int magic:16;  // TRACE_MAGIC
	unsigned int type:2;    // TRACE_TYPE_*
	unsigned int cpu:2;
	unsigned int pts_hi:12;
	unsigned int pts_lo;
	void *data[0];          // payload starts here; length given by type_len()
} __packed;

/* Worst-case node size: header plus the largest payload (trace_sched). */
#define MAX_TRACE_SIZE	(sizeof(struct trace_node) + sizeof(struct trace_sched))

/* trace_sts bits */
#define RECORD_EN		BIT(0)
#define RECORD_DUMP		BIT(1)
#define RECORD_BIN		BIT(2)
#define RECORD_STS_OK	RECORD_EN

#define RUNLOG_SIZE		(CONFIG_RUNLOG_SIZE_KB * SZ_1K)
static u32 warn_runtime_us = CONFIG_SCHED_RUNTIME_WARN_US;
static int trace_sts;
DEFINE_SPINLOCK(trace_lock);

/* Advance n past its header and payload to the next node in the buffer. */
#define NEXT_NODE(n) \
	((n) = (typeof(n))((void *)(n) + sizeof(struct trace_node) + type_len(((struct trace_node *)(n))->type)))

#ifndef CONFIG_RUNLOG_MEM_PERCPU
/* Shared-buffer mode: one log region and one write cursor for all cpus. */
static unsigned int g_trace_pos = sizeof(struct trace_head);
static unsigned int g_trace_mem_vaddr;

/* Returns from the *calling* function if tracing is off or memory is missing. */
#define CHECK_TRACE_VALID() \
	if (!g_trace_mem_vaddr) return; \
	if ((trace_sts & 0x3) != RECORD_STS_OK) return

/*
 * Open a trace record: declares locals, may return early via
 * CHECK_TRACE_VALID(), takes trace_lock (released by TRACE_END()) and
 * leaves `tn` (declared by the caller) pointing at the payload of a
 * freshly stamped node at the current cursor.
 */
#define TRACE_START(typ) \
	unsigned long flags; \
	struct trace_node *n; \
	struct trace_head *h = (struct trace_head *)g_trace_mem_vaddr; \
							\
	CHECK_TRACE_VALID(); \
	spin_lock_irqsave(&trace_lock, flags); \
	h->pos = g_trace_pos; \
	CUR_NODE(n, 0); \
	tn = (__typeof__(tn))n->data; \
	n->type = typ; \
	n->magic = TRACE_MAGIC; \
	n->cpu = smp_processor_id(); \
	set_pts(n)

/*
 * Close a trace record: advance the cursor, wrap (recording h->end) when
 * the next node might not fit, then drop trace_lock.
 */
#define TRACE_END() \
	g_trace_pos += sizeof(struct trace_node) + sizeof(*tn); \
	if (g_trace_pos + MAX_TRACE_SIZE >= RUNLOG_SIZE) { \
		h->end = g_trace_pos; \
		g_trace_pos = sizeof(struct trace_head); \
	} \
	spin_unlock_irqrestore(&trace_lock, flags)

/* First node, right after the header. */
#define FIRST_NODE(n, cpu) \
	((n) = (typeof(n))(g_trace_mem_vaddr + sizeof(struct trace_head)))

/* Node at the current write cursor. */
#define CUR_NODE(n, cpu) \
	((n) = (typeof(n))(g_trace_mem_vaddr + g_trace_pos))

#else

/* Per-cpu mode: one RUNLOG_SIZE region and write cursor per possible cpu. */
static unsigned int g_trace_pos[NR_CPUS] = {sizeof(struct trace_head)};
static unsigned int g_trace_mem_vaddr[NR_CPUS];

/* Returns from the *calling* function if tracing is off or memory is missing. */
#define CHECK_TRACE_VALID() \
	if (!g_trace_mem_vaddr[0]) return; \
	if ((trace_sts & 0x3) != RECORD_STS_OK) return

/* Same contract as the shared-mode TRACE_START above, indexed by this cpu. */
#define TRACE_START(typ) \
	unsigned long flags; \
	int cpu = smp_processor_id(); \
	struct trace_node *n; \
	struct trace_head *h = (struct trace_head *)g_trace_mem_vaddr[cpu]; \
							\
	CHECK_TRACE_VALID(); \
	spin_lock_irqsave(&trace_lock, flags); \
	h->pos = g_trace_pos[cpu]; \
	CUR_NODE(n, cpu); \
	tn = (__typeof__(tn))n->data; \
	n->type = typ; \
	n->magic = TRACE_MAGIC; \
	n->cpu = cpu; \
	set_pts(n)

/* Same contract as the shared-mode TRACE_END above, indexed by this cpu. */
#define TRACE_END() \
	g_trace_pos[cpu] += sizeof(struct trace_node) + sizeof(*tn); \
	if (g_trace_pos[cpu] + MAX_TRACE_SIZE >= RUNLOG_SIZE) { \
		h->end = g_trace_pos[cpu]; \
		g_trace_pos[cpu] = sizeof(struct trace_head); \
	} \
	spin_unlock_irqrestore(&trace_lock, flags)

#define FIRST_NODE(n, cpu) \
	((n) = (typeof(n))(g_trace_mem_vaddr[cpu] + sizeof(struct trace_head)))

#define CUR_NODE(n, cpu) \
	((n) = (typeof(n))(g_trace_mem_vaddr[cpu] + g_trace_pos[cpu]))

#endif

/* Stamp node n with the current PTS timestamp from fh_get_pts64(). */
static void set_pts(struct trace_node *n)
{
	// fh_pmu_set_reg(REG_PMU_PTSLO, 0x01);
	// n->pts_lo = (unsigned long)fh_pmu_get_reg(REG_PMU_PTSLO);
	// n->pts_hi = (unsigned long)fh_pmu_get_reg(REG_PMU_PTSHI);
	unsigned long long pts = fh_get_pts64(); //get PTS

	n->pts_lo = (unsigned int)pts;
	/* pts_hi is a 12-bit field, so only bits [43:32] of the PTS survive */
	n->pts_hi = (unsigned short)(pts >> 32);
}

/*
 * Record a function enter/exit event.
 * NB: TRACE_START() declares locals, may return silently when tracing is
 * disabled or memory is absent, and takes trace_lock; TRACE_END() advances
 * the cursor and releases the lock.
 */
void add_trace_node_func_ext(void *func, int line, int enter)
{
	struct trace_func *tn;

	TRACE_START(TRACE_TYPE_FUNC);

	tn->f = (void *)func;
	tn->line = line;
	tn->action = enter;

	TRACE_END();
}
EXPORT_SYMBOL(add_trace_node_func_ext);

/*
 * Record an irq handler enter/exit event.
 * Silently no-ops (inside TRACE_START) when tracing is disabled.
 */
void add_trace_node_irq(int irq, int enter)
{
	struct trace_irq *tn;

	TRACE_START(TRACE_TYPE_IRQ);

	tn->irq_no = irq;
	tn->action = enter;

	TRACE_END();
}
EXPORT_SYMBOL(add_trace_node_irq);

/*
 * Record a user-defined event with a short description (truncated to 14
 * chars plus NUL to fit trace_usr.des).
 * Silently no-ops (inside TRACE_START) when tracing is disabled.
 */
void add_trace_node_usr(const char *des, int enter)
{
	struct trace_usr *tn;

	TRACE_START(TRACE_TYPE_USR);

	// tn->f = (void *)_RET_IP_;
	tn->action = enter;
	strncpy(tn->des, des, sizeof(tn->des));
	tn->des[sizeof(tn->des)-1] = '\0';   /* strncpy may not terminate */

	TRACE_END();
}
EXPORT_SYMBOL(add_trace_node_usr);

/*
 * printf-style convenience wrapper around add_trace_node_usr().
 * The formatted text is truncated to the trace_usr.des capacity;
 * vsnprintf() always NUL-terminates, so no manual termination is needed.
 */
void add_trace_node_usr_printf(int enter, const char *fmt, ...)
{
	char buf[FIELD_SIZEOF(struct trace_usr, des)];
	va_list args;

	va_start(args, fmt);
	/* return value deliberately ignored: truncation is acceptable here */
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	add_trace_node_usr(buf, enter);
}
EXPORT_SYMBOL(add_trace_node_usr_printf);

static void sched_check(struct task_struct *prev, struct task_struct *next);

/*
 * Record a context switch (prev -> next), then optionally check whether
 * prev exceeded the runtime warning threshold.
 * Silently no-ops (inside TRACE_START) when tracing is disabled.
 */
void add_trace_node_sched(struct task_struct *prev, struct task_struct *next)
{
	struct trace_sched *tn;

	TRACE_START(TRACE_TYPE_SCHED);

	tn->pid = next->pid;
	tn->pid_pre = prev->pid;
	strncpy(tn->name, next->comm, sizeof(tn->name));
	tn->name[sizeof(tn->name)-1] = '\0';   /* strncpy may not terminate */

	TRACE_END();
	/* runs after TRACE_END so the trace_lock is no longer held */
	if (warn_runtime_us > 0)
		sched_check(prev, next);
}
EXPORT_SYMBOL(add_trace_node_sched);

/*
 * Payload size in bytes for a given node type.
 * Returns 0 for an out-of-range type instead of reading past the lookup
 * table (the original indexed ts[type] even after logging the error).
 * In practice trace_node.type is a 2-bit field, so 0..3 is all that can
 * reach us; the guard is purely defensive.
 */
static unsigned long type_len(int type) {
	static const unsigned long ts[] = {
		[TRACE_TYPE_USR] = sizeof(struct trace_usr),
		[TRACE_TYPE_FUNC] = sizeof(struct trace_func),
		[TRACE_TYPE_IRQ] = sizeof(struct trace_irq),
		[TRACE_TYPE_SCHED] = sizeof(struct trace_sched),
	};

	if (unlikely(type < 0 || type > TRACE_TYPE_MAX)) {
		pr_err("bad trace type: %x", type);
		return 0;	/* never index past the table */
	}
	return ts[type];
}

/* Single-character tag used in the dump output for each node type. */
static char type_str(int type) {
	switch (type) {
	case TRACE_TYPE_SCHED:
		return 'S';
	case TRACE_TYPE_FUNC:
		return 'F';
	case TRACE_TYPE_IRQ:
		return 'I';
	case TRACE_TYPE_USR:
		return 'U';
	default:
		/* unreachable: trace_node.type is a 2-bit field (0..3) */
		return '?';
	}
}

/*
 * Format one node and emit it to the seq_file, or to the console via
 * pr_alert() when sfile is NULL.  Returns -EIO on a corrupt node (bad
 * magic) so the caller can resynchronise; 0 otherwise.
 * NOTE(review): buf0/buf1 are static, so this is not reentrant — callers
 * appear to rely on trace_lock / single-threaded seq iteration; confirm.
 */
static int dump_trace_node(struct seq_file *sfile, struct trace_node *n, int i)
{
	static char buf0[32], buf1[128];

#ifndef CONFIG_RUNLOG_MEM_PERCPU
	int trace_mem_vaddr = g_trace_mem_vaddr;
#else
	int trace_mem_vaddr = g_trace_mem_vaddr[0];
#endif

	if (unlikely(n->magic != TRACE_MAGIC)) {
		pr_err("err magic: 0x%x in %p\n", n->magic, n);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4, n-1, 64, 1);
		return -EIO;
	}
	/* nodes with a zero timestamp are treated as empty and skipped */
	if (n->pts_lo) {
		unsigned long long pts = (unsigned long long)n->pts_lo|((unsigned long long)n->pts_hi<<32);

		/* common prefix: buffer-offset|type-char|cpu|pts| */
		snprintf(buf0, sizeof(buf0), "%X|%c|%u|%llu|", (int)n - trace_mem_vaddr, type_str(n->type), n->cpu, pts);
		switch(n->type) {
		case TRACE_TYPE_SCHED:
		{
			struct trace_sched *tn = (__typeof__(tn))n->data;

			snprintf(buf1, sizeof(buf1), "%d,%d,%s\n", tn->pid_pre, tn->pid, tn->name);
			break;
		}
		case TRACE_TYPE_FUNC:
		{
			struct trace_func *tn = (__typeof__(tn))n->data;

			snprintf(buf1, sizeof(buf1), "%pf,%d,%x\n", tn->f, tn->line, tn->action);
			break;
		}
		case TRACE_TYPE_IRQ:
		{
			struct trace_irq *tn = (__typeof__(tn))n->data;
			struct irq_desc *d = irq_to_desc(tn->irq_no);
			// void * func = NULL;
			const char *name = "irq";   /* fallback when the irq has no action */

			if (d && d->action) {
				struct irqaction *a = d->action;
				// func = a->handler;
				name = a->name;
			}
			snprintf(buf1, sizeof(buf1), "%s,%d,%x\n", name, tn->irq_no, tn->action);
			break;
		}
		case TRACE_TYPE_USR:
		{
			struct trace_usr *tn = (__typeof__(tn))n->data;
			snprintf(buf1, sizeof(buf1), "%s,%x\n", tn->des, tn->action);
			break;
		}
		default:
			BUG();
			break;
		}
		if (sfile) {
			seq_puts(sfile, buf0);
			seq_puts(sfile, buf1);
		} else {
			pr_alert("%s%s", buf0, buf1);
		}
	}
	return 0;
}

/*
 * Emit to the seq_file when m is non-NULL, otherwise to the console.
 * Wrapped in do { } while (0) so the macro is a single statement: the old
 * bare if/else form had a dangling-else hazard when used inside an
 * unbraced if body (as in seq_start()).
 */
#define print(m, fmt, ...) \
	do { \
		if (m) \
			seq_printf(m, fmt, ##__VA_ARGS__); \
		else \
			pr_alert(KERN_CONT fmt, ##__VA_ARGS__); \
	} while (0)

/*
 * Scan forward in 4-byte steps until a node with a valid magic is found,
 * probing at most size/4 positions.  Returns the (possibly still invalid)
 * position reached; callers re-check the bounds themselves.
 */
static struct trace_node *find_next_valid_node(struct trace_node *n, int size)
{
	int remaining = size / 4;

	while (remaining-- > 0 && n->magic != TRACE_MAGIC)
		n = (struct trace_node *)((void *)n + 4);

	return n;
}

/*
 * Dump one cpu's log to the seq_file (or console when sfile is NULL).
 * Walks the wrapped tail first (cursor..h->end), resynchronising on
 * corrupt nodes, then the head (header..cursor).
 *
 * Fix: the RECORD_BIN fast path used to `return 0` while still holding
 * trace_lock (irqs disabled) and with RECORD_DUMP left set — it now exits
 * through the common unlock path.
 */
static int _dump_trace_log(struct seq_file *sfile, int cpu)
{
	int i = 0;
	unsigned long flags;
	struct trace_node *n;
	struct trace_head *h;
	int trace_mem_vaddr;
	int trace_pos;

#ifndef CONFIG_RUNLOG_MEM_PERCPU
	trace_mem_vaddr = g_trace_mem_vaddr;
	trace_pos = g_trace_pos;
#else
	WARN_ON(cpu >= NR_CPUS);
	trace_mem_vaddr = g_trace_mem_vaddr[cpu];
	trace_pos = g_trace_pos[cpu];
#endif

	if (!trace_mem_vaddr)
		return 0;
	h = (struct trace_head *)trace_mem_vaddr;

	spin_lock_irqsave(&trace_lock, flags);
	trace_sts |= RECORD_DUMP;

	/* binary mode: emit the raw buffer and leave through the unlock path */
	if ((trace_sts & RECORD_BIN) && sfile) {
		seq_write(sfile, (void *)trace_mem_vaddr, RUNLOG_SIZE);
		goto out;
	}

	print(sfile, "#########g_trace_pos=0x%x/0x%x/0x%x. addr=%x\n", trace_pos, h->end, RUNLOG_SIZE, trace_mem_vaddr);

	if (h->end) {
		/* wrapped: oldest data sits between the cursor and the wrap point */
		CUR_NODE(n, cpu);
find_next:
		n = find_next_valid_node(n, MAX_TRACE_SIZE);
		while ((unsigned long)n < trace_mem_vaddr + h->end) {
			if (dump_trace_node(sfile, n, i++))
				goto find_next;   /* corrupt node: resynchronise */
			NEXT_NODE(n);
		}
	}

	FIRST_NODE(n, cpu);
	while ((unsigned long)n < trace_mem_vaddr + trace_pos) {
		dump_trace_node(sfile, n, i++);
		NEXT_NODE(n);
	}

out:
	trace_sts &= ~RECORD_DUMP;
	spin_unlock_irqrestore(&trace_lock, flags);
	return 0;
}

/* Dump the full runlog (every cpu's buffer in percpu mode) to the console. */
void dump_trace_log(void)
{
#ifndef CONFIG_RUNLOG_MEM_PERCPU
	_dump_trace_log(NULL, 0);
#else
	int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		_dump_trace_log(NULL, cpu);
#endif
}
EXPORT_SYMBOL(dump_trace_log);

/* Replace the whole trace status word under the trace lock. */
void set_trace_status(int status)
{
	unsigned long irqstate;

	spin_lock_irqsave(&trace_lock, irqstate);
	trace_sts = status;
	spin_unlock_irqrestore(&trace_lock, irqstate);
}
EXPORT_SYMBOL(set_trace_status);

/* Clear every status bit, disabling recording entirely. */
void stop_trace_log(void)
{
	set_trace_status(0);
}
EXPORT_SYMBOL(stop_trace_log);

/* Enable recording (RECORD_EN only; format/dump bits are reset). */
void start_trace_log(void)
{
	set_trace_status(RECORD_EN);
}
EXPORT_SYMBOL(start_trace_log);

/**************************** sched info *****************************/
/* Report a task that overran the runtime threshold, including its stack. */
static void dump_process_info(struct task_struct *tsk, u64 pts, u32 runtime)
{
	pr_err("### [%llu] cpu:%u task:%s[%d] cost %uus\n", pts, task_cpu(tsk), tsk->comm, tsk->pid, runtime);
	sched_show_task(tsk);
}

/* Per-cpu PTS timestamp of the last observed context switch. */
static u64 last_pts[NR_CPUS];

/*
 * Warn when the task being switched out ran longer than warn_runtime_us.
 * Called from add_trace_node_sched(); remembers the task switched in and
 * its timestamp per cpu, so prev's runtime can be measured at the next
 * switch on the same cpu.
 */
static void sched_check(struct task_struct *prev, struct task_struct *next)
{
	static struct task_struct *last[NR_CPUS];
	u64 pts;
	int cpu = smp_processor_id();

	pts = fh_get_pts64();
	/* only measure when prev is the task we saw switched in last time */
	if (last[cpu] == prev && last_pts[cpu]) {
		u32 runtime = pts - last_pts[cpu];

		/* filter out idle task */
		if (runtime >= warn_runtime_us && prev->pid > 1)
			dump_process_info(prev, pts, runtime);
	}

	last[cpu] = next;
	last_pts[cpu] = pts;
}

/* Drop stale timestamps (called from proc_release) so no bogus warning fires. */
static void sched_check_reinit(void)
{
	memset(last_pts, 0, sizeof(last_pts));
}


/****************************debug proc*****************************/
#include <linux/proc_fs.h>

#define PROC_NAME "runlog"
static struct proc_dir_entry *proc_file;
static int dump_idx;   /* running node counter for the text dump */

/* The seq_file *pos slot carries a trace_node pointer; recover it. */
#define TO_TN(ptr)	(struct trace_node *)(*(struct trace_node **)ptr)

static void *seq_next(struct seq_file *f, void *v, loff_t *pos);

/*
 * seq_file start: locate the oldest node to dump and print header lines.
 * *pos carries a trace_node pointer between iterations (see TO_TN); an
 * unaligned *pos means "resume mid-walk", handled by jumping to seq_next().
 */
static void *seq_start(struct seq_file *f, loff_t *pos)
{
	struct trace_node *n;
#ifndef CONFIG_RUNLOG_MEM_PERCPU
	int __maybe_unused cpu = 0;
	int trace_mem_vaddr = g_trace_mem_vaddr;
	int trace_pos = g_trace_pos;
#else
	/* NOTE(review): cpu index stored directly in f->private — assumes
	 * 32-bit pointers; confirm before porting to 64-bit. */
	int cpu = (int)f->private;
	int trace_mem_vaddr = g_trace_mem_vaddr[cpu];
	int trace_pos = g_trace_pos[cpu];
#endif
	struct trace_head *h = (struct trace_head *)trace_mem_vaddr;
	loff_t p = *pos;

	if (!trace_mem_vaddr)
		return NULL;

	if ((p & 0x3)) { /* contiguously dump */
		*pos = ALIGN_DOWN(p, 4);
		return seq_next(f, NULL, pos);
	}

	/* looking for first node */
	if (h->end) {
		/* buffer wrapped: the oldest node sits just after the cursor */
		CUR_NODE(n, cpu);
		n = find_next_valid_node(n, MAX_TRACE_SIZE);
		if ((unsigned long)n >= trace_mem_vaddr + h->end)
			FIRST_NODE(n, cpu);
	} else {
		if (!h->pos)
			return NULL;   /* nothing recorded yet */
		FIRST_NODE(n, cpu);
	}
	dump_idx = 0;
	*pos = (loff_t)(unsigned long)n;
	if (cpu == 0)
		print(f, "###### %s nr_cpus:%u, pts-printk_time=%dus\n", IS_ENABLED(CONFIG_RUNLOG_MEM_PERCPU)?"use percpu mem":"", NR_CPUS, h->sys_time_offset);

	print(f, "#########g_trace_pos=0x%x/0x%x/0x%x. addr=%x\n", trace_pos, h->end, RUNLOG_SIZE, trace_mem_vaddr);
	return pos;
}

/*
 * seq_file next: advance to the following node, wrapping at h->end.
 * Ends (returns NULL) at the write cursor; in percpu mode it instead moves
 * on to the next cpu's buffer by restarting seq_start() with cpu+1.
 */
static void *seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct trace_node *n = TO_TN(pos);
#ifndef CONFIG_RUNLOG_MEM_PERCPU
	int __maybe_unused cpu = 0;
	int trace_mem_vaddr = g_trace_mem_vaddr;
	int trace_pos = g_trace_pos;
#else
	int cpu = (int)f->private;
	int trace_mem_vaddr = g_trace_mem_vaddr[cpu];
	int trace_pos = g_trace_pos[cpu];
#endif
	struct trace_head *h = (struct trace_head *)trace_mem_vaddr;

	NEXT_NODE(n);
	if (h->end) {
		/* wrapped log: jump back to the start once past the wrap point */
		if ((unsigned long)n >= trace_mem_vaddr + h->end) {
			FIRST_NODE(n, cpu);
			goto out;
		}
	}

out:
#ifdef CONFIG_RUNLOG_MEM_PERCPU
	/* when overflow, just restart it */
	if ((unsigned long)n < trace_mem_vaddr) {
		return seq_start(f, pos);
	}
	if ((unsigned long)n == trace_mem_vaddr + trace_pos) {/* end pos */
		if (cpu < num_possible_cpus()-1) {
			f->private = (void *)(cpu+1);
			return seq_start(f, pos);
		} else
			return NULL;
	}
#else
	if ((unsigned long)n == trace_mem_vaddr + trace_pos) {/* end pos */
		return NULL;
	}
#endif
	*pos = (loff_t)(unsigned long)n;
	return pos;
}

/* seq_file stop hook: nothing was acquired in seq_start, nothing to release. */
static void seq_stop(struct seq_file *f, void *v)
{
}

/*
 * single_open show callback for binary mode: dump cpu0's raw buffer.
 * NOTE(review): in percpu mode only the first region is emitted here,
 * unlike the text path which walks every cpu — confirm this is intended.
 */
static int seq_show_bin(struct seq_file *sfile, void *v)
{
#ifndef CONFIG_RUNLOG_MEM_PERCPU
	int trace_mem_vaddr = g_trace_mem_vaddr;
#else
	int trace_mem_vaddr = g_trace_mem_vaddr[0];
#endif
	seq_write(sfile, (void *)trace_mem_vaddr, RUNLOG_SIZE);
	return 0;
}

/* seq_file show callback: format the node referenced by the iterator. */
static int seq_show(struct seq_file *sfile, void *v)
{
	dump_trace_node(sfile, TO_TN(v), dump_idx++);
	return 0;
}

/* Iterator used by /proc/runlog in text mode. */
static const struct seq_operations seq_ops = {
	.start = seq_start,
	.next  = seq_next,
	.stop  = seq_stop,
	.show  = seq_show,
};

/*
 * Open /proc/runlog.  Sets RECORD_DUMP so writers can tell a dump is in
 * progress, then picks the binary (single_open) or text (seq_open) path.
 * NOTE(review): the binary path uses single_open_size(), but proc_release()
 * always calls seq_release() — the private ops allocated by single_open
 * appear to leak there; verify and pair with single_release() if so.
 */
static int proc_open(struct inode *inode, struct file *file)
{
	unsigned long flags;

	spin_lock_irqsave(&trace_lock, flags);
	trace_sts |= RECORD_DUMP;
	spin_unlock_irqrestore(&trace_lock, flags);

	if ((trace_sts & RECORD_BIN))
		return single_open_size(file, seq_show_bin, NULL, RUNLOG_SIZE+SZ_4K);
	return seq_open(file, &seq_ops);
}

/*
 * Control interface for /proc/runlog:
 *   "1" / "0"    enable / disable recording
 *   "bin"/"txt"  select binary or text dump format
 *   "dump"       dump the whole log to the console
 *   "warn:N"     set the scheduler runtime warning threshold (us)
 * Anything else just prints the current status to the console.
 *
 * Fixes: the copy buffer is now always NUL-terminated (previously a
 * 64-byte write left it unterminated, and kstrtou32(&buf[5], ...) could
 * read past the end); kstrtou32 gets a real u32; a parse failure returns
 * -EINVAL instead of 0, which made userspace writers loop forever.
 */
static ssize_t proc_write(struct file *file, const char __user *buffer,
				    size_t count, loff_t *pos)
{
	char buf[64];

	if (count > 0) {
		/* reserve one byte for the terminator */
		size_t len = min(sizeof(buf) - 1, count);

		if (copy_from_user(buf, buffer, len))
			return -EIO;
		buf[len] = '\0';

		if (buf[0] == '1')
			trace_sts |= RECORD_EN;
		else if (buf[0] == '0')
			trace_sts &= ~RECORD_EN;
		if (strncmp("bin", buf, 3) == 0)
			trace_sts |= RECORD_BIN;
		else if (strncmp("txt", buf, 3) == 0)
			trace_sts &= ~RECORD_BIN;
		else if (strncmp("dump", buf, 4) == 0) {
			dump_trace_log();
			goto out;
		} else if (strncmp("warn:", buf, 5) == 0) {
			u32 val = 0;
			int ret = kstrtou32(&buf[5], 10, &val);

			if (ret)
				return -EINVAL;
			pr_err("set warn_runtime_us = %u\n", val);
			warn_runtime_us = val;
			goto out;
		}
	}
	pr_err("sts:%x en:%d fmt:%s dump:%d\n", trace_sts, !!(trace_sts&RECORD_EN),
		(trace_sts&RECORD_BIN)?"bin":"txt", !!(trace_sts&RECORD_DUMP));
out:
	return count;
}

/*
 * Release /proc/runlog: clear the dump flag, reset the sched-check state
 * and tear down the seq_file.
 * NOTE(review): files opened in RECORD_BIN mode went through
 * single_open_size() but are released with seq_release() here — this
 * looks like it leaks single_open's private allocation; verify and use
 * single_release() for that case if confirmed.
 */
static int proc_release(struct inode *inode, struct file *file)
{
	unsigned long flags;

	spin_lock_irqsave(&trace_lock, flags);
	trace_sts &= ~RECORD_DUMP;
	sched_check_reinit();
	spin_unlock_irqrestore(&trace_lock, flags);

	return seq_release(inode, file);
}

static struct file_operations proc_ops =
{
	.owner = THIS_MODULE,
	.open = proc_open,
	.read = seq_read,
	.release = proc_release,
	.write = proc_write,
};

static void create_proc(void)
{
	proc_file = proc_create(PROC_NAME, 0644, NULL, &proc_ops);

	if (proc_file == NULL)
		pr_err("%s: ERROR: %s proc file create failed",
			   __func__, PROC_NAME);
}

/*
 * Fill in the trace_head at `head` (a region's virtual address carried as
 * an int).  `size` is the region size in bytes, `cpu` the owner id and
 * `offset` the pts-vs-printk time delta in microseconds.
 */
static void __init init_trace_head(int head, int size, int cpu, int offset)
{
	struct trace_head *th = (struct trace_head *)head;

	th->pos = 0;
	th->end = 0;
	th->size = size;
	th->cpu = cpu;
	th->total_cpu = num_possible_cpus();
	/* delta time of pts and printk */
	th->sys_time_offset = offset;
}

/*
 * Allocate or map the runlog memory, initialise the per-buffer header(s),
 * set the startup trace status and create /proc/runlog.  The backing
 * memory comes from one of three sources depending on config: a fixed
 * board-reserved region (ioremap'd), dma_alloc_coherent, or kmalloc.
 * NOTE(review): virtual addresses are carried as int/unsigned int — this
 * assumes a 32-bit kernel; confirm before porting.
 */
static int __init init_trace_memory(void)
{
	unsigned long flags;
	dma_addr_t paddr;
	/* printk timestamps derive from local_clock(); record its PTS offset */
	unsigned long long sys_clock =  div_u64(local_clock(), 1000);
	int sys_time_offset = fh_get_pts64()-sys_clock;

#ifndef CONFIG_RUNLOG_MEM_PERCPU
	int trace_mem_vaddr = g_trace_mem_vaddr;
	int runlog_size = RUNLOG_SIZE;
#else
	int trace_mem_vaddr = g_trace_mem_vaddr[0];
	int runlog_size = RUNLOG_SIZE * num_possible_cpus();
#endif

	/* trace_sched stores pids in 16 bits; make sure that cannot truncate */
	BUILD_BUG_ON(PID_MAX_LIMIT > U16_MAX);

	if (!trace_mem_vaddr)
	{
#ifdef CONFIG_RUNLOG_USE_CUSTOM_MEM
		paddr = CONFIG_RUNLOG_CUSTOM_MEM_ADDR;
	#ifdef CONFIG_RUNLOG_COHERENT_MEM
		trace_mem_vaddr = (int)ioremap(paddr, runlog_size);
	#else
		trace_mem_vaddr = (int)ioremap_cached(paddr, runlog_size);
	#endif
#else
	#ifdef CONFIG_RUNLOG_COHERENT_MEM
		trace_mem_vaddr = (unsigned int)dma_alloc_coherent(
			NULL, runlog_size, &paddr, GFP_KERNEL);
	#else
		trace_mem_vaddr = (unsigned int)kmalloc(runlog_size, GFP_KERNEL);
		paddr = virt_to_phys((void *)trace_mem_vaddr);
	#endif
#endif
		if (!trace_mem_vaddr) {
			pr_err("init_trace_memory: failed!!!!\n");
			return -ENOMEM;
		} else {
			pr_err("########## init_trace_memory: va(%08x) pa(%08x). %scached\n", trace_mem_vaddr, (int)paddr, IS_ENABLED(CONFIG_RUNLOG_COHERENT_MEM)?"non":"");
			pr_err("\t\t\t\tsize:0x%x%s, nr_cpus:%u\n", runlog_size, IS_ENABLED(CONFIG_RUNLOG_MEM_PERCPU)?"(use percpu mem)":"", num_possible_cpus());
			pr_err("\t\t\t\tpts-printk_time=%dus\n\n", sys_time_offset);
			memset((void *)trace_mem_vaddr, 0, runlog_size);
#ifndef CONFIG_RUNLOG_MEM_PERCPU
			g_trace_mem_vaddr = trace_mem_vaddr;
			init_trace_head(g_trace_mem_vaddr, runlog_size, num_possible_cpus(), sys_time_offset);
#else
			{
				int i = 0;
				/* carve one RUNLOG_SIZE slice per possible cpu */
				for_each_possible_cpu(i) {
					g_trace_mem_vaddr[i] = trace_mem_vaddr + i*RUNLOG_SIZE;
					g_trace_pos[i] = g_trace_pos[0];
					/* NOTE(review): passes the total size, not
					 * RUNLOG_SIZE, as this cpu's head->size —
					 * verify parsers expect that */
					init_trace_head(g_trace_mem_vaddr[i], runlog_size, i, sys_time_offset);
				}
			}
#endif
		}
	}

	spin_lock_irqsave(&trace_lock, flags);
#ifdef CONFIG_RUNLOG_TRACE_WHEN_STARTUP
	trace_sts = RECORD_STS_OK;
#else
	trace_sts = 0;
#endif
	spin_unlock_irqrestore(&trace_lock, flags);
	create_proc();
	return 0;
}

late_initcall(init_trace_memory);
