#define MODVERSIONS
#include <linux/init.h>
#include <linux/version.h>
#include <linux/module.h>
#include "trace.h"
#include "sched-if.h"

/*
 * Human-readable names for trace event ids, indexed by t_rec.event.
 * NULL-terminated; keep in sync with the event id numbering used by
 * callers of trace().  Read-only lookup table, hence const.
 */
static const char *const info[] = {
    "sched_vm_add",		// 0
    "sched_sleep",		// 1
    "sched_wake",		// 2
    "sched_yield",		// 3
    "sched_switch_inf_prev",	// 4
    "sched_switch_inf_next",	// 5  (was misspelled "sched_swithc_inf_next")
    "sched_switch",		// 6
    "sched_timer",		// 7
    "sched_relayed_timer",	// 8
    "vcpu_schedule",		// 9
    "runq_tickle",		// 10
    "tasklet_schedule",		// 11
    "csched_tick_stub",		// 12
    "csched_sleep",		// 13
    "csched_vcpu_wake",		// 14
    "csched_tick",		// 15
    "do_pending_svc",		// 16
    "pseudo_cli",		// 17
    "pseudo_sti",		// 18
    "csched_load_balance",	// 19
    "csched_schedule",		// 20
    "pseudo_intr",		// 21
    "pseudo_eoi",		// 22
    "pseudo_open_intr",		// 23
    "s_timer_fn",		// 24
    "preempt_send_ipi",		// 25
    "inject_pend_ipi",		// 26
    "do_pending_intr",		// 27
    "csched_runq_sort",		// 28
    NULL
};

static DEFINE_PER_CPU(struct trace_logger, t_logger);

extern volatile int err_cnt;
extern spinlock_t dbg_lock;

/*
 * Allocate and initialise the trace ring buffer for @cpu.
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated.
 * Must be called before trace() runs on that CPU.
 */
int init_trace_buf(int cpu)
{
    struct trace_logger *my_logger = &per_cpu(t_logger, cpu);
    /*
     * kcalloc() zeroes the buffer (dump_per_cpu_trace relies on
     * cycles == 0 marking unused slots) and checks the
     * NR_TRACES * sizeof() multiplication for overflow, unlike the
     * open-coded kzalloc(sizeof * n) it replaces.
     */
    struct t_rec *t_bufs = kcalloc(NR_TRACES, sizeof(*t_bufs), GFP_KERNEL);

    if (!t_bufs)
        return -ENOMEM;

    my_logger->buf = t_bufs;
    my_logger->ptr = 0;
    spin_lock_init(&my_logger->lock);

    return 0;
}
/*
 * Release the trace buffer for @cpu.
 *
 * Concurrent tracers see ->buf == NULL once we drop the lock; trace()
 * must check for that.  Dropping a buffer that was never allocated is
 * harmless (kfree(NULL) is a no-op).
 */
void free_trace_buf(int cpu)
{
    struct trace_logger *my_logger = &per_cpu(t_logger, cpu);
    unsigned long flags;

    /*
     * trace() acquires this lock with interrupts disabled, possibly from
     * interrupt context.  Taking it here with IRQs enabled could deadlock
     * if an interrupt fires on this CPU while we hold the lock, so use
     * the irqsave variant.
     */
    spin_lock_irqsave(&my_logger->lock, flags);
    kfree(my_logger->buf);	/* kfree(NULL) is a no-op; no guard needed */
    my_logger->buf = NULL;	/* signal tracers the buffer is gone */
    spin_unlock_irqrestore(&my_logger->lock, flags);
}
/*
 * Record one trace event on the current CPU's ring buffer.
 *
 * @event: index into the info[] name table
 * @d0..@d4: opaque payload words, printed verbatim at dump time
 *
 * Safe to call from any context: interrupts are disabled and the
 * per-CPU logger lock is held for the duration of the write.  Silently
 * drops the event if the buffer has been freed (see free_trace_buf()).
 */
void trace(u32 event, unsigned long d0, unsigned long d1,
	unsigned long d2, unsigned long d3, unsigned long d4)
{
	unsigned long flags;
	int idx;
	int cpu;
	struct trace_logger *my_logger;
	struct t_rec *rec;

	local_irq_save(flags);

	cpu = get_cpu();
	my_logger = &per_cpu(t_logger, cpu);

	spin_lock(&my_logger->lock);

	rec = my_logger->buf;
	if (unlikely(!rec))
		goto out_unlock;	/* buffer freed or never allocated */

	/*
	 * ->ptr grows monotonically; the slot is its value mod NR_TRACES.
	 * NOTE(review): ->ptr eventually overflows if it is a signed int —
	 * confirm its type in trace.h or wrap it explicitly.
	 */
	idx = my_logger->ptr++;
	idx %= NR_TRACES;
	rec += idx;

	rdtscll(rec->cycles);	/* timestamp; nonzero marks the slot used */
	rec->event = event;
	rec->data[0] = d0;
	rec->data[1] = d1;
	rec->data[2] = d2;
	rec->data[3] = d3;
	rec->data[4] = d4;

out_unlock:
	spin_unlock(&my_logger->lock);
	put_cpu();
	local_irq_restore(flags);
}
/* when executing, it is assumed that we are holding a spinlock */
/*
 * Print the contents of one logger's ring buffer, newest entry first.
 * Caller must hold log->lock (with interrupts disabled).
 *
 * Walks backwards from the most recent slot for at most NR_TRACES
 * entries, stopping early at an unused slot (cycles == 0) or at the
 * start of a buffer that never wrapped.
 */
static void dump_per_cpu_trace(struct trace_logger *log)
{
	int i;
	struct t_rec *records = log->buf;

	if (!records)
		return;		/* buffer already freed; nothing to dump */

	i = log->ptr - 1;
	if (i < 0)
		i = 0;
	for (; (i > (log->ptr - NR_TRACES)) && (i >= 0); i--) {
		struct t_rec *rec = &records[i % NR_TRACES];
		const char *name;

		if (unlikely(rec->cycles == 0))
			break;
		/*
		 * Guard the table lookup: a corrupt event id would index
		 * past info[] (or land on its NULL sentinel) and feed a
		 * bad pointer to %s.
		 */
		name = (rec->event < sizeof(info) / sizeof(info[0]) - 1)
			? info[rec->event] : "unknown";
		printk("(%llx)e:%s (%lx %lx %lx %lx %lx)\n",
		       rec->cycles, name, rec->data[0],
		       rec->data[1], rec->data[2], rec->data[3], rec->data[4]);
	}
}
/*
 * hrtimer callback wrapper around dump_traces().
 *
 * The expired timer is not inspected; we simply dump every CPU's trace
 * buffer once and do not rearm the timer.
 */
enum hrtimer_restart dump_cpu_trace(struct hrtimer *timer)
{
	(void)timer;		/* unused: callback ignores which timer fired */

	dump_traces(NULL);

	return HRTIMER_NORESTART;
}

/*
 * Dump every online CPU's trace buffer to the kernel log.
 *
 * @_unused: ignored; the signature matches an hrtimer-style callback.
 * Always returns HRTIMER_NORESTART.
 */
enum hrtimer_restart dump_traces(void *_unused)
{
    int cpu;
    unsigned long flags;

    for_each_online_cpu(cpu) {
	struct trace_logger *logger = &per_cpu(t_logger, cpu);

	printk("dumping trace logged on cpu %d\n", cpu);

	/*
	 * Interrupts must be off *before* taking the lock: trace() holds
	 * this lock from (potentially) interrupt context, so acquiring it
	 * with IRQs enabled can deadlock against an interrupt on this CPU.
	 * spin_lock_irqsave also disables preemption, making the separate
	 * preempt_disable/enable pair (and its unbalanced release order)
	 * unnecessary.
	 */
	spin_lock_irqsave(&logger->lock, flags);
	dump_per_cpu_trace(logger);
	printk("dumping trace line %d\n", __LINE__);
	spin_unlock_irqrestore(&logger->lock, flags);
    }
    printk("log trace dump finished\n");
    return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(dump_traces);
