#define MODVERSIONS
/****************************************************************************
 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2002-2003 University of Cambridge
 * (C) 2004      - Mark Williamson - Intel Research Cambridge
 * (C) 2009      - Xiaojian Liu
 ****************************************************************************
 *
 *        File: common/schedule.c
 *      Author: Rolf Neugebauer & Keir Fraser
 *              Updated for generic API by Mark Williamson
 *		Updated for KVM by Xiaojian Liu
 * 
 * Description: Generic CPU scheduling code
 *              implements support functionality for the Xen scheduler API.
 *
 */
#include <linux/init.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include "trace.h"
#include "sched-if.h"
#include "schedule.h"
#include "external-if.h"
#include "ipi.h"
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#ifdef DEBUG
#define ASSERT(x)                                                  \
do {                                                               \
	if (!(x)) {                                                \
	    printk(KERN_EMERG "assertion failed %s: %d: %s\n",     \
		   __FILE__, __LINE__, #x);                        \
	    BUG();                                                 \
    }                                                              \
} while (0)
#else
#define ASSERT(x) do { } while (0)
#endif

DEFINE_PER_CPU(struct schedule_data, schedule_data);
DEFINE_PER_CPU(rwlock_t, pseudo_cli);

static struct list_head sched_vm_list;
static spinlock_t  sched_vm_list_lock;

#define VM_SCHEDULER "vmscheduler"

struct proc_dir_entry *kvm_scheduler_root;

#ifndef COMPAT

/* Various timer handlers. */
static enum hrtimer_restart s_timer_fn(struct hrtimer* timer);
static void vcpu_singleshot_timer_fn(void *data);
static void poll_timer_fn(void *data);

/* This is global for now so that private implementations can reach it */

extern struct scheduler sched_credit_def;
static struct scheduler *schedulers[] = { 
    &sched_credit_def,
    NULL
};

static struct scheduler ops;

bool shutting_down = false;

#define SCHED_OP(fn, ...)                                 \
         (( ops.fn != NULL ) ? ops.fn( __VA_ARGS__ )      \
          : (typeof(ops.fn(__VA_ARGS__)))0 )

/*
 * Pause @v and wait until it is fully descheduled.
 * Must not be called on the currently-running vcpu (asserted below),
 * since vcpu_sleep_sync() spins until the target stops running.
 * Pairs with vcpu_unpause(); pause_count nests.
 */
void vcpu_pause(struct sched_vcpu *v)
{
	ASSERT(v != current_vcpu);

	/* raise the count first so a concurrent wake cannot undo the sleep */
	atomic_inc(&v->pause_count);
	vcpu_sleep_sync(v);
}
/*
 * Pause @v without waiting for it to stop running.
 * Safe to call on the current vcpu; pairs with vcpu_unpause().
 */
void vcpu_pause_nosync(struct sched_vcpu *v)
{
	atomic_inc(&v->pause_count);
	vcpu_sleep_nosync(v);
}
/*
 * Drop one pause reference on @v; wake it only when the last
 * outstanding pause (vcpu_pause/vcpu_pause_nosync) is released.
 */
void vcpu_unpause(struct sched_vcpu *v)
{
	if(atomic_dec_and_test(&v->pause_count))
		vcpu_wake(v);
}

void kvm_force_tasklet_schedule(void *_unused)
{
    tasklet_schedule(&per_cpu(schedule_data, raw_smp_processor_id()).sched_tasklet);
}
/*
 * Pause every vcpu of @kvm synchronously.
 *
 * If the caller is itself one of @kvm's vcpus, that vcpu cannot
 * sleep-sync on itself; its index is remembered in late_wait and it is
 * descheduled last, by forcing a reschedule and spinning in schedule()
 * until something else is current on this CPU.
 */
void vm_pause(struct sched_vm *kvm)
{
    int late_wait = -1;	/* index of the caller's own vcpu, if any */
    int i;
    int my_cpu;

    kvm->is_paused_by_controller = 1;
    printk("func %s line %d\n", __FUNCTION__, __LINE__);
    my_cpu = get_cpu();
    for(i=0; i< MAX_SCHED_VCPUS; i++) {
	if(kvm->vcpus[i]) {
	    if(current_vcpu == kvm->vcpus[i]) { 
		/* can't sleep-sync on ourselves; defer to the end */
		atomic_inc(&kvm->vcpus[i]->pause_count); 
		late_wait = i; 
		continue;
	    }
	    if (shutting_down) {
		/* during teardown, prod the remote CPU's scheduler so the
		 * target vcpu actually gets descheduled */
		printk("the shutting down case! to cpu %d i am cpu %d\n",
			kvm->vcpus[i]->processor, my_cpu);
		smp_call_function_mask(cpumask_of_cpu(kvm->vcpus[i]->processor), 
			kvm_force_tasklet_schedule, NULL, 1);
	    }
	    vcpu_sleep_sync(kvm->vcpus[i]);
	}
    }
    put_cpu();
    printk("func %s line %d\n", __FUNCTION__, __LINE__);
    if (late_wait != -1) {
	if(current_vcpu == kvm->vcpus[late_wait]) {
	    if(!is_host_vm(kvm) && !is_idle_vm(kvm)) {
		/* force a pass through the scheduler, then wait until this
		 * CPU is no longer running a vcpu of @kvm */
		tasklet_schedule(&per_cpu(schedule_data, raw_smp_processor_id()).sched_tasklet);
		while(current_vcpu->vm == kvm) {
		    printk("waiting\n");
		    schedule();
		}
	    }
	}
    }
}

/*
 * Release one VM-level pause reference and, on the last release, wake
 * all of @kvm's vcpus.  The vcpu that may correspond to this CPU is
 * woken last.
 *
 * NOTE(review): the `i == cpu` skip compares a vcpu INDEX against a
 * physical CPU id; that only lines up if vcpu i is pinned to cpu i —
 * confirm against the affinity setup in vmsched_init_vcpu().
 */
void vm_unpause(struct sched_vm *kvm)
{
	int i;
	struct sched_vcpu* v;
	int cpu = get_cpu();

	kvm->is_paused_by_controller = 0;
	if ( atomic_dec_and_test(&kvm->pause_count) ){
		for(i = 0; i < MAX_SCHED_VCPUS; ++i) {
			if(unlikely(i == cpu)) continue;
			v = kvm->vcpus[i];
			if (v) {
				printk("me is %d waking vcpu %d\n", cpu, i);
				vcpu_wake(v);
			}
		}
		/* finally wake the vcpu slot matching this CPU, if present */
		if(kvm->vcpus[cpu]){
		    printk("waking myself %d now\n", cpu);
		    vcpu_wake(kvm->vcpus[cpu]);
		}
	}
	put_cpu();
}
/*
 * Controller-initiated pause; idempotent via the is_paused_by_controller
 * flag (set inside vm_pause()).
 */
void vm_pause_by_systemcontroller(struct sched_vm* kvm)
{
    if(!kvm->is_paused_by_controller)
	vm_pause(kvm);
}

/*
 * Controller-initiated unpause; only acts if a controller pause is
 * currently in effect.
 */
void vm_unpause_by_systemcontroller(struct sched_vm* kvm)
{
    if(kvm->is_paused_by_controller)
	vm_unpause(kvm);
}

/*
 * Transition @v to @new_state at time @new_entry_time, crediting the
 * interval since the previous transition to the outgoing state's
 * accumulated time.  Caller must hold the per-CPU schedule_lock of
 * v->processor (asserted).
 */
void vcpu_runstate_change(
    struct sched_vcpu *v, int new_state, s_time_t new_entry_time)
{
    int old_state = v->runstate.state;

    ASSERT(old_state != new_state);
    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));

    /* account the time spent in the state we are leaving */
    v->runstate.time[old_state] +=
        new_entry_time - v->runstate.state_entry_time;

    v->runstate.state_entry_time = new_entry_time;
    v->runstate.state = new_state;
}

/*
 * Snapshot @v's runstate into @runstate, with the time of the current
 * state brought up to NOW().  For the running vcpu this is lock-free
 * (only it updates its own runstate); for any other vcpu we take the
 * schedule lock to get a consistent view.
 */
void vcpu_runstate_get(struct sched_vcpu *v, struct vcpu_runstate_info *runstate)
{
    if ( likely(v == current_vcpu) )
    {
        /* Fast lock-free path. */
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        ASSERT(runstate->state == RUNSTATE_running);
        runstate->time[RUNSTATE_running] += NOW() - runstate->state_entry_time;
    }
    else
    {
	unsigned long flags;
	/* disable preemption and IRQs before taking the schedule lock */
	get_cpu();
	local_irq_save(flags);
        vcpu_schedule_lock_irq(v);
        memcpy(runstate, &v->runstate, sizeof(*runstate));
        runstate->time[runstate->state] += NOW() - runstate->state_entry_time;
        vcpu_schedule_unlock_irq(v);
	local_irq_restore(flags);
	put_cpu();
    }
}

/*
 * Initialise scheduler state for a new vcpu on @processor and hand it
 * to the policy's init_vcpu hook.
 *
 * Returns the policy hook's result (0 on success).
 *
 * NOTE(review): affinity/priority are applied to `current` (the task
 * calling this), on the assumption that the caller IS the vcpu thread
 * being set up — confirm against the callers.
 */
int vmsched_init_vcpu(struct sched_vcpu *v, unsigned int processor) 
{
	int r;
	struct sched_vm *vm = v->vm;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1};

	/*
	 * Initialize processor and affinity settings. The idler, and potentially
	 * domain-0 VCPUs, are pinned onto their respective physical CPUs.
	 */
	v->processor = processor;
	if (!is_host_vcpu(v) && !is_idle_vcpu(v))
	    kvm_sched_setaffinity(current->pid, cpumask_of_cpu(processor));
	sched_setscheduler(current, SCHED_RR, &param);


	/* idle/host vcpus are pinned; guest vcpus may run anywhere */
	if ( is_idle_vm(vm) || is_host_vm(vm))
		v->cpu_affinity = cpumask_of_cpu(processor);
	else
		cpus_setall(v->cpu_affinity);

	/* Idle VCPUs are scheduled immediately. */
	if ( is_idle_vm(vm) )
	{
		per_cpu(schedule_data, v->processor).curr = v;
		per_cpu(schedule_data, v->processor).idle = v;
		v->is_running = true;
	}

	TRACE_1D(TRC_SCHED_VM_ADD, v->vcpu_id);
	r = SCHED_OP(init_vcpu, v);
	return r;
}

/*
 * Tear down scheduler state for @v via the policy's destroy_vcpu hook,
 * with preemption disabled around the call.
 *
 * Fix: the previous version stored get_cpu()'s return in an unused
 * local (`int cpu`), triggering a set-but-unused warning; the value is
 * not needed, only the preempt-disable side effect.
 */
void vmsched_destroy_vcpu(struct sched_vcpu *v)
{
	get_cpu();
	SCHED_OP(destroy_vcpu, v);
	put_cpu();
}
struct sched_vm* get_kvm_by_id(int id)
{

    struct sched_vm *kvm, *n;
    spin_lock(&sched_vm_list_lock);
    list_for_each_entry_safe(kvm, n, &sched_vm_list, vm_link) {
	if (kvm->vmid == id)  {
	    spin_unlock(&sched_vm_list_lock);
	    return kvm;
	}
    }
    return NULL;
}
/*
 * Map a /proc/vmscheduler/<name> file back to its sched_vm:
 * "host" maps to host_vm, anything else is parsed as a numeric vmid.
 *
 * NOTE(review): the sscanf result is unchecked — a non-numeric entry
 * name would leave `id` uninitialized before the lookup; confirm entry
 * names are always created by vmsched_init_vm().
 */
static inline struct sched_vm* get_proc_kvm(struct file *file)
{
    struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
    vmid_t id;

    if(strcmp(pde->name, "host") == 0)
	return host_vm;

    sscanf(pde->name, "%d", &id);
    return get_kvm_by_id(id);
}

#define MAX_SCHEDULE_PARAMS 50
static int kvm_scheduler_write(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
    char scheduler_param[MAX_SCHEDULE_PARAMS] = { '\0'};
    struct sched_vm* kvm = get_proc_kvm(file);
    int i;
    int r = count;
    unsigned long flags;
    
    get_cpu();
    
    if(!kvm) {
	r = -EINVAL;
	goto quit;
    }

    if (count > MAX_SCHEDULE_PARAMS - 1 ) {
	r = -EINVAL;
	goto quit;
    }
    if(copy_from_user(scheduler_param, buffer, count)) {
	r = -EFAULT;
	goto quit;
    }
    scheduler_param[count] = '\0';

    for(i=0; i < MAX_SCHED_VCPUS; i++){
	struct sched_vcpu *v = kvm->vcpus[i];
	if(v && v != current_vcpu)
	    vcpu_pause(v);
    }

    if ( kvm == current_vcpu->vm) {
	local_irq_save(flags);
	if(!vcpu_schedule_try_lock(current_vcpu)) {
	    local_irq_restore(flags);
	    r = -EAGAIN;
	    goto quit_with_unpause;
	}
    }

    if ( (SCHED_OP(write_schedule_info, kvm, scheduler_param)) == 0 )
	printk("failed write schedule info!\n");

    if ( kvm == current_vcpu->vm) {
	vcpu_schedule_unlock(current_vcpu);
	local_irq_restore(flags);
    }

quit_with_unpause:
    for(i=0; i < MAX_SCHED_VCPUS; i++){
	struct sched_vcpu *v = kvm->vcpus[i];
	if(v && v != current_vcpu)
	    vcpu_unpause(v);
    }
quit:
    put_cpu();
    return r;
}

/*
 * /proc/vmscheduler/<vm> read handler: pause the VM's other vcpus,
 * ask the policy's read_schedule_info hook to format its state into a
 * 256-byte buffer, and copy it to userspace.
 *
 * BUG FIX: the original called kmalloc(GFP_KERNEL) AFTER get_cpu()
 * had disabled preemption; GFP_KERNEL allocations may sleep, which is
 * illegal with preemption off.  The allocation now happens first.
 */
static ssize_t kvm_scheduler_read(struct file *file,
	char __user * buffer, size_t count, loff_t *ppos)
{
    int res;
    struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
    struct sched_vm *kvm;
    int i;
    char *msg;
    unsigned long flags;

    /* allocate before disabling preemption: GFP_KERNEL may sleep */
    msg = kmalloc(256, GFP_KERNEL);
    if (!msg)
	return -ENOMEM;

    get_cpu();

    kvm = get_proc_kvm(file);
    if(!kvm) {
	snprintf(msg, 256, "file /proc/%s/%s does not have a corresponding kvm!\n",
		VM_SCHEDULER, pde->name);
	goto quit_with_msg;
    }

    /* quiesce all other vcpus of the VM while its state is read */
    for(i=0; i < MAX_SCHED_VCPUS; i++){
	struct sched_vcpu *v = kvm->vcpus[i];
	if(v && v != current_vcpu)
	    vcpu_pause(v);
    }

    if ( kvm == current_vcpu->vm){
	local_irq_save(flags);
	if(!vcpu_schedule_try_lock(current_vcpu)) {
	    local_irq_restore(flags);
	    snprintf(msg, 256,
		    "scheduler is locked by current domain. Try again!\n");
	    goto quit_with_unpause;
	}
    }

    if ( (SCHED_OP(read_schedule_info, kvm, msg, 256)) == 0 )
	printk("failed read schedule info!\n");

    if ( kvm == current_vcpu->vm) {
	vcpu_schedule_unlock(current_vcpu);
	local_irq_restore(flags);
    }

quit_with_unpause:
    for(i=0; i < MAX_SCHED_VCPUS; i++){
	struct sched_vcpu *v = kvm->vcpus[i];
	if(v && v != current_vcpu)
	    vcpu_unpause(v);
    }

quit_with_msg:
    res = simple_read_from_buffer(buffer, count, ppos, msg, strlen(msg));
    kfree(msg);
    put_cpu();
    return res;
}
static const struct file_operations kvm_scheduler_ops = {
    .read = kvm_scheduler_read,
    .write = kvm_scheduler_write,
};
/*
 * Register a new VM with the scheduler: assign a vmid (derived from the
 * owning process pid for guests), link it on the global VM list, create
 * its /proc/vmscheduler/<id> control file, and call the policy's
 * init_vm hook.  Returns the hook's result.
 *
 * NOTE(review): vmid is printed with %lx, %d and %x in different spots
 * below — the width of vmid_t should be confirmed and the specifiers
 * unified.
 */
int vmsched_init_vm(struct sched_vm *vm)
{
    struct proc_dir_entry *entry;
    char name[MAX_PROCESSID_LEN];
    int r;

    get_cpu();
    BUG_ON(vm->vmid != ANONY_VM_ID);

    if(unlikely(is_idle_vm(vm)))
	vm->vmid = IDLE_VM_ID;
    else if(unlikely(is_host_vm(vm)))
	vm->vmid = HOST_VM_ID;
    else {
	/* guest VMs are identified by the creating process's pid */
	vm->vmid = (unsigned long)current->pid;
	if(vm->vmid > MAX_PROCESSID) { 
	    printk("process id %lx too big. only the lower 32bits are used!\n",
		    vm->vmid); 
	    vm->vmid &= 0x0ffffffff;
	}
    }
    spin_lock(&sched_vm_list_lock);
    list_add(&vm->vm_link, &sched_vm_list);
    spin_unlock(&sched_vm_list_lock);

    /* idle VM gets no proc entry; host VM is named "host" */
    if(!is_idle_vm(vm) && (kvm_scheduler_root != NULL)) {
	if(!is_host_vm(vm))
	    sprintf(name, "%d", vm->vmid);
	else
	    sprintf(name, "host");
	entry = create_proc_entry(name, S_IRUGO | S_IWUGO | S_IFREG, 
		kvm_scheduler_root);
	if (entry)
	    entry->proc_fops = &kvm_scheduler_ops;
	else {
	    /* proc entry failed: VM stays schedulable but uncontrollable */
	    printk("failed to create vm %x's corresponding procfs\n", vm->vmid);
	    printk("user will not be able to control its scheduling!\n");
	    vm->vmid = ANONY_VM_ID;
	}
    }

    r = SCHED_OP(init_vm, vm);
    put_cpu();
    return r;
}

/*
 * Unregister @d from the scheduler: unlink it from the global VM list,
 * remove its /proc control entry (if it got one — vmid is reset to
 * ANONY_VM_ID when proc creation failed), and call the policy's
 * destroy_vm hook.
 */
void vmsched_destroy_vm(struct sched_vm *d)
{
    char name[MAX_PROCESSID_LEN];

    get_cpu();
    spin_lock(&sched_vm_list_lock);
    list_del(&d->vm_link);
    spin_unlock(&sched_vm_list_lock);

    if(d->vmid){
	if(is_host_vm(d))
	    sprintf(name, "host");
	else
	    sprintf(name, "%d", d->vmid);
	remove_proc_entry(name, kvm_scheduler_root);
    }

    SCHED_OP(destroy_vm, d);
    put_cpu();
}

/*
 * Put @v to sleep without waiting for it to stop running.
 * Under the vcpu's schedule lock: if it is no longer runnable (e.g. its
 * pause_count was raised), mark a runnable vcpu offline and notify the
 * policy's sleep hook.  A vcpu that is still runnable is left alone.
 */
void vcpu_sleep_nosync(struct sched_vcpu *v)
{
    unsigned long flags;
    unsigned long my_flags;

    get_cpu();
    local_irq_save(my_flags);
    vcpu_schedule_lock_irqsave(v, flags);

    if ( likely(!vcpu_runnable(v)) )
    {
        if ( v->runstate.state == RUNSTATE_runnable )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());

        SCHED_OP(sleep, v);
    }

    vcpu_schedule_unlock_irqrestore(v, flags);
    local_irq_restore(my_flags);
    put_cpu();

    TRACE_1D(TRC_SCHED_SLEEP, v->vcpu_id);
}
/* Placeholder: in Xen this flushes a vcpu's lazy execution state off
 * the physical CPU; not yet implemented in this KVM port. */
void sync_vcpu_execstate(struct sched_vcpu *v)
{
    printk("TODO: func %s called. \n", __FUNCTION__);
}
/*
 * Put @v to sleep and busy-wait until it has actually been descheduled
 * (is_running cleared by context_saved()).  Must not be called on the
 * current vcpu — it would spin on itself (only warned, not enforced).
 * The printks are debug tracing left in from bring-up.
 */
void vcpu_sleep_sync(struct sched_vcpu *v)
{
    printk("func %s line %d vcpu %p\n", __FUNCTION__, __LINE__, v);
    vcpu_sleep_nosync(v);
    printk("func %s line %d vcpu %p\n", __FUNCTION__, __LINE__, v);

    if(v == current_vcpu) printk("odd sleeping cur_vcpu\n");
    /* wait for the remote CPU to finish context-switching @v out */
    while ( !vcpu_runnable(v) && v->is_running ) 
        cpu_relax();
    printk("func %s line %d vcpu %p\n", __FUNCTION__, __LINE__, v);

    sync_vcpu_execstate(v);
    printk("func %s line %d vcpu %p\n", __FUNCTION__, __LINE__, v);
}

/*
 * Wake @v: if it is runnable again (pause count drained, not blocked),
 * move blocked/offline states back to runnable and invoke the policy's
 * wake hook; a non-runnable, non-blocked vcpu is parked offline.
 *
 * Fix: removed a stray double semicolon after get_cpu().
 *
 * NOTE(review): the first TRACE_2D logs the vcpu POINTER while the
 * closing TRACE_3D logs vcpu_id — likely meant to be vcpu_id in both.
 */
void vcpu_wake(struct sched_vcpu *v)
{
    unsigned long flags;
    unsigned long my_flags;
    int tmp_cpu = get_cpu();

    local_irq_save(my_flags);
    vcpu_schedule_lock_irqsave(v, flags);
    TRACE_2D(TRC_SCHED_WAKE, tmp_cpu, v); 

    if ( likely(vcpu_runnable(v)) )
    {
        if ( v->runstate.state >= RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
        SCHED_OP(wake, v);
    }
    else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
    {
        if ( v->runstate.state == RUNSTATE_blocked )
            vcpu_runstate_change(v, RUNSTATE_offline, NOW());
    }

    vcpu_schedule_unlock_irqrestore(v, flags);
    local_irq_restore(my_flags);

    TRACE_3D(TRC_SCHED_WAKE, tmp_cpu, v->vcpu_id, __LINE__);
    put_cpu();
}
/*
 * Migrate @v to the CPU chosen by the policy's pick_cpu hook, then wake
 * it there.  Bails out (clearing nothing) if the vcpu is still running
 * or the migrating flag was already consumed.
 *
 * Fix: removed a stray double semicolon after get_cpu().  All locking
 * and ordering is otherwise unchanged — v->processor is rewritten
 * under the OLD cpu's schedule lock, which is why the unlock below
 * must name old_cpu explicitly rather than use
 * vcpu_schedule_unlock_irqrestore() (that would look up the NEW cpu).
 */
static void vcpu_migrate(struct sched_vcpu *v)
{
    unsigned long flags;
    unsigned long my_flags;
    int old_cpu;
    int tmp_cpu = get_cpu();

    local_irq_save(my_flags);
    vcpu_schedule_lock_irqsave(v, flags);

    /*
     * NB. Check of v->running happens /after/ setting migration flag
     * because they both happen in (different) spinlock regions, and those
     * regions are strictly serialised.
     */
    if ( v->is_running ||
         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
	local_irq_restore(my_flags);
	put_cpu();
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);

    /* because v->cpu is changed, 
     *we should not use vcpu_schedule_unlock_restore()  
     */
    BUG_ON(old_cpu != tmp_cpu);
    spin_unlock(&per_cpu(schedule_data, old_cpu).schedule_lock);
    pseudo_irq_restore(flags);
    local_irq_restore(my_flags);
    /* Wake on new CPU. */
    vcpu_wake(v);
    put_cpu();
}

/*
 * Force a VCPU through a deschedule/reschedule path.
 * For example, using this when setting the periodic timer period means that
 * most periodic-timer state need only be touched from within the scheduler
 * which can thus be done without need for synchronisation.
 */
/* Placeholder: not yet implemented in this KVM port (see comment above). */
void vcpu_force_reschedule(struct sched_vcpu *v)
{
    printk("TODO: func %s called\n", __FUNCTION__);
}

/* Placeholder: affinity changes are not yet implemented; always
 * reports success so callers proceed unaffected. */
int vcpu_set_affinity(struct sched_vcpu *v, cpumask_t *affinity)
{
    printk("TODO: func %s called\n", __FUNCTION__);
    return 0;
}

/* Block the currently-executing domain until a pertinent event occurs. */
/* Block the currently-executing domain until a pertinent event occurs.
 * Placeholder: not yet implemented in this KVM port. */
static long do_block(void)
{
    printk("TODO: func %s called\n", __FUNCTION__);
    return 0;
}

/* Voluntarily yield the processor for this allocation. */
static long do_yield(void)
{
    TRACE_1D(TRC_SCHED_YIELD, current_vcpu->vcpu_id);
    tasklet_schedule(&per_cpu(schedule_data, raw_smp_processor_id()).sched_tasklet);
    return 0;
}
/* Placeholder for the Xen compat scheduler hypercall; not implemented
 * in this KVM port.  Always returns 0. */
long do_sched_op_compat(int cmd, unsigned long arg)
{
    long ret = 0;
    printk("TODO: unimplemented func %s \n", __FUNCTION__);
    return ret;
}

typedef long ret_t;

#endif /* !COMPAT */

#ifndef COMPAT

/* sched_id - fetch ID of current scheduler */
/* sched_id - fetch ID of the currently-active scheduler policy */
int sched_id(void)
{
    return ops.sched_id;
}


/* Hook called when the scheduler re-picks the same vcpu; intentionally
 * empty in this port. */
void continue_running(struct sched_vcpu *vcpu)
{
}
/*
 * Called once @prev's context is fully saved: clear is_running (this is
 * what vcpu_sleep_sync() busy-waits on), flip a RUNNING status to YIELD,
 * and perform any migration that was deferred while it was running.
 */
void context_saved(struct sched_vcpu *prev)
{
	prev->is_running = 0;
	cmpxchg(&prev->status, VCPU_RUNNING, VCPU_YIELD);

	if(unlikely(test_bit(_VPF_migrating, &prev->pause_flags))){
		vcpu_migrate(prev);
	}
}
/*
 * Switch from @prev to @next: retire @prev via context_saved(), then —
 * for real guest vcpus (not idle/host) — mark @next RUNNING and wake
 * its vcpu thread waiting on next->wq.
 */
void vm_context_switch(struct sched_vcpu *prev, struct sched_vcpu *next)
{
	context_saved(prev);
	if(!is_idle_vcpu(next) && !is_host_vcpu(next)) {
	    cmpxchg(&next->status, VCPU_YIELD, VCPU_RUNNING);
	    wake_up(&next->wq);
	}
}
/*
 * Core scheduling pass for @my_cpu.
 *
 * Return value: always 0; upon completion the pseudo_irq remains
 * disabled (caller's responsibility).
 *
 * Sequence: grab the per-CPU slot (sd->in_use guards against a
 * concurrent module unload), take the schedule lock, cancel the slice
 * timer, ask the policy for the next (vcpu, slice) pair, do the
 * runstate bookkeeping, context-switch, then re-arm the slice timer
 * and the watchdog unless we are shutting down.
 */
static int __vcpu_schedule(int my_cpu)
{
    struct sched_vcpu          *prev = current_vcpu, *next = NULL;
    s_time_t		now;
    ktime_t		ktime_now;
    struct schedule_data *sd;
    struct task_slice     next_slice;
    s32                   r_time;     /* time for new dom to run */

    BUG_ON(thread_preemptible());
		
    ASSERT(!in_irq());

    sd = &per_cpu(schedule_data, my_cpu);

    /* if the kvm module is to be unloaded, the lock might fail */
    if(cmpxchg(&sd->in_use, false, true) != false)
	return 0;

    spin_lock(&sd->schedule_lock);

    hrtimer_cancel(&sd->s_timer);
    ktime_now = sd->s_timer.base->get_time();
    now = ktime_to_ns(ktime_now);

    /* get policy-specific decision on scheduling... */
    next_slice = ops.do_schedule(now);

    r_time = next_slice.time;
    next = next_slice.task;

    sd->curr = next;
    
    /* on shutdown, skip runstate accounting but still context-switch */
    if(unlikely(shutting_down)) {
	spin_unlock(&sd->schedule_lock);
	goto switch_and_quit;
    }

    if ( unlikely(prev == next) )
    {
	    /* same vcpu re-picked: no switch, no runstate change */
	    spin_unlock(&sd->schedule_lock);

	    continue_running(prev);
	    goto finished;
    }

    TRACE_1D(TRC_SCHED_SWITCH_INFPREV,
             // prev->domain->domain_id,
             now - prev->runstate.state_entry_time);
    TRACE_2D(TRC_SCHED_SWITCH_INFNEXT,
             // next->domain->domain_id,
             (next->runstate.state == RUNSTATE_runnable) ?
             (now - next->runstate.state_entry_time) : 0,
             r_time);

    ASSERT(prev->runstate.state == RUNSTATE_running);
    vcpu_runstate_change(
        prev,
        (test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
         (vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
        now);

    ASSERT(next->runstate.state != RUNSTATE_running);
    vcpu_runstate_change(next, RUNSTATE_running, now);

    ASSERT(!next->is_running);
    next->is_running = 1;

    spin_unlock(&sd->schedule_lock);

    TRACE_3D(TRC_SCHED_SWITCH, __LINE__, prev->vcpu_id, next->vcpu_id);
switch_and_quit:
    vm_context_switch(prev, next);
finished:
    /* re-arm the slice timer for the granted slice and restart the
     * watchdog; both stay cancelled while shutting down */
    hrtimer_cancel(&sd->watchdog);
    if (!shutting_down) {
	hrtimer_start(&sd->s_timer, ktime_add_ns(ktime_now, r_time), HRTIMER_MODE_ABS);

	/* restart the watchdog */
	ktime_now = sd->watchdog.base->get_time();
	hrtimer_start(&sd->watchdog, ktime_add_ns(ktime_now, WATCHDOG_NS), HRTIMER_MODE_ABS);
    }
    sd->in_use = false;
    return 0;
}
/* 
 * The main function (tasklet body)
 * - deschedule the current domain (scheduler independent).
 * - pick a new domain (scheduler dependent).
 *
 * Entry is gated by the per-CPU sched_state tri-state:
 *   SCHEDULER_USER   — userspace holds the scheduler; spin until free.
 *   SCHEDULER_KERNEL — another kernel path is scheduling; back off and
 *                      re-arm the slice timer 10us out instead.
 *   SCHEDULER_FREE   — claim it via cmpxchg and run __vcpu_schedule().
 */
static void vcpu_schedule(unsigned long _unused)
{

    int my_cpu = raw_smp_processor_id();
    // tasklet_disable(&per_cpu(schedule_data, my_cpu).tick_tasklet);
try_again:
    while(per_cpu(schedule_data, my_cpu).sched_state == SCHEDULER_USER) 
	;
    if(per_cpu(schedule_data, my_cpu).sched_state == SCHEDULER_KERNEL) {
	/* for the case that others has entered the critical section
	 * we abort scheduling the vcpus this time
	 */
	struct schedule_data *sd = &per_cpu(schedule_data, my_cpu);
	ktime_t now = sd->s_timer.base->get_time();

	hrtimer_cancel(&sd->s_timer);
	if(!shutting_down) 
	    hrtimer_start(&sd->s_timer, ktime_add_ns(now, 10000), HRTIMER_MODE_ABS);
	// tasklet_enable(&per_cpu(schedule_data, my_cpu).tick_tasklet);
	return;
    }
    /* claim FREE -> KERNEL atomically; on a lost race, re-examine state */
    if(cmpxchg(&per_cpu(schedule_data, my_cpu).sched_state,
		SCHEDULER_FREE, SCHEDULER_KERNEL) != SCHEDULER_FREE)
	goto try_again;

    preempt_disable();
    __vcpu_schedule(my_cpu);
    per_cpu(schedule_data, my_cpu).sched_state = SCHEDULER_FREE;

    // tasklet_enable(&per_cpu(schedule_data, my_cpu).tick_tasklet);
    preempt_enable();
    // put_cpu();

    return;
}

/* The scheduler timer: force a run through the scheduler.
 *
 * hrtimers can fire on a CPU other than the one their schedule_data
 * belongs to; in that case the reschedule request is forwarded to the
 * owning CPU via the pending-IPI queue and its helper thread, instead
 * of scheduling the tasklet locally. */
static enum hrtimer_restart s_timer_fn(struct hrtimer* timer)
{
    int cpu = raw_smp_processor_id();
    struct schedule_data *sd = container_of(timer, struct schedule_data, s_timer);
    if(cpu != sd->id){
	/* fired on a foreign CPU: forward to the owner */
	int r;
	r = insert_pending_ipi(cpu, cpumask_of_cpu(sd->id), kvm_force_tasklet_schedule, NULL, 1);
	// tasklet_schedule(&sd->ipi_tasklet);
	wake_up(&sd->ipi_wq);
	if(r){
	    dump_traces(NULL);
	    BUG_ON(1);
	}
    }else{
	per_cpu(schedule_data, raw_smp_processor_id()).can_migrate = !in_atomic_preempt_off();
	tasklet_schedule(&per_cpu(schedule_data, cpu).sched_tasklet);
    }
    /* no need to restart the timer. the vcpu_schedule() will do it */
    return HRTIMER_NORESTART;
}
/* Stop periodic scheduling on @cpu by cancelling both of its timers
 * (slice timer and watchdog). */
static void per_cpu_stop_sched(int cpu)
{
    hrtimer_cancel(&per_cpu(schedule_data, cpu).s_timer);
    hrtimer_cancel(&per_cpu(schedule_data, cpu).watchdog);
}
/* Tear down @cpu's scheduler tasklet; waits for a running instance to
 * finish (tasklet_kill semantics). */
static void per_cpu_kill_scheduler(int cpu)
{
    tasklet_kill(&per_cpu(schedule_data, cpu).sched_tasklet);
}

/*
 * Begin module shutdown: raise the global shutting_down flag, then for
 * each online CPU retire its IPI helper thread (ipi_quit is cleared by
 * the helper as it exits, which is what the schedule() loop waits for),
 * tell the policy to stop, and cancel the CPU's timers.
 */
void stop_auto_schedule(void)
{
    int i;

    shutting_down = true;
    barrier();	/* make the flag visible before waking the helpers */

    for_each_online_cpu(i) {
	struct schedule_data *sd = &per_cpu(schedule_data, i);

	sd->ipi_quit = true;
	wake_up(&sd->ipi_wq);
	printk("waiting ipi helper stop!\n");
	while(sd->ipi_quit) schedule();
	printk("ipi helper stopped!\n");
	SCHED_OP(stop_schedule, i);
	per_cpu_stop_sched(i);

    }
}
/* Final per-CPU disable step: kill the scheduler tasklet. */
static inline void scheduler_disabled(int cpu)
{
    per_cpu_kill_scheduler(cpu);
}
/* Second shutdown phase (after stop_auto_schedule()): let the policy
 * disable itself on each CPU, then kill that CPU's tasklet. */
void wait_scheduler_stops(void)
{
    int i;
    for_each_online_cpu(i) {
	SCHED_OP(disable_scheduler, i);
	scheduler_disabled(i);
    }
}
/*
 * Start the scheduling policy on every online CPU.
 * Returns 0 on success, -EFAULT as soon as any CPU fails to start
 * (already-started CPUs are not rolled back).
 */
int scheduler_start(void)
{
    int cpu;

    for_each_online_cpu(cpu) {
	int err = SCHED_OP(start_scheduler, cpu);

	if (!err)
	    continue;
	printk("error start scheduler %d!\n", cpu);
	return -EFAULT;
    }
    return 0;
}
/* Final cleanup: remove the /proc/vmscheduler directory and free each
 * CPU's pending-IPI buffer. */
void scheduler_destroy(void)
{
    int i;

    if(kvm_scheduler_root)
	remove_proc_entry(VM_SCHEDULER, NULL);

    for_each_online_cpu(i) {
	destroy_pending_ipi_buf(i);
    }
}

extern void inject_pending_ipi(void);
static void per_cpu_init_sched(int cpu)
{
	struct pending_work * pd_work;
	ktime_t now;
	struct schedule_data *sd = &per_cpu(schedule_data, cpu);
	struct task_struct *task;
	char name[]="ipi_helper_0";

	sd->sched_state = SCHEDULER_FREE;
	tasklet_init(&sd->sched_tasklet, vcpu_schedule, (unsigned long)NULL);

	name[strlen(name)-1] += cpu;
	init_waitqueue_head(&sd->ipi_wq);
	sd->ipi_quit = false;
	task = kthread_run(inject_pending_ipi, cpu, name);
	if(IS_ERR(task)){
	    printk("error creating help thread %d\n", cpu);
	    BUG_ON(1);
	}
	kvm_sched_setaffinity(task->pid, cpumask_of_cpu(cpu));
	
	rwlock_init(&per_cpu(pseudo_cli, cpu));
	spin_lock_init(&sd->schedule_lock);
	sd->in_use = false;
	sd->can_migrate = false;
	sd->id = cpu;

	/* the scheduler timer */
	hrtimer_init(&sd->s_timer,  
		CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	sd->s_timer.function = s_timer_fn;
	hrtimer_data_pointer(&sd->s_timer);

	/* the watchdog timer */
	hrtimer_init(&sd->watchdog, 
		CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	sd->watchdog.function = dump_cpu_trace;
	hrtimer_data_pointer(&sd->watchdog);

	/* start the watchdog */
	now = sd->watchdog.base->get_time();
	if(hrtimer_start(&sd->watchdog, 
		    ktime_add_ns(now, WATCHDOG_NS), HRTIMER_MODE_ABS)) {
	    printk("start watchdog timer failed!\n");
	    BUG_ON(1);
	}
}

/* Initialise the data structures: global VM list, /proc root directory,
 * per-CPU scheduler state, then install and init the first (and only)
 * registered policy from schedulers[]. */
void vmscheduler_init(void)
{
	int i;
	
	INIT_LIST_HEAD(&sched_vm_list);
	spin_lock_init(&sched_vm_list_lock);

	kvm_scheduler_root = proc_mkdir(VM_SCHEDULER, NULL);
	if (!kvm_scheduler_root){
	    /* non-fatal: scheduling still works, just not controllable */
	    printk("fail to create the /proc/" VM_SCHEDULER "kvm procfs!\n");
	    printk("the user scheduler control function will be disabled!\n");
	}

	for_each_online_cpu(i) 
	    per_cpu_init_sched(i);

	/* copy the policy's ops table; SCHED_OP dispatches through `ops` */
	ops = *schedulers[0];
	SCHED_OP(init);
}

/*
 * Debug dump: print global scheduler settings and, per online CPU, the
 * policy's view of that CPU's run queue (under its schedule lock).
 * The @key parameter is unused here (debug-key callback signature).
 */
void dump_runq(unsigned char key)
{
    s_time_t      now = NOW();
    int           i;
    unsigned long flags;

    local_irq_save(flags);

    printk("Scheduler: %s (%s)\n", ops.name, ops.opt_name);
    SCHED_OP(dump_settings);
    printk("NOW=0x%08X%08X\n",  (u32)(now>>32), (u32)now);

    for_each_online_cpu ( i )
    {
        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
        printk("CPU[%02d] ", i);
        SCHED_OP(dump_cpu_state, i);
        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
    }

    local_irq_restore(flags);
}

#endif /* !COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
