#define MODVERSIONS
#include <linux/init.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>

// Alex added
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/wait.h>

#include <asm/uaccess.h>


#include "sched-if.h"
#include "schedule.h"
#include "trace.h"
#include "external-if.h"
#include "ipi.h"


/* The Linux kernel does not export sched_setaffinity(), so its address
 * is passed in at insmod time via the "setaffinity" module parameter
 * and stashed in the sched_setaffinity_p function pointer below.
 *
 * The parameter must be wide enough to hold a kernel virtual address:
 * a plain int truncates on 64-bit (see the 0xffffffff9002ecd9 example
 * printed by scheduler_init()), hence unsigned long / ulong.
 */
#include <linux/moduleparam.h>
static unsigned long setaffinity = -1;
module_param(setaffinity, ulong, 0);

/* sched_setaffinity() changed its signature in 2.6.27 (mask passed by
 * value before, by pointer after); keep both prototypes selectable. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
long (*sched_setaffinity_p)(pid_t pid, cpumask_t new_mask);
#else
long (*sched_setaffinity_p)(pid_t pid, cpumask_t* in_mask);
#endif

/* Module identity strings used for log messages and device naming. */
#define vmscheduler_api_version 1
#define vmscheduler_VERSION "0.0.1"
#define vmscheduler_MODULE_NAME "vmscheduler"
#define vmscheduler_DRIVER_NAME   vmscheduler_MODULE_NAME " guest os scheduler " vmscheduler_VERSION

// based on earlier proprietary Tutis code; this modified version goes under GPL
MODULE_AUTHOR("Xiaojian Liu");
MODULE_DESCRIPTION("guest os scheduler");
MODULE_LICENSE("GPL");

#define vmscheduler_DEBUG /* define to enable copious debugging info */

/* Debug/error printk helpers; "<1>" is printk level 1 (KERN_ALERT), so
 * these messages are emitted at a very high log level. */
#ifdef vmscheduler_DEBUG
#define DPRINTK(fmt, args...) printk("<1>" "%s: " fmt, __FUNCTION__ , ## args)
#else
#define DPRINTK(fmt, args...)
#endif

#define ERROR(fmt, args...) printk("<1>" "%s: " fmt, __FUNCTION__ , ## args)

/* Slab caches for the per-VM and per-vCPU scheduler bookkeeping
 * structures; created in scheduler_init(), destroyed on cleanup. */
struct kmem_cache *sched_vm_cache;
struct kmem_cache *sched_vcpu_cache;

/* The two special VMs.  scheduler_init_vm() assigns the first VM it
 * sees to idle_vm and the second to host_vm (see the comment there). */
struct sched_vm *idle_vm;
struct sched_vm *host_vm;
/*
 * Hook invoked when a vcpu is switched onto a physical CPU: account
 * the transition to the RUNSTATE_running state.
 */
static void scheduler_notify_in(void* priv)
{
    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);

    /* Mirror scheduler_notify_out(): an unknown priv is fatal here,
     * and this makes the failure explicit instead of a NULL deref. */
    BUG_ON(!vcpu);
    vcpu_runstate_change(vcpu, RUNSTATE_running, NOW());
}
/*
 * Hook invoked when a vcpu is switched off a physical CPU.  Record the
 * runstate it transitions into: blocked when the blocked pause flag is
 * set, runnable when it could still run, offline otherwise.
 */
static void scheduler_notify_out(void* priv)
{
    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);
    int next_state;

    BUG_ON(!vcpu);

    if (test_bit(_VPF_blocked, &vcpu->pause_flags))
	next_state = RUNSTATE_blocked;
    else if (vcpu_runnable(vcpu))
	next_state = RUNSTATE_runnable;
    else
	next_state = RUNSTATE_offline;

    vcpu_runstate_change(vcpu, next_state, NOW());
}
/*
 * Return 1 when the VM behind @priv is one of the two special VMs
 * (the idle VM or the host VM), 0 otherwise.
 */
static int scheduler_special_vm(void* priv)
{
    struct sched_vm *vm = get_vm_by_priv(priv);

    if (is_idle_vm(vm))
	return 1;
    return is_host_vm(vm) ? 1 : 0;
}
static int scheduler_create_vcpu(void* vm_priv, void* priv, void** my_priv, int n)
{
    struct sched_vm  *vm = get_vm_by_priv(vm_priv);
    struct sched_vcpu *vcpu; 
    struct timespec now;

    BUG_ON(!vm);
    if(vm->vcpus[n]) {
	printk("func %s line %d: the vcpu already exists!\n",
		__FUNCTION__, __LINE__);
	return -EEXIST;
    }

    vcpu = kmem_cache_zalloc(sched_vcpu_cache, GFP_KERNEL);
    if(!vcpu) {
	printk("func %s line %d: fail to create vcpu!\n",
		__FUNCTION__, __LINE__);
	return -ENOMEM;
    }

    vm->vcpus[n] = vcpu;
    vcpu->vm = vm;
    init_waitqueue_head(&vcpu->wq);
    vcpu->kvm_priv = priv;
    if(my_priv) *my_priv = (void*)vcpu;
    printk("kvm_vcpu %p sched_vcpu %p my_priv %p\n", priv, vcpu, my_priv);
    vcpu->status = VCPU_YIELD;
    vcpu->is_running = false;
    vcpu->thread = current;
    // vcpu->cpu_affinity
    vcpu->pause_flags = 0;
    atomic_set(&vcpu->pause_count, 0);
    vcpu->processor = NO_PROCESSOR;
    vcpu->vcpu_id = n;

    if (unlikely(is_idle_vm(vm) || is_host_vm(vm))) {
	vcpu->pause_flags = 0;
	atomic_set(&vcpu->pause_count, 0);
	if(is_idle_vm(vm))
	    printk(" idle domain ");
	else {
	    printk(" host domain ");
	}
	printk("vcpu %d created\n", n);
    }
    if(likely(!is_idle_vm(vm)))
	set_bit(_VPF_blocked, &vcpu->pause_flags);
    vcpu->status = VCPU_YIELD;
    vcpu->runstate.state = is_idle_vcpu(vcpu)? RUNSTATE_running:RUNSTATE_offline;
    now = current_kernel_time();
    vcpu->runstate.state_entry_time = timespec_to_ns(&now);
    return vmsched_init_vcpu(vcpu, n % num_online_cpus());
}
static void scheduler_apply_cpu(void* priv)
{
    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);
    if(test_and_clear_bit(_VPF_blocked, &vcpu->pause_flags)) 
	vcpu_wake(vcpu);
    wait_event(vcpu->wq, (vcpu->status == VCPU_RUNNING));
}
static void scheduler_acquire_cpu(void* priv)
{
    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);
    if(test_and_clear_bit(_VPF_blocked, &vcpu->pause_flags)) {
	vcpu_wake(vcpu);
	tasklet_schedule(&per_cpu(schedule_data, raw_smp_processor_id()).sched_tasklet);
    }
    wait_event(vcpu->wq, (vcpu->status == VCPU_RUNNING));
}
static void scheduler_yield_nosync(void* priv)
{
    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);
    if(!vcpu) {
	printk("%p does not have a corresponding sched_vcpu!\n", priv);
	BUG_ON(1);
    }
    // set_bit(_VPF_blocked, &vcpu->pause_flags);
}

static void scheduler_yield_cpu(void* priv)
{

    struct sched_vcpu *vcpu = get_vcpu_by_priv(priv);

    if(!vcpu) {
	printk("%p does not have a corresponding sched_vcpu!\n", priv);
	BUG_ON(1);
    }
    // set_bit(_VPF_blocked, &vcpu->pause_flags);
    // tasklet_schedule(&per_cpu(schedule_data, raw_smp_processor_id()).sched_tasklet);
    schedule();
}
static int scheduler_destroy_vm(void* private)
{
    struct sched_vm *vm = get_vm_by_priv(private);
    int i;

    if(!vm){
       printk("%p does not have a corresponding sched_vm!\n", private);
       return -EFAULT;
    }
    if (!vm->is_paused_by_controller && !is_idle_vm(vm))
	vm_pause(vm);

    printk("func %s line %d\n", __FUNCTION__, __LINE__);
    for (i=0; i<MAX_SCHED_VCPUS; i++) 
	if (vm->vcpus[i]) {
	    vmsched_destroy_vcpu(vm->vcpus[i]);
	    kmem_cache_free(sched_vcpu_cache, vm->vcpus[i]);
	    vm->vcpus[i] = NULL;
	}

    printk("func %s line %d\n", __FUNCTION__, __LINE__);
    vmsched_destroy_vm(vm);
    printk("func %s line %d\n", __FUNCTION__, __LINE__);
    kmem_cache_free(sched_vm_cache, vm);
    return 0;
}
/*
 * Allocate and initialise the scheduler-side state for a new VM.
 *
 * @private:    upper-level (kvm) handle for the VM
 * @my_private: out parameter; receives the new struct sched_vm pointer
 *
 * Returns -ENOMEM on allocation failure, otherwise the result of
 * vmsched_init_vm().
 *
 * NOTE(review): on vmsched_init_vm() failure the sched_vm is not freed
 * and idle_vm/host_vm may already point at it — confirm callers treat
 * that as fatal.  The idle_vm/host_vm bootstrap below is also not
 * protected against concurrent calls; presumably VM creation is
 * serialised by the caller — verify.
 */
static int scheduler_init_vm(void* private, void** my_private)
{
    struct sched_vm *vm = kmem_cache_zalloc(sched_vm_cache, GFP_KERNEL);

    if(!vm) 
	return -ENOMEM;

    vm->kvm_priv = private;
    if(my_private) *my_private = (void*)vm;
    printk("func %s kvm_vm %p kvm_private %p sched_vm %p\n", 
	    __FUNCTION__, private, my_private, vm);
    vm->is_paused_by_controller = 0;
    atomic_set(&vm->pause_count, 0);
    INIT_LIST_HEAD(&vm->vm_link);
    vm->vmid = ANONY_VM_ID;

    /* it is required that the first vm should be idle_vm, 
     * and the second should be host_vm
     */
    if (unlikely(!idle_vm)) {
	idle_vm = vm;
    }else if (unlikely(!host_vm)) {
	host_vm = vm;
	/* the host VM starts controller-paused; it is unpaused at the
	 * end of scheduler_init() once all its vcpus exist */
	vm->is_paused_by_controller = true;
	atomic_inc(&vm->pause_count);
    }

    /* save the sched_vm to the upper level's data structure, 
     * so that it can be retrieved later
     */
    return vmsched_init_vm(vm);
}

/* Let the misc subsystem pick a minor number for the control device.
 * (The api_version/VERSION/MODULE_NAME/DRIVER_NAME macros are defined
 * once near the top of the file; the identical redefinitions that used
 * to sit here were redundant and have been removed.) */
#define vmscheduler_MISCDEV_MINOR       MISC_DYNAMIC_MINOR
static int vmscheduler_dev_open(struct inode *inode, struct file *filp)
{
    DPRINTK(" open device called\n");
    try_module_get(THIS_MODULE);
    return 0;
}
/*
 * release() handler for the misc device: drop the module reference
 * taken in vmscheduler_dev_open().
 */
static int vmscheduler_dev_release(struct inode *inode, struct file *filp)
{
    module_put(THIS_MODULE);
    return 0;
}
/* File operations for the control device; only open/release are
 * implemented.  Uses C99 designated initializers instead of the
 * obsolete GNU "label:" initializer syntax. */
static struct file_operations vmscheduler_chrdev_ops = {
    .owner   = THIS_MODULE,
    .open    = vmscheduler_dev_open,
    .release = vmscheduler_dev_release,
};
static struct miscdevice vmscheduler_miscdev = {
    vmscheduler_MISCDEV_MINOR,
    vmscheduler_MODULE_NAME,
    &vmscheduler_chrdev_ops,
};
extern void vmscheduler_init(void);
/*
 * Module init: validate the setaffinity parameter, register the misc
 * device, create the slab caches and per-cpu trace/IPI buffers, hook
 * the scheduler callbacks into kvm, create the idle and host VMs with
 * one vcpu per online CPU, and finally kick the per-cpu scheduling
 * tasklets and unpause the host VM.
 *
 * Returns 0 on success or a negative errno.  Early failures (misc
 * device, caches, buffers) are fully unwound via the goto chain; the
 * later kvm failure paths are only partially unwound (see FIXME).
 */
static int __init
scheduler_init(void)
{
	int cpu;
	int my_cpu;
	int rc;

	sched_setaffinity_p = (void*)setaffinity;
	if( setaffinity == -1 ){ 
	    printk("Please insert this module with parameters\n"); 
	    printk("to notify me the address of sched_setaffinity\n");
	    printk("Example:\n");
	    printk("  insmod kvm-intel.ko setaffinity=0xffffffff9002ecd9\n");
	    return -EINVAL;
	}else{
	    printk("the address of function sched_setaffinity is %p \n", 
		    sched_setaffinity_p);
	}

	DPRINTK("ENTER\n");
	printk(KERN_INFO vmscheduler_DRIVER_NAME " loaded\n");

	if ((rc = misc_register(&vmscheduler_miscdev))) {
	    printk("error registering  misc_device\n");
	    return rc;
	}

	rc = -ENOMEM;
	sched_vm_cache = kmem_cache_create("sched_vm", sizeof(struct sched_vm),
		__alignof__(struct sched_vm), 0, NULL);
	if (!sched_vm_cache)
	    goto err_misc;

	sched_vcpu_cache = kmem_cache_create("sched_vcpu", sizeof(struct sched_vcpu),
		__alignof__(struct sched_vcpu), 0, NULL);
	if (!sched_vcpu_cache)
	    goto err_vm_cache;

	for_each_online_cpu(cpu) {
	    if(init_trace_buf(cpu) || init_pending_ipi_buf(cpu)) {
		int i;
		/* free the buffers that were already allocated
		 * (including any partial allocation on this cpu) */
		for (i=0;i <= cpu; i++) {
		    destroy_pending_ipi_buf(i);
		    free_trace_buf(i);
		}
		printk("alloc trace buf failed. quit!\n");
		rc = -ENOMEM;
		goto err_vcpu_cache;
	    }
	}

	/* hook to kvm */
	sched_init_vm = scheduler_init_vm;
	sched_destroy_vm = scheduler_destroy_vm;
	sched_yield_cpu = scheduler_yield_cpu;
	sched_acquire_cpu = scheduler_acquire_cpu;
	sched_apply_cpu   = scheduler_apply_cpu;
	sched_yield_nosync = scheduler_yield_nosync;
	sched_create_vcpu = scheduler_create_vcpu;
	sched_special_vm  = scheduler_special_vm;
	sched_notify_in = scheduler_notify_in;
	sched_notify_out = scheduler_notify_out;

	/* create the idle vm first (scheduler_init_vm() assigns the
	 * first VM created to idle_vm, the second to host_vm) */
	vmscheduler_init();
	if (!kvm_create_vm()) {
	    printk("fail to create the idle vm!\n");
	    /* FIXME(review): this and the following error paths leave
	     * the kvm hooks installed and leak the caches, buffers and
	     * misc device; a full unwind would have to stop the
	     * scheduler first. */
	    return -EFAULT;
	}
	for_each_online_cpu(cpu)
	    if (kvm_vm_ioctl_create_vcpu(idle_vm->kvm_priv, cpu) < 0) {
		int i;
		for (i=0; i<cpu; i++)
		    kvm_arch_vcpu_destroy(idle_vm->vcpus[i]->kvm_priv);
		printk("creating idle vcpus failed. quit!\n");
		return -EFAULT;
	    }
	if(scheduler_start()){
	    printk("error starting scheduler!\n");
	    return -EFAULT;
	}

	/* next is to create the host vm*/
	if (!kvm_create_vm()) {
	    printk("fail to create the host vm!\n");
	    kvm_destroy_vm(idle_vm->kvm_priv);
	    return -EFAULT;
	}
	for_each_online_cpu(cpu)
	    if (kvm_vm_ioctl_create_vcpu(host_vm->kvm_priv, cpu) < 0) {
		kvm_destroy_vm(host_vm->kvm_priv);
		kvm_destroy_vm(idle_vm->kvm_priv);
		return -EFAULT;
	    }

	/* the host vcpus were created blocked; let them run */
	for (cpu=0; cpu < MAX_SCHED_VCPUS; ++cpu) {
	    if (host_vm->vcpus[cpu])
		clear_bit(_VPF_blocked, &host_vm->vcpus[cpu]->pause_flags);
	}

	/* kick the scheduling tasklet on every online cpu */
	my_cpu = get_cpu();
	tasklet_schedule(&per_cpu(schedule_data, my_cpu).sched_tasklet);
	for_each_online_cpu(cpu) {
	    if (cpu != my_cpu)
		smp_call_function_mask(cpumask_of_cpu(cpu), 
			tasklet_schedule,
			(void*)&per_cpu(schedule_data, cpu).sched_tasklet,
			1);
	}
	put_cpu();
	vm_unpause_by_systemcontroller(host_vm);

	DPRINTK("vmscheduler loaded\n");
	return 0;

err_vcpu_cache:
	kmem_cache_destroy(sched_vcpu_cache);
err_vm_cache:
	kmem_cache_destroy(sched_vm_cache);
err_misc:
	misc_deregister(&vmscheduler_miscdev);
	return rc;
}

/*
 * Module exit: unwind scheduler_init() in reverse-ish order — stop new
 * scheduling, pause and destroy the host VM, wait for the scheduler to
 * quiesce, destroy the idle VM, tear down the scheduler core and the
 * slab caches, then clear the kvm hooks.
 *
 * NOTE(review): the hook pointers are cleared only after the caches
 * are destroyed; presumably no kvm callbacks can still fire at that
 * point — verify against the scheduler shutdown path.
 */
static void __exit
scheduler_cleanup(void)
{
	DPRINTK("ENTER\n");
	misc_deregister(&vmscheduler_miscdev);
	stop_auto_schedule();
	vm_pause_by_systemcontroller(host_vm);
	kvm_destroy_vm(host_vm->kvm_priv);
	wait_scheduler_stops();
	kvm_destroy_vm(idle_vm->kvm_priv);
	scheduler_destroy();
	kmem_cache_destroy(sched_vm_cache);
	kmem_cache_destroy(sched_vcpu_cache);
	/* unhook to KVM
	 * TODO:
	 *   is there any difference if we use the random unhook sequence?
	 */
	sched_init_vm = NULL;
	sched_destroy_vm = NULL;
	sched_yield_cpu = NULL; 
	sched_acquire_cpu = NULL; 
	sched_apply_cpu  = NULL;
	sched_yield_nosync =  NULL;
	sched_create_vcpu =  NULL;
	sched_special_vm  =  NULL;
	sched_notify_in =  NULL;
	sched_notify_out =  NULL;
	DPRINTK("EXIT\n");
}

/* Register the module entry/exit points. */
module_init (scheduler_init);
module_exit (scheduler_cleanup);

