#ifndef SCHEDULE_PRIVATE_H
#define SCHEDULE_PRIVATE_H
#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include "trace.h"
#include "sched_types.h"

/* Values for sched_vcpu.status (presumably — confirm against the .c users). */
#define VCPU_RUNNING 1
#define VCPU_YIELD  2

/* VCPU is currently running on a physical CPU. */
#define RUNSTATE_running  0

/* VCPU is runnable, but not currently scheduled on any physical CPU. */
#define RUNSTATE_runnable 1

/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
#define RUNSTATE_blocked  2

/*
 * VCPU is not runnable, but it is not blocked.
 * This is a 'catch all' state for things like hotplug and pauses by the
 * system administrator (or for critical sections in the hypervisor).
 * RUNSTATE_blocked dominates this state (it is the preferred state).
 */
#define RUNSTATE_offline  3

/*
 * Per-VCPU run-state accounting. The layout and RUNSTATE_* values appear to
 * be modeled on Xen's public vcpu_runstate_info interface — TODO confirm.
 */
struct vcpu_runstate_info {
    /* VCPU's current state (RUNSTATE_*). */
    int state;

    /* When was current state entered (system time, ns)? */
    uint64_t state_entry_time;

    /*
     * Time spent in each RUNSTATE_* (ns), indexed by the RUNSTATE_* value.
     * The sum of these times is guaranteed not to drift from system time.
     */
    uint64_t time[4];
};
struct sched_vm;	/* forward declaration; full definition below */

/* Upper bound on VCPUs tracked per VM by this scheduler. */
#define MAX_SCHED_VCPUS	4
/* sched_vcpu.processor value meaning "not assigned to any physical cpu". */
#define NO_PROCESSOR	(-1)

/* Scheduler-side representation of one virtual CPU. */
struct sched_vcpu{
    void    *kvm_priv;			/* opaque handle owned by KVM */
    struct sched_vm* vm;		/* VM this vcpu belongs to */
    struct task_struct* thread;		/* host task backing this vcpu — presumably the KVM vcpu thread; confirm */
    wait_queue_head_t wq;		/* waitqueue for the vcpu thread */
    volatile unsigned int status;	/* presumably VCPU_RUNNING / VCPU_YIELD */
    volatile bool	is_running;	/* true while scheduled on a physical cpu */
    cpumask_t		cpu_affinity;	/* physical cpus this vcpu may run on */
    unsigned long	pause_flags;	/* bit field — presumably _VPF_* bits (see below) */
    atomic_t		pause_count;	/* nested pause requests on this vcpu */
    struct vcpu_runstate_info	runstate;   /* RUNSTATE_* time accounting */
    void		*sched_priv;	/* per-scheduler private data */
    int			processor;	/* current physical cpu, or NO_PROCESSOR */
    int			vcpu_id;	/* index within the VM */
};
/* VMs are identified by a pid_t — presumably the owning process id; confirm. */
typedef pid_t vmid_t;
#define IDLE_VM_ID  ((vmid_t)(-1))	/* the per-cpu idle VM */
#define HOST_VM_ID  1			/* the host itself, modeled as a VM */
#define ANONY_VM_ID 0			/* anonymous / not-yet-assigned id */
/* Largest representable process id: 0xffffffff == 4294967295. */
#define MAX_PROCESSID	((unsigned long)0x0ffffffff)
#define MAX_PROCESSID_LEN  11 /* buffer size: the 10 digits of "4294967295" plus the NUL terminator */

/* Scheduler-side representation of one virtual machine. */
struct sched_vm{
    void    *kvm_priv;	    /* the private info hold by KVM*/
    bool    is_paused_by_controller;	/* paused by an external controller (see vcpu_runnable()) */
    atomic_t	pause_count;		/* nested pause requests on the whole VM */
    void    *sched_priv;		/* per-scheduler private data */
    struct list_head vm_link;		/* link in a global VM list — presumably; confirm in the .c */
    struct sched_vcpu* vcpus[MAX_SCHED_VCPUS];	/* vcpus indexed by vcpu_id */
    vmid_t  vmid;			/* VM identifier (IDLE_VM_ID / HOST_VM_ID / pid) */
};

extern struct sched_vm* host_vm;
extern struct sched_vm* idle_vm;
/* Is @vm the VM representing the host itself? */
static inline bool is_host_vm(struct sched_vm* vm)
{
    return host_vm == vm;
}
/* Is @vm the idle VM? */
static inline bool is_idle_vm(struct sched_vm* vm)
{
    return idle_vm == vm;
}
/* Does @vcpu belong to the idle VM? */
static inline bool is_idle_vcpu(struct sched_vcpu* vcpu)
{
    struct sched_vm *owner = vcpu->vm;
    return owner == idle_vm;
}
/* Does @vcpu belong to the host VM? */
static inline bool is_host_vcpu(struct sched_vcpu* vcpu)
{
    struct sched_vm *owner = vcpu->vm;
    return owner == host_vm;
}
/*
 * Bit numbers for sched_vcpu.pause_flags — presumably set/tested with the
 * kernel bitops; confirm in the .c. Note bit 2 is skipped (reserved?):
 * _VPF_migrating is bit 3, not 2.
 */
#define _VPF_blocked         0
#define _VPF_down            1
#define _VPF_migrating       3

/*
 * Result of scheduler->do_schedule(): which vcpu to run next and for how
 * long (ns, going by the NOW() helper below — TODO confirm units).
 */
struct task_slice {
    struct sched_vcpu *task;	/* vcpu to run next */
    s_time_t     time;		/* length of the granted time slice */
};

#define SCHEDULER_FREE	    0 /* none is using the scheduler	    */
#define SCHEDULER_USER	    1 /* it is used by from user requirement*/
#define SCHEDULER_KERNEL    2 /* it is used by scheduler or ticker  */

/*
 * Per-physical-cpu scheduler state (one instance per cpu via DECLARE_PER_CPU
 * below). schedule_lock protects curr; see vcpu_schedule_lock() for the
 * lock/recheck discipline used when a vcpu may migrate between cpus.
 */
struct schedule_data {
    spinlock_t	    schedule_lock;  /* spinlock protecting curr		*/
    struct sched_vcpu    *curr;	    /* current task			*/
    struct sched_vcpu    *idle;	    /* idle task for this cpu		*/
    void	    *sched_priv;    /* per-scheduler private data	*/
    struct hrtimer  s_timer;	    /* scheduling timer			*/
    int		    id;		    /* the cpu id			*/
    struct hrtimer  watchdog;	    /* the watchdog timer		*/
    struct tasklet_struct sched_tasklet;  /* per cpu schedule tasklet	*/
    wait_queue_head_t ipi_wq;	    /* ipi helper thread waitqueue	*/
    volatile bool	ipi_quit;   /* the ipi helper should quit	*/
    struct tasklet_struct tick_tasklet; /* per cpu tick tasklet */
    volatile int sched_state;	    /* SCHEDULER_FREE/USER/KERNEL — see pseudo_irq_cli() */
    volatile bool in_use;          /* indicate the whether the schedule can work*/
    volatile bool can_migrate;	    /* vcpus may be migrated off this cpu — presumably; confirm */
};

DECLARE_PER_CPU(struct schedule_data, schedule_data);

/* Recover the scheduler objects from the opaque priv pointers handed to KVM. */
#define get_vcpu_by_priv(priv) ((struct sched_vcpu*)(priv))
#define get_vm_by_priv(priv) ((struct sched_vm*)(priv))

static inline int vcpu_runnable(struct sched_vcpu* v)
{
    return !(v->pause_flags |
	    atomic_read(&v->pause_count) |
	    v->vm->is_paused_by_controller | 
	    atomic_read(&v->vm->pause_count));
}
/*
 * sched_setaffinity() is not exported to modules, so it is reached through
 * the sched_setaffinity_p function pointer (presumably resolved at module
 * load — confirm in the .c). Kernel 2.6.27 changed the prototype to take a
 * cpumask_t pointer; both variants present the same by-value wrapper here.
 */
#if  LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) 
extern long (*sched_setaffinity_p)(pid_t pid, cpumask_t new_mask);
static inline long kvm_sched_setaffinity(pid_t pid, cpumask_t new_mask)
{
    return  sched_setaffinity_p(pid, new_mask);
}
#else
extern long (*sched_setaffinity_p)(pid_t pid, cpumask_t* new_mask);
static inline long kvm_sched_setaffinity(pid_t pid, cpumask_t new_mask)
{
    return  sched_setaffinity_p(pid, &new_mask);
}
#endif

extern int vmsched_init_vm(struct sched_vm* vm);

static inline s64  NOW(void)
{
	struct timespec t = current_kernel_time();
	return timespec_to_ns(&t);
}
/*********************************************************/
DECLARE_PER_CPU(rwlock_t, pseudo_cli);

/*
 * True when the current context could be involuntarily preempted
 * (preempt_count() == 0). The pseudo-irq helpers below BUG when this
 * holds: they must be entered with preemption disabled. On !CONFIG_PREEMPT
 * kernels, kernel code is never involuntarily preempted, hence constant 0.
 */
#ifdef CONFIG_PREEMPT
#define thread_preemptible() (preempt_count() == 0)
#else
#define thread_preemptible()	0
#endif

/*
 * Try to take the schedule lock of the cpu @v currently runs on.
 * Returns 1 with the lock held, 0 if it could not be acquired.
 *
 * The vcpu may migrate between reading v->processor and acquiring the
 * lock, so re-check after locking and chase it to its new cpu.
 */
static inline int vcpu_schedule_try_lock(struct sched_vcpu *v)
{
    struct schedule_data *sd;
    unsigned int cpu;

    do {
	cpu = v->processor;
	sd = &per_cpu(schedule_data, cpu);
	if (!spin_trylock(&sd->schedule_lock))
	    return 0;
	if (likely(v->processor == cpu))
	    return 1;
	/* Raced with migration: drop this cpu's lock and retry. */
	spin_unlock(&sd->schedule_lock);
    } while (1);
}
/*
 * Take the schedule lock of the cpu @v currently runs on, spinning until
 * it is acquired, and return that cpu's schedule_data.
 *
 * Same migration race as vcpu_schedule_try_lock(): re-check v->processor
 * after locking and retry on the new cpu if the vcpu moved.
 */
static inline struct schedule_data* vcpu_schedule_lock(struct sched_vcpu *v)
{
    struct schedule_data *sd;
    unsigned int cpu;

    while (1) {
	cpu = v->processor;
	sd = &per_cpu(schedule_data, cpu);
	spin_lock(&sd->schedule_lock);
	if (likely(v->processor == cpu))
	    return sd;
	/* Raced with migration: drop this cpu's lock and chase the vcpu. */
	spin_unlock(&sd->schedule_lock);
    }
}

/*
 * "Pseudo cli": block this cpu's schedule and tick tasklets and claim the
 * per-cpu scheduler state for the caller (sched_state -> SCHEDULER_USER).
 * Always returns 1. May sleep (calls schedule() while a tasklet still
 * holds sched_state), so it must run in process context.
 *
 * NOTE(review): unlike pseudo_irq_save(), there is no thread_preemptible()
 * check here, yet raw_smp_processor_id() is only stable with preemption
 * disabled — confirm callers guarantee this.
 */
static inline int pseudo_irq_cli(void)
{
    int cpu = raw_smp_processor_id();
    struct schedule_data *sd = &per_cpu(schedule_data, cpu);

    /* Stop new invocations of the per-cpu tasklets. */
    tasklet_disable(&sd->sched_tasklet);
    tasklet_disable(&sd->tick_tasklet);

    /* maybe, the tasklet is already running now, we try to lock
     * sched_state to detect this case
     */
    while(cmpxchg(&sd->sched_state, 
		SCHEDULER_FREE, SCHEDULER_USER) != SCHEDULER_FREE)
	schedule();

    return 1;
} 
/*
 * Like pseudo_irq_cli(), but BUGs if called while preemptible.
 * @flags is accepted only for irq_save()-style symmetry and is ignored
 * (nothing is actually saved). Always returns 1. May sleep while waiting
 * for an in-flight tasklet to release sched_state.
 */
static inline int pseudo_irq_save(int flags)
{
    int cpu = raw_smp_processor_id();
    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
    /* Must be entered with preemption disabled. */
    if(thread_preemptible()){
	dump_traces(NULL);
	BUG_ON(1);
    };

    /* Stop new invocations of the per-cpu tasklets. */
    tasklet_disable(&sd->sched_tasklet);
    tasklet_disable(&sd->tick_tasklet);

    /* maybe, the tasklet is already running now, we try to lock
     * sched_state to detect this case
     */
    while(cmpxchg(&sd->sched_state, 
		SCHEDULER_FREE, SCHEDULER_USER) != SCHEDULER_FREE)
	schedule();

    return 1;
}
/*
 * "Pseudo sti": release the scheduler state claimed by pseudo_irq_cli()/
 * pseudo_irq_save() and re-enable this cpu's tasklets. BUGs if called
 * while preemptible.
 */
static inline void pseudo_irq_sti(void)
{
    int cpu = raw_smp_processor_id();
    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
    if(thread_preemptible()){
	dump_traces(NULL);
	BUG_ON(1);
    };
    sd->sched_state = SCHEDULER_FREE;
    /* Compiler barrier: order the sched_state release before re-enabling
     * the tasklets. NOTE(review): barrier() is not an SMP memory barrier —
     * confirm a compiler barrier suffices here. */
    barrier();
    tasklet_enable(&sd->sched_tasklet);
    tasklet_enable(&sd->tick_tasklet);
}

/*
 * Counterpart of pseudo_irq_save(). @flags is accepted for irq_restore()
 * symmetry but ignored — pseudo_irq_save() saved nothing.
 */
static inline void pseudo_irq_restore(int flags)
{
    pseudo_irq_sti();
}

/*
 * Enter a pseudo-irq-disabled section and take v's schedule lock.
 * Pair with vcpu_schedule_unlock_irqrestore().
 * NOTE(review): the local sd never escapes the macro, and
 * pseudo_irq_save() always returns 1 so the !r BUG path is dead —
 * candidates for cleanup, confirm before changing.
 */
#define vcpu_schedule_lock_irqsave(v, flags) do {  \
    struct schedule_data *sd;			    \
    int r = pseudo_irq_save(flags);   \
    BUG_ON(thread_preemptible());			    \
    if(!r) {					    \
	dump_traces(NULL);			    \
	BUG_ON(1);				    \
    };						    \
    sd = vcpu_schedule_lock((v));	    \
} while ( 0 )

/*
 * Enter a pseudo-irq-disabled section and take v's schedule lock.
 * Pair with vcpu_schedule_unlock_irq().
 * NOTE(review): same dead code as vcpu_schedule_lock_irqsave() — sd is
 * unused and pseudo_irq_cli() always returns 1.
 */
#define vcpu_schedule_lock_irq(v) do {   \
    struct schedule_data *sd;			    \
    int r;					    \
    BUG_ON(thread_preemptible());		    \
    r = pseudo_irq_cli();			    \
    if(!r) {					    \
	dump_traces(NULL);			    \
	BUG_ON(1);				    \
    };						    \
    sd = vcpu_schedule_lock((v));	    \
} while ( 0 )

static inline void vcpu_schedule_unlock(struct sched_vcpu *v)
{
    spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
}

/* Release v's schedule lock and leave the pseudo-irq section (cli pairing). */
#define vcpu_schedule_unlock_irq(v) do {  \
    vcpu_schedule_unlock(v);			    \
    pseudo_irq_sti();			    \
} while ( 0 )
/* Release v's schedule lock and leave the pseudo-irq section (save pairing). */
#define vcpu_schedule_unlock_irqrestore(v, flags) do { \
    vcpu_schedule_unlock(v);			    \
    pseudo_irq_restore(flags);	    \
} while ( 0 )
/* The vcpu currently running on this physical cpu. */
#define current_vcpu (per_cpu(schedule_data, raw_smp_processor_id()).curr)
/* Atomically exchange a bool, returning its previous value. */
#define test_and_set_bool(b) xchg(&(b), 1)
#define test_and_clear_bool(b) xchg(&(b), 0)
/*
 * Pluggable scheduler policy: a table of operations implemented by each
 * concrete scheduler (Xen-style). All vcpu/vm hooks take the scheduler-side
 * objects defined above.
 */
struct scheduler {
    char *name;             /* full name for this scheduler      */
    char *opt_name;         /* option name for this scheduler    */
    unsigned int sched_id;  /* ID for this scheduler             */

    void         (*init)           (void);	/* one-time global init */

    int          (*init_vm)    (struct sched_vm*);	/* attach a VM; 0 on success — presumably; confirm */
    void         (*destroy_vm) (struct sched_vm*);	/* release per-VM scheduler state */

    int          (*init_vcpu)      (struct sched_vcpu *);	/* attach a vcpu */
    void         (*destroy_vcpu)   (struct sched_vcpu *);	/* release per-vcpu state */

    void         (*sleep)          (struct sched_vcpu *);	/* vcpu became not runnable */
    void         (*wake)           (struct sched_vcpu *);	/* vcpu became runnable */

    /* Pick the next vcpu for the current cpu and the slice to grant it. */
    struct task_slice (*do_schedule) (s_time_t);

    void	(*disable_scheduler) (int cpu);	/* stop scheduling on @cpu */
    int		(*start_scheduler) (int cpu);	/* start scheduling on @cpu */

    void	(*stop_schedule) (int cpu);	/* presumably pauses ticking on @cpu — confirm vs disable_scheduler */

    int          (*pick_cpu)       (struct sched_vcpu *);	/* choose a cpu for the vcpu */
    int		 (*read_schedule_info) (struct sched_vm*, char*, int sz);   /* format per-VM params into a buffer */
    int		 (*write_schedule_info) (struct sched_vm*, char*);	    /* parse per-VM params from a buffer */
    void         (*dump_settings)  (void);	/* debug: global settings */
    void         (*dump_cpu_state) (int);	/* debug: per-cpu state */
};
extern bool shutting_down;
#endif
