#ifndef	EXTERNAL_INTERFACE_H
#define EXTERNAL_INTERFACE_H
#include <linux/version.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#if 0
#include <linux/cpu.h>
#include <asm/bitops.h>
#include <linux/cpumask.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/*
 * Pre-2.6.17 hrtimers carry a separate ->data cookie; point it at the
 * timer itself so the callback can recover the hrtimer with a plain
 * cast.  (Assumes the pre-2.6.17 struct hrtimer has a ->data member —
 * this branch only compiles on such kernels.)
 */
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
    timer->data = (void *)timer;
}
#else
/* 2.6.17+: the callback already receives the hrtimer; nothing to set. */
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
/*
 * Backport of the cpumask_var_t API introduced in 2.6.28.  On older
 * kernels cpumask_var_t is a one-element array of the fixed-size
 * cpumask_t, so it is passed by reference just like the modern
 * pointer-style type, with no allocation needed.
 */
typedef cpumask_t cpumask_var_t[1];
/* Pre-2.6.28 topology maps are plain cpumask_t per-CPU variables. */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

/* Version-neutral wrappers: old maps are values, new ones pointers. */
#define vms_cpu_isset(cpu, cpumask) cpu_isset((cpu), (cpumask))
#define vms_cpus_and(dst, src1, cpumask) cpus_and((dst),(src1), (cpumask))
/*
 * Pre-2.6.28 stand-in for alloc_cpumask_var(): the "variable" mask is
 * really a fixed-size array, so nothing is allocated and the call can
 * never fail.
 */
static inline bool alloc_cpumask_var(cpumask_var_t *m, gfp_t gfp)
{
	return 1;
}

/* Counterpart of alloc_cpumask_var(): nothing was allocated, so no-op. */
static inline void free_cpumask_var(cpumask_var_t unused)
{
}

/* Zero every bit of @dst (pre-2.6.28 fallback built on cpus_clear()). */
static inline void cpumask_clear(cpumask_var_t dst)
{
	cpus_clear(*dst);
}

/* Set the bit for @cpu in @dst (pre-2.6.28 fallback for cpu_set()). */
static inline void cpumask_set_cpu(int cpu, cpumask_var_t dst)
{
	cpu_set(cpu, *dst);
}
/* Fixed-size masks: every mask spans all NR_CPUS bits. */
#define nr_cpumask_bits	NR_CPUS
/* Access the raw bitmap behind a cpumask pointer. */
#define cpumask_bits(maskp) ((maskp)->bits)
/*
 * cpumask_and - compute *dstp = *src1p & *src2p.
 * @dstp: destination mask
 * @src1p: first operand
 * @src2p: second operand
 *
 * Pre-2.6.28 backport implemented directly on the underlying bitmaps.
 */
static inline void cpumask_and(cpumask_var_t dstp,
			       const cpumask_var_t src1p,
			       const cpumask_var_t src2p)
{
	bitmap_and(cpumask_bits(dstp),
		   cpumask_bits(src1p),
		   cpumask_bits(src2p),
		   nr_cpumask_bits);
}

/*
 * cpumask_or - compute *dstp = *src1p | *src2p.
 * @dstp: destination mask
 * @src1p: first operand
 * @src2p: second operand
 *
 * Pre-2.6.28 backport implemented directly on the underlying bitmaps.
 */
static inline void cpumask_or(cpumask_var_t dstp,
			      const cpumask_var_t src1p,
			      const cpumask_var_t src2p)
{
	bitmap_or(cpumask_bits(dstp),
		  cpumask_bits(src1p),
		  cpumask_bits(src2p),
		  nr_cpumask_bits);
}
/*
 * Map the 2.6.28+ smp_call_function_many() name onto the older
 * smp_call_function_mask() primitive.  @sync is forwarded as the last
 * argument (presumably the "wait" flag — confirm against the old
 * primitive).  The old call's int return is passed through; the modern
 * API returns void, so callers should not rely on the return value.
 */
static inline int smp_call_function_many(cpumask_var_t cpus,
					 void (*func)(void *data), void *data,
					 int sync)
{
	return smp_call_function_mask(*cpus, func, data, sync);
}

/* Return non-zero when no bit is set in @m (pre-2.6.28 fallback). */
static inline int cpumask_empty(cpumask_var_t m)
{
	return cpus_empty(*m);
}

/* Return non-zero when the bit for @cpu is set in @m. */
static inline int cpumask_test_cpu(int cpu, cpumask_var_t m)
{
	return cpu_isset(cpu, *m);
}

/* Clear the bit for @cpu in @dst (pre-2.6.28 fallback for cpu_clear()). */
static inline void cpumask_clear_cpu(int cpu, cpumask_var_t dst)
{
	cpu_clear(cpu, *dst);
}

/* 2.6.28 spells the online set as a pointer; alias it to the old map. */
#define cpu_online_mask (&cpu_online_map)

#else
/* 2.6.28+: topology maps are real cpumask_var_t per-CPU variables. */
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
/* Modern maps are pointer-like, hence the extra dereference here. */
#define vms_cpu_isset(cpu, cpumask) cpu_isset((cpu), *(cpumask))
#define vms_cpus_and(dst, src1, cpumask) cpus_and((dst),(src1), *(cpumask))
#endif

/*
 * KVM entry points consumed by the external scheduler glue.  kvm_priv
 * is an opaque handle owned by KVM; callers should pair
 * kvm_create_vm() with kvm_destroy_vm().
 *
 * Fix: kvm_create_vm()/kvm_destroy_vm() were each declared twice
 * (once more, below the hook table, with the parameter name dropped);
 * the redundant re-declarations are removed.
 */
extern void *kvm_create_vm(void);
extern void kvm_destroy_vm(void *kvm_priv);
extern int kvm_vm_ioctl_create_vcpu(void *kvm_priv, int n);
extern void kvm_arch_vcpu_destroy(void *kvm_priv);

/*
 * Scheduler hook table: function pointers installed by the external
 * scheduler module and invoked from KVM.  NOTE(review): hooks are
 * presumably NULL until the module loads — confirm call sites check.
 */
extern int (*sched_init_vm)(void *, void **);
extern int (*sched_create_vcpu)(void *, void *, void **, int);
extern int (*sched_special_vm)(void *);
extern void (*sched_notify_in)(void *);
extern void (*sched_notify_out)(void *);
extern int (*sched_special_vcpu)(void *);
extern int (*sched_destroy_vm)(void *);
extern void (*sched_yield_cpu)(void *);
extern void (*sched_yield_nosync)(void *);
extern void (*sched_acquire_cpu)(void *);
extern void (*sched_apply_cpu)(void *);
#endif
