/* Resolve the task_struct that embeds the given CBS scheduling entity. */
static inline struct task_struct *cbs_task_of(struct sched_cbs_entity *se)
{
	struct task_struct *task = container_of(se, struct task_struct, cbs_se);

	return task;
}

/* Resolve the per-CPU runqueue that embeds the given CBS runqueue. */
static inline struct rq *cbs_rq_of(struct cbs_rq *cbs_rq)
{
	struct rq *rq = container_of(cbs_rq, struct rq, cbs);

	return rq;
}

/*
 * Without CBS group scheduling the entity "hierarchy" is a single level:
 * for a non-NULL @se the loop body runs exactly once, then @se becomes
 * NULL and the loop terminates.
 */
#define for_each_cbs_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cbs_rq *task_cbs_rq(struct task_struct *p)
{
	return &task_rq(p)->cbs;
}


static inline struct cbs_rq *cbs_cbs_rq_of(struct sched_cbs_entity *se)
{
	struct task_struct *p = cbs_task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cbs;
}

/*
 * runqueue "owned" by this group: group scheduling is not supported,
 * so there never is one (this NULL also terminates the descend loop in
 * pick_next_task_cbs()).
 */
static inline struct cbs_rq *group_cbs_rq(struct sched_cbs_entity *grp)
{
	return NULL;
}

/* Without group scheduling every entity lives in the same (only) group. */
static inline int
is_same_cbs_group(struct sched_cbs_entity *se, struct sched_cbs_entity *pse)
{
	return 1;
}

/* Flat hierarchy: a CBS entity never has a parent entity. */
static inline struct sched_cbs_entity *parent_cbs_entity(struct sched_cbs_entity *se)
{
	return NULL;
}

/* SMP utilization-accounting helpers (originally by wwj). */
#ifdef CONFIG_SMP
/*
 * Greatest common divisor (Euclid's algorithm), used to keep the
 * utilization fraction n_utilization/d_utilization in lowest terms.
 *
 * do_div(a, b) divides @a in place and returns the remainder, so each
 * iteration performs the classic (a, b) <- (b, a mod b) step.
 *
 * NOTE(review): on 32-bit architectures do_div() takes a 32-bit
 * divisor; passing the u64 @b may truncate it there -- confirm, or
 * switch to div64_u64().
 */
static u64 gcd(u64 a,u64 b)
{
	u64 tmp;
	while(b)
	{
		tmp = b;
		b = do_div(a,b);
		a = tmp;
	}

	return a;
}
/*
 * Cross-multiplied test for n_utilization/d_utilization >= budget/period,
 * i.e. "does @rq still have at least @budget/@period spare utilization?".
 * NOTE(review): the 64-bit products can overflow for large operands;
 * keeping the fraction reduced via gcd() mitigates but does not prevent
 * this -- confirm operand ranges.
 */
static inline int cmp_ge_utilization(struct cbs_rq *rq,s64 budget,u64 period)
{
	return rq->n_utilization * period >= rq->d_utilization * budget;
}
/*
 * Reserve @budget/@period of CPU bandwidth on @rq: subtract the fraction
 * from n_utilization/d_utilization and reduce the result to lowest terms.
 *
 * Fix: do_div() treats its dividend as *unsigned*; the original passed
 * the possibly-negative s64 n_utilization straight to do_div(), which
 * reinterprets it as a huge u64 and yields a garbage quotient.  Divide
 * the magnitude instead and reapply the sign afterwards.
 */
static void sub_utilization(struct cbs_rq *rq,s64 budget,u64 period)
{
	u64 tmp, mag;
	int negative;

	rq->n_utilization = rq->n_utilization * period - rq->d_utilization * budget;
	rq->d_utilization *= period;

	negative = rq->n_utilization < 0;
	mag = negative ? -rq->n_utilization : rq->n_utilization;
	tmp = gcd(rq->d_utilization, mag);
	do_div(rq->d_utilization, tmp);
	do_div(mag, tmp);
	rq->n_utilization = negative ? -(s64)mag : (s64)mag;
}
/*
 * Return @budget/@period of CPU bandwidth to @rq: add the fraction to
 * n_utilization/d_utilization and reduce the result to lowest terms.
 *
 * Fix: do_div() treats its dividend as *unsigned*; the original passed
 * the possibly-negative s64 n_utilization straight to do_div(), which
 * reinterprets it as a huge u64 and yields a garbage quotient.  Divide
 * the magnitude instead and reapply the sign afterwards.
 */
static void add_utilization(struct cbs_rq *rq,s64 budget,u64 period)
{
	u64 tmp, mag;
	int negative;

	rq->n_utilization = rq->n_utilization * period + rq->d_utilization * budget;
	rq->d_utilization *= period;

	negative = rq->n_utilization < 0;
	mag = negative ? -rq->n_utilization : rq->n_utilization;
	tmp = gcd(rq->d_utilization, mag);
	do_div(rq->d_utilization, tmp);
	do_div(mag, tmp);
	rq->n_utilization = negative ? -(s64)mag : (s64)mag;
}
#endif

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

/*
 * Wraparound-safe maximum of two clock-derived deadlines: the signed
 * difference decides which value is "later" even across u64 overflow.
 */
static inline u64 max_dl(u64 min_dl, u64 dl)
{
	if ((s64)(dl - min_dl) > 0)
		return dl;

	return min_dl;
}

/*
 * Wraparound-safe minimum of two clock-derived deadlines (signed-difference
 * comparison, mirror image of max_dl()).
 */
static inline u64 min_dl(u64 min_dl, u64 dl)
{
	if ((s64)(dl - min_dl) < 0)
		return dl;

	return min_dl;
}

/*
 * CBS budget recharge: while the server has overrun its budget, push the
 * deadline one period ahead per full budget granted, until budget >= 0.
 */
static inline void deadline_postpone(struct sched_cbs_entity *cbs_se)
{
	for (; cbs_se->budget < 0; cbs_se->budget += cbs_se->max_budget)
		cbs_se->deadline += cbs_se->period;
}

/* rb-tree key: the entity's deadline relative to the runqueue minimum. */
static inline s64 entity_deadline(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	s64 key = se->deadline - cbs_rq->min_deadline;

	return key;
}

/*
 * Enqueue an entity into the rb-tree, ordered by deadline (EDF):
 */
static void __enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	struct rb_node **link = &cbs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_cbs_entity *entry;
	s64 key = entity_deadline(cbs_rq, se);	/* deadline relative to rq minimum */
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_cbs_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_deadline(cbs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost) {
		cbs_rq->rb_leftmost = &se->run_node;
		/*
		 * maintain cbs_rq->min_deadline to be a monotonic increasing
		 * value tracking the leftmost deadline in the tree.
		 * (max_dl() is deliberate: min_deadline must never move
		 * backwards even though it tracks the tree's minimum.)
		 */
		cbs_rq->min_deadline =
			max_dl(cbs_rq->min_deadline, se->deadline);
	}

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cbs_rq->tasks_timeline);
}

/* Remove an entity from the deadline rb-tree. */
static void __dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/* If we are removing the cached leftmost, advance the cache first. */
	if (cbs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;
		struct sched_cbs_entity *next;

		next_node = rb_next(&se->run_node);
		cbs_rq->rb_leftmost = next_node;

		if (next_node) {
			next = rb_entry(next_node,
					struct sched_cbs_entity, run_node);
			/* keep min_deadline monotonically increasing */
			cbs_rq->min_deadline =
				max_dl(cbs_rq->min_deadline,
					     next->deadline);
		}
	}

	rb_erase(&se->run_node, &cbs_rq->tasks_timeline);
}

/* Leftmost (earliest-deadline) node of the timeline, or NULL if empty. */
static inline struct rb_node *earliest_deadline(struct cbs_rq *cbs_rq)
{
	struct rb_node *leftmost = cbs_rq->rb_leftmost;

	return leftmost;
}

/*
 * Entity with the earliest deadline.  Callers must ensure the tree is
 * non-empty first: rb_entry() on a NULL leftmost would produce a bogus
 * offset-from-NULL pointer, not NULL.
 */
static struct sched_cbs_entity *__pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
	return rb_entry(earliest_deadline(cbs_rq), struct sched_cbs_entity, run_node);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr,
	      unsigned long delta_exec)
{
	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

//	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cbs_rq, exec_clock, delta_exec);
	/* charge the elapsed time against the server's budget ... */
	curr->budget -= delta_exec;
	/* ... and on overrun push the deadline forward and recharge */
	deadline_postpone(curr);
}

/* Account the running CBS entity's CPU time up to the current rq clock. */
static void update_curr_cbs(struct cbs_rq *cbs_rq)
{
	struct sched_cbs_entity *curr = cbs_rq->curr;
	u64 now = cbs_rq_of(cbs_rq)->clock;
	unsigned long delta_exec;

	/* nothing to account when no CBS entity is running */
	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last accounting time
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr_cbs(cbs_rq, curr, delta_exec);
	curr->exec_start = now;

#if 0
	if (entity_is_task(curr)) {
		struct task_struct *curtask = cbs_task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
#endif
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/*
	 * Stamp the start of a new run period so the next
	 * update_curr_cbs() charges from here.
	 */
	se->exec_start = cbs_rq_of(cbs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

/* Bookkeeping on insertion: mark the entity queued, bump the count. */
static void
account_cbs_entity_enqueue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	se->on_rq = 1;
	cbs_rq->nr_running++;
}

/* Bookkeeping on removal; trips if the entity was not actually queued. */
static void
account_cbs_entity_dequeue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	BUG_ON(!se->on_rq);
	BUG_ON(!cbs_rq->nr_running);
	se->on_rq = 0;
	cbs_rq->nr_running--;
}

static void
enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	u64 vt, now = cbs_rq_of(cbs_rq)->clock;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);
	account_cbs_entity_enqueue(cbs_rq, se);

	/*
	 * CBS wakeup rule: vt = budget * period / max_budget is the wall
	 * time the remaining budget is worth at the reserved bandwidth.
	 * If consuming it from 'now' would run past the current deadline,
	 * the (budget, deadline) pair cannot be reused without exceeding
	 * the reservation, so recharge: full budget, fresh deadline one
	 * period from now.
	 */
	vt = se->period * se->budget;
	do_div(vt, se->max_budget);

	if (vt + now > se->deadline) {
		se->budget = se->max_budget;
		se->deadline = se->period + now;
	}
	
	/* 'current' is never kept in the rb-tree */
	if (se != cbs_rq->curr)
		__enqueue_cbs_entity(cbs_rq, se);
}

static void
dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);

	/* 'current' is not kept in the rb-tree, so only others need erasing */
	if (se != cbs_rq->curr)
		__dequeue_cbs_entity(cbs_rq, se);
	account_cbs_entity_dequeue(cbs_rq, se);
}

/* Make @se the runqueue's current entity. */
static void
set_next_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		__dequeue_cbs_entity(cbs_rq, se);
	}

	/* stamp exec_start so the next runtime charge starts from here */
	update_stats_curr_start_cbs(cbs_rq, se);
	cbs_rq->curr = se;
//	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_cbs_entity(struct sched_cbs_entity *curr, struct sched_cbs_entity *se)
{
	return se->deadline < curr->deadline;
}

/* Pick the earliest-deadline entity and install it as 'current'. */
static struct sched_cbs_entity *pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
	struct sched_cbs_entity *se;

	/* empty timeline: leave 'curr' untouched, report no candidate */
	if (!earliest_deadline(cbs_rq))
		return NULL;

	se = __pick_next_cbs_entity(cbs_rq);
	set_next_cbs_entity(cbs_rq, se);

	return se;
}

/* Release 'current': refresh its accounting and park it back in the tree. */
static void put_prev_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *prev)
{
	/*
	 * If 'prev' is still queued then deactivate_task() was not called
	 * and its runtime accounting is stale: update it, then put it back
	 * into the timeline ('current' is never kept in the tree).
	 *
	 * Fix: the original tested prev->on_rq twice in a row; nothing in
	 * between changes on_rq, so the two branches are merged.
	 */
	if (prev->on_rq) {
		update_curr_cbs(cbs_rq);
		__enqueue_cbs_entity(cbs_rq, prev);
	}
	cbs_rq->curr = NULL;
}

/* Periodic-tick work for the running CBS entity. */
static void
cbs_entity_tick(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);

	/*
	 * With more than one runnable entity the budget/deadline update
	 * above may have changed who has the earliest deadline, so ask
	 * for a reschedule to re-run the EDF selection.
	 */
	if (cbs_rq->nr_running > 1)
		resched_task(cbs_rq_of(cbs_rq)->curr);	/* FIXME: Check! */
}


/**************************************************
 * CBS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
/*
 * Arm the high-resolution preemption timer to fire when the running
 * task's remaining CBS budget would be exhausted.
 */
static void hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
	int requeue = rq->curr == p;	/* re-arming for the running task? */
	struct sched_cbs_entity *se = &p->cbs_se;
	s64 delta;

	WARN_ON(task_rq(p) != rq);

	/*
	 * Don't schedule timeouts shorter than 10000ns, that just
	 * doesn't make sense.
	 */
	delta = max(10000LL, se->budget);
	hrtick_start(rq, delta, requeue);
}
#else
/* Without CONFIG_SCHED_HRTICK budget enforcement relies on the tick. */
static inline void
hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the CBS scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_cbs(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &p->cbs_se;

	for_each_cbs_sched_entity(se) {
		/* already queued: nothing to do */
		if (se->on_rq)
			break;
		cbs_rq = cbs_cbs_rq_of(se);
		enqueue_cbs_entity(cbs_rq, se);
	}

	/* re-arm budget enforcement for the currently running task */
	hrtick_start_cbs(rq, rq->curr);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the CBS scheduling stats:
 */
static void dequeue_task_cbs(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &p->cbs_se;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		dequeue_cbs_entity(cbs_rq, se);
		/* FIXME: Don't dequeue parent if it has other entities besides us */
	}
/* SMP utilization accounting (originally by wwj). */
#ifdef CONFIG_SMP
	se = &p->cbs_se;
	/*
	 * An exiting CBS task that had a reservation admitted on this CPU
	 * returns its bandwidth (max_budget/period) to the runqueue.
	 * NOTE(review): on_smp is deliberately left set (see commented-out
	 * clear below) and the update is unlocked -- both flagged below as
	 * needing a lock.
	 */
	if(p->exit_state && p->policy == SCHED_CBS && se->on_smp)
	{
		cbs_rq = &rq->cbs;
		//se->on_smp = 0;
		/*need lock*/
		add_utilization(cbs_rq,se->max_budget,se->period);

	}
#endif

	hrtick_start_cbs(rq, rq->curr);
}

/*
 * sched_yield() is broken on CBS (no sensible requeue semantics are
 * defined for a deadline-ordered timeline), so yielding is deliberately
 * a no-op.
 */
static void yield_task_cbs(struct rq *rq)
{
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se_cbs(struct sched_cbs_entity *se)
{
	int depth = 0;

	for_each_cbs_sched_entity(se)
		depth++;

	return depth;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup_cbs(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cbs_rq *cbs_rq = task_cbs_rq(curr);
	struct sched_cbs_entity *se = &curr->cbs_se, *pse = &p->cbs_se;
#if 0
	int se_depth, pse_depth;
#endif

	/* an RT task always preempts a CBS task */
	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr_cbs(cbs_rq);
		resched_task(curr);
		return;
	}

//	se->last_wakeup = se->sum_exec_runtime;
	if (unlikely(se == pse))
		return;

#if 0 
	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cbs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.  (Dead code: group scheduling is not supported.)
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se_cbs(se);
	pse_depth = depth_se_cbs(pse);

	while (se_depth > pse_depth) {
		se_depth--;
		se = parent_cbs_entity(se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		pse = parent_cbs_entity(pse);
	}

	while (!is_same_cbs_group(se, pse)) {
		se = parent_cbs_entity(se);
		pse = parent_cbs_entity(pse);
	}
#endif

	/* EDF wakeup preemption: the earlier deadline wins the CPU */
	if (wakeup_preempt_cbs_entity(se, pse) == 1)
		resched_task(curr);
}

/* sched_class hook: pick the CBS task to run next, or NULL if none. */
static struct task_struct *pick_next_task_cbs(struct rq *rq)
{
	struct task_struct *p;
	struct cbs_rq *cbs_rq = &rq->cbs;
	struct sched_cbs_entity *se;

	if (unlikely(!cbs_rq->nr_running))
		return NULL;

	/*
	 * Descend the (currently single-level) hierarchy: group_cbs_rq()
	 * always returns NULL, so the body runs exactly once.
	 */
	do {
		se = pick_next_cbs_entity(cbs_rq);
		cbs_rq = group_cbs_rq(se);
	} while (cbs_rq);

	p = cbs_task_of(se);
	/* arm budget enforcement for the task we are about to run */
	hrtick_start_cbs(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_cbs(struct rq *rq, struct task_struct *prev)
{
	struct sched_cbs_entity *se = &prev->cbs_se;
	struct cbs_rq *cbs_rq;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		put_prev_cbs_entity(cbs_rq, se);
	}
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_cbs(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &curr->cbs_se;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		cbs_entity_tick(cbs_rq, se, queued);
	}
}

static void task_new_cbs(struct rq *rq, struct task_struct *p)
{
	struct cbs_rq *cbs_rq = task_cbs_rq(p);

	update_curr_cbs(cbs_rq);

	enqueue_task_cbs(rq, p, 0);

/*wwj*/
	printk(KERN_ALERT "task_new_cbs() has been called!\n");
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_cbs(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
#warning Check prio_changed_cbs() implementation, thanks!
	printk("prio_changed_cbs has been called!\n");
	check_preempt_curr(rq, p);
}

/*
 * We switched to the sched_cbs class.
 */
static void switched_to_cbs(struct rq *rq, struct task_struct *p,
			     int running)
{
#warning Check switched_to_cbs() implementation, thanks!
	//printk("switched_to_cbs has been called!\n");
	/* the newly-CBS task may deserve the CPU over 'current' */
	check_preempt_curr(rq, p);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cbs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_cbs(struct rq *rq)
{
	struct sched_cbs_entity *se = &rq->curr->cbs_se;

	for_each_cbs_sched_entity(se)
		set_next_cbs_entity(cbs_cbs_rq_of(se), se);
}

/* SMP admission and placement (originally by wwj). */
#ifdef CONFIG_SMP
/*
 * First-fit CPU selection: return the first allowed CPU whose CBS
 * runqueue reports enough spare utilization for this task's reservation
 * (max_budget/period); otherwise fall back to the task's current CPU.
 * NOTE(review): the fallback performs no admission test, so the current
 * CPU may become over-committed -- confirm this is intended.
 */
static int find_right_rq(struct task_struct *task)
{
	struct sched_cbs_entity *cbs_se = &task->cbs_se;
	struct cbs_rq *cbs_rq;
	int cpu;
	for_each_cpu_mask(cpu,task->cpus_allowed){
		cbs_rq = &cpu_rq(cpu)->cbs;
		if (cmp_ge_utilization(cbs_rq,cbs_se->max_budget,cbs_se->period))
		{
			return cpu;
		}
	}
	return task_cpu(task);
}

/*
 * Choose a CPU for a CBS task.  On first placement the task's
 * reservation is admitted on the chosen CPU by subtracting its
 * utilization; once on_smp is set the task stays where it is.
 */
static int select_task_rq_cbs(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);
	struct sched_cbs_entity *cbs_se = &p->cbs_se;
	int cpu;
	/* already admitted somewhere: keep the task on its current CPU */
	if (cbs_se->on_smp)
		return task_cpu(p);
	cpu = find_right_rq(p);
	rq = cpu_rq(cpu);
	/*need lock: concurrent updates of the utilization fraction race here*/
	sub_utilization(&rq->cbs,cbs_se->max_budget,cbs_se->period);
	cbs_se->on_smp = 1;
	return cpu;
}

/*
 * CBS does not take part in periodic load balancing: bandwidth is
 * admitted per-CPU in select_task_rq_cbs(), so nothing is ever moved.
 */
static unsigned long
load_balance_cbs(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	return 0;
}

/* Likewise, never migrate a CBS task for balancing purposes. */
static int
move_one_task_cbs(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * All the scheduling class methods
 *
 * Modified by: wwj
 * Time: 2009.4.20
 *
 * Improvement:
 *  SMP
 *
 * TODO:
 * 1. need lock when modify rq (consider deadlock)
 * 2. testing
 * 3. ...
*/
static const struct sched_class cbs_sched_class = {
	.next			= &fair_sched_class,	/* CBS sits above CFS */
	.enqueue_task		= enqueue_task_cbs,
	.dequeue_task		= dequeue_task_cbs,
	.yield_task		= yield_task_cbs,
/* SMP task placement (originally by wwj). */
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_cbs,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_wakeup_cbs,

	.pick_next_task		= pick_next_task_cbs,
	.put_prev_task		= put_prev_task_cbs,
/* load balancing hooks are no-ops: reservations are per-CPU */
#ifdef CONFIG_SMP
	.load_balance		= load_balance_cbs,
	.move_one_task		= move_one_task_cbs,
#endif /* CONFIG_SMP */

	.set_curr_task          = set_curr_task_cbs,
	.task_tick		= task_tick_cbs,
	.task_new		= task_new_cbs,

	.prio_changed		= prio_changed_cbs,
	.switched_to		= switched_to_cbs,

#ifdef CONFIG_CBS_GROUP_SCHED
#error CBS GROUP is still a No-No!
	.moved_group		= ,
#endif
};
