#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/wait.h>
#include <linux/hardirq.h>
#include <linux/bug.h>
#include <linux/smp.h>

#include <asm/mmu_context.h>

/* NOTE(review): defined but never referenced in this file — presumably the
 * global task-list head; confirm it is initialised and used elsewhere. */
struct list_head task_head;

/* Per-CPU runqueues. Only runqueues[0] is ever used here: task_cpu() and
 * cpu_of() below always return 0. */
struct rq runqueues[8];

#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */

/*
 * CPU that task @p runs on.  This is a uniprocessor build, so every
 * task lives on CPU 0 regardless of @p.
 */
static inline unsigned int task_cpu(const struct task_struct *p)
{
	(void)p;	/* unused in the UP case */
	return 0U;
}

/*
 * CPU that owns runqueue @rq.  Uniprocessor build: always CPU 0.
 */
static inline int cpu_of(struct rq *rq)
{
	(void)rq;	/* unused in the UP case */
	return 0;
}

#define	cpu_rq(cpu)	(&runqueues[(cpu)])
#define task_rq(p)		cpu_rq(task_cpu(p))

/*
 * Mark @p as needing a reschedule; the actual switch happens the next
 * time schedule() runs.  Upstream asserts the runqueue lock is held
 * here — that check is disabled in this port.
 */
static void resched_task(struct task_struct *p)
{
	// assert_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}

#include "sched_fair.c"
#include "sched_rt.c"

/* Bump the count of runnable tasks on @rq. */
static void inc_nr_running(struct rq *rq)
{
	rq->nr_running += 1;
}

/* Drop the count of runnable tasks on @rq. */
static void dec_nr_running(struct rq *rq)
{
	rq->nr_running -= 1;
}

/*
 * Derive p->se.load (weight plus its precomputed inverse) from the
 * task's policy and static priority.
 *
 * RT tasks get a fixed, extra-large weight: twice the weight of the
 * highest entry in the nice table (prio_to_weight[0]), with the
 * inverse halved to match.  Everything else indexes the standard
 * nice-level tables by (static_prio - MAX_RT_PRIO).
 */
static void set_load_weight(struct task_struct *p)
{
	if (task_has_rt_policy(p)) {
		p->se.load.weight = prio_to_weight[0] * 2;
		p->se.load.inv_weight = prio_to_wmult[0] >> 1;
		return;
	}

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	// if (p->policy == SCHED_IDLE) {
	// 	p->se.load.weight = WEIGHT_IDLEPRIO;
	// 	p->se.load.inv_weight = WMULT_IDLEPRIO;
	// 	return;
	// }

	p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}

/*
 * Fold @sample into the running average *@avg as an exponential
 * moving average with weight 1/8: avg += (sample - avg) / 8.
 */
static void update_avg(u64 *avg, u64 sample)
{
	s64 delta = sample - *avg;

	*avg += delta >> 3;
}

/*
 * Ask the *current* task's scheduling class whether the newly runnable
 * @p should preempt rq->curr (the class sets need_resched if so).
 */
static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
}

/*
 * Put @p on the runqueue via its scheduling class and mark it on-rq.
 * On a real wakeup (@wakeup != 0), snapshot the current runtime as
 * start_runtime so later wakeup statistics have a baseline
 * (see try_to_wake_up()).
 */
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	if (wakeup)
		p->se.start_runtime = p->se.sum_exec_runtime;

	p->sched_class->enqueue_task(rq, p, wakeup);
	p->se.on_rq = 1;
}

/* Wakeup preemption granularity, 1000000 — nanoseconds (1ms) by upstream
 * kernel convention; also used as the default avg_wakeup seed. */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;

/*
 * Take @p off the runqueue via its class and clear its on-rq flag.
 * When the task is going to sleep (@sleep != 0), fold the runtime
 * accumulated since its last recorded wakeup into avg_overlap; if no
 * wakeup was recorded, decay avg_wakeup towards the default
 * granularity instead.
 */
static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
	if (sleep) {
		if (p->se.last_wakeup) {
			update_avg(&p->se.avg_overlap,
				p->se.sum_exec_runtime - p->se.last_wakeup);
			p->se.last_wakeup = 0;
		} else {
			update_avg(&p->se.avg_wakeup,
				sysctl_sched_wakeup_granularity);
		}
	}

	p->sched_class->dequeue_task(rq, p, sleep);
	p->se.on_rq = 0;
}

/*
 * Make @p runnable: enqueue it and bump rq->nr_running.  @wakeup is 1
 * for real wakeups (feeds the wakeup statistics in enqueue_task), 0
 * for initial placement.  nr_uninterruptible accounting is disabled
 * in this port.
 */
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	// if (task_contributes_to_load(p))
	// 	rq->nr_uninterruptible--;

	enqueue_task(rq, p, wakeup);
	inc_nr_running(rq);
}

/*
 * Take @p off the runqueue and drop rq->nr_running.  @sleep is 1 when
 * the task is blocking (feeds the sleep statistics in dequeue_task).
 * nr_uninterruptible accounting is disabled in this port.
 */
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
{
	// if (task_contributes_to_load(p))
		// rq->nr_uninterruptible++;

	dequeue_task(rq, p, sleep);
	dec_nr_running(rq);
}

#if 1
/*
 * Switch the CPU from @prev to @next: install next's address space,
 * then swap register state and stack via switch_to().
 *
 * A task with no mm (a kernel thread) borrows prev's active_mm instead
 * of switching page tables; conversely, when prev has no mm its
 * active_mm reference is dropped here.
 *
 * NOTE(review): the mm_count refcounting (atomic_inc / rq->prev_mm /
 * finish_task_switch) is commented out, so the borrow/drop of
 * active_mm is not reference-counted — confirm mm lifetimes are
 * managed elsewhere in this port.
 */
static inline void context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	// prepare_task_switch(rq, prev, next);
	// trace_sched_switch(rq, prev, next);
	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	// arch_start_context_switch(prev);

	if (!mm) {
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		next->active_mm = oldmm;
		// atomic_inc(&oldmm->mm_count);
		// enter_lazy_tlb(oldmm, next);
	} else {
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		switch_mm(oldmm, mm, next);
	}

	if (!prev->mm) {
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		prev->active_mm = NULL;
		// rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
// #ifndef __ARCH_WANT_UNLOCKED_CTXSW
// 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
// #endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	// barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	// finish_task_switch(this_rq(), prev);
}
#else
/* Minimal variant: register/stack switch only, no mm handling. */
static inline void context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	switch_to(prev, next, prev);

}
#endif

/*
 * Notify @p's class that it is being switched out, after folding this
 * slice's runtime (since prev_sum_exec_runtime) into the averages.
 * A task preempted while still TASK_RUNNING also grows avg_overlap by
 * the slice (clamped to 2 * sysctl_sched_migration_cost); a task that
 * is blocking decays avg_running towards zero instead.
 */
static void put_prev_task(struct rq *rq, struct task_struct *p)
{
	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;

	update_avg(&p->se.avg_running, runtime);

	if (p->state == TASK_RUNNING) {
		/*
		 * In order to avoid avg_overlap growing stale when we are
		 * indeed overlapping and hence not getting put to sleep, grow
		 * the avg_overlap on preemption.
		 *
		 * We use the average preemption runtime because that
		 * correlates to the amount of cache footprint a task can
		 * build up.
		 */
		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
		update_avg(&p->se.avg_overlap, runtime);
	} else {
		update_avg(&p->se.avg_running, 0);
	}
	p->sched_class->put_prev_task(rq, p);
}

static struct task_struct *pick_next_task(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;
	// printf("this is %s(): %d   rq->clock is %ld\r\n", __func__, __LINE__, rq->clock);

	/*
	 * Optimization: we know that if all tasks are in
	 * the fair class we can call that function directly:
	 */
	if (rq->nr_running == rq->cfs.nr_running) {
		p = fair_sched_class.pick_next_task(rq);
		// p = rt_sched_class.pick_next_task(rq);
		if (p)
			return p;
	}

	for ( ; ; ) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		delayms(1000);
	}
	// class = sched_class_highest;
	// for ( ; ; ) {
	// 	p = class->pick_next_task(rq);
	// 	if (p)
	// 		return p;
	// 	/*
	// 	 * Will never be NULL as the idle class always
	// 	 * returns a non-NULL p:
	// 	 */
	// 	class = class->next;
	// }
}

#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#define NSEC_PER_SEC	1000000000L

/*
 * Scheduler clock: jiffies scaled to nanoseconds.  The INITIAL_JIFFIES
 * offset makes the counter start 300s before wraparound (the kernel's
 * trick for flushing out wrap bugs early).  Only the 'unsigned long'
 * variant below is compiled in; the u64 one is kept for reference.
 */
#if 0
unsigned long long sched_clock(void)
{
	// printf("this is %s(): %d   clock is %ld\r\n", __func__, __LINE__, (unsigned long long)(jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ));
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
#else
unsigned long sched_clock(void)
{
	// printf("this is %s(): %d   clock is %ld\r\n", __func__, __LINE__, (unsigned long long)(jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ));
	return (unsigned long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
#endif

/*
 * Per-CPU scheduler clock.
 * NOTE(review): this returns raw jiffies, NOT nanoseconds like
 * sched_clock() above (that call is commented out) — so rq->clock
 * advances in jiffies.  Confirm the CFS/RT code included above copes
 * with jiffies-granularity clocks.
 */
u64 sched_clock_cpu(int cpu)
{
	return (u64)jiffies;
	// return sched_clock();
}

/*
 * Refresh rq->clock from the per-cpu scheduler clock (currently raw
 * jiffies — see sched_clock_cpu()).  Called before any accounting
 * that reads rq->clock.
 */
 void update_rq_clock(struct rq *rq)
{
	rq->clock = sched_clock_cpu(cpu_of(rq));
	// printf("this is %s(): %d   rq->clock is %ld, sched_clock_cpu(cpu_of(rq)) is %ld\r\n", __func__, __LINE__, rq->clock, sched_clock_cpu(cpu_of(rq)));
}

/*
 * Main scheduler entry point: account the outgoing task, pick the next
 * one and context-switch to it, looping while need_resched stays set.
 *
 * NOTE(review): the runqueue lock (spin_lock_irq) is commented out —
 * safety relies on this being a single-CPU build with preemption
 * disabled across the critical section.
 * NOTE(review): rq is hard-coded to cpu_rq(0) even though 'cpu' is
 * computed from smp_processor_id() — confirm this is intentional for
 * the UP port.
 */
void schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

	// printf("this is %s(): %d\r\n", __func__, __LINE__);

need_resched:
	preempt_disable();
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	cpu = smp_processor_id();
	rq = cpu_rq(0);
	// rcu_sched_qs(cpu);
	prev = rq->curr;
	// switch_count = &prev->nivcsw;

	// spin_lock_irq(&rq->lock);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	update_rq_clock(rq);
	clear_tsk_need_resched(prev);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	/*
	 * Non-zero ->state means prev is blocking: dequeue it, unless we
	 * got here via in-kernel preemption (PREEMPT_ACTIVE), in which
	 * case it must stay runnable.
	 */
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		// printf("this is %s(): %d\r\n", __func__, __LINE__);
		// if (signal_pending_state(prev->state, prev))
		// 	prev->state = TASK_RUNNING;
		// else
			deactivate_task(rq, prev, 1);
		// switch_count = &prev->nvcsw;
	}
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	put_prev_task(rq, prev);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	next = pick_next_task(rq);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	if (prev != next) {
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		// sched_info_switch(prev, next);
		// perf_event_task_sched_out(prev, next, cpu);

		rq->nr_switches++;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		rq->curr = next;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
		// ++*switch_count;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * the context switch might have flipped the stack from under
		 * us, hence refresh the local variables.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else {
		// spin_unlock_irq(&rq->lock);
	}
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Re-enable preemption without an immediate resched, then loop if
	 * something marked us for rescheduling in the meantime. */
	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}

/*
 * Update the per-rq decaying load averages.  cpu_load[i] tracks
 * rq->load.weight with progressively longer history: for index i the
 * update is new = (old * (2^i - 1) + load) / 2^i, so higher indices
 * react more slowly.  Global load-average sampling is disabled below.
 */
static void update_cpu_load(struct rq *this_rq)
{
	unsigned long this_load = this_rq->load.weight;
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale-1;
		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
	}

	// if (time_after_eq(jiffies, this_rq->calc_load_update)) {
	// 	this_rq->calc_load_update += LOAD_FREQ;
	// 	calc_load_account_active(this_rq);
	// }
}

/*
 * Timer-tick hook: under rq->lock, refresh the runqueue clock and the
 * cpu_load averages, then let the current task's scheduling class
 * account the tick (which may set need_resched).
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	// sched_clock_tick();

	spin_lock(&rq->lock);
	update_rq_clock(rq);
	update_cpu_load(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	spin_unlock(&rq->lock);
}

/*
 * Reset the per-entity scheduler state of a freshly forked (or idle)
 * task: zero the runtime statistics, seed avg_wakeup with the default
 * granularity, empty the RT/CFS list links, and mark the task
 * TASK_RUNNING while it is still off every runqueue.
 */
static void __sched_fork(struct task_struct *p)
{
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.nr_migrations		= 0;
	p->se.last_wakeup		= 0;
	p->se.avg_overlap		= 0;
	p->se.start_runtime		= 0;
	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
	p->se.avg_running		= 0;

	INIT_LIST_HEAD(&p->rt.run_list);
	p->se.on_rq = 0;
	INIT_LIST_HEAD(&p->se.group_node);

	/*
	 * We mark the process as running here, but have not actually
	 * inserted it onto the runqueue yet. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;
}

/*
 * Lock the runqueue @p belongs to, with interrupts disabled, and
 * return it.  task_rq(p) is re-checked after taking the lock because
 * upstream the task can migrate between lookup and lock; here
 * task_cpu() is always 0, so the retry loop runs at most once, but
 * the upstream shape is kept.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
{
	struct rq *rq;

	for (;;) {
		local_irq_save(*flags);
		rq = task_rq(p);
		spin_lock(&rq->lock);
		if (rq == task_rq(p))
			return rq;
		spin_unlock_irqrestore(&rq->lock, *flags);
	}
}

/* Release a runqueue locked by task_rq_lock() and restore IRQ flags. */
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
{
	spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * Scheduler-side setup for a newly forked task: reset its sched
 * entity, compute its load weight, inherit the parent's normal_prio
 * (so a priority-inheritance boost is not leaked to the child), and
 * give non-RT priorities the fair class.  With CONFIG_PREEMPT the
 * child starts with preemption disabled until it is fully scheduled.
 */
void sched_fork(struct task_struct *p, int clone_flags)
{
	// int cpu = get_cpu();

	__sched_fork(p);

	set_load_weight(p);

#if 0
	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (p->sched_reset_on_fork) {
		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
			p->policy = SCHED_NORMAL;
			p->normal_prio = p->static_prio;
		}

		if (PRIO_TO_NICE(p->static_prio) < 0) {
			p->static_prio = NICE_TO_PRIO(0);
			p->normal_prio = p->static_prio;
			set_load_weight(p);
		}

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}
#endif

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;
	// printf("this is %s(): %d  p->prio = %d\r\n", __func__, __LINE__, p->prio);

	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif

	// put_cpu();
}

/*
 * Enqueue a freshly forked task for the first time and check whether
 * it should preempt the current task.  Classes providing a task_new()
 * hook (presumably CFS initial placement — see sched_fair.c) do their
 * own enqueue; otherwise fall back to a plain activate_task().
 */
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	BUG_ON(p->state != TASK_RUNNING);
	update_rq_clock(rq);

	if (!p->sched_class->task_new || !current->se.on_rq) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		activate_task(rq, p, 0);
	} else {
		/*
		 * Let the scheduling class do new task startup
		 * management (if any):
		 */
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		p->sched_class->task_new(rq, p);
		inc_nr_running(rq);
	}
	check_preempt_curr(rq, p, 0);
	task_rq_unlock(rq, &flags);
}

/*
 * Turn @idle into this CPU's idle task: reset its sched entity, give
 * it the lowest possible priority (MAX_PRIO) and install it as both
 * rq->curr and rq->idle.  The preempt count is set outside the lock
 * on purpose (see comment below).
 *
 * NOTE(review): the idle sched class is commented out, so the idle
 * task keeps whatever sched_class it already had — confirm callers
 * assign one (sched_init() sets current's class afterwards).
 */
void init_idle(struct task_struct *idle, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	spin_lock_irqsave(&rq->lock, flags);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	__sched_fork(idle);
	idle->se.exec_start = sched_clock();
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	idle->prio = idle->normal_prio = MAX_PRIO;
	// cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
	// __set_task_cpu(idle, cpu);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	rq->curr = rq->idle = idle;

	spin_unlock_irqrestore(&rq->lock, flags);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
	task_thread_info(idle)->preempt_count = 0;
#endif
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
	// idle->sched_class = &idle_sched_class;
}

/*
 * Initialise the CFS runqueue: empty rbtree, empty task list, and
 * min_vruntime biased to -(1 << 20) — the upstream convention so
 * vruntime wraparound bugs surface early rather than after long
 * uptimes.
 */
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	INIT_LIST_HEAD(&cfs_rq->tasks);
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}

/*
 * Initialise the RT runqueue: empty every priority queue, clear the
 * priority bitmap, and set the sentinel bit at MAX_RT_PRIO so bitmap
 * searches always terminate.
 *
 * NOTE(review): rt_runtime is left at 0 — if the bandwidth logic in
 * the included sched_rt.c treats 0 as "no runtime", RT tasks could be
 * throttled immediately; verify against sched_rt.c.
 */
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	// spin_lock_init(&rt_rq->rt_runtime_lock);
}

/* Next jiffies value at which the global load average would be sampled;
 * the sampling itself is currently disabled (see update_cpu_load). */
static unsigned long calc_load_update;

/*
 * Boot-time scheduler initialisation: set up CPU 0's runqueue (the
 * upstream for_each_possible_cpu loop is collapsed to a single CPU —
 * hence the extra indentation kept below), turn the calling task into
 * the idle task, and start it in the fair class.
 */
void sched_init(void)
{
	int i, j;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	// for_each_possible_cpu(i) {
		struct rq *rq;

		rq = cpu_rq(0);
		spin_lock_init(&rq->lock);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->calc_load_update = jiffies + LOAD_FREQ;
		init_cfs_rq(&rq->cfs, rq);
		init_rt_rq(&rq->rt, rq);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

		// rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;

		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
			rq->cpu_load[j] = 0;
	// }
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	// set_load_weight(&init_task);

	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
	init_idle(current, smp_processor_id());
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	calc_load_update = jiffies + LOAD_FREQ;
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	/*
	 * During early bootup we pretend to be a normal task:
	 */
	current->sched_class = &fair_sched_class;
	// current->sched_class = &rt_sched_class;
	// wake_up_new_task(current, 0);
	// printf("this is %s(): %d\r\n", __func__, __LINE__);

	// scheduler_running = 1;
}

/*
 * Pending-work handler (presumably called on the return-to-user path —
 * confirm against the arch code): deliver signals, then reschedule.
 * NOTE(review): schedule() is called unconditionally; confirm callers
 * only invoke this when rescheduling work is actually pending.
 */
void do_work_pending(void)
{
	do_signal();
	schedule();
}

/*
 * Kernel-preemption entry point.  Bails out when preemption is
 * disabled or IRQs are off.  PREEMPT_ACTIVE is held across schedule()
 * so the current task is not deactivated by the prev->state check
 * there even if it is mid-transition to a sleeping state.
 */
void preempt_schedule(void)
{
	struct thread_info *ti = current_thread_info();

	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
	if (ti->preempt_count || irqs_disabled())
		return;

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		schedule();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}

/*
 * Preemption entry point for the interrupt-return path: must be
 * entered with IRQs disabled and preemption enabled (enforced by the
 * BUG_ON).  IRQs are re-enabled only around schedule() itself, and
 * PREEMPT_ACTIVE protects the current task from being deactivated.
 */
void preempt_schedule_irq(void)
{
	// printf("this is %s(): %d\r\n", __func__, __LINE__);
	struct thread_info *ti = current_thread_info();

	/* Catch callers which need to be fixed */
	BUG_ON(ti->preempt_count || !irqs_disabled());

	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
		barrier();
	} while (need_resched());
}

/*
 * Wake @p if its current ->state matches the @state mask.
 *
 * Returns 1 only when this call actually put the task on a runqueue;
 * returns 0 when the state did not match or the task was already
 * on-rq (in which case we still run the preemption check and force
 * TASK_RUNNING).  When called from task context (not an interrupt),
 * the time since the waker's previous wakeup is folded into the
 * waker's avg_wakeup statistic.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state,
			  int wake_flags)
{
	int cpu, orig_cpu, this_cpu, success = 0;
	unsigned long flags;
	struct rq *rq, *orig_rq;

	this_cpu = get_cpu();

	rq = orig_rq = task_rq_lock(p, &flags);
	update_rq_clock(rq);
	if (!(p->state & state))
		goto out;

	if (p->se.on_rq)
		goto out_running;

	cpu = task_cpu(p);
	orig_cpu = cpu;

	activate_task(rq, p, 1);
	success = 1;

	/*
	 * Only attribute actual wakeups done by this task.
	 */
	if (!in_interrupt()) {
		struct sched_entity *se = &current->se;
		u64 sample = se->sum_exec_runtime;

		if (se->last_wakeup)
			sample -= se->last_wakeup;
		else
			sample -= se->start_runtime;
		update_avg(&se->avg_wakeup, sample);

		se->last_wakeup = se->sum_exec_runtime;
	}

out_running:
	check_preempt_curr(rq, p, wake_flags);

	p->state = TASK_RUNNING;
out:
	task_rq_unlock(rq, &flags);
	put_cpu();

	return success;
}

/*
 * Default wait-queue callback: wake the task stored in the wait
 * entry's private field.  Returns try_to_wake_up()'s result (1 if the
 * task was actually made runnable).
 */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}

/*
 * Core wakeup loop: walk @q and invoke each entry's callback (usually
 * default_wake_function).  curr->flags is sampled before the callback
 * because a woken waiter may remove (and free) its own wait entry —
 * hence also the _safe list iteration.  Each exclusive waiter that
 * actually wakes decrements @nr_exclusive; the walk stops once enough
 * exclusive waiters have been woken.  The caller is expected to hold
 * q->lock (but see __wake_up_sync_key, which currently does not).
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		printf("this is %s(): %d\r\n", __func__, __LINE__);
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
		printf("this is %s(): %d\r\n", __func__, __LINE__);
	}
	printf("this is %s(): %d\r\n", __func__, __LINE__);
}

/*
 * Wake tasks waiting on @q, holding q->lock with IRQs disabled.
 * @nr_exclusive bounds how many WQ_FLAG_EXCLUSIVE waiters wake;
 * non-exclusive waiters are all woken.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * Synchronous-wakeup variant of __wake_up().  The WF_SYNC hint is not
 * honoured in this port (wake_flags is forced to 0), so it currently
 * behaves like a plain wakeup.
 *
 * Fix: removed the unused 'unsigned long flags' local — its only users
 * were the commented-out spin_lock_irqsave/irqrestore pair, so it just
 * produced an unused-variable warning.
 *
 * NOTE(review): q->lock is NOT taken here (unlike __wake_up), so a
 * concurrent waiter can race with the list walk — confirm this is safe
 * in the single-CPU configuration, or re-enable the locking.
 */
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	// int wake_flags = WF_SYNC;
	int wake_flags = 0;

	printf("this is %s(): %d\r\n", __func__, __LINE__);

	// if (unlikely(!q))
	// 	return;

	// if (unlikely(!nr_exclusive))
	// 	wake_flags = 0;

	// spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
	// spin_unlock_irqrestore(&q->lock, flags);
}
