#include "internal.h"

#include <linux/math.h>
#include <linux/jiffies.h>
#include <linux/minmax.h>

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_base_slice = 750000ULL;

/*
 * Flag arguments for the load-tracking hooks below.  All stubbed to 0 in
 * this minimal build, so passing them is a no-op — presumably placeholders
 * for a future PELT implementation (TODO confirm).
 */
#define UPDATE_TG	0x0
#define SKIP_AGE_LOAD	0x0
#define DO_ATTACH	0x0
#define DO_DETACH	0x0

/*
 * Fixed-point parameters for __calc_delta(): inverse weights are stored as
 * 32-bit reciprocals (WMULT_CONST / w), consumed with a WMULT_SHIFT shift.
 */
#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32

/*
 * Without group scheduling an entity has no parent chain, so this
 * degenerates to exactly one iteration over @se itself.
 */
#define for_each_sched_entity(se) \
    for (; se; se = NULL)

/* Convert a timeline rb_node back into its containing sched_entity. */
#define __node_2_se(node) \
    rb_entry((node), struct sched_entity, run_node)

/* Map a cfs_rq back to the runqueue that embeds it (rq->cfs). */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
    return container_of(cfs_rq, struct rq, cfs);
}

/*
 * Clear the ->next buddy hint at every level of @se's hierarchy where it
 * still points at @se.  With the flat for_each_sched_entity() above this
 * inspects only @se's own cfs_rq; the early break stops as soon as the
 * hint refers to some other entity.
 */
static inline void __clear_buddies_next(struct sched_entity *se)
{
    for_each_sched_entity(se)
    {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        if (cfs_rq->next != se)
            break;

        cfs_rq->next = NULL;
    }
}

/* Drop the runqueue's next-buddy hint if (and only if) it names @se. */
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    if (cfs_rq->next != se)
        return;

    __clear_buddies_next(se);
}

static void __update_inv_weight(struct load_weight *lw)
{
    unsigned long w;

    if (likely(lw->inv_weight))
        return;

    w = scale_load_down(lw->weight);

    if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
        lw->inv_weight = 1;
    else if (unlikely(!w))
        lw->inv_weight = WMULT_CONST;
    else
        lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw is an element of sched_prio_to_wmult[],
 * in which case we're guaranteed shift stays positive because inv_weight is
 * guaranteed to fit 32 bits, and NICE_0_LOAD gives another 10 bits;
 * therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
    u64 fact = scale_load_down(weight);
    u32 fact_hi = (u32)(fact >> 32);
    int shift = WMULT_SHIFT;
    int fs;

    __update_inv_weight(lw);

    /* Normalize 'fact' into 32 bits, folding the lost bits into 'shift'. */
    if (unlikely(fact_hi))
    {
        fs = fls(fact_hi);
        shift -= fs;
        fact >>= fs;
    }

    fact = mul_u32_u32(fact, lw->inv_weight);

    /* The 32x32 product may overflow 32 bits; renormalize once more. */
    fact_hi = (u32)(fact >> 32);
    if (fact_hi)
    {
        fs = fls(fact_hi);
        shift -= fs;
        fact >>= fs;
    }

    /* (delta_exec * fact) >> shift, computed without 128-bit overflow. */
    return mul_u64_u32_shr(delta_exec, fact, shift);
}

/*
 * delta /= w — scale raw runtime into weighted (fair) time via
 * __calc_delta(delta, NICE_0_LOAD, &se->load).
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
    /* Nice-0 tasks take the common fast path: delta is already fair time. */
    if (likely(se->load.weight == NICE_0_LOAD))
        return delta;

    return __calc_delta(delta, NICE_0_LOAD, &se->load);
}

/*
 * PELT / util_est hooks, all stubbed to no-ops: load-average and utilization
 * tracking is not implemented in this build — presumably matching the
 * kernel's !CONFIG_SMP stubs (TODO confirm).
 */
static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
}

static inline void util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p)
{
}

static inline void util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}

static inline void util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
                                   bool task_sleep)
{
}

static inline bool entity_before(const struct sched_entity *a, const struct sched_entity *b)
{
    /*
     * Tiebreak on vruntime seems unnecessary since it can
     * hardly happen.
     */
    return (s64)(a->deadline - b->deadline) < 0;
}

/* rb-tree less() comparator: order run_nodes by entity_before(). */
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
{
    return entity_before(__node_2_se(a), __node_2_se(b));
}

/* Wrap-safe minimum of two vruntimes, compared via signed difference. */
static inline __maybe_unused u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
    if ((s64)(vruntime - min_vruntime) < 0)
        return vruntime;

    return min_vruntime;
}

/*
 * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
 *
 * Augmented-rbtree propagation callback.  Stubbed: always returns true,
 * which tells the rbtree code the subtree value is already up to date,
 * stopping propagation immediately — so min_vruntime is effectively not
 * maintained yet.
 */
static inline bool min_vruntime_update(struct sched_entity *se, bool exit)
{
    return true;
}

/* Generate the augmented-rbtree callbacks keyed on se->min_vruntime. */
RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
                     run_node, min_vruntime, min_vruntime_update);

/* Stub: cfs_rq->min_vruntime is never advanced. */
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
    // TODO
}

/*
 * Entity's vruntime relative to the runqueue's min_vruntime; the signed
 * result keeps the value small and wrap-safe for avg_vruntime math.
 */
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    return (s64)(se->vruntime - cfs_rq->min_vruntime);
}

/*
 * Remove @se's contribution from the runqueue's weighted-average vruntime
 * accumulators: \Sum key*w from avg_vruntime and w from avg_load.
 */
static void avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    unsigned long w = scale_load_down(se->load.weight);
    s64 key = entity_key(cfs_rq, se);

    cfs_rq->avg_load -= w;
    cfs_rq->avg_vruntime -= key * w;
}

/*
 * Entity is eligible once it received less service than it ought to have,
 * i.e. lag >= 0.
 *
 * lag_i = S - s_i = w_i*(V - v_i)
 *
 * lag_i >= 0 -> V >= v_i
 *
 *     \Sum (v_i - v)*w_i
 * V = ------------------ + v
 *          \Sum w_i
 *
 * lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
 *
 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
 *       to the loss in precision caused by the division.
 */
static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
{
    struct sched_entity *curr = cfs_rq->curr;
    s64 avg = cfs_rq->avg_vruntime;
    long load = cfs_rq->avg_load;

    /*
     * The running entity is not in the timeline accumulators; fold its
     * contribution in so V covers all runnable entities.
     */
    if (curr && curr->on_rq)
    {
        unsigned long weight = scale_load_down(curr->load.weight);

        avg += entity_key(cfs_rq, curr) * weight;
        load += weight;
    }

    /* Cross-multiplied form of V >= v, avoiding the lossy division. */
    return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
}

/* EEVDF eligibility of @se: has it received no more than its fair share? */
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    return vruntime_eligible(cfs_rq, se->vruntime);
}

/* Stub: high-resolution tick reprogramming not supported in this build. */
static inline void hrtick_update(struct rq *rq)
{
}

/*
 * Return the entity at the root of the timeline rb-tree, or NULL when
 * the tree is empty.
 */
struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
{
    struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;

    return node ? __node_2_se(node) : NULL;
}

/*
 * Smallest requested slice among the runnable entities: the current
 * entity's slice and the tree-wide min_slice cached at the root.
 * Returns ~0ULL when the runqueue is empty.
 */
static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
{
    struct sched_entity *curr = cfs_rq->curr;
    struct sched_entity *root = __pick_root_entity(cfs_rq);
    u64 slice = ~0ULL;

    if (curr && curr->on_rq)
        slice = curr->slice;

    if (root)
        slice = min(slice, root->min_slice);

    return slice;
}

/* Stub: next-buddy selection not implemented yet. */
static void set_next_buddy(struct sched_entity *se)
{
    pr_todo();
}

/*
 * Add @inc to the aggregate weight.  Zeroing inv_weight invalidates the
 * cached reciprocal so __update_inv_weight() recomputes it on next use.
 */
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

/*
 * Bookkeeping for an entity joining the cfs_rq: fold its weight into the
 * runqueue load, do per-task SMP/NUMA accounting, and bump the queue count.
 */
static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
	/* Only task entities appear on rq->cfs_tasks and in NUMA stats. */
	if (entity_is_task(se)) {
		struct rq *rq = rq_of(cfs_rq);

		account_numa_enqueue(rq, task_of(se));
		list_add(&se->group_node, &rq->cfs_tasks);
	}
#endif
	cfs_rq->nr_queued++;
}

/*
 * Subtract @dec from the aggregate weight and invalidate the cached
 * reciprocal (recomputed lazily by __update_inv_weight()).
 */
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

/*
 * Inverse of account_entity_enqueue(): drop the entity's weight from the
 * runqueue load, undo per-task SMP/NUMA accounting, decrement the count.
 */
static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se)) {
		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
		list_del_init(&se->group_node);
	}
#endif
	cfs_rq->nr_queued--;
}

/* Stub: entity lag (vlag) bookkeeping not implemented yet. */
static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    pr_todo();
}

/* Stub: dequeue-side scheduling statistics not collected yet. */
static inline void update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
    //TODO
}

#include "fair-bandwidth.c"
#include "fair-group_sched.c"
#include "fair-update_curr.c"
#include "fair-enqueue_task.c"
#include "fair-dequeue_task.c"
#include "fair-pick_task.c"
#include "fair-task_tick.c"
#include "fair-wakeup_preempt.c"

/* Initialize the per-rq CFS runqueue: empty timeline, sentinel min_vruntime. */
static void init_cfs_rq(struct rq *rq)
{
    struct cfs_rq *cfs_rq = &rq->cfs;

    cfs_rq->tasks_timeline = RB_ROOT_CACHED;
    /*
     * Start min_vruntime "just below" zero — presumably so that u64
     * wraparound in the signed-delta comparisons is exercised early
     * (mirrors the upstream kernel value) — TODO confirm.
     */
    cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}

/*
 * All the scheduling class methods:
 */
DEFINE_SCHED_CLASS(fair) = {
    /* queue management */
    .enqueue_task = enqueue_task_fair,
    .dequeue_task = dequeue_task_fair,

    /* task selection and context-switch hooks */
    .pick_task = pick_task_fair,
    .pick_next_task = __pick_next_task_fair,
    .put_prev_task = put_prev_task_fair,
    .set_next_task = set_next_task_fair,

    /* periodic tick */
    .task_tick = task_tick_fair,

    /* runtime accounting */
    .update_curr = update_curr_fair,

    /* wakeup preemption check */
    .wakeup_preempt = check_preempt_wakeup_fair,

    /* per-rq initialization (see init_cfs_rq above) */
    .init_rq = init_cfs_rq,
};
