#pragma once

#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sched/prio.h>
#include <linux/lockdep.h>
#include <linux/sched/idle.h>

#include "queueflags.h"
#include "features.h"
#include "stats.h"
#include "domain.h"
#include "rq.h"
#include "sched_class.h"
#include "task-rq.h"
#include "ext.h"

/*
 * A deferred callback to run on a runqueue, chained into a singly-linked
 * list headed at the rq.  Used to queue balancing work that must run after
 * the current scheduling operation completes.
 */
struct balance_callback
{
    struct balance_callback *next; /* next pending callback, or NULL */
    void (*func)(struct rq *rq);   /* work to perform for this entry */
};

/* Wake flags. The first three directly map to some SD flag value */
#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
#define WF_TTWU 0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */

#define WF_SYNC 0x10        /* Waker goes to sleep after wakeup */
#define WF_MIGRATED 0x20    /* Internal use, task got migrated */
#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
#define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */

extern void update_rq_clock(struct rq *rq);

struct rq *task_rq(const struct task_struct *tsk);
struct rq *cpu_rq(unsigned cpu);

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1
#define TASK_ON_RQ_MIGRATING 2
/* Non-zero iff @p is mid-migration between runqueues (->on_rq state). */
static inline int task_on_rq_migrating(struct task_struct *p)
{
    int on_rq = READ_ONCE(p->on_rq);

    return on_rq == TASK_ON_RQ_MIGRATING;
}

/* Non-zero iff @p is queued on a runqueue (->on_rq state). */
static inline int task_on_rq_queued(struct task_struct *p)
{
    int on_rq = READ_ONCE(p->on_rq);

    return on_rq == TASK_ON_RQ_QUEUED;
}

#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
#define ENQUEUE_MOVE 0x04
#define ENQUEUE_NOCLOCK 0x08

#define ENQUEUE_INITIAL 0x80

#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */

#define RQCF_REQ_SKIP 0x01
#define RQCF_ACT_SKIP 0x02
#define RQCF_UPDATED 0x04

/*
 * Request that the next rq clock update be skipped by setting RQCF_REQ_SKIP
 * in rq->clock_update_flags.
 *
 * NOTE(review): upstream asserts the rq lock is held here; the assertion is
 * commented out below (the local lockdep_assert_rq_held stub is defined later
 * in this header, so it cannot be called from this point) — confirm callers
 * hold rq->lock.
 */
static inline void rq_clock_skip_update(struct rq *rq)
{
    // lockdep_assert_rq_held(rq);
    rq->clock_update_flags |= RQCF_REQ_SKIP;
}

bool sched_class_above(const struct sched_class *a, const struct sched_class *b);

extern struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    __acquires(rq->lock);
extern void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock);

extern struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

int sched_find_first_bit(const unsigned long *b);

#ifdef CONFIG_SCHED_DEBUG
/* Debug build: warn once, printing the stringified condition; yields @x. */
#define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
#else
/* Non-debug build: still evaluate @x (for side effects), always yield 0. */
#define SCHED_WARN_ON(x) ({ (void)(x), 0; })
#endif

extern void schedule_idle(void);

extern void resched_curr(struct rq *rq);
extern void resched_curr_lazy(struct rq *rq);

extern int sched_rr_timeslice;

extern void init_rt_entity(struct task_struct *p);
extern void init_dl_entity(struct task_struct *p);
extern void init_fair_entity(struct task_struct *p);

/* Is @policy the idle scheduling policy? */
static inline int idle_policy(int policy)
{
    return (policy == SCHED_IDLE) ? 1 : 0;
}

/* Is @policy the default timesharing policy? */
static inline int normal_policy(int policy)
{
    return (policy == SCHED_NORMAL) ? 1 : 0;
}

/* Is @policy handled by the fair class (SCHED_NORMAL or SCHED_BATCH)? */
static inline int fair_policy(int policy)
{
    if (normal_policy(policy))
        return 1;

    return policy == SCHED_BATCH;
}

/* Is @policy the deadline policy? */
static inline int dl_policy(int policy)
{
    return (policy == SCHED_DEADLINE) ? 1 : 0;
}

/* Is @policy a realtime policy (FIFO or round-robin)? */
static inline int rt_policy(int policy)
{
    switch (policy) {
    case SCHED_FIFO:
    case SCHED_RR:
        return 1;
    default:
        return 0;
    }
}

static inline bool valid_policy(int policy)
{
    return idle_policy(policy) || fair_policy(policy) ||
           rt_policy(policy) || dl_policy(policy);
}

/* Does @p currently run under the idle policy? */
static inline int task_has_idle_policy(struct task_struct *p)
{
    int policy = p->policy;

    return idle_policy(policy);
}

#define task_of(_se) container_of(_se, struct task_struct, se)

static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
    const struct task_struct *p = task_of(se);
    struct rq *rq = task_rq(p);

    return &rq->cfs;
}

extern unsigned int sysctl_sched_features;

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

#define scx_enabled() false

void sub_nr_running(struct rq *rq, unsigned count);
void add_nr_running(struct rq *rq, unsigned count);

int task_on_cpu(struct rq *rq, struct task_struct *p);

void __block_task(struct rq *rq, struct task_struct *p);

void check_class_changing(struct rq *rq, struct task_struct *p,
                          const struct sched_class *prev_class);

void set_next_task(struct rq *rq, struct task_struct *next);

/*
 * Is p the current scheduling context?
 *
 * Note that it might be the current execution context at the same time if
 * rq->curr == rq->donor == p.
 */
static inline int task_current_donor(struct rq *rq, struct task_struct *p)
{
    /* The donor is the task whose scheduling context rq is running. */
    return p == rq->donor;
}

const struct sched_class *__setscheduler_class(int policy, int prio);
void put_prev_task(struct rq *rq, struct task_struct *prev);
void task_dead(struct task_struct *p);

bool dl_entity_preempt(const struct sched_dl_entity *a,
                       const struct sched_dl_entity *b);

s64 update_curr_common(struct rq *rq);

/*
 * Is p the current execution context?
 */
static inline int task_current(struct rq *rq, struct task_struct *p)
{
    /* Compare against the task actually executing on this rq. */
    return p == rq->curr;
}

/* Task clock of @rq (excludes time the rq clock discounts, e.g. irq). */
static inline u64 rq_clock_task(struct rq *rq)
{
    u64 now = rq->clock_task;

    return now;
}

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper task-group
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
 * are pretty high and the returns do not justify the increased costs.
 *
 * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
 * increase coverage and consistency always enable it on 64-bit platforms.
 *
 * NICE_0_LOAD_SHIFT is defined exactly once per configuration below.  A
 * previous unconditional definition here was removed: redefining a macro
 * with a different replacement list (as the CONFIG_64BIT branch did) is a
 * C constraint violation (C11 6.10.3p2) and breaks 64-bit builds.
 */
#ifdef CONFIG_64BIT
#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
#define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
/* Scale back to user resolution, clamping non-zero weights to >= 2. */
#define scale_load_down(w)                                 \
    ({                                                     \
        unsigned long __w = (w);                           \
                                                           \
        if (__w)                                           \
            __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
        __w;                                               \
    })
#else
#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
#define scale_load(w) (w)
#define scale_load_down(w) (w)
#endif

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)

static inline bool dl_server_active(struct sched_dl_entity *dl_se)
{
    return dl_se->dl_server_active;
}

static inline bool sched_dl_runnable(struct rq *rq)
{
    return rq->dl.dl_nr_running > 0;
}

extern void check_class_changed(struct rq *rq, struct task_struct *p,
                                const struct sched_class *prev_class,
                                int oldprio);

/*
 * Placeholder: group scheduling is not supported in this build, so the
 * task group carries no state.  NOTE(review): presumably kept so code
 * handling struct task_group pointers still compiles — confirm before
 * adding fields.
 */
struct task_group
{
};

extern void dl_server_stop(struct sched_dl_entity *dl_se);

/*
 * Is @p blocked (proxy-execution style)?  Stubbed to always report false;
 * the real tracking is not implemented yet (see TODO).
 */
static inline bool task_is_blocked(struct task_struct *p)
{
    return false; // TODO
}

#include "pelt.h"

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765

extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);

/*
 * Stub: always reports that @dl_se is not "special".
 * NOTE(review): upstream uses this to exempt certain non-task DL entities
 * from bandwidth accounting — confirm none exist in this build.
 */
static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
    return false;
}

/* Current clock of @rq (updated by update_rq_clock()). */
static inline u64 rq_clock(struct rq *rq)
{
    u64 now = rq->clock;

    return now;
}

#ifndef arch_scale_freq_capacity
/**
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *     f_curr
 *     ------ * SCHED_CAPACITY_SCALE
 *     f_max
 *
 * Default implementation used when the architecture provides no override:
 * behaves as if the CPU always runs at maximum frequency.
 */
static inline unsigned long arch_scale_freq_capacity(int cpu)
{
    return SCHED_CAPACITY_SCALE;
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 *
 * Default implementation used when the architecture provides no override:
 * treats all CPUs as having identical (maximum) capacity.
 */
static inline unsigned long arch_scale_cpu_capacity(int cpu)
{
    return SCHED_CAPACITY_SCALE;
}
#endif

#define cap_scale(v, s) ((v) * (s) >> SCHED_CAPACITY_SHIFT)

#define BW_SHIFT 20
#define BW_UNIT (1 << BW_SHIFT)
#define RATIO_SHIFT 8
#define MAX_BW_BITS (64 - BW_SHIFT)
#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)

/*
 * No-op stub: this build does not track rq lock ownership, so the
 * "rq lock held" assertion compiles away.
 */
static inline void lockdep_assert_rq_held(struct rq *rq)
{
}

/*
 * Scope-based guard for the rq lock: acquires via rq_lock() on entry and
 * releases via rq_unlock() when the guard goes out of scope, storing the
 * rq_flags inside the guard object.
 */
DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
                    rq_lock(_T->lock, &_T->rf),
                    rq_unlock(_T->lock, &_T->rf),
                    struct rq_flags rf)

extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi);

/*
 * XXX we want to get rid of these helpers and use the full load resolution.
 */
static inline long se_weight(struct sched_entity *se)
{
    return scale_load_down(se->load.weight);
}

extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
