#pragma once

#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sched/prio.h>

#include "queueflags.h"
#include "cfs.h"
#include "rt.h"
#include "features.h"
#include "deadline.h"

struct task_struct;

/*
 * Per-acquisition state for a runqueue lock, threaded through the
 * rq_lock()/rq_unlock() helpers.
 */
struct rq_flags
{
    unsigned long flags;      /* saved IRQ flags -- assumed (no irqsave variant visible here); TODO confirm */
    struct pin_cookie cookie; /* lockdep pin cookie consumed by rq_unpin_lock() */
};

/*
 * Per-CPU runqueue: the lock, the clocks, and one sub-runqueue per
 * scheduling class (CFS, RT, deadline) for a single CPU.
 */
struct rq
{
    /* runqueue lock: */
    raw_spinlock_t __lock;

    /*
     * One pointer, two names.  Because this is a union, donor and curr
     * always alias the same task; the distinct names exist for
     * readability at call sites (task_current() vs task_current_donor()).
     */
    union
    {
        struct task_struct __rcu *donor; /* Scheduler context */
        struct task_struct __rcu *curr;  /* Execution context */
    };
    u64 clock_task;             /* task clock; read via rq_clock_task() */
    u64 clock;                  /* rq clock; refreshed by update_rq_clock() */
    unsigned long next_balance; /* next load-balance time (jiffies) -- assumed; TODO confirm */
    int clock_update_flags;     /* RQCF_* bits (see rq_clock_skip_update()) */
    int cpu;                    /* owning CPU number; read via cpu_of() */
    unsigned int nr_running;    /* runnable tasks; add_nr_running()/sub_nr_running() */

    struct cfs_rq cfs;          /* fair-class sub-runqueue */
    struct rt_rq rt;            /* realtime-class sub-runqueue */
    struct dl_rq dl;            /* deadline-class sub-runqueue */
    struct task_struct *idle;   /* this CPU's idle task */

    struct sched_dl_entity fair_server; /* DL server entity backing fair tasks -- assumed; TODO confirm */

    struct sched_dl_entity *dl_server;  /* currently selected DL server, if any -- TODO confirm */
};

/*
 * Hook table implemented by each scheduling policy (fair, rt, deadline,
 * idle).  The core scheduler dispatches through these function pointers.
 */
struct sched_class
{
    /* Add @p to / remove @p from @rq (ENQUEUE_*/DEQUEUE_* flags). */
    void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
    bool (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
    /* Voluntarily give up the CPU, optionally in favour of @p. */
    void (*yield_task)(struct rq *rq);
    bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
    /* Decide whether newly woken @p should preempt rq's current task. */
    void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);

    int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
    /* Pick this class's next candidate without context-switch bookkeeping. */
    struct task_struct *(*pick_task)(struct rq *rq);
    /*
     * Optional! When implemented pick_next_task() should be equivalent to:
     *
     *   next = pick_task();
     *   if (next) {
     *       put_prev_task(prev);
     *       set_next_task_first(next);
     *   }
     */
    struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);

    /* Context-switch bookkeeping around the outgoing/incoming task. */
    void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
    void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);

    /* Periodic tick while @p is running; @queued semantics -- TODO confirm (hrtick?) */
    void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);

    /* Notifications for class / priority / weight changes of @task. */
    void (*switching_to)(struct rq *this_rq, struct task_struct *task);
    void (*switched_from)(struct rq *this_rq, struct task_struct *task);
    void (*switched_to)(struct rq *this_rq, struct task_struct *task);
    void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
                         int oldprio);
    void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
                          const struct load_weight *lw);
    /* Refresh runtime accounting for rq's current task. */
    void (*update_curr)(struct rq *rq);

    /* One-time per-rq initialisation of this class's sub-runqueue. */
    void (*init_rq)(struct rq *rq);

#ifdef CONFIG_SMP
    /* Choose a destination CPU for @p (WF_* flags). */
    int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
    void (*task_woken)(struct rq *this_rq, struct task_struct *task);
#endif
};

/*
 * Define a sched_class instance named <name>_sched_class, matching the
 * extern declarations elsewhere in this header (e.g. fair_sched_class).
 */
#define DEFINE_SCHED_CLASS(name) \
    const struct sched_class name##_sched_class

extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
extern void raw_spin_rq_unlock(struct rq *rq);
extern void raw_spin_rq_lock(struct rq *rq);

/* CPU number that owns runqueue @rq. */
static inline int cpu_of(struct rq *rq)
{
    int cpu = rq->cpu;

    return cpu;
}

/* Internal accessor for @rq's raw spinlock. */
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
    raw_spinlock_t *lock = &rq->__lock;

    return lock;
}

/*
 * Drop the lockdep pin on @rq's lock using the cookie stored in @rf.
 * NOTE(review): this consumes rf->cookie -- verify that rq_pin_lock()
 * actually records a cookie there first, otherwise lockdep receives an
 * uninitialized value.
 */
static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
    lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
}

/*
 * Re-establish the lockdep pin after a temporary rq_unpin_lock().
 * Currently a no-op stub in this tree (upstream calls
 * lockdep_repin_lock() here) -- TODO confirm this is intentional.
 */
static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
}

/* Acquire @rq's lock, then pin it; per-acquisition state goes in @rf. */
static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
{
    raw_spin_rq_lock(rq);
    rq_pin_lock(rq, rf);
}

/* Unpin and release @rq's lock; mirror of rq_lock() in reverse order. */
static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
{
    rq_unpin_lock(rq, rf);
    raw_spin_rq_unlock(rq);
}

extern const struct sched_class **__sched_class_highest(void);
extern const struct sched_class **__sched_class_lowest(void);
extern const struct sched_class **__sched_class_at(const struct sched_class *sc);

extern void put_prev_set_next_task(struct rq *rq,
                                   struct task_struct *prev,
                                   struct task_struct *next);

/* Public accessor for @rq's lock; same lock as __rq_lockp(). */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
    return __rq_lockp(rq);
}

/*
 * Wake flags, passed to the wakeup/select_task_rq paths.
 * The first three directly map to some SD flag value.
 */
#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
#define WF_TTWU 0x08 /* Wakeup;            maps to SD_BALANCE_WAKE */

#define WF_SYNC 0x10        /* Waker goes to sleep after wakeup */
#define WF_MIGRATED 0x20    /* Internal use, task got migrated */
#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
#define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */

extern void update_rq_clock(struct rq *rq);

struct rq *task_rq(const struct task_struct *tsk);
struct rq *cpu_rq(unsigned cpu);

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED 1    /* queued on a runqueue */
#define TASK_ON_RQ_MIGRATING 2 /* in flight between runqueues */
/* True while @p is being moved between runqueues. */
static inline int task_on_rq_migrating(struct task_struct *p)
{
    int state = READ_ONCE(p->on_rq);

    return state == TASK_ON_RQ_MIGRATING;
}

/* True while @p sits queued on a runqueue. */
static inline int task_on_rq_queued(struct task_struct *p)
{
    int state = READ_ONCE(p->on_rq);

    return state == TASK_ON_RQ_QUEUED;
}

/* enqueue_task() flags: */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
#define ENQUEUE_MOVE 0x04
#define ENQUEUE_NOCLOCK 0x08

#define ENQUEUE_INITIAL 0x80

/* dequeue_task() flags: */
#define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */

/* rq::clock_update_flags bits: */
#define RQCF_REQ_SKIP 0x01 /* skip requested; set by rq_clock_skip_update() */
#define RQCF_ACT_SKIP 0x02
#define RQCF_UPDATED 0x04

/*
 * Request that the next rq clock update be skipped (sets RQCF_REQ_SKIP
 * in rq->clock_update_flags).  Presumably called with the rq lock held
 * -- see the disabled assertion below; TODO confirm and re-enable it.
 */
static inline void rq_clock_skip_update(struct rq *rq)
{
    // lockdep_assert_rq_held(rq);
    rq->clock_update_flags |= RQCF_REQ_SKIP;
}

extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
bool sched_class_above(const struct sched_class *a, const struct sched_class *b);

void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);

extern struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
    __acquires(rq->lock);
extern void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
    __releases(rq->lock);

extern void init_entity_runnable_average(struct sched_entity *se);
extern void post_init_entity_util_avg(struct task_struct *p);

extern const struct sched_class rt_sched_class;
extern const struct sched_class idle_sched_class;
extern const struct sched_class fair_sched_class;

int sched_find_first_bit(const unsigned long *b);

/*
 * WARN_ONCE() wrapper for scheduler sanity checks.  With
 * CONFIG_SCHED_DEBUG off it degrades to an expression that still
 * evaluates @x (for side effects) and yields 0.
 */
#ifdef CONFIG_SCHED_DEBUG
#define SCHED_WARN_ON(x)      WARN_ONCE(x, #x)
#else
#define SCHED_WARN_ON(x)      ({ (void)(x), 0; })
#endif

void __task_init(struct task_struct *p);

extern void schedule_idle(void);

extern void resched_curr(struct rq *rq);
extern void resched_curr_lazy(struct rq *rq);

/* True when @p is an idle thread (PF_IDLE set in p->flags). */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
    return (p->flags & PF_IDLE) != 0;
}

extern int sched_rr_timeslice;

extern void init_rt_entity(struct task_struct *p);

/* Does @policy denote SCHED_IDLE? */
static inline int idle_policy(int policy)
{
    return (policy == SCHED_IDLE) ? 1 : 0;
}

/* Is task @p running under the SCHED_IDLE policy? */
static inline int task_has_idle_policy(struct task_struct *p)
{
    int policy = p->policy;

    return idle_policy(policy);
}

/* Map a sched_entity back to the task_struct that embeds it (field 'se'). */
#define task_of(_se) container_of(_se, struct task_struct, se)

/* The CFS sub-runqueue of the runqueue owning the task embedding @se. */
static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
    struct rq *rq = task_rq(task_of(se));

    return &rq->cfs;
}

/* Bitmask of enabled scheduler features; bit indices are __SCHED_FEAT_* (features.h). */
extern unsigned int sysctl_sched_features;

/* Test whether scheduler feature @x is enabled. */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/* sched_ext is not supported in this tree. */
#define scx_enabled() false

void sub_nr_running(struct rq *rq, unsigned count);
void add_nr_running(struct rq *rq, unsigned count);

int task_on_cpu(struct rq *rq, struct task_struct *p);

void raw_spin_rq_lock_irq(struct rq *rq);
void raw_spin_rq_unlock_irq(struct rq *rq);

void __block_task(struct rq *rq, struct task_struct *p);

void check_class_changing(struct rq *rq, struct task_struct *p,
                          const struct sched_class *prev_class);

void set_next_task(struct rq *rq, struct task_struct *next);

/*
 * True when @p supplies the scheduling context on @rq (rq->donor).
 * Since donor and curr share a union, @p may simultaneously be the
 * execution context (rq->curr == rq->donor == p).
 */
static inline int task_current_donor(struct rq *rq, struct task_struct *p)
{
    return p == rq->donor;
}

const struct sched_class *__setscheduler_class(int policy, int prio);
void put_prev_task(struct rq *rq, struct task_struct *prev);

bool dl_entity_preempt(const struct sched_dl_entity *a,
                       const struct sched_dl_entity *b);

s64 update_curr_common(struct rq *rq);

/* True when @p is the execution context of @rq (rq->curr). */
static inline int task_current(struct rq *rq, struct task_struct *p)
{
    return p == rq->curr;
}

/* Snapshot of @rq's cached task clock (rq->clock_task). */
static inline u64 rq_clock_task(struct rq *rq)
{
    u64 now = rq->clock_task;

    return now;
}

#define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
/* Identity mapping for now: load resolution equals user-visible weight. */
#define scale_load_down(w) (w) // TODO

/*
 * Task weight (visible to users) and its load (invisible to users) have
 * independent resolution, but they should be well calibrated. We use
 * scale_load() and scale_load_down(w) to convert between them. The
 * following must be true:
 *
 *  scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD
 *
 */
#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)

/* Is the deadline-server entity @dl_se currently active? */
static inline bool dl_server_active(struct sched_dl_entity *dl_se)
{
    bool active = dl_se->dl_server_active;

    return active;
}

/* Does @policy denote SCHED_NORMAL? */
static inline int normal_policy(int policy)
{
    return (policy == SCHED_NORMAL) ? 1 : 0;
}

static inline bool sched_dl_runnable(struct rq *rq)
{
    return rq->dl.dl_nr_running > 0;
}

extern void check_class_changed(struct rq *rq, struct task_struct *p,
                                const struct sched_class *prev_class,
                                int oldprio);

/* Placeholder: group scheduling is not implemented in this tree. */
struct task_group
{
};

extern void dl_server_stop(struct sched_dl_entity *dl_se);
