#ifndef SEMINIX_SCHED_H
#define SEMINIX_SCHED_H

#include <utils/types.h>
#include <utils/rbtree.h>
#include <seminix/linkage.h>
#include <seminix/hrtimer.h>
#include <seminix/sched/clock.h>
#include <seminix/sched/idle.h>
#include <seminix/sched/init_task.h>
#include <libseminix/sched_types.h>

/* Opaque scheduler types; only pointers are used here, definitions live elsewhere. */
struct rq;
struct tcb;

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Presumably tests whether @addr lies in [__sched_text_start, __sched_text_end) — confirm in the implementation. */
extern int in_sched_functions(unsigned long addr);

/* Priority map:
 *
 * -1 0                         100          140
 * +--+--------------------------+------------+
 *
 * -1:        DEADLINE
 * 0 - 99:    RT
 * 100 - 139: CFS
 */

#define MAX_NICE	    (19)
#define MIN_NICE	    (-20)
#define NICE_WIDTH	    (MAX_NICE - MIN_NICE + 1)

#define MAX_RT_PRIO		(100)
#define MIN_RT_PRIO     (0)
#define RT_WIDTH        (MAX_RT_PRIO - MIN_RT_PRIO)

#define MAX_DL_PRIO		0

/*
 * CFS range expressed via NICE_WIDTH rather than the magic constants
 * 40 and 20, so these stay consistent should the nice range ever change.
 * Values are unchanged: MAX_PRIO == 140, DEFAULT_PRIO == 120.
 */
#define MAX_PRIO		    (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio)	((prio) - DEFAULT_PRIO)


/* Load weight of a fair-class scheduling entity. */
struct load_weight {
    unsigned long weight;   /* the entity's load weight */
    u32 inv_weight;         /* cached inverse of @weight to avoid division — presumably 2^32/weight, confirm against the CFS code */
};

/* Per-task CFS (fair class) scheduling state. */
struct sched_entity {
    struct load_weight  load;       /* entity load weight */
    struct rb_node		run_node;   /* node in the fair-class runqueue rb-tree */
    int     on_rq;                  /* non-zero while the entity is queued — confirm exact semantics in core.c */

    u64			exec_start;         /* timestamp when the current run began */
    u64			sum_exec_runtime;   /* accumulated execution time */
    u64			vruntime;           /* virtual runtime — CFS ordering key */
    u64			prev_sum_exec_runtime;  /* sum_exec_runtime snapshot, presumably taken at pick-time to measure the last slice */
};

/* Per-task RT (0..99 priority range) scheduling state. */
struct sched_rt_entity {
    struct list_head run_list;  /* entry in the per-priority run list */
    unsigned int time_slice;    /* remaining round-robin slice — unit (ticks?) to be confirmed in rt.c */

    struct sched_rt_entity *back;   /* NOTE(review): looks like a temporary back-link for queue walks — verify in rt.c */
};

/* Per-task SCHED_DEADLINE scheduling state. */
struct sched_dl_entity {
    struct rb_node	rb_node;    /* node in the deadline-ordered rb-tree */

    /*
     * Original scheduling parameters. Copied here from sched_attr
     * during sched_setscheduler2(), they will remain the same until
     * the next sched_setscheduler2().
     */
    u64 dl_runtime;		/* maximum runtime for each instance	*/
    u64 dl_deadline;	/* relative deadline of each instance	*/
    u64 dl_period;		/* separation of two instances (period) */
    u64 dl_bw;		/* dl_runtime / dl_deadline		*/

    /*
     * Actual scheduling parameters. Initialized with the values above,
     * they are continuously updated during task execution. Note that
     * the remaining runtime could be < 0 in case we are in overrun.
     */
    s64 runtime;		/* remaining runtime for this instance	*/
    u64 deadline;		/* absolute deadline for this instance	*/

    /*
     * Some bool flags:
     *
     * @dl_throttled tells if we exhausted the runtime. If so, the
     * task has to wait for a replenishment to be performed at the
     * next firing of dl_timer.
     *
     * @dl_new tells if a new instance arrived. If so we must
     * start executing it with full runtime and reset its absolute
     * deadline.
     *
     * @dl_boosted tells if we are boosted due to DI. If so we are
     * outside bandwidth enforcement mechanism (but only until we
     * exit the critical section).
     */
    int dl_throttled, dl_new, dl_boosted;

    /*
     * Bandwidth enforcement timer. Each -deadline task has its
     * own bandwidth to be enforced, thus we need one timer per task.
     */
    struct hrtimer dl_timer;
};

/* RT bandwidth / round-robin tunables (see sched_rt_handler()/sched_rr_handler()). */
extern int sysctl_sched_rt_runtime;
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rr_timeslice;

/* How the CFS tunables below are scaled with the number of CPUs. */
enum sched_tunable_scaling {
    SCHED_TUNABLESCALING_NONE,      /* no scaling */
    SCHED_TUNABLESCALING_LOG,       /* logarithmic scaling */
    SCHED_TUNABLESCALING_LINEAR,    /* linear scaling */
    SCHED_TUNABLESCALING_END,       /* count of valid modes; not a mode itself */
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

/* CFS tunables — units (presumably nanoseconds for the granularities) to be confirmed in fair.c. */
extern u32 sysctl_sched_latency;
extern u32 sysctl_sched_min_granularity;
extern u32 sysctl_sched_child_runs_first;
extern u32 sysctl_sched_wakeup_granularity;

/* Bitmask of scheduler feature flags. */
extern u32 sysctl_sched_features;


#ifdef CONFIG_SMP
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
/* UP build: the periodic tick can never be stopped. */
static inline bool sched_can_stop_tick(void) { return false; }
#endif

extern void sched_set_stop_task(int cpu, struct tcb *stop);

extern int task_curr(const struct tcb *p);

#ifdef CONFIG_SMP
extern void kick_process(struct tcb *tsk);
extern void scheduler_ipi(void);
#else
/* UP build: no other CPUs to kick or interrupt, so these are no-ops. */
static inline void kick_process(struct tcb *tsk) {}
static inline void scheduler_ipi(void) {}
#endif

/* Task wakeup entry points. */
extern int wake_up_process(struct tcb *p);
extern int wake_up_state(struct tcb *p, int state);
extern void wake_up_new_task(struct tcb *p);

extern int sched_new(struct tcb *p, int state);

/* NOTE(review): presumably runs right after a context switch to finish it — confirm in core.c. */
extern asmlinkage void schedule_tail(struct tcb *prev);

/* Global scheduler statistics. */
extern u64 nr_running(void);
extern u64 nr_context_switches(void);

/* Per-task execution-time accounting queries. */
extern unsigned long long task_delta_exec(struct tcb *p);
extern unsigned long long task_sched_runtime(struct tcb *p);

/* Periodic timer-tick hook into the scheduler. */
extern void scheduler_tick(void);

/* Core rescheduling entry points. */
extern asmlinkage void schedule(void);
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_irq(void);
extern void schedule_preempt_disabled(void);
extern void yield(void);

/* Per-CPU idle task queries. */
extern int idle_cpu(int cpu);
extern struct tcb *idle_task(int cpu);

/* Scheduling parameters passed to sched_setattr()/sched_get_sched_attr(). */
struct sched_attr {
    u32 sched_policy;       /* scheduling policy */

    u32 sched_priority;     /* static priority, for the RT policies */

    /* SCHED_DEADLINE parameters — units (presumably nanoseconds) to be confirmed against deadline.c. */
    u64 sched_runtime;
    u64 sched_deadline;
    u64 sched_period;
};

/* Setters for a task's scheduling parameters. */
extern void sched_set_user_nice(struct tcb *p, long nice);
extern int sched_set_scheduler(struct tcb *p, int policy, int priority);
extern int sched_setattr(struct tcb *p, const struct sched_attr *attr);
extern int sched_set_affinity(struct tcb *p, int cpu);

/* Getters for a task's scheduling parameters. */
extern void sched_get_sched_attr(struct tcb *p, struct sched_attr *attr);
extern int sched_rr_getinterval(struct tcb *p, struct timespec *interval);

/* Debug dumps of task / CPU scheduler state. */
extern void sched_show_task(struct tcb *p);
extern void show_state_filter(unsigned long state_filter);
extern void dump_cpu_task(int cpu);

/* Boot-time scheduler initialization. */
extern void init_idle(struct tcb *idle, int cpu);
extern void sched_init_granularity(void);
extern void sched_init_smp(void);
extern void sched_init(void);

/* Handlers invoked when the RT/RR sysctl knobs above change. */
extern int sched_rt_handler(unsigned long rt_runtime, unsigned long rt_period);
extern int sched_rr_handler(void);

/* Load-balancing helpers. */
extern u32 cpu_avg_load_per_task(int cpu);
extern int find_idlest_cpu(struct tcb *p, int this_cpu, u32 *load_move, int type);
extern int find_busiest_cpu(struct tcb *p, int this_cpu, u32 *load_move, int type);

/* Sentinel meaning "sleep until explicitly woken" for the timeout helpers below. */
#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

/* Sleep for up to @timeout; return value presumably is the remaining time — confirm in the implementation. */
extern s64 schedule_timeout(s64 timeout);
extern s64 schedule_timeout_interruptible(s64 timeout);
extern s64 schedule_timeout_uninterruptible(s64 timeout);

#endif /* !SEMINIX_SCHED_H */
