#pragma once

#include <uapi/linux/sched.h>
#include <linux/kconfig.h>
#include <asm/processor.h>
#include <linux/sched/task_types.h>
#include <asm/current.h>
#include <linux/sched/prio.h>
#include <linux/sched/sched.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/cache.h>

/* Scheduler entry points implemented in the sched core (see their .c files). */
extern void sched_tick(void);
extern long schedule_timeout(int timeout);

extern int wake_up_process(struct task_struct *tsk);
extern void schedule_preempt_disabled(void);
extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
extern int wake_up_state(struct task_struct *p, unsigned int state);

/* True when @task is currently TASK_RUNNING (lockless read of ->__state). */
#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)

/* CPU placement accessors — NOTE(review): presumably defined in sched core. */
unsigned int task_cpu(const struct task_struct *p);
void set_task_cpu(struct task_struct *p, unsigned int cpu);

/* wake_up_process() was redundantly redeclared here; duplicate removed. */
extern void wake_up_new_task(struct task_struct *tsk);

/* Does the current task have a reschedule pending? Expected cold path. */
static __always_inline bool need_resched(void)
{
    bool resched_pending = tif_need_resched();

    return unlikely(resched_pending);
}

/*
 * Helpers that manipulate TIF_xxxx flags on *another* task's thread_info.
 * See asm/thread_info.h for the available flag values.
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
    struct thread_info *ti = task_thread_info(tsk);

    set_ti_thread_flag(ti, flag);
}

/* Clear a TIF_xxxx flag on @tsk's thread_info (counterpart of the setter). */
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
    struct thread_info *ti = task_thread_info(tsk);

    clear_ti_thread_flag(ti, flag);
}

/* Mark @tsk as needing a reschedule at the next opportunity. */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
    /* Equivalent to set_tsk_thread_flag(tsk, TIF_NEED_RESCHED), spelled out. */
    set_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED);
}

/* Drop a pending reschedule request for @tsk. */
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
    /* Equivalent to clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED), spelled out. */
    clear_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED);
}

/*
 * __set_current_state() - plain WRITE_ONCE of current->__state, no barrier.
 * NOTE(review): presumably for callers where ordering is provided externally
 * (e.g. under a lock) — otherwise use set_current_state() below, which
 * includes a memory barrier via smp_store_mb().
 */
#define __set_current_state(state_value)             \
    do                                               \
    {                                                \
        WRITE_ONCE(current->__state, (state_value)); \
    } while (0)

/*
 * set_current_state() - set current->__state with smp_store_mb(), i.e. the
 * store is combined with a full memory barrier, ordering it against any
 * subsequent condition check in the caller's wait loop.
 */
#define set_current_state(state_value)                 \
    do                                                 \
    {                                                  \
        smp_store_mb(current->__state, (state_value)); \
    } while (0)

/* Sleep uninterruptibly for @timeout ticks (implemented in sched core). */
extern long schedule_timeout_uninterruptible(int timeout);

/* Core scheduling entry points — see their definitions for exact contracts. */
void schedule(void);
void preempt_schedule_irq(void);
void schedule_none(void);
/* NOTE(review): name suggests it picks the next task but leaves the actual
 * context switch to the caller — confirm against the definition. */
struct task_struct *preempt_schedule_irq_no_switch(void);

/* Non-zero iff @tsk has TIF_NEED_RESCHED set; hinted as the cold path. */
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
    if (unlikely(test_ti_thread_flag(task_thread_info(tsk), TIF_NEED_RESCHED)))
        return 1;

    return 0;
}

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 *
 * Fix: the macro parameter @tsk was used unparenthesized (&tsk->pi_lock,
 * tsk->__state), which mis-expands for arguments like "a ? b : c".
 * Every use is now wrapped in parentheses.
 */
#define __set_special_state(tsk, state_value)               \
    do                                                      \
    {                                                       \
        unsigned long flags; /* may shadow */               \
                                                            \
        raw_spin_lock_irqsave(&(tsk)->pi_lock, flags);      \
        WRITE_ONCE((tsk)->__state, (state_value));          \
        raw_spin_unlock_irqrestore(&(tsk)->pi_lock, flags); \
    } while (0)

/*
 * cond_resched() - voluntary preemption point; a no-op in this build.
 *
 * Fix: the previous expansion "({})" is a void-valued statement expression,
 * which breaks any caller that tests the return value ("did we resched?")
 * as the mainline API allows. Evaluate to 0 ("did not reschedule") instead.
 */
#define cond_resched() ({ 0; })

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
/* 10 fractional bits: scale factor is 1L << 10 == 1024. */
#define SCHED_FIXEDPOINT_SHIFT 10
#define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

/*
 * owner_on_cpu() - lockless check whether @owner is currently running on a CPU.
 *
 * Intended for lock spinners: there is no point spinning on a lock whose
 * holder is not running. NOTE(review): the comment below also mentions the
 * "cpu is preempted" case, but the code only reads ->on_cpu; the TODO
 * presumably refers to the missing vcpu_is_preempted() check — confirm
 * before relying on this under virtualization.
 */
static inline bool owner_on_cpu(struct task_struct *owner)
{
    /*
     * As lock holder preemption issue, we both skip spinning if
     * task is not on cpu or its cpu is preempted
     */
    return READ_ONCE(owner->on_cpu); // TODO: also test vcpu_is_preempted()?
}
