#pragma once

/* Task (struct task_struct) and runqueue (struct rq) API helpers. */

/* True when migration is currently disabled for task @p. */
static inline bool is_migration_disabled(struct task_struct *p)
{
    bool disabled = p->migration_disabled ? true : false;

    return disabled;
}

/*
 * Presumably checks whether @cpu is in @p's allowed-CPU mask — confirm
 * against the definition; only the prototype is visible here.
 */
bool task_allowed_on_cpu(struct task_struct *p, int cpu);

/* Presumably returns the current CPU's runqueue — verify at the definition. */
extern struct rq *this_rq(void);

/* Marks @rq online; definition not visible in this file. */
void set_rq_online(struct rq *rq);

/* Raw rq-lock primitives; _nested takes a lockdep subclass for nested locking. */
extern void raw_spin_rq_unlock(struct rq *rq);
void raw_spin_rq_lock_nested(struct rq *rq, int subclass);

/* Take @rq's lock with subclass 0 — the common, non-nested case. */
static inline void raw_spin_rq_lock(struct rq *rq)
{
    raw_spin_rq_lock_nested(rq, 0);
}

/*
 * Disable local interrupts, then take @rq's lock.
 * Pair with raw_spin_rq_unlock_irq(), which undoes both in reverse order.
 */
static inline void raw_spin_rq_lock_irq(struct rq *rq)
{
	local_irq_disable();
	raw_spin_rq_lock(rq);
}

/* Release @rq's lock, then re-enable local interrupts (reverse of _lock_irq). */
static inline void raw_spin_rq_unlock_irq(struct rq *rq)
{
	raw_spin_rq_unlock(rq);
	local_irq_enable();
}

/*
 * Core task/runqueue operations, defined elsewhere.  @flags carry
 * enqueue/dequeue modifiers whose semantics are not visible from this
 * header.  Note dequeue_task() returns a bool — meaning to be confirmed
 * at the definition.
 */
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);

/* CPU number this runqueue @rq belongs to. */
static inline int cpu_of(struct rq *rq)
{
    int cpu;

    cpu = rq->cpu;
    return cpu;
}

/* Address of the spinlock embedded in @rq. */
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
{
    return &rq->__lock;
}

/*
 * Release the lockdep pin on @rq's lock using the cookie stored in @rf.
 * NOTE(review): nothing in this file visibly initializes rf->cookie —
 * confirm where the pin cookie is recorded before relying on this.
 */
static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
    lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
}

/* Acquire @rq's lock, then pin it via rq_pin_lock(); undo with rq_unlock(). */
static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
{
    raw_spin_rq_lock(rq);
    rq_pin_lock(rq, rf);
}

/* Unpin, then release @rq's lock — strict reverse order of rq_lock(). */
static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
{
    rq_unpin_lock(rq, rf);
    raw_spin_rq_unlock(rq);
}

/*
 * Presumably retires @prev and installs @next as @rq's running task in
 * one step — confirm against the definition; only the prototype is
 * visible here.
 */
extern void put_prev_set_next_task(struct rq *rq,
                                   struct task_struct *prev,
                                   struct task_struct *next);

/*
 * Return the spinlock protecting @rq.
 *
 * Fix: this was a byte-for-byte duplicate of __rq_lockp(); delegate to
 * it so the lock-field access lives in exactly one place.
 */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
    return __rq_lockp(rq);
}

/* Unlock half of a task_rq_lock()-style pairing — definition not visible here. */
void task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
