#ifndef SEMINIX_SCHED_IDLE_H
#define SEMINIX_SCHED_IDLE_H

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/* Linker-provided symbols marking the start and end of the .cpuidle.text
 * section (the functions tagged __cpuidle above). */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/*
 * Mark the current task as polling on its flags word.
 *
 * While TIF_POLLING_NRFLAG is set, a remote CPU can request a reschedule
 * by merely setting TIF_NEED_RESCHED, without sending an IPI (see the
 * comment in current_clr_polling()).  This bare helper provides no memory
 * ordering; callers needing the set-then-test ordering must use
 * current_set_polling_and_test() instead.
 */
static inline void __current_set_polling(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);
}

/*
 * Set TIF_POLLING_NRFLAG, then check whether a reschedule is already
 * pending.
 *
 * Returns true if TIF_NEED_RESCHED is set, meaning the caller must not
 * enter an idle/polling wait and should reschedule instead.  The barrier
 * between the set and the test is what makes the idle-entry race-free:
 * without it, the test could be reordered before the flag set and a
 * concurrent resched_curr() could be missed.
 */
static inline bool current_set_polling_and_test(void)
{
    __current_set_polling();

    /*
     * Polling state must be visible before we test NEED_RESCHED,
     * paired by resched_curr()
     */
    smp_mb__after_atomic();

    return unlikely(tif_need_resched());
}

/*
 * Clear the current task's TIF_POLLING_NRFLAG.
 *
 * After this, remote CPUs fall back to IPIs to deliver TIF_NEED_RESCHED.
 * This bare helper provides no memory ordering and does not fold a
 * pending reschedule; use current_clr_polling() or
 * current_clr_polling_and_test() when ordering matters.
 */
static inline void __current_clr_polling(void)
{
    clear_thread_flag(TIF_POLLING_NRFLAG);
}

/*
 * Clear TIF_POLLING_NRFLAG, then check whether a reschedule is pending.
 *
 * Returns true if TIF_NEED_RESCHED is set.  The barrier orders the clear
 * before the test so that a resched request raced against the clear is
 * not lost: either the remote CPU still saw us polling (and we observe
 * NEED_RESCHED here), or it didn't (and it sends an IPI).
 */
static inline bool current_clr_polling_and_test(void)
{
    __current_clr_polling();

    /*
     * Polling state must be visible before we test NEED_RESCHED,
     * paired by resched_curr()
     */
    smp_mb__after_atomic();

    return unlikely(tif_need_resched());
}

/*
 * Unconditionally leave polling mode and absorb any reschedule request
 * that raced with the transition.
 *
 * Unlike current_clr_polling_and_test(), this does not report a pending
 * reschedule to the caller; it folds it into the preempt state via
 * preempt_fold_need_resched() so the next preemption point honors it.
 */
static inline void current_clr_polling(void)
{
    __current_clr_polling();

    /*
     * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
     * Once the bit is cleared, we'll get IPIs with every new
     * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
     * fold.
     */
    smp_mb(); /* paired with resched_curr() */

    preempt_fold_need_resched();
}

#endif /* !SEMINIX_SCHED_IDLE_H */
