#pragma once

#include <asm/preempt.h>
#include <linux/preempt_offset.h>

/*
 * Defined by the scheduler; called when dropping the last preemption-
 * disable level reveals a pending reschedule (see preempt_enable()).
 */
extern void preempt_schedule(void);

/*
 * Raise/lower the preempt count by one preemption-disable unit
 * (PREEMPT_OFFSET).  The __preempt_count_*() primitives come from
 * <asm/preempt.h>.
 */
#define preempt_count_inc() __preempt_count_add(PREEMPT_OFFSET)
#define preempt_count_dec() __preempt_count_sub(PREEMPT_OFFSET)

/*
 * Adjust the preempt count by an arbitrary amount rather than the single
 * PREEMPT_OFFSET step.  NOTE(review): presumably used with the hardirq/
 * softirq offsets implied by the masks below — confirm against callers.
 */
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT (2 * PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Drop one preemption-disable level WITHOUT checking whether a reschedule
 * is now due.  barrier() keeps the compiler from sinking accesses made in
 * the protected section below the count decrement.  Intended for scheduler
 * code (hence the sched_ prefix); ordinary code wants preempt_enable().
 */
#define sched_preempt_enable_no_resched() \
    do                                    \
    {                                     \
        barrier();                        \
        preempt_count_dec();              \
    } while (0)

/*
 * Enter a preemption-disabled section.  The count is raised first; the
 * trailing barrier() prevents the compiler from hoisting the protected
 * accesses above the increment.
 */
#define preempt_disable()    \
    do                       \
    {                        \
        preempt_count_inc(); \
        barrier();           \
    } while (0)

/*
 * Leave a preemption-disabled section.  The leading barrier() orders the
 * protected accesses before the count update; if the decrement-and-test
 * fires, we call into the scheduler.
 *
 * NOTE(review): assumes __preempt_count_dec_and_test() (from
 * <asm/preempt.h>) returns true only when the count hits zero AND a
 * reschedule is pending, as in mainline — confirm.  Also note this calls
 * preempt_schedule() while preempt_check_resched() below calls
 * __preempt_schedule(); verify both entry points exist.
 */
#define preempt_enable()                    \
    do                                      \
    {                                       \
        barrier();                          \
        if (__preempt_count_dec_and_test()) \
            preempt_schedule();             \
    } while (0)

/* Alias: drop one disable level without a reschedule check. */
#define preempt_enable_no_resched() \
    sched_preempt_enable_no_resched()

/* Hardirq nesting bits of preempt_count(). */
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
/* Any interrupt-context bits: NMI, hardirq, or softirq. */
#define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))

/* Nonzero while handling a hardware interrupt. */
#define in_hardirq() hardirq_count()
/* Nonzero while in any interrupt context (NMI/hardirq/softirq). */
#define in_interrupt() (irq_count())
/*
 * True only when the WHOLE preempt_count() is zero — i.e. this definition
 * is also false while preemption is merely disabled, not just in irq
 * context.  NOTE(review): simpler than mainline's in_task(); confirm this
 * is intentional for this kernel.
 */
#define in_task() (!preempt_count())

/* Preemption can actually occur: count is zero and irqs are enabled. */
#define preemptible() (preempt_count() == 0 && !irqs_disabled())

/*
 * Reschedule now if one is due at preempt level 0 (fully enabled).
 * NOTE(review): calls __preempt_schedule() whereas preempt_enable()
 * calls preempt_schedule() — confirm both are provided and the
 * distinction is intentional.
 */
#define preempt_check_resched()   \
    do                            \
    {                             \
        if (should_resched(0))    \
            __preempt_schedule(); \
    } while (0)

/*
 * Seed a newly forked task's preempt count with FORK_PREEMPT_COUNT so it
 * satisfies the context-switch invariant documented above when it is first
 * switched to (see finish_task_switch()).
 */
#define init_task_preempt_count(p) \
    __ti_preempt_count_set(task_thread_info(p), FORK_PREEMPT_COUNT)
