#pragma once

#include <linux/thread_info.h>
#include <linux/preempt_offset.h>

#define PREEMPT_ENABLED (0)

/*
 * No-op on this architecture: the need-resched condition is tracked via
 * the TIF_NEED_RESCHED thread flag (tested explicitly in
 * __preempt_count_dec_and_test() below), not folded into the preempt
 * count word as a PREEMPT_NEED_RESCHED bit.
 */
static inline void set_preempt_need_resched(void)
{
}

/*
 * Add @val (may be negative) to the preempt-count field of @ti selected
 * by @offset.
 *
 * Each nesting class (preemption, softirq, hardirq) is kept in its own
 * field of the split preempt count, so @offset must be exactly one of
 * PREEMPT_OFFSET, SOFTIRQ_OFFSET or HARDIRQ_OFFSET.
 */
static inline void __ti_preempt_count_add(struct thread_info *ti, unsigned int offset, int val)
{
    switch (offset)
    {
    case PREEMPT_OFFSET:
        ti->preempt.preempt += val;
        break;
    case SOFTIRQ_OFFSET:
        ti->preempt.softirq += val;
        break;
    case HARDIRQ_OFFSET:
        ti->preempt.hardirq += val;
        break;
    default:
        /*
         * Combined or unknown offsets are deliberately ignored;
         * callers must pass a single class offset.
         */
        break;
    }
}

/*
 * Adjust the preemption-disable nesting depth of the current task by
 * @val. The original macros ignored @val and hard-coded +/-1, silently
 * mis-counting any caller that passes val != 1.
 */
#define __preempt_count_add(val) __ti_preempt_count_add(current_thread_info(), PREEMPT_OFFSET, (val))
#define __preempt_count_sub(val) __ti_preempt_count_add(current_thread_info(), PREEMPT_OFFSET, -(val))

/*
 * Return the combined preempt count word for @ti.
 *
 * NOTE(review): val[0] presumably aliases the packed
 * preempt/softirq/hardirq fields as a single word — confirm the union
 * layout in the thread_info definition.
 */
static inline unsigned int __ti_preempt_count(struct thread_info *ti)
{
    unsigned int count = ti->preempt.val[0];

    return count;
}

#define preempt_count() __ti_preempt_count(current_thread_info())

/* Overwrite the combined preempt count word of @ti with @pc. */
static inline void __ti_preempt_count_set(struct thread_info *ti, int pc)
{
    ti->preempt.val[0] = (unsigned int)pc;
}

/*
 * Set the combined preempt count of the current task to @pc.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement; a bare { } block breaks compilation when used as an
 * unbraced if/else body ("if (x) preempt_count_set(0); else ...").
 */
#define preempt_count_set(pc)                              \
    do {                                                   \
        __ti_preempt_count_set(current_thread_info(), pc); \
    } while (0)

/*
 * Decrement the preemption count of the current task and report whether
 * a reschedule is due: true when the count reaches zero AND
 * TIF_NEED_RESCHED is set, i.e. the caller should enter the scheduler.
 *
 * Must be 'static inline': this lives in a #pragma once header, and a
 * plain 'static' definition would be duplicated in — and warn as unused
 * in — every translation unit that includes it.
 */
static inline bool __preempt_count_dec_and_test(void)
{
    struct thread_info *ti = current_thread_info();

    /*
     * Load-store architectures cannot do per-cpu atomic operations, so
     * we cannot fold a PREEMPT_NEED_RESCHED bit into the count: it
     * might get lost across the non-atomic update below. Test the TIF
     * flag explicitly instead.
     */
    __ti_preempt_count_add(ti, PREEMPT_OFFSET, -1);

    return !__ti_preempt_count(ti) && test_ti_thread_flag(ti, TIF_NEED_RESCHED);
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static inline bool should_resched(int preempt_offset)
{
    bool at_expected_depth = preempt_count() == preempt_offset;

    return unlikely(at_expected_depth && tif_need_resched());
}
