#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/hrtimer_defs.h>
#include <uapi/linux/time.h>
#include <linux/softirq.h>
#include <trace/events/timer.h>

#include "inc/tick.h"
#include "inc/timekeeping.h"
#include "inc/timer.h"

/*
 * Iterate over all clock bases of @cpu_base that have a bit set in
 * @active. __next_base() presumably consumes bits from @active as it
 * walks, terminating the loop when none remain — confirm against
 * _hrtimer_/bases.c.
 */
#define for_each_active_base(base, cpu_base, active) \
    while ((base = __next_base((cpu_base), &(active))))

/*
 * Masks for selecting the soft and hard context timers from
 * cpu_base->active
 */
#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * Lockdep context tracking is not wired up in this build: entering a
 * timer context reports "not entered" and exiting is a no-op that only
 * consumes its argument to avoid unused-variable warnings.
 */
#define lockdep_hrtimer_enter(__hrtimer) false
#define lockdep_hrtimer_exit(__context) \
    do                                  \
    {                                   \
        (void)(__context);              \
    } while (0)

/*
 * Timer resolution in nanoseconds. Defaults to the (rounded) low-res
 * tick period; hrtimer_switch_to_hres() raises it to HIGH_RES_NSEC.
 */
unsigned int hrtimer_resolution = ((NSEC_PER_SEC + HZ / 2) / HZ);

#ifdef CONFIG_HIGH_RES_TIMERS
/* High resolution timer support is compiled in and enabled by default. */
static bool hrtimer_hres_enabled = true;

/* Is high resolution mode allowed on this build? */
static inline bool hrtimer_is_hres_enabled(void)
{
    return hrtimer_hres_enabled;
}
#else
/* Without CONFIG_HIGH_RES_TIMERS high-res mode can never be enabled. */
static inline bool hrtimer_is_hres_enabled(void)
{
    return false;
}
#endif

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
    /* Without CONFIG_HIGH_RES_TIMERS high-res mode never activates. */
    if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS))
        return 0;

    return cpu_base->hres_active;
}

/* Fetch the timer's soft expiry time as a scalar nanosecond value. */
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
    return timer->_softexpires;
}

/*
 * Is @base's CPU online, i.e. allowed to have timers queued on it?
 * Without CPU hotplug support a base can never go offline.
 */
static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base)
{
    if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
        return true;

    /* Bases are online in the common case; offline only during hotplug. */
    return likely(base->online);
}

#include "_hrtimer_/bases.c"
#include "_hrtimer_/debug.c"
#include "_hrtimer_/clock_base.c"
#include "_hrtimer_/cpu_base.c"
#include "_hrtimer_/switch_base.c"
#include "_hrtimer_/enqueue.c"
#include "_hrtimer_/next_event.c"
#include "_hrtimer_/reprogram.c"
#include "_hrtimer_/remove.c"
#include "_hrtimer_/start_range_ns.c"
#include "_hrtimer_/run_queues.c"
#include "_hrtimer_/setup.c"
#include "_hrtimer_/run_softirq.c"
#include "_hrtimer_/retrigger.c"
#include "_hrtimer_/interrupt.c"

/*
 * Switch the tick device to oneshot mode with hrtimer_interrupt() as
 * the event handler. Returns 0 on success, non-zero on failure.
 */
static inline int tick_init_highres(void)
{
    return tick_switch_to_oneshot(hrtimer_interrupt);
}

/*
 * Switch the current CPU to high resolution mode.
 */
static void hrtimer_switch_to_hres(void)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);

    /* Move the tick device to oneshot mode first; bail out on failure. */
    if (tick_init_highres())
    {
        pr_warn("Could not switch to high-res mode on CPU %u\n",
                cpu_base->cpu);
        return;
    }

    cpu_base->hres_active = 1;
    hrtimer_resolution = HIGH_RES_NSEC;

    /* Rearm the sched tick and reprogram from the new resolution. */
    tick_setup_sched_timer(true);
    retrigger_next_event(NULL);
}

bool hrtimer_callback_running(struct hrtimer *timer)
{
    return timer->base->running == timer;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                            u64 delta_ns, const enum hrtimer_mode mode)
{
    unsigned long irqflags;
    struct hrtimer_clock_base *base;

    /*
     * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
     * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
     * expiry mode because unmarked timers are moved to softirq expiry.
     */
    WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);

    base = lock_hrtimer_base(timer, &irqflags);

    /* Only reprogram the hardware when the timer was actually enqueued. */
    if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
        hrtimer_reprogram(timer, true);

    unlock_hrtimer_base(timer, &irqflags);
}

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
    struct hrtimer_clock_base *base;
    unsigned int seq;

    /*
     * Lockless read loop: sample the base pointer and its seqcount,
     * read state/running, then retry if either the seqcount moved or
     * the timer migrated to a different base mid-read. Statement order
     * here is load-bearing — do not reorder.
     */
    do
    {
        base = READ_ONCE(timer->base);
        seq = raw_read_seqcount_begin(&base->seq);

        /* Enqueued or mid-callback both count as "active". */
        if (timer->state != HRTIMER_STATE_INACTIVE ||
            base->running == timer)
            return true;

    } while (read_seqcount_retry(&base->seq, seq) ||
             base != READ_ONCE(timer->base));

    return false;
}

/**
 * hrtimer_setup - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @function:	the callback function
 * @clock_id:	the clock to be used
 * @mode:       The modes which are relevant for initialization:
 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *              HRTIMER_MODE_REL_SOFT
 *
 *              The PINNED variants of the above can be handed in,
 *              but the PINNED bit is ignored as pinning happens
 *              when the hrtimer is started
 */
void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
                   clockid_t clock_id, enum hrtimer_mode mode)
{
    /* All real work is done by the internal helper in _hrtimer_/setup.c. */
    __hrtimer_setup(timer, function, clock_id, mode);
}

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
    struct hrtimer_clock_base *base;
    unsigned long flags;
    int ret;

    /*
     * Check lockless first. If the timer is not active (neither
     * enqueued nor running the callback, nothing to do here.  The
     * base lock does not serialize against a concurrent enqueue,
     * so we can avoid taking it.
     */
    if (!hrtimer_active(timer))
        return 0;

    base = lock_hrtimer_base(timer, &flags);

    /* A running callback cannot be cancelled; report -1 in that case. */
    if (hrtimer_callback_running(timer))
        ret = -1;
    else
        ret = remove_hrtimer(timer, base, false, false);

    unlock_hrtimer_base(timer, &flags);

    return ret;
}

/* Current time of the clock that @timer is based on. */
ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
    return timer->base->get_time();
}

/**
 * hrtimer_forward() - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 *
 * .. note::
 *  This only updates the timer expiry value and does not requeue the timer.
 *
 * There is also a variant of the function hrtimer_forward_now().
 *
 * Context: Can be safely called from the callback function of @timer. If called
 *          from other contexts @timer must neither be enqueued nor running the
 *          callback and the caller needs to take care of serialization.
 *
 * Return: The number of overruns are returned.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
    ktime_t elapsed = ktime_sub(now, hrtimer_get_expires(timer));
    u64 overruns = 1;

    /* Expiry already in the future: nothing to forward. */
    if (elapsed < 0)
        return 0;

    /* Forwarding an enqueued timer would corrupt the timerqueue. */
    if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
        return 0;

    /* Clamp the interval to the clock resolution. */
    if (interval < hrtimer_resolution)
        interval = hrtimer_resolution;

    if (unlikely(elapsed >= interval))
    {
        s64 incr = ktime_to_ns(interval);

        overruns = ktime_divns(elapsed, incr);
        hrtimer_add_expires_ns(timer, incr * overruns);
        if (hrtimer_get_expires_tv64(timer) > now)
            return overruns;
        /*
         * This (and the ktime_add() below) is the
         * correction for exact:
         */
        overruns++;
    }
    hrtimer_add_expires(timer, interval);

    return overruns;
}

/**
 * hrtimer_forward_now() - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * It is a variant of hrtimer_forward(). The timer will expire after the current
 * time of the hrtimer clock base. See hrtimer_forward() for details.
 */
u64 hrtimer_forward_now(struct hrtimer *timer, ktime_t interval)
{
    ktime_t now = timer->base->get_time();

    return hrtimer_forward(timer, now, interval);
}

/*
 * Called from the timer softirq/tick path to expire hrtimers in low
 * resolution mode. In high-res mode hrtimer_interrupt() does this work
 * instead, so bail out early.
 */
void hrtimer_run_queues(void)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
    unsigned long flags;
    ktime_t now;

    if (hrtimer_hres_active(cpu_base))
        return;

    /*
     * A oneshot-capable clock event device became available: switch to
     * high resolution mode; hrtimer_interrupt() takes over from here.
     */
    if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
    {
        hrtimer_switch_to_hres();
        return;
    }

    raw_spin_lock_irqsave(&cpu_base->lock, flags);

    now = hrtimer_update_base(cpu_base);
    /*
     * Soft timers that are due are expired from the HRTIMER_SOFTIRQ;
     * mark the softirq activated before raising it so the hard-irq path
     * does not reprogram for the soft expiry concurrently.
     */
    if (!ktime_before(now, cpu_base->softirq_expires_next))
    {
        cpu_base->softirq_expires_next = KTIME_MAX;
        cpu_base->softirq_activated = 1;
        raise_timer_softirq(HRTIMER_SOFTIRQ);
    }

    /* Expire the hard-context timers directly; drops/retakes the lock. */
    __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}

/* CPU hotplug callback: reset this CPU's hrtimer base as it comes up. */
int hrtimers_cpu_starting(unsigned int cpu)
{
    struct hrtimer_cpu_base *cb = this_cpu_ptr(&hrtimer_bases);

    /* Clear out any left over state from a CPU down operation */
    cb->active_bases = 0;
    cb->hres_active = 0;
    cb->hang_detected = 0;
    cb->expires_next = KTIME_MAX;
    cb->softirq_expires_next = KTIME_MAX;
    cb->next_timer = NULL;
    cb->softirq_next_timer = NULL;
    cb->online = 1;

    return 0;
}

/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
    struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
    int idx;

    /* Wire every clock base back to its cpu_base and empty its queue. */
    for (idx = 0; idx < HRTIMER_MAX_CLOCK_BASES; idx++)
    {
        struct hrtimer_clock_base *clk = &cpu_base->clock_base[idx];

        clk->cpu_base = cpu_base;
        seqcount_raw_spinlock_init(&clk->seq, &cpu_base->lock);
        timerqueue_init_head(&clk->active);
    }

    cpu_base->cpu = cpu;
    hrtimer_cpu_base_init_expiry_lock(cpu_base);

    return 0;
}

/*
 * Next expiry over all bases of this CPU, or KTIME_MAX when high-res
 * mode is active (the event device is programmed per-timer then).
 */
u64 hrtimer_get_next_event(void)
{
    struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
    unsigned long flags;
    u64 next;

    raw_spin_lock_irqsave(&cpu_base->lock, flags);

    if (hrtimer_hres_active(cpu_base))
        next = KTIME_MAX;
    else
        next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

    raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

    return next;
}

/*
 * Start @timer at its already-set expiry, using the gap between soft
 * and hard expiry as the slack range.
 */
void hrtimer_start_expires(struct hrtimer *timer, enum hrtimer_mode mode)
{
    ktime_t soft = hrtimer_get_softexpires(timer);
    ktime_t hard = hrtimer_get_expires(timer);
    u64 slack = ktime_to_ns(ktime_sub(hard, soft));

    hrtimer_start_range_ns(timer, soft, slack, mode);
}

/*
 * Intentionally empty: hrtimer_cancel() spins on hrtimer_try_to_cancel()
 * instead of blocking here. NOTE(review): upstream uses this to sleep on
 * the expiry lock under PREEMPT_RT — confirm that behavior is not needed
 * in this build.
 */
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{

}

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
    for (;;)
    {
        int ret = hrtimer_try_to_cancel(timer);

        if (ret >= 0)
            return ret;

        /* Callback in flight (-1): wait for it, then retry the cancel. */
        hrtimer_cancel_wait_running(timer);
    }
}

/* Boot-time init: set up the boot CPU's bases and the hrtimer softirq. */
void hrtimers_init(void)
{
    unsigned int cpu = smp_processor_id();

    hrtimers_prepare_cpu(cpu);
    hrtimers_cpu_starting(cpu);
    open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}

/*
 * Intentionally empty: only needed when DEBUG_OBJECTS tracks on-stack
 * timers, which this build does not do. Kept so callers stay portable.
 */
void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
}
