/*
 * hrtimer_update_lowres() - pad relative expiry values on low-res systems
 *
 * With CONFIG_TIME_LOW_RES the system cannot deliver fine-grained time
 * values, so a relative timer is lengthened by hrtimer_resolution (one
 * jiffy) to prevent it from firing early.  The timer's is_rel flag is
 * recorded as a side effect.  Without CONFIG_TIME_LOW_RES the expiry
 * value is returned unchanged and the timer is not touched.
 */
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
                                            const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
    bool relative = mode & HRTIMER_MODE_REL;

    timer->is_rel = relative;
    if (relative)
        tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
    return tim;
}

/*
 * __hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:    the hrtimer to (re)start
 * @tim:      expiry time; relative to base->get_time() when
 *            HRTIMER_MODE_REL is set in @mode, absolute otherwise
 * @delta_ns: slack range added to the expiry time
 * @mode:     timer mode (relative/absolute, pinned, ...)
 * @base:     the clock base the timer is currently queued on
 *
 * Return: 1 if the timer was enqueued as the new first expiring timer on
 * the local, online CPU base (the caller is then expected to reprogram
 * the clock event device); 0 in all other cases — the remote-CPU kick or
 * the forced local reprogram is handled here.
 */
static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                                    u64 delta_ns, const enum hrtimer_mode mode,
                                    struct hrtimer_clock_base *base)
{
    struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
    struct hrtimer_clock_base *new_base;
    bool force_local, first;

    /*
     * If the timer is on the local cpu base and is the first expiring
     * timer then this might end up reprogramming the hardware twice
     * (on removal and on enqueue). To avoid that, prevent the
     * reprogram on removal, keep the timer local to the current CPU
     * and enforce reprogramming after it is queued no matter whether
     * it is the new first expiring timer again or not.
     */
    force_local = base->cpu_base == this_cpu_base;
    force_local &= base->cpu_base->next_timer == timer;

    /*
     * Don't force local queuing if this enqueue happens on an unplugged
     * CPU after hrtimer_cpu_dying() has been invoked.
     */
    force_local &= this_cpu_base->online;

    /*
     * Remove an active timer from the queue. In case it is not queued
     * on the current CPU, make sure that remove_hrtimer() updates the
     * remote data correctly.
     *
     * If it's on the current CPU and the first expiring timer, then
     * skip reprogramming, keep the timer local and enforce
     * reprogramming later if it was the first expiring timer.  This
     * avoids programming the underlying clock event twice (once at
     * removal and once after enqueue).
     */
    remove_hrtimer(timer, base, true, force_local);

    /* Convert a relative expiry value to absolute time on this base. */
    if (mode & HRTIMER_MODE_REL)
        tim = ktime_add_safe(tim, base->get_time());

    /* Pad relative timers by one jiffy on CONFIG_TIME_LOW_RES systems. */
    tim = hrtimer_update_lowres(timer, tim, mode);

    hrtimer_set_expires_range_ns(timer, tim, delta_ns);

    /* Switch the timer base, if necessary: */
    if (!force_local)
    {
        /* May migrate to another CPU, honoring HRTIMER_MODE_PINNED. */
        new_base = switch_hrtimer_base(timer, base,
                                       mode & HRTIMER_MODE_PINNED);
    }
    else
    {
        /* Forced local: stay on the current base, reprogram below. */
        new_base = base;
    }

    first = enqueue_hrtimer(timer, new_base, mode);
    if (!force_local)
    {
        /*
         * If the current CPU base is online, then the timer is
         * never queued on a remote CPU if it would be the first
         * expiring timer there.
         */
        if (hrtimer_base_is_online(this_cpu_base))
            return first;

        /*
         * Timer was enqueued remote because the current base is
         * already offline. If the timer is the first to expire,
         * kick the remote CPU to reprogram the clock event.
         */
        if (first)
        {
            struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base;

            smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd);
        }
        return 0;
    }

    /*
     * Timer was forced to stay on the current CPU to avoid
     * reprogramming on removal and enqueue. Force reprogram the
     * hardware by evaluating the new first expiring timer.
     */
    hrtimer_force_reprogram(new_base->cpu_base, 1);
    return 0;
}
