/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <irq.h>
#include <sched_clock.h>
#include <printf.h>
#include <div64.h>
/*
 * Mask of the low 'bits' bits of a counter value.  Evaluated in 64-bit
 * arithmetic so any bits value up to 63 is shift-safe; bits >= 64 yields
 * an all-ones mask.  NOTE(review): the result is cast to unsigned long,
 * which truncates masks wider than 32 bits on an ILP32 target — confirm
 * unsigned long is 64-bit on this platform.
 */
#define CLOCKSOURCE_MASK(bits) (unsigned long)((bits) < 64 ? ((1ULL << (bits)) - 1) : -1)

/*
 * State for extending a hardware counter to a 64-bit nanosecond clock.
 * Written by update_sched_clock() (with IRQs disabled) and
 * sched_clock_register(); read locklessly by sched_clock().
 */
struct clock_data {
    uint64_t epoch_ns;               /* ns value at the last epoch update */
    unsigned long epoch_cyc;         /* counter value at the last epoch update */
    unsigned long epoch_cyc_copy;    /* duplicate of epoch_cyc; lets readers detect a torn update */
    unsigned long wrap;              /* update period in cycles (mask - mask/8), passed to clock_config() */
    unsigned long rate;              /* counter frequency in Hz */
    unsigned long sched_clock_mask;  /* mask covering the counter's valid bits */
    uint32_t mult;                   /* cyc->ns multiplier: ns = (cyc * mult) >> shift */
    uint32_t shift;                  /* cyc->ns shift paired with mult */
};

/* Single global clock state — this module supports one sched_clock source. */
static struct clock_data cd;
/* Hardware counter ops (read_cyc/clock_config/clock_start); set by sched_clock_register(). */
static struct sched_clock_event *sched_clock_event;
/*
 * Compute the best (mult, shift) pair for converting values at rate
 * 'from' to rate 'to' via: to_val = (from_val * mult) >> shift.
 *
 * @mult:   output multiplier
 * @shift:  output shift
 * @from:   source frequency (e.g. counter rate in Hz)
 * @to:     target frequency (e.g. NSEC_PER_SEC)
 * @maxsec: conversion range in seconds that must not overflow; the
 *          chosen pair keeps (maxsec * from) * mult within 64 bits
 *
 * A bigger shift gives better precision but a smaller overflow-free
 * range, so the search starts at the most precise shift (32) and
 * decreases it until the multiplier fits the allowed bit budget.
 */
void clocks_calc_mult_shift(uint32_t *mult, uint32_t *shift, uint32_t from, uint32_t to, uint32_t maxsec)
{
    uint64_t tmp;
    uint32_t sft, sftacc = 32;

    /*
     * Calculate the shift factor which is limiting the conversion
     * range: sftacc ends up as the number of bits the multiplier may
     * occupy without maxsec's worth of input cycles overflowing 64 bits.
     */
    tmp = ((uint64_t)maxsec * from) >> 32;

    while (tmp) {
        tmp >>= 1;
        sftacc--;
    }

    /*
     * Find the conversion shift/mult pair which has the best
     * accuracy and fits the maxsec conversion range:
     */
    for (sft = 32; sft > 0; sft--) {
        tmp = (uint64_t) to << sft;
        tmp += from / 2;        /* round to nearest on the division below */
        do_div(tmp, from);      /* tmp = ((to << sft) + from/2) / from */

        if ((tmp >> sftacc) == 0)
            break;              /* multiplier fits the overflow budget */
    }

    *mult = tmp;
    *shift = sft;
}
/*
 * Convert a cycle count to nanoseconds using the fixed-point pair
 * computed by clocks_calc_mult_shift(): ns = (cyc * mult) >> shift.
 */
static inline uint64_t cyc_to_ns(unsigned long cyc, uint32_t mult, uint32_t shift)
{
    uint64_t scaled = (uint64_t)cyc * (uint64_t)mult;

    return scaled >> shift;
}

/*
 * Return nanoseconds elapsed since the first epoch update, extending
 * the hardware counter to a full 64-bit value.
 *
 * Lockless reader: epoch_cyc and epoch_ns are re-sampled until
 * epoch_cyc matches epoch_cyc_copy, i.e. no concurrent
 * update_sched_clock() tore the pair mid-read.
 *
 * NOTE(review): there are no volatile qualifiers or compiler barriers
 * here, so the retry loop relies on the compiler re-reading the cd
 * fields each iteration — confirm this is safe with the target
 * toolchain/optimization level.
 */
uint64_t sched_clock(void)
{
    uint64_t epoch_ns;
    unsigned long epoch_cyc;
    unsigned long cyc;

    do {
        epoch_cyc = cd.epoch_cyc;
        epoch_ns = cd.epoch_ns;
    } while (epoch_cyc != cd.epoch_cyc_copy);

    cyc = sched_clock_event->read_cyc();
    /* Cycles elapsed since the epoch, masked to the counter width. */
    cyc = (cyc - epoch_cyc) & cd.sched_clock_mask;
    return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 *
 * Folds the cycles elapsed since the last epoch into epoch_ns and
 * re-bases epoch_cyc on the current counter value.  Must run at least
 * once per cd.wrap cycles so the masked delta in sched_clock() never
 * wraps more than once between updates.
 *
 * epoch_cyc_copy is written before epoch_ns/epoch_cyc, inside an
 * IRQ-disabled section; sched_clock()'s retry loop spins until
 * epoch_cyc == epoch_cyc_copy, so a reader interrupted mid-update
 * re-samples rather than mixing old and new values.
 */
void update_sched_clock(void)
{
    unsigned long flags;
    unsigned long cyc;
    uint64_t ns;

    cyc = sched_clock_event->read_cyc();
    ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & cd.sched_clock_mask, cd.mult, cd.shift);
    //vprintk("update_sched_clock ns:%lld\n", ns);
    flags = irq_save_flags();
    cd.epoch_cyc_copy = cyc;
    cd.epoch_ns = ns;
    cd.epoch_cyc = cyc;
    irq_restore_flags(flags);
}
/*
 * Register a hardware counter as the sched_clock source.
 *
 * @event_ops: hardware ops (read_cyc/clock_config/clock_start)
 * @bits:      width of the hardware counter in bits
 * @rate:      counter frequency in Hz
 *
 * Computes the mult/shift pair used to convert cycles to nanoseconds
 * and resets the epoch state.  Must be called before
 * sched_clock_init() / sched_clock().
 */
void sched_clock_register(struct sched_clock_event *event_ops, int bits,
                          unsigned long rate)
{
    unsigned long wrap, new_mask;
    uint32_t new_mult = 0, new_shift = 0;

    /* Best cyc->ns mult/shift pair valid over a 3600 s range. */
    clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
    new_mask = CLOCKSOURCE_MASK(bits);
    /* Schedule epoch refreshes before 7/8 of the counter range elapses. */
    wrap = new_mask - (new_mask >> 3);

    /*
     * Format specifiers fixed: rate, new_mask and wrap are unsigned
     * long, so the old %d/%x conversions were undefined behavior on
     * LP64 targets and printed garbage.
     */
    vprintk("new_mult:%u, new_shift:%u, rate:%lu, new_mask:%lx, wrap:%lx\n",
            new_mult, new_shift, rate, new_mask, wrap);
    sched_clock_event = event_ops;
    cd.sched_clock_mask = new_mask;
    cd.epoch_ns = 0;
    cd.epoch_cyc = 0;
    cd.epoch_cyc_copy = 0;
    cd.wrap = wrap;
    cd.rate = rate;
    cd.mult = new_mult;
    cd.shift = new_shift;
}
/*
 * Start the registered clock source and take the first epoch snapshot.
 *
 * Configures the hardware with the wrap threshold computed by
 * sched_clock_register() (which must have been called first, otherwise
 * sched_clock_event is NULL), starts the counter, then initializes the
 * epoch via update_sched_clock().
 *
 * NOTE(review): the meaning of clock_start(false) is not visible from
 * this file — confirm the bool's semantics against the driver.
 */
void sched_clock_init(void)
{
    sched_clock_event->clock_config(cd.wrap);
    sched_clock_event->clock_start(false);
    update_sched_clock();
}
