#pragma once

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bits.h>

/*
 * simplify initialization of mask field: an all-ones mask covering the low
 * @bits bits of the counter, e.g. CLOCKSOURCE_MASK(32) == 0xffffffff.
 */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)

/*
 * Flag bits for struct clocksource::flags.
 * NOTE(review): semantics inferred from the names (mirroring Linux) — the
 * consumers are not visible in this header; confirm against the timekeeping
 * core before relying on these descriptions.
 */
/* counter presumably runs continuously (never stops while the CPU idles) */
#define CLOCK_SOURCE_IS_CONTINUOUS 0x01

/* clocksource is usable as a base for high-resolution timers */
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20

struct clocksource
{
    const char *name;
    unsigned long flags;
    uint64_t mask;

    struct list_head list;

    u64 max_cycles;
    u64 max_idle_ns;
    u64 max_raw_delta;
    u32 rating;
    u32 mult;
    u32 shift;
    u32 maxadj;

    uint64_t (*read)(struct clocksource *cs);
    int (*enable)(struct clocksource *cs);
    void (*disable)(struct clocksource *cs);
};

/*
 * Register @cs with the timekeeping core, deriving mult/shift from a counter
 * frequency of @hz. NOTE(review): return semantics (0 on success / negative
 * errno assumed) defined by the implementation elsewhere — confirm there.
 */
int clocksource_register_hz(struct clocksource *cs, unsigned int hz);

/* Returns the fallback clocksource used before/without any registration. */
extern struct clocksource *clocksource_default_clock(void);

/*
 * Compute conversion factors *mult/*shift so that values in units of @from
 * (e.g. hz) scale to units of @to (e.g. ns per second) via (v * mult) >> shift,
 * remaining overflow-free for deltas spanning at least @minsec seconds.
 */
extern void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

/*
 * Compute the maximum nanosecond value convertible without 64-bit overflow
 * for the given @mult/@shift/@maxadj/@mask; also stores the corresponding
 * maximum cycle count through @max_cyc.
 */
extern u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc);

/**
 * clocksource_cyc2ns - convert a clocksource cycle delta to nanoseconds
 * @cycles:	elapsed cycles (a relative delta, not an absolute count)
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Returns (@cycles * @mult) >> @shift. The multiply is performed in 64 bits,
 * so absolute counter values can easily overflow; callers must only pass
 * relative (delta) cycle values, bounded by struct clocksource::max_cycles.
 */
static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
    u64 scaled = cycles * mult;

    return scaled >> shift;
}
