#pragma once

#include <uapi/linux/const.h>

/**
 * mul_u32_u32 - full-precision 32x32 -> 64 bit multiply
 * @a: first 32-bit factor
 * @b: second 32-bit factor
 *
 * Widen both operands before multiplying so the full 64-bit
 * product is produced (no truncation to 32 bits).
 *
 * Return: the 64-bit product of @a and @b
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
    u64 product = (u64)a * (u64)b;

    return product;
}

/**
 * mul_u64_u32_shr - compute (a * mul) >> shift without a 128-bit type
 * @a: 64-bit multiplicand
 * @mul: 32-bit multiplier
 * @shift: number of bits to shift the full product right
 *
 * Splits @a into 32-bit halves and shifts each partial product
 * separately, so the up-to-96-bit intermediate never needs to be
 * materialized.  The result is truncated to 64 bits.
 *
 * NOTE(review): assumes @shift <= 32 — a larger value makes the
 * (32 - shift) left shift below negative, which is undefined
 * behavior; confirm all callers honour this.
 */
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
    u32 ah = a >> 32, al = a;
    u64 ret;

    /* Low partial product; bits shifted out here are exactly the
     * fractional bits discarded by the full-width shift. */
    ret = mul_u32_u32(al, mul) >> shift;
    if (ah)
        /* High partial product sits 32 bits up, so it only needs a
         * left shift by the remainder.  The ah check merely skips a
         * multiply when the top half of @a is zero. */
        ret += mul_u32_u32(ah, mul) << (32 - shift);

    return ret;
}

/*
 * __round_mask - low-bit mask for a power-of-2 multiple @y, typed as @x.
 *
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.  (__typeof__(x) does not evaluate
 * @x, so @x still has no side effects here.)
 */
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))

/**
 * round_down - round down to next specified power of 2
 * @x: the value to round
 * @y: multiple to round down to (must be a power of 2)
 *
 * Rounds @x down to next multiple of @y (which must be a power of 2).
 * Clears the low log2(@y) bits of @x, so round_down(x, y) <= x and
 * a value already on a @y boundary is returned unchanged.
 * To perform arbitrary rounding down, use rounddown() below.
 */
#define round_down(x, y) ((x) & ~__round_mask(x, y))

/**
 * round_up - round up to next specified power of 2
 * @x: the value to round
 * @y: multiple to round up to (must be a power of 2)
 *
 * Rounds @x up to next multiple of @y (which must be a power of 2).
 * A value already on a @y boundary is returned unchanged; the
 * ((x) - 1) form relies on unsigned wrap-around to make x == 0
 * round to 0.
 * To perform arbitrary rounding up, use roundup() below.
 */
#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64-bit dividend
 * @divisor: signed 32-bit divisor (must be non-zero)
 *
 * Return: dividend / divisor, truncated toward zero
 */
static inline s64 div_s64(s64 dividend, s32 divisor)
{
    s64 quot = dividend / divisor;

    return quot;
}

/**
 * do_div - returns 2 values: calculate remainder and update new dividend
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * @base is copied into a local first, so it is evaluated exactly once.
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
#define do_div(n, base)                   \
    ({                                    \
        uint32_t __base = (base);         \
        uint32_t __rem;                   \
        __rem = ((uint64_t)(n)) % __base; \
        (n) = ((uint64_t)(n)) / __base;   \
        __rem;                            \
    })

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor (must be non-zero)
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.  An arch may pre-define div_u64 to supply its own version.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
    u64 quotient = dividend / divisor;

    return quotient;
}
#endif

/*
 * DIV_ROUND_UP - round-up integer division; the implementation comes
 * from <uapi/linux/const.h>.  NOTE(review): __KERNEL_DIV_ROUND_UP
 * presumably evaluates its arguments more than once — check the uapi
 * definition before passing expressions with side effects.
 */
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP

/*
 * DIV_ROUND_DOWN_ULL - divide unsigned long long @ll by @d, rounding
 * down.  Goes through do_div() so @ll and @d are each evaluated once
 * here; the statement expression yields the quotient.
 */
#define DIV_ROUND_DOWN_ULL(ll, d) \
    ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

/*
 * DIV_ROUND_UP_ULL - divide unsigned long long @ll by @d, rounding up.
 * Implemented as DIV_ROUND_DOWN_ULL(ll + d - 1, d).
 * NOTE: @d is evaluated twice — beware of side effects.
 */
#define DIV_ROUND_UP_ULL(ll, d) \
    DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))

/**
 * mul_u64_u32_add_u64_shr - compute (a * mul + b) >> shift
 * @a: 64-bit multiplicand
 * @mul: 32-bit multiplier
 * @b: 64-bit addend, applied before the shift
 * @shift: number of bits to shift the full result right
 *
 * Like mul_u64_u32_shr() but adds @b to the intermediate product
 * before shifting, tracking the carry out of the low 64-bit sum so
 * precision is not lost.  The result is truncated to 64 bits.
 *
 * NOTE(review): assumes @shift <= 32, matching mul_u64_u32_shr();
 * a larger shift makes (32 - shift) below negative (UB) — confirm
 * callers honour this.
 */
static inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
    u32 ah = a >> 32, al = a;
    bool ovf;
    u64 ret;

    /* Low partial product plus @b; ovf captures the carry out of
     * bit 63 that the 64-bit sum cannot hold. */
    ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
    ret >>= shift;
    /* Re-insert the lost carry at its post-shift position, bit
     * (64 - shift).  When shift == 0 that bit is above the result
     * width (and 1ULL << 64 would be UB), so it is skipped. */
    if (ovf && shift)
        ret += 1ULL << (64 - shift);
    if (ah)
        /* High partial product, shifted as in mul_u64_u32_shr(). */
        ret += mul_u32_u32(ah, mul) << (32 - shift);

    return ret;
}
