// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various time related
 *  system calls: time, stime, gettimeofday, settimeofday, adjtime
 *
 * Modification history:
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	 Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */
#include <errno.h>
#include <seminix/uaccess.h>
#include <seminix/time.h>
#include <seminix/jiffies.h>

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
/**
 * jiffies_to_msecs - convert jiffies to milliseconds
 * @j: jiffies value to convert
 *
 * The conversion strategy is selected at compile time from the HZ /
 * MSEC_PER_SEC relationship so the common configurations need no
 * division at runtime.  Return value is truncated to unsigned int.
 */
unsigned int jiffies_to_msecs(const u64 j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
    /* HZ divides 1000 evenly: a single multiply, exact result. */
    return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
    /* 1000 divides HZ evenly: divide, rounding up to the next msec. */
    return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
    /* Generic HZ: precomputed multiply-shift (32-bit) or round-up divide. */
# if BITS_PER_LONG == 32
    return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
           HZ_TO_MSEC_SHR32;
# else
    return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}

/**
 * jiffies_to_usecs - convert jiffies to microseconds
 * @j: jiffies value to convert
 *
 * Like jiffies_to_msecs() the conversion path is picked at compile
 * time; the common case (USEC_PER_SEC divisible by HZ) is a single
 * multiply.  Return value is truncated to unsigned int.
 */
unsigned int jiffies_to_usecs(const u64 j)
{
    /*
     * HZ usually doesn't go much beyond MSEC_PER_SEC (i.e. 1000).
     * jiffies_to_usecs() and usecs_to_jiffies() depend on that; this
     * build-time check enforces the weaker bound HZ <= USEC_PER_SEC.
     */
    BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
    /* HZ divides 10^6 evenly: exact single multiply. */
    return (USEC_PER_SEC / HZ) * j;
#else
    /* Generic HZ: precomputed multiply-shift or multiply-divide. */
# if BITS_PER_LONG == 32
    return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
    return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}

/**
 * jiffies64_to_nsecs - convert a 64-bit jiffies value to nanoseconds
 * @j: jiffies value to convert
 *
 * When NSEC_PER_SEC is a multiple of HZ (the common case) this is a
 * single exact multiply; otherwise fall back to a multiply-divide via
 * div_u64().
 */
u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
    return (NSEC_PER_SEC / HZ) * j;
#else
    return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}

/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:	time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/seminix/jiffies.h
 */
u64 __msecs_to_jiffies(const unsigned int m)
{
    const int signed_m = (int)m;

    /* A value that is negative when viewed as signed requests an
     * infinite timeout. */
    if (signed_m < 0)
        return MAX_JIFFY_OFFSET;

    return _msecs_to_jiffies(m);
}

u64 __usecs_to_jiffies(const unsigned int u)
{
    const unsigned int max_usecs = jiffies_to_usecs(MAX_JIFFY_OFFSET);

    /* Values too large to represent clamp to the infinite timeout. */
    return (u > max_usecs) ? MAX_JIFFY_OFFSET : _usecs_to_jiffies(u);
}

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
/*
 * Core sec+nsec -> jiffies conversion using the scaled fixed-point
 * constants (see the comment above for why rounding is done by adding
 * TICK_NSEC - 1 up front rather than in the scaled domain).
 */
static u64
__timespec64_to_jiffies(u64 sec, s64 nsec)
{
    /* Round up to the next tick boundary before scaling. */
    nsec = nsec + TICK_NSEC - 1;

    /* Clamp overlarge second values; nsec is dropped so the clamped
     * result cannot round past the maximum. */
    if (sec >= MAX_SEC_IN_JIFFIES){
        sec = MAX_SEC_IN_JIFFIES;
        nsec = 0;
    }
    /* Combine the scaled second and nanosecond contributions, then
     * shift the scale factor back off. */
    return ((sec * SEC_CONVERSION) +
        (((u64)nsec * NSEC_CONVERSION) >>
         (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}

u64
timespec64_to_jiffies(const struct timespec64 *value)
{
    return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}

/*
 * timespec and timespec64 conversions share the same implementation;
 * this thin wrapper exists for the timeval path below.
 */
static u64
__timespec_to_jiffies(u64 sec, s64 nsec)
{
    return __timespec64_to_jiffies(sec, nsec);
}

/*
 * jiffies_to_timespec64 - convert a jiffies count to a timespec64.
 *
 * Convert jiffies to nanoseconds and split into seconds and
 * nanoseconds with a single divide.
 */
void
jiffies_to_timespec64(const u64 jiffies, struct timespec64 *value)
{
    /*
     * The remainder must be a u64: the old code declared a u32 and
     * cast its address to (u64 *), so div_u64_rem()'s 64-bit store
     * wrote past the variable and corrupted adjacent stack (and was
     * undefined behavior via strict aliasing).
     */
    u64 rem;

    value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                    NSEC_PER_SEC, &rem);
    /* rem < NSEC_PER_SEC, so it fits tv_nsec. */
    value->tv_nsec = rem;
}

/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
u64
timeval_to_jiffies(const struct timeval *value)
{
    return __timespec_to_jiffies(value->tv_sec,
                     value->tv_usec * NSEC_PER_USEC);
}

void jiffies_to_timeval(const u64 jiffies, struct timeval *value)
{
    /*
     * Convert jiffies to nanoseconds and separate with
     * one divide.
     */
    u32 rem;

    value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                    NSEC_PER_SEC, (u64 *)&rem);
    value->tv_usec = rem / NSEC_PER_USEC;
}

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
    /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
    return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
    /* overflow after 292 years if HZ = 1024 */
    return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
    /*
     * Generic case - optimized for cases where HZ is a multiple of 3.
     * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
     * The + HZ / 2 term rounds the divisor to the nearest integer.
     */
    return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies(u64 n)
{
    /*
     * nsecs_to_jiffies64() already returns u64, so the (u64) cast the
     * original carried here was redundant and has been dropped.
     */
    return nsecs_to_jiffies64(n);
}

/*
 * get_timespec64 - fetch a __kernel_timespec from userspace into a
 * kernel timespec64.  Returns 0 on success or -EFAULT on a bad copy.
 */
int get_timespec64(struct timespec64 *ts,
           const struct __kernel_timespec __user *uts)
{
    struct __kernel_timespec kts;

    /* Pull the whole user structure in one copy. */
    if (copy_from_user(&kts, uts, sizeof(kts)))
        return -EFAULT;

    ts->tv_sec = kts.tv_sec;
    ts->tv_nsec = kts.tv_nsec;

    return 0;
}

/*
 * put_timespec64 - copy a kernel timespec64 out to userspace as a
 * __kernel_timespec.  Returns 0 on success or -EFAULT on a bad copy.
 */
int put_timespec64(const struct timespec64 *ts,
           struct __kernel_timespec __user *uts)
{
    /* Designated initializer zero-fills any struct padding so no
     * kernel stack contents leak to userspace. */
    struct __kernel_timespec kts = {
        .tv_sec = ts->tv_sec,
        .tv_nsec = ts->tv_nsec
    };

    if (copy_to_user(uts, &kts, sizeof(kts)))
        return -EFAULT;

    return 0;
}

/*
 * get_itimerspec64 - fetch a __kernel_itimerspec from userspace.
 * Returns 0 on success or the first get_timespec64() error.
 */
int get_itimerspec64(struct itimerspec64 *it,
            const struct __kernel_itimerspec __user *uit)
{
    int err;

    /* Fetch both timespec members, stopping at the first fault. */
    err = get_timespec64(&it->it_interval, &uit->it_interval);
    if (err)
        return err;

    return get_timespec64(&it->it_value, &uit->it_value);
}

/*
 * put_itimerspec64 - copy a kernel itimerspec64 out to userspace.
 * Returns 0 on success or the first put_timespec64() error.
 */
int put_itimerspec64(const struct itimerspec64 *it,
            struct __kernel_itimerspec __user *uit)
{
    int err;

    /* Write both timespec members, stopping at the first fault. */
    err = put_timespec64(&it->it_interval, &uit->it_interval);
    if (err)
        return err;

    return put_timespec64(&it->it_value, &uit->it_value);
}
