#include "../inc/sched.h"

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
/*
 * Weight for each of the 40 nice levels (-20 .. +19), indexed by
 * (static_prio - MAX_RT_PRIO). Nice 0 maps to the canonical weight
 * 1024; each step up/down scales by ~1.25 (see the derivation above).
 */
const int sched_prio_to_weight[40] = {
    /* -20 */ 88761, 71755, 56483, 46273, 36291,
    /* -15 */ 29154, 23254, 18705, 14949, 11916,
    /* -10 */ 9548,  7620,  6100,  4904,  3906,
    /*  -5 */ 3121,  2501,  1991,  1586,  1277,
    /*   0 */ 1024,  820,   655,   526,   423,
    /*   5 */ 335,   272,   215,   172,   137,
    /*  10 */ 110,   87,    70,    56,    45,
    /*  15 */ 36,    29,    23,    18,    15,
};

/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
 *
 * In cases where the weight does not change often, we can use the
 * pre-calculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
/*
 * Pre-computed 2^32 / sched_prio_to_weight[i] for each nice level,
 * same indexing as sched_prio_to_weight[]. Lets hot paths replace a
 * division by the weight with a multiply-and-shift.
 */
const u32 sched_prio_to_wmult[40] = {
    /* -20 */ 48388,     59856,     76040,     92818,     118348,
    /* -15 */ 147320,    184698,    229616,    287308,    360437,
    /* -10 */ 449829,    563644,    704093,    875809,    1099582,
    /*  -5 */ 1376151,   1717300,   2157191,   2708050,   3363326,
    /*   0 */ 4194304,   5237765,   6557202,   8165337,   10153587,
    /*   5 */ 12820798,  15790321,  19976592,  24970740,  31350126,
    /*  10 */ 39045157,  49367440,  61356676,  76695844,  95443717,
    /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};

/*
 * Recompute p's load weight from its static priority.
 *
 * SCHED_IDLE tasks get the fixed minimal weight; everyone else is
 * looked up in the nice-level tables. When @update_load is set and the
 * task's scheduling class provides a reweight hook, the new weight is
 * applied through it (so per-class load accounting stays consistent);
 * otherwise the weight is stored into the entity directly.
 */
void set_load_weight(struct task_struct *p, bool update_load)
{
    int idx = p->static_prio - MAX_RT_PRIO;
    struct load_weight lw;

    if (!task_has_idle_policy(p))
    {
        lw.weight = scale_load(sched_prio_to_weight[idx]);
        lw.inv_weight = sched_prio_to_wmult[idx];
    }
    else
    {
        lw.weight = scale_load(WEIGHT_IDLEPRIO);
        lw.inv_weight = WMULT_IDLEPRIO;
    }

    /* No reweight requested (or no hook): plain store into the entity. */
    if (!update_load || !p->sched_class->reweight_task)
    {
        p->se.load = lw;
        return;
    }

    /* SCHED_OTHER tasks must update their load when changing weight. */
    p->sched_class->reweight_task(task_rq(p), p, &lw);
}
