#pragma once

/*
 * PELT view of the rq clock.
 *
 * clock_pelt advances while tasks run and is re-synced to clock_task when
 * the rq goes idle (see update_rq_clock_pelt() below); lost_idle_time is
 * subtracted so idle time that was not properly accounted does not inflate
 * the PELT averages.
 */
static inline u64 rq_clock_pelt(struct rq *rq)
{
    u64 clock = rq->clock_pelt;

    return clock - rq->lost_idle_time;
}

/* The rq is idle, we can sync to clock_task */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
    //TODO
}

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then sync back to
 * clock_task when rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
    /* Idle rq: nothing to scale, just re-sync clock_pelt to clock_task. */
    if (unlikely(is_idle_task(rq->curr)))
    {
        _update_idle_rq_clock_pelt(rq);
        return;
    }

    /*
     * A rq running below max compute capacity needs more wall time to do
     * the same amount of work, so shrink the delta accordingly before
     * accumulating it: scale by the CPU (micro-architectural) capacity
     * and by the current frequency capacity.  This is what makes
     * clock_pelt advance at half rate at half capacity, as shown in the
     * diagram above — without it, clock_pelt would tick at wall-clock
     * rate regardless of capacity.
     */
    delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
    delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

    rq->clock_pelt += delta;
}

u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq);

void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq);

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

/*
 * Divider used to turn a PELT *_sum into a *_avg.
 *
 * 47742 is LOAD_AVG_MAX, the maximum attainable *_sum — but that value
 * assumes the current 1024us period is fully elapsed.  The still-running
 * period has only contributed period_contrib so far, so the correct
 * divider is (LOAD_AVG_MAX - 1024) + period_contrib (PELT_MIN_DIVIDER +
 * period_contrib in the upstream kernel).  Using LOAD_AVG_MAX itself, as
 * the previous TODO code did, over-sizes the divider by up to 1024 and
 * biases every computed average low.
 */
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
    return 47742 - 1024 + avg->period_contrib;
}
