#ifndef KERNEL_SCHED_PELT_H
#define KERNEL_SCHED_PELT_H

int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
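/*
 * Note: each of the above returns non-zero only when the PELT averages were
 * actually updated, i.e. when at least one full 1024us accumulation period
 * has elapsed since the last update, so callers can skip follow-up work
 * when nothing was decayed.
 */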

/*
 * When a task is dequeued, its estimated utilization should not be updated
 * if its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB of the utilization value saved at
 * dequeue time (i.e. util_est.enqueued).
 */
#define UTIL_AVG_UNCHANGED 0x1
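/*
 * For example, the dequeue path can tag the snapshot with the flag before
 * saving it (a sketch, assuming a task_util() helper that returns the
 * task's current utilization):
 *
 *     util_est.enqueued = task_util(p) | UTIL_AVG_UNCHANGED;
 *
 * cfs_se_util_change() below clears the flag once util_avg is updated, so
 * the next dequeue knows util_avg has changed and util_est is worth
 * refreshing.
 */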

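/*
 * Called after a sched_entity's util_avg has been updated: clearing
 * UTIL_AVG_UNCHANGED tells the next dequeue that util_avg has changed
 * since the last util_est snapshot.
 */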
static inline void cfs_se_util_change(struct sched_avg *avg)
{
    unsigned int enqueued;

    if (!sched_feat(UTIL_EST))
        return;

    /* Avoid store if the flag has already been cleared */
    enqueued = avg->util_est.enqueued;
    if (!(enqueued & UTIL_AVG_UNCHANGED))
        return;

    /* Clear the flag to report that util_avg has been updated */
    enqueued &= ~UTIL_AVG_UNCHANGED;
    WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

#endif /* KERNEL_SCHED_PELT_H */
