/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: EulerOS fastsched Header
 * Create: 2024-06-15
 */
#ifndef KERNEL_SCHED_EULEROS_FASTSCHED_H
#define KERNEL_SCHED_EULEROS_FASTSCHED_H

#include <linux/euleros_fastsched.h>

#ifdef CONFIG_EULEROS_FAST_SCHED
#define FAST_SCHED_MAX_NR	1

#define FAST_SCHED_ST_NONE	0
#define FAST_SCHED_ST_WAIT	1
#define FAST_SCHED_ST_WAKEUP	2

/*
 * Per-CPU fast-sched context (see DECLARE_PER_CPU below), guarded by @lock.
 */
struct fast_sched_ctx {
	int nr;				/* number of fast-sched tasks tracked on this CPU
					 * (presumably bounded by FAST_SCHED_MAX_NR -- confirm in .c) */
	struct task_struct *curr;	/* NOTE(review): presumably the fast-sched task currently
					 * associated with this CPU -- confirm against users */
	spinlock_t lock;		/* protects @nr and @curr */
};

extern const struct file_operations fast_sched_info_fops;

DECLARE_PER_CPU(struct fast_sched_ctx, fast_sched_ctx);

DECLARE_STATIC_KEY_FALSE(fast_sched_feature_key);
static inline bool fast_sched_feature(void)
{
	return static_branch_unlikely(&fast_sched_feature_key) || sched_feat(FAST_SCHED);
}

/* Record on @p whether fast-sched is enabled (@enable non-zero) or disabled. */
static inline void fast_sched_set_enable(struct task_struct *p, int enable)
{
	p->fast_enable = enable ? FAST_SCHED_ENABLE : FAST_SCHED_DISABLE;
}

static inline bool fast_sched_is_waiting_or_wakeup(struct task_struct *p)
{
	return READ_ONCE(p->fast_state);
}

/* True when @p is in the fast-sched wait state (acquire load, not atomic op). */
static inline bool fast_sched_is_waiting(struct task_struct *p)
{
	int state = smp_load_acquire(&p->fast_state);

	return state == FAST_SCHED_ST_WAIT;
}

/* True when @p is in the fast-sched wakeup state (acquire load, not atomic op). */
static inline bool fast_sched_is_wakeup(struct task_struct *p)
{
	int state = smp_load_acquire(&p->fast_state);

	return state == FAST_SCHED_ST_WAKEUP;
}

/*
 * Mark @p as woken for fast-sched.
 *
 * NOTE(review): plain store while the readers above use smp_load_acquire();
 * presumably the caller provides the needed ordering (e.g. a barrier or
 * fast_sched_ctx.lock) -- confirm, otherwise smp_store_release() may be
 * required here.
 */
static inline void fast_sched_set_wakeup(struct task_struct *p)
{
	p->fast_state = FAST_SCHED_ST_WAKEUP;
}

static inline bool fast_sched_idle(struct task_struct *p)
{
	return is_idle_task(p) && !fast_sched_is_wakeup(p);
}

extern int fast_sched_enable(struct task_struct *p, int enable);
extern void fast_sched_prepare_waiting(void);
extern void fast_sched_finish_waiting(void);
extern int fast_sched_wakeup_current(struct task_struct *p);
extern int fast_sched_wakeup(struct task_struct *p, int wake_flags);
extern void schedule_fast_idle(struct task_struct *p);

extern void fast_idle_tick_enter(void);
extern void fast_idle_tick_exit(void);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void vtime_fastidle_enter(struct task_struct *tsk);
extern void vtime_fastidle_exit(struct task_struct *tsk);
#else
/* Without generic vtime accounting the fast-idle vtime hooks are no-ops. */
static inline void vtime_fastidle_enter(struct task_struct *tsk) {}
static inline void vtime_fastidle_exit(struct task_struct *tsk) {}
#endif

extern int fast_sched_ctx_exist(void);
extern void fast_sched_ctx_adjust_locked(struct task_struct *p, int enqueue,
	struct rq *rq, int flags);

extern void fast_sched_reload_vruntime(struct sched_entity *se, struct cfs_rq *cfs_rq);
extern void fast_sched_update_min_vruntime(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se);
extern void fast_sched_pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se);
#else
/*
 * CONFIG_EULEROS_FAST_SCHED=n: stub implementations so callers can use the
 * fast-sched API unconditionally.  All predicates report "inactive", wakeup
 * helpers report failure (-1), and all other hooks are no-ops.
 *
 * Fix vs. previous version: dropped the stray semicolons after the function
 * bodies -- empty file-scope declarations are not valid strict C and were
 * applied inconsistently -- and made the int-returning ctx_exist stub
 * return 0 rather than bool false.
 */
static inline bool fast_sched_feature(void) { return false; }
static inline bool fast_sched_is_waiting_or_wakeup(struct task_struct *p) { return false; }
static inline void schedule_fast_idle(struct task_struct *p) {}
static inline int fast_sched_wakeup_current(struct task_struct *p) { return -1; }
static inline int fast_sched_wakeup(struct task_struct *p, int wake_flags) { return -1; }
static inline bool fast_sched_idle(struct task_struct *p) { return false; }
static inline void fast_idle_tick_enter(void) {}
static inline void fast_idle_tick_exit(void) {}
static inline int fast_sched_ctx_exist(void) { return 0; }
static inline void fast_sched_ctx_adjust_locked(struct task_struct *p, int enqueue,
	struct rq *rq, int flags) {}
static inline bool fast_sched_is_waiting(struct task_struct *p) { return false; }
static inline bool fast_sched_is_wakeup(struct task_struct *p) { return false; }
static inline void trace_fast_sched_ctx_adjust(struct task_struct *cur_tsk,
	bool enqueue, int cur_cpu, int flags) {}
static inline void trace_fast_sched_pick_next_entity(struct task_struct *next,
	struct task_struct *second, struct task_struct *curr) {}
static inline void trace_fast_sched_wakeup(struct task_struct *p, int result, bool curr) {}
static inline void trace_fast_sched_wait(struct task_struct *p, bool start) {}
static inline void trace_fast_sched_idle(bool need_sched, unsigned int nr_running) {}
static inline void fast_sched_finish_waiting(void) {}
static inline void fast_sched_prepare_waiting(void) {}
static inline void fast_sched_update_min_vruntime(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se) {}
static inline void fast_sched_pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se) {}
#endif

#endif
