// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024.
 * Description: EulerOS fastsched feature
 * Create: 2024-06-15
 */
#include <linux/sched.h>
#include "sched.h"
#include "../time/tick-sched.h"
#include "../../fs/proc/internal.h"
#include "euleros_fastsched.h"

/*
 * Per-cpu tick state used to account fast-idle sleep/iowait time
 * separately from the regular tick_sched bookkeeping.
 */
DEFINE_PER_CPU(struct tick_sched, tick_cpu_fastsched);

/* Trace codes reported by fast_sched_wakeup() */
#define FAST_SCHED_TASK_WOKEN	0 /* The task had been woken */
#define FAST_SCHED_IPI_WAKEUP	1 /* The task wake up by ipi */
#define FAST_SCHED_PREEMPT_WAKEUP	2 /* The task wake up by preempt */
#define FAST_SCHED_TASK_DEQUEUE	3 /* The task dequeue(unlikely) */

/*
 * Per-cpu fast-sched context: tracks which task (if any) currently owns
 * the fast-sched slot on each CPU. Protected by ctx->lock.
 */
DEFINE_PER_CPU(struct fast_sched_ctx, fast_sched_ctx) = {
	.lock = __SPIN_LOCK_UNLOCKED(fast_sched_ctx.lock)
};

/* Global feature switch, flipped on via the "fast_sched=1" boot parameter. */
DEFINE_STATIC_KEY_FALSE(fast_sched_feature_key);
/*
 * Parse the "fast_sched=" boot parameter and enable the feature key when
 * the value is exactly 1. Any unparsable value leaves the feature off.
 */
static int __init set_fast_sched(char *str)
{
	int val = 0;

	if (!str || kstrtoint(str, 0, &val) < 0)
		return 0;

	if (val == 1)
		static_branch_enable(&fast_sched_feature_key);

	return 1;
}
__setup("fast_sched=", set_fast_sched);

/*
 * Claim (@enable != 0) or release (@enable == 0) the per-cpu fast-sched
 * slot for @p on the single CPU @p is affined to.
 *
 * Returns 0 on success, -1 when @p is not eligible (RT/DL policy or more
 * than one allowed CPU) or when the slot is already taken / not owned by
 * @p. Callers hold @p's rq lock (see fast_sched_enable()).
 */
static inline int fast_sched_ctx_update(struct task_struct *p, int enable)
{
	int ret = 0;
	unsigned int bind_cpu;
	unsigned long flags;
	struct fast_sched_ctx *ctx;

	/* Only non-RT/DL tasks pinned to exactly one CPU may enable fast sched. */
	if (enable && (task_has_dl_policy(p)
		|| task_has_rt_policy(p)
		|| (p->nr_cpus_allowed != 1))) {
		printk_deferred("%s: pid %d fail\n", __func__, p->pid);
		return -1;
	}

	bind_cpu = cpumask_any(p->cpus_ptr);
	if (bind_cpu >= nr_cpu_ids) {
		/* %u: bind_cpu is unsigned (previously printed with %d) */
		printk_deferred("%s: pid %d fail:cpu bind id err:%u\n", __func__, p->pid, bind_cpu);
		return -1;
	}

	ctx = &per_cpu(fast_sched_ctx, bind_cpu);
	spin_lock_irqsave(&ctx->lock, flags);
	if (enable) {
		if (ctx->curr == p) /* already the owner: idempotent success */
			goto out;
		if (ctx->curr || ctx->nr >= FAST_SCHED_MAX_NR) {
			ret = -1;
			printk_deferred("%s: pid %d en fail, nr %d cpu:%u\n", __func__,
				p->pid, ctx->nr, bind_cpu);
			goto out;
		}
		ctx->curr = p;
		ctx->nr++;
	} else {
		if (ctx->curr != p) { /* releasing a slot we do not own */
			printk_deferred("%s: pid %d de fail, nr %d cpu:%u\n", __func__,
				p->pid, ctx->nr, bind_cpu);
			ret = -1;
			goto out;
		}
		ctx->curr = NULL;
		ctx->nr--;
	}
out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}

/*
 * Keep the per-cpu fast-sched slot in sync on enqueue/dequeue of @p.
 * When the slot update fails, fast sched is switched off for the task.
 * Called with @rq's lock held (hence "_locked").
 */
void fast_sched_ctx_adjust_locked(struct task_struct *p, int enqueue, struct rq *rq, int flags)
{
	if (!fast_sched_feature() || !fast_sched_is_enable(p))
		return;

	trace_fast_sched_ctx_adjust(p, enqueue, rq->cpu, flags);
	if (fast_sched_ctx_update(p, enqueue) < 0)
		fast_sched_set_enable(p, 0);
}

/*
 * Enable or disable fast sched for @p under its rq lock.
 * Returns 0 on success, -EPERM when the feature is globally off, or the
 * negative result of fast_sched_ctx_update() on failure.
 */
int fast_sched_enable(struct task_struct *p, int enable)
{
	struct rq_flags rf;
	struct rq *rq;
	int err;

	if (!fast_sched_feature())
		return -EPERM;

	rq = task_rq_lock(p, &rf);

	err = fast_sched_ctx_update(p, enable);
	if (err >= 0)
		fast_sched_set_enable(p, enable);

	task_rq_unlock(rq, p, &rf);

	return err;
}
EXPORT_SYMBOL(fast_sched_enable);

void fast_sched_prepare_waiting(void)
{
	struct task_struct *p = current;

	if (!READ_ONCE(p->fast_enable) || (p->nr_cpus_allowed != 1)) {
		if (READ_ONCE(p->fast_state))
			p->fast_state = FAST_SCHED_ST_WAIT;
		return;
	}

	p->fast_state = FAST_SCHED_ST_WAIT;
	trace_fast_sched_wait(p, true);
}

void fast_sched_finish_waiting(void)
{
	struct task_struct *p = current;

	if (!READ_ONCE(p->fast_state))
		return;

	p->fast_state = FAST_SCHED_ST_NONE;
	trace_fast_sched_wait(p, false);
}

static int fast_sched_wakeup_if_waiting(struct task_struct *p)
{
	int ret;
	typeof(p->fast_sched) val;

	val = READ_ONCE(p->fast_state);
	if (val == FAST_SCHED_ST_NONE) {
		ret = -1;
		return ret;
	}

	if (val == FAST_SCHED_ST_WAKEUP) {
		ret = 0;
		goto out;
	}

	/* use smp store instead of atomic */
	smp_store_release(&p->fast_state, FAST_SCHED_ST_WAKEUP);
	ret = 1;
out:
	return ret;
}

/*
 * Try to wake @p through the fast-sched path.
 *
 * Return value follows fast_sched_wakeup_if_waiting(): <0 when @p is not
 * in a fast-sched wait/wakeup state (caller falls back to the normal
 * wakeup path), 0 when it was already marked woken, >0 when this call did
 * the wait -> wakeup transition and nudged the target CPU.
 */
int fast_sched_wakeup(struct task_struct *p, int wake_flags)
{
	int success, trace_code;
	struct rq_flags rf;
	struct rq *rq;

	success = fast_sched_wakeup_if_waiting(p);
	if (success < 0)
		return success;

	if (success > 0) {
		/* We did the transition: kick the CPU the task sits on. */
		rq = __task_rq_lock(p, &rf);
		if (task_on_rq_queued(p)) {
			if (p->on_cpu) { /* fast sched task is in lowpower, just send ipi */
				trace_code = FAST_SCHED_IPI_WAKEUP;
				smp_send_reschedule(task_cpu(p));
			} else {
				trace_code = FAST_SCHED_PREEMPT_WAKEUP;
				/* check_preempt_curr() may use rq clock */
				update_rq_clock(rq);
				/* fast sched task is scheduled, check_preempt */
				check_preempt_curr(rq, p, wake_flags);
			}
		} else {
			/* queued flag dropped concurrently; nothing to kick */
			trace_code = FAST_SCHED_TASK_DEQUEUE;
		}
		__task_rq_unlock(rq, &rf);

		/*
		 * NOTE(review): p->state is written after dropping the rq
		 * lock; presumably safe because the waiter spins on
		 * fast_state rather than sleeping — confirm.
		 */
		p->state = TASK_RUNNING;
	} else {
		trace_code = FAST_SCHED_TASK_WOKEN;
	}

	trace_fast_sched_wakeup(p, trace_code, false);
	return success;
}

/*
 * Wake @p from the current CPU's own context: only the wakeup mark is
 * needed, no IPI or preemption check.
 * Returns 0 on success, -1 when @p is not in a wait/wakeup state.
 */
int fast_sched_wakeup_current(struct task_struct *p)
{
	if (!fast_sched_is_waiting_or_wakeup(p))
		return -1;

	fast_sched_set_wakeup(p);
	/* trace code -1 marks the current-context wakeup path */
	trace_fast_sched_wakeup(p, -1, true);
	return 0;
}

#define __node_2_se(node) \
	rb_entry((node), struct sched_entity, run_node)

/*
 * Return the entity ordered immediately after @se in the cfs_rq rbtree,
 * or NULL when @se is the last one.
 */
static inline struct sched_entity *__fast_sched_next_entity(struct sched_entity *se)
{
	struct rb_node *node = rb_next(&se->run_node);

	return node ? __node_2_se(node) : NULL;
}

/*
 * Map a scheduling entity back to its owning task. Only valid for
 * entities that represent tasks (callers check entity_is_task() first),
 * never for group SEs.
 */
static inline struct task_struct *fast_sched_task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

/*
 * When the entity about to drive min_vruntime belongs to an idle
 * fast-sched task, substitute the next entity in the tree (or @curr) so
 * the fast-sched waiter does not hold min_vruntime back.
 */
void fast_sched_update_min_vruntime(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se)
{
	struct sched_entity *cand = *se;
	struct sched_entity *repl;

	if (!fast_sched_feature())
		return;
	if (!entity_is_task(cand) || !fast_sched_idle(fast_sched_task_of(cand)))
		return;

	/* skip the fast sched task */
	repl = __fast_sched_next_entity(cand);
	if (!repl)
		repl = curr;
	if (repl)
		*se = repl;
}

/*
 * Hook for pick_next_entity(): adjust the CFS pick when the candidate
 * entity belongs to an idle(-like) fast-sched task.
 *
 * - Still waiting and fast sched enabled: skip it and pick the next
 *   entity in the tree (or @curr / the tree's first entity) instead.
 * - Enable flag cleared (e.g. after a migration; flags are cleared on
 *   enqueue) or already woken: reload its vruntime so it competes
 *   normally again.
 *
 * *@se is updated in place when a substitute entity is chosen.
 */
void fast_sched_pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	struct sched_entity **se)
{
	struct sched_entity *pse = *se;

	if (fast_sched_feature() && pse && entity_is_task(pse) &&
		is_idle_task(fast_sched_task_of(pse))) {
		struct sched_entity *second = NULL;

		if (fast_sched_is_waiting(fast_sched_task_of(pse))) {
			/*
			 * the task could be migrated from another cpu
			 * (flags is clear in enqueue), reload the vruntime
			 */
			if (unlikely(!fast_sched_is_enable(fast_sched_task_of(pse)))) {
				fast_sched_reload_vruntime(pse, cfs_rq);
			} else {
				if (pse == curr) {
					second = __pick_first_entity(cfs_rq);
				} else {
					second = __fast_sched_next_entity(pse);
					if (!second)
						second = curr;
				}
				if (second)
					*se = second;
			}
		} else if (fast_sched_is_wakeup(fast_sched_task_of(pse))) {
			fast_sched_reload_vruntime(pse, cfs_rq);
		}
		trace_fast_sched_pick_next_entity(pse ? fast_sched_task_of(pse) : NULL, second ?
			fast_sched_task_of(second) : NULL, curr ? fast_sched_task_of(curr) : NULL);
	}
}

/*
 * Close the currently-open idle period on @ts at @now: account the
 * interval to iowait or idle sleep time, restart the period, and credit
 * it to @fast_idle->itime when @fast_idle is an idle task.
 * No-op when no idle period is active.
 */
static void fast_idle_update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now,
	struct task_struct *fast_idle)
{
	ktime_t slept;

	if (!ts->idle_active)
		return;

	slept = ktime_sub(now, ts->idle_entrytime);
	if (nr_iowait_cpu(cpu) > 0)
		ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, slept);
	else
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, slept);
	ts->idle_entrytime = now;

	if (is_idle_task(fast_idle))
		fast_idle->itime += ktime_to_ns(slept);
}

/*
 * Open a fast-idle accounting window on this CPU: stamp the entry time
 * and mark the per-cpu tick_cpu_fastsched state idle/active. IRQs are
 * disabled around the update so an interrupt cannot observe a
 * half-initialised window. Also enters vtime fast-idle accounting when
 * vtime accounting is enabled on this CPU.
 */
void fast_idle_tick_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_fastsched);

	local_irq_disable();
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	ts->inidle = 1;
	local_irq_enable();

	if (vtime_accounting_enabled_cpu(smp_processor_id()))
		vtime_fastidle_enter(current);
}

/*
 * Close the fast-idle accounting window opened by fast_idle_tick_enter():
 * fold the elapsed idle time into the per-cpu counters, clear the
 * idle/active flags (under disabled IRQs), and leave vtime fast-idle
 * accounting when it is enabled on this CPU.
 */
void fast_idle_tick_exit(void)
{
	ktime_t now;
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_fastsched);

	local_irq_disable();
	now = ktime_get();
	ts->inidle = 0;
	fast_idle_update_ts_time_stats(smp_processor_id(), ts, now, current);
	ts->idle_active = 0;
	local_irq_enable();

	if (vtime_accounting_enabled_cpu(smp_processor_id()))
		vtime_fastidle_exit(current);
}

/*
 * Interrupt entry while fast-idle: flush the open idle period into the
 * per-cpu counters and suspend accounting for the duration of the IRQ.
 */
void fast_idle_irq_enter(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_fastsched);
	ktime_t now;

	if (!ts->idle_active)
		return;

	now = ktime_get();
	fast_idle_update_ts_time_stats(smp_processor_id(), ts, now, current);
	ts->idle_active = 0;
}

/*
 * Interrupt exit: when the idle task resumes idling (no reschedule
 * pending, not nested in another hardirq) and the fast-idle window is
 * still open, restart idle time accounting from now.
 */
void fast_idle_irq_exit(void)
{
	struct tick_sched *ts;

	if (!is_idle_task(current) || need_resched() || in_irq())
		return;

	ts = this_cpu_ptr(&tick_cpu_fastsched);
	if (ts->inidle) {
		ts->idle_entrytime = ktime_get();
		ts->idle_active = 1;
	}
}

/*
 * Fold any open idle period on @ts into its sleep/iowait counters and,
 * when requested, report @now (converted to microseconds) through
 * @last_update_time.
 */
static void
fast_sched_update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	if (ts->idle_active) {
		ktime_t slept = ktime_sub(now, ts->idle_entrytime);

		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, slept);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, slept);
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}

/*
 * Flush pending fast-idle time on @cpu into its counters, then add the
 * accumulated fast-idle sleep time to *@idle. @last_update_time, when
 * non-NULL, receives @now in microseconds.
 */
void fast_idle_account_update_idle(int cpu, u64 *last_update_time, ktime_t *idle, ktime_t now)
{
	struct tick_sched *tfs = &per_cpu(tick_cpu_fastsched, cpu);

	fast_sched_update_ts_time_stats(cpu, tfs, now, last_update_time);
	*idle += tfs->idle_sleeptime;
}

/*
 * Flush pending fast-idle time on @cpu into its counters, then add the
 * accumulated fast-iowait sleep time to *@iowait. @last_update_time,
 * when non-NULL, receives @now in microseconds.
 */
void fast_idle_account_update_iowait(int cpu, u64 *last_update_time, ktime_t *iowait, ktime_t now)
{
	struct tick_sched *tfs = &per_cpu(tick_cpu_fastsched, cpu);

	fast_sched_update_ts_time_stats(cpu, tfs, now, last_update_time);
	*iowait += tfs->iowait_sleeptime;
}


/*
 * Add @cpu's accumulated fast-idle sleep time to *@idle, including the
 * still-open idle period (up to @now) when the CPU is idling without
 * iowait tasks. Read-only: does not update the per-cpu counters.
 */
void fast_idle_account_add_idle(int cpu, ktime_t *idle, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_fastsched, cpu);
	ktime_t total = ts->idle_sleeptime;

	if (ts->idle_active && !nr_iowait_cpu(cpu))
		total = ktime_add(total, ktime_sub(now, ts->idle_entrytime));

	*idle += total;
}

/*
 * Add @cpu's accumulated fast-iowait sleep time to *@iowait, including
 * the still-open idle period (up to @now) when the CPU is idling with
 * iowait tasks pending. Read-only: does not update the per-cpu counters.
 */
void fast_idle_account_add_iowait(int cpu, ktime_t *iowait, ktime_t now)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_fastsched, cpu);
	ktime_t total = ts->iowait_sleeptime;

	if (ts->idle_active && nr_iowait_cpu(cpu) > 0)
		total = ktime_add(total, ktime_sub(now, ts->idle_entrytime));

	*iowait += total;
}

int fast_sched_ctx_exist(void)
{
	int i, ret = 0;
	unsigned long flags;
	struct fast_sched_ctx *ctx;

	for (i = 0; i < nr_cpu_ids; i++) {
		ctx = &per_cpu(fast_sched_ctx, i);
		if (ctx->curr) {
			spin_lock_irqsave(&ctx->lock, flags);
			if (ctx->curr) {
				ret = 1;
				goto unlock;
			}
			spin_unlock_irqrestore(&ctx->lock, flags);
		}
	}
	return ret;
unlock:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}

#define FAST_SCHED_INFO_SIZE 128
static int fast_sched_info_show(struct seq_file *m, void *v)
{
	int i, size = 0;
	char *inf;
	unsigned long flags;
	struct fast_sched_ctx *ctx;
	size_t bsize = nr_cpu_ids * FAST_SCHED_INFO_SIZE;

	inf = kzalloc(bsize, GFP_KERNEL);
	if (!inf)
		return -ENOMEM;

	for (i = 0; i < nr_cpu_ids; i++) {
		ctx = &per_cpu(fast_sched_ctx, i);
		if (ctx->curr) {
			spin_lock_irqsave(&ctx->lock, flags);
			if (ctx->curr)
				size += snprintf(inf + size, bsize - size - 1,
				"ctx[%d]: nr:%d cur:%s pid:%d allow:%d cpu:%d\n", i, ctx->nr,
				ctx->curr->comm, ctx->curr->pid, ctx->curr->nr_cpus_allowed,
				task_cpu(ctx->curr));
			spin_unlock_irqrestore(&ctx->lock, flags);
		}
	}

	seq_printf(m, "%s", inf);
	kfree(inf);
	return 0;
}

/* seq_file open hook for the global fast-sched info file. */
static int fast_sched_info_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, fast_sched_info_show, NULL);
}

/* Read-only proc file exposing the per-cpu fast-sched context table. */
const struct file_operations fast_sched_info_fops = {
	.open		= fast_sched_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Show a task's fast-sched enable flag (0 or 1) for its per-task proc
 * file. Returns -ESRCH when the task is gone.
 */
static int fast_sched_show(struct seq_file *m, void *v)
{
	struct task_struct *task = get_proc_task(m->private);

	if (!task)
		return -ESRCH;

	seq_printf(m, "%d\n", fast_sched_is_enable(task));
	put_task_struct(task);
	return 0;
}

/*
 * Per-task proc write handler: accepts "0" or "1" and toggles fast sched
 * for the target task. Requires arch support and CAP_SYS_ADMIN.
 * Returns @count on success or a negative error.
 */
static ssize_t fast_sched_write(struct file *file, const char __user *buf,
			size_t count, loff_t *offset)
{
	struct task_struct *task;
	unsigned int val;
	int ret;

	if (!arch_fast_sched_support() || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret)
		return ret;

	if (val > 1)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;

	ret = fast_sched_enable(task, val);

	printk_deferred("Setting pid %d fast sched (%s) %s.\n",
		task->pid,
		val ? "on" : "off",
		ret ? "fail" : "success");

	put_task_struct(task);
	return ret ? ret : count;
}

/* seq_file open hook for the per-task fast-sched proc file. */
static int fast_sched_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, fast_sched_show, inode);
}

/* Per-task proc file: read shows the enable flag, write toggles it. */
const struct file_operations proc_fast_sched_operations = {
	.open		= fast_sched_open,
	.write		= fast_sched_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
