/* Copyright 2023 Hangzhou Yingyi Technology Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <uk/plat/config.h>
#include <uk/plat/lcpu.h>
#include <uk/plat/memory.h>
#include <uk/plat/time.h>
#include <uk/sched_impl.h>
#include <tn/schedprio.h>
#include <uk/essentials.h>
#include <tn/systick.h>
#include "schedprio.h"

/* Core scheduling decision: pick the best runnable thread and context-switch
 * to it when appropriate.
 *
 * @param c     Scheduler instance.
 * @param yield true when the current thread voluntarily yields (a thread of
 *              equal priority may take over); false for a reschedule check,
 *              where the current thread keeps the CPU unless a strictly
 *              better candidate exists (preemptive build only).
 *
 * IRQs are disabled from the save below until just before the actual switch
 * (or until the restore on the no-switch path).
 */
static inline void schedprio_schedule(struct schedprio *c, bool yield)
{
	struct uk_thread *prev, *next;
	unsigned long flags;
	bool runnable;
	int cmp_value;

	prev = uk_thread_current();
	flags = ukplat_lcpu_save_irqf();

	/* Best candidate on the run queue; nothing to schedule if empty */
	next = _runq_best(c);
	if (!next)
		goto out_without_switch;
	runnable = uk_thread_is_runnable(prev);
	if (!runnable) {
		/* Always switch when current thread is not runnable.
		 * idle thread will always be runnable, it's safe to do this.
		 */
		goto switch_thread;
	}

	/* Compare priorities of the running thread and the best candidate */
	cmp_value = schedprio_prio_cmp(prev, next);

	/* If we have a runnable thread with higher priority than the current
	 * one, we switch to it.
	 */
	if (!yield) {
/* NOTE(review): "PREEMTIVE" looks like a typo of "PREEMPTIVE" -- confirm the
 * exact symbol name against this library's Config.uk before relying on it.
 */
#ifdef CONFIG_LIBTNSCHEDPRIO_ENABLE_PREEMTIVE
		/* Preemptive: keep running unless `next` is strictly better */
		if (cmp_value >= 0)
			goto out_without_switch;
		else
			goto switch_thread;
#else
		/* Cooperative: never preempt; only yields cause a switch */
		goto out_without_switch;
#endif
	} else {
		/* Yield: hand over the CPU unless we strictly outrank `next` */
		if (cmp_value > 0)
			goto out_without_switch;
		else
			goto switch_thread;
	}

switch_thread:
	if (next != prev) {
		/*
		 * Schedule idle thread that will halt the CPU
		 * We select the idle thread only if we do not
		 * have anything else to execute
		 */
		_runq_remove(c, next);
		if (runnable)
			_runq_add(c, prev);

		/*
		 * Queueable is used to cover the case when during a
		 * context switch, the thread that is about to be
		 * evacuated is interrupted and woken up.
		 */
		uk_thread_set_queueable(prev);
		uk_thread_clear_queueable(next);

		ukplat_lcpu_restore_irqf(flags);

		/* Interrupting the switch is equivalent to having the next
		 * thread interrupted at the return instruction. And therefore
		 * at safe point.
		 */
		uk_sched_thread_switch(next);
		return;
	}

out_without_switch:
	ukplat_lcpu_restore_irqf(flags);
}

/* Voluntarily give up the CPU: an equal- or higher-priority runnable
 * thread (if any) takes over.
 */
static void schedprio_yield(struct uk_sched *s)
{
	schedprio_schedule(uksched2schedprio(s), true);
}

/* Non-voluntary scheduling check: switch away only if the current
 * thread is no longer runnable or (in preemptive builds) outranked.
 */
static void schedprio_reschedule(struct uk_sched *s)
{
	schedprio_schedule(uksched2schedprio(s), false);
}

static int schedprio_thread_add(struct uk_sched *s, struct uk_thread *t)
{
	struct schedprio *c = uksched2schedprio(s);

	UK_ASSERT(t);
	UK_ASSERT(!uk_thread_is_exited(t));

	if (t->prio < UK_THREADF_LOWEST_PRIO
	    || t->prio > UK_THREADF_HIGHEST_PRIO) {
		uk_pr_err("Thread %s has invalid priority %d\n", t->name,
			  t->prio);
		return -EINVAL;
	}

	/* Add to run queue in priority ascending order if runnable */
	if (uk_thread_is_runnable(t))
		_runq_add(c, t);

	return 0;
}

/* Unregister thread @t from this scheduler.
 *
 * Only queued threads are actually on the run queue: the currently
 * running thread is not, and neither are non-runnable ones.
 */
static void schedprio_thread_remove(struct uk_sched *s, struct uk_thread *t)
{
	struct schedprio *c = uksched2schedprio(s);

	if (uk_thread_is_runnable(t) && t != uk_thread_current())
		_runq_remove(c, t);
}

/* Callback: thread @t has just blocked; take it off the run queue.
 *
 * Must be called with IRQs disabled. The running thread is never on
 * the run queue, so there is nothing to remove for it.
 */
static void schedprio_thread_blocked(struct uk_sched *s, struct uk_thread *t)
{
	struct schedprio *c = uksched2schedprio(s);

	UK_ASSERT(ukplat_lcpu_irqs_disabled());

	if (uk_thread_current() != t)
		_runq_remove(c, t);
}

/* Body of the idle thread: garbage-collect exited threads and halt the
 * CPU until the next timer tick when there is truly nothing to run.
 *
 * @param argp Scheduler instance (`struct schedprio *`), passed at
 *             thread creation in `tn_schedprio_create()`.
 *
 * Never returns; the scheduler always has this thread to fall back on.
 */
static __noreturn void idle_thread_fn(void *argp)
{
	struct schedprio *c = (struct schedprio *)argp;
	unsigned long flags;

	UK_ASSERT(c);

	for (;;) {
		/* GC and the run-queue check must not race with wakeups */
		flags = ukplat_lcpu_save_irqf();
		/*
		 * FIXME: We assume that `uk_sched_thread_gc()` is non-blocking
		 *        because we implement a cooperative scheduler. However,
		 *        this assumption may not be true depending on the
		 *        destructor functions that are assigned to the threads
		 *        and are called by `uk_sched_thread_gc()`.
		 *	  Also check if in the meantime we got a runnable
		 *	  thread.
		 * NOTE:  This idle thread must be non-blocking so that the
		 *        scheduler has always something to schedule.
		 */
		if (uk_sched_thread_gc(&c->sched) > 0 || !_is_runq_empty(c)) {
			/* We collected successfully some garbage or there is
			 * a runnable thread in the queue.
			 * Check if something else can be scheduled now.
			 */
			ukplat_lcpu_restore_irqf(flags);
			schedprio_schedule(c, true);

			continue;
		}
		ukplat_lcpu_restore_irqf(flags);

		/* Read return time set by last schedule operation */
		/* Nothing runnable: halt until the next timer event */
		tn_systick_block_until(tn_systick_get_tick()
				       + tn_timer_next_tick());

		/* try to schedule a thread that might now be available */
		schedprio_schedule(c, true);
	}
}

/* Start callback: validate the caller's main thread and open the door
 * for interrupts. Always succeeds.
 *
 * @return 0.
 */
static int schedprio_start(struct uk_sched *s __maybe_unused,
			   struct uk_thread *main_thread __maybe_unused)
{
	/* Sanity: the caller must hand us its own, live, runnable thread */
	UK_ASSERT(main_thread);
	UK_ASSERT(uk_thread_current() == main_thread);
	UK_ASSERT(main_thread->sched == s);
	UK_ASSERT(!uk_thread_is_exited(main_thread));
	UK_ASSERT(uk_thread_is_runnable(main_thread));

	/* NOTE: We do not put `main_thread` into the thread list.
	 *       Current running threads will be added as soon as
	 *       a different thread is scheduled.
	 */

	ukplat_lcpu_enable_irq();

	return 0;
}

static const struct uk_thread *schedprio_idle_thread(struct uk_sched *s,
						     unsigned int proc_id)
{
	struct schedprio *c = uksched2schedprio(s);

	/* NOTE: We only support one processing LCPU (for now) */
	if (proc_id > 0)
		return NULL;

	return &(c->idle);
}

/* Change the scheduling priority of @thread.
 *
 * If @thread is waiting on the run queue, it is removed and re-inserted
 * so the queue stays sorted by the new priority. The currently running
 * thread is never on the run queue; its new priority takes effect at
 * the next scheduling decision.
 *
 * Fix: the original asserted `uk_thread_current() == thread` (and
 * runnability), which made the re-queue branch below unreachable dead
 * code -- those asserts look copy-pasted from `schedprio_start()` and
 * are dropped so priority changes on queued threads actually re-sort.
 *
 * NOTE(review): like the other run-queue operations, this presumably
 * must be called with IRQs disabled -- confirm against callers.
 */
void schedprio_set_priority(struct uk_sched *s,
			    struct uk_thread *thread,
			    int32_t priority)
{
	struct schedprio *c = uksched2schedprio(s);

	UK_ASSERT(thread);
	UK_ASSERT(thread->sched == s);
	UK_ASSERT(!uk_thread_is_exited(thread));
	UK_ASSERT(priority >= UK_THREADF_LOWEST_PRIO
		  && priority <= UK_THREADF_HIGHEST_PRIO);

	thread->prio = priority;

	/* Re-sort a queued thread under its new priority; the running
	 * thread and blocked threads are not on the run queue.
	 */
	if (thread != uk_thread_current() && uk_thread_is_runnable(thread)) {
		_runq_remove(c, thread);
		_runq_add(c, thread);
	}
}

/* Allocate and initialize a priority-scheduler instance.
 *
 * @param a Allocator used for the scheduler structure, the idle
 *          thread's stacks, and later scheduler allocations.
 *
 * @return Pointer to the embedded `struct uk_sched` on success,
 *         NULL on allocation or idle-thread initialization failure.
 *
 * Ownership: the returned scheduler (and its idle thread) is backed by
 * memory from @a; there is no matching destroy function visible here.
 */
struct uk_sched *tn_schedprio_create(struct uk_alloc *a)
{
	struct schedprio *c = NULL;
	int rc;

	uk_pr_info("Initializing priority scheduler\n");
	c = uk_zalloc(a, sizeof(struct schedprio));
	if (!c)
		goto err_out;

	_runq_init(c);

	/* Create idle thread */
	rc = uk_thread_init_fn1(&c->idle, idle_thread_fn, (void *)c, a,
				STACK_SIZE, a,
				0, /* Default auxiliary stack size */
				a, false, NULL, "idle", NULL, NULL);
	if (rc < 0)
		goto err_free_c;
	/* Idle runs at the dedicated lowest-effective priority so any
	 * other runnable thread is preferred over it.
	 */
	c->idle.prio = UK_THREADF_IDLE_PRIO;
	c->idle.sched = &c->sched;

	/* Wire up the scheduler operation table */
	uk_sched_init(&c->sched, schedprio_start, schedprio_yield,
		      schedprio_reschedule, schedprio_thread_add,
		      schedprio_thread_remove, schedprio_thread_blocked,
		      schedprio_thread_woken_isr, schedprio_thread_woken_isr,
		      schedprio_set_priority, schedprio_prio_cmp,
		      schedprio_idle_thread, a);

	/* Add idle thread to the scheduler's thread list */
	UK_TAILQ_INSERT_TAIL(&c->sched.thread_list, &c->idle, thread_list);
	_runq_add(c, &c->idle);

	return &c->sched;

err_free_c:
	uk_free(a, c);
err_out:
	return NULL;
}
