/* Copyright 2023 Hangzhou Yingyi Technology Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <uk/sched.h>
#include <uk/test.h>
#include <uk/assert.h>
#include <uk/essentials.h>
#include <uk/alloc.h>
#include <uk/sched_impl.h>
#include <stdlib.h>
#include <string.h>
#include <uk/plat/config.h>
#include <uk/plat/lcpu.h>
#include <uk/syscall.h>
#include <time.h>
#include <unistd.h>
#include <uk/schedcoop.h>
#if CONFIG_LIBUKBOOT_INITBBUDDY
#include <uk/allocbbuddy.h>
#define uk_alloc_init uk_allocbbuddy_init
#elif CONFIG_LIBUKBOOT_INITREGION
#include <uk/allocregion.h>
#define uk_alloc_init uk_allocregion_init
#elif CONFIG_LIBUKBOOT_INITMIMALLOC
#include <uk/mimalloc.h>
#define uk_alloc_init uk_mimalloc_init
#elif CONFIG_LIBUKBOOT_INITTLSF
#include <uk/tlsf.h>
#define uk_alloc_init uk_tlsf_init
#elif CONFIG_LIBUKBOOT_INITTINYALLOC
#include <uk/tinyalloc.h>
#define uk_alloc_init uk_tinyalloc_init
#endif

/* Verify that uk_sched_register() links schedulers into the global
 * scheduler list in insertion order.
 */
UK_TESTCASE(uksched, test_sched_register)
{
	/* Return value of the registration calls under test */
	int ret;
	struct uk_alloc *a = uk_alloc_get_default();
	struct uk_sched *node1 = uk_schedcoop_create(a);
	struct uk_sched *node2 = uk_schedcoop_create(a);

	/* Registering into an empty list must succeed and leave the
	 * single element without a successor.
	 */
	ret = uk_sched_register(node1);
	UK_TEST_EXPECT_SNUM_EQ(ret, 0);
	UK_TEST_EXPECT_NULL(node1->next);

	/* Registering a second scheduler must also succeed and append
	 * it directly after node1.
	 */
	ret = uk_sched_register(node2);
	UK_TEST_EXPECT_SNUM_EQ(ret, 0);
	UK_TEST_EXPECT_NOT_NULL(node1->next);
	UK_TEST_EXPECT_PTR_EQ(node1->next, node2);
}


/* Verify that an exited thread is garbage-collected and no longer
 * appears in the scheduler's thread list.
 */
UK_TESTCASE(uksched, test_sched_gc)
{
	/* Return value of uk_sched_thread_add(); currently not asserted */
	int ret;
	struct uk_sched *s = uk_sched_current();
	uintptr_t tlsp;
	struct uk_thread *t, *tmp;

	/* Create a bare thread that reuses the current TLS pointer
	 * (the !(!tlsp) argument marks the TLS as present only when
	 * tlsp is non-zero).
	 */
	tlsp = ukplat_tlsp_get();
	struct uk_thread *main_thread = uk_thread_create_bare(
		s->a, 0x0, 0x0, 0x0, tlsp, !(!tlsp),
		false, "test", NULL, NULL);
	ret = uk_sched_thread_add(s, main_thread);
	/* Mock a thread that ran and exited: mark it exited and detach
	 * it from the scheduler before triggering garbage collection.
	 */
	uk_thread_set_exited(main_thread);
	main_thread->sched = NULL;

	uk_sched_thread_gc(s);
	/* NOTE(review): removing the thread from the exited queue here
	 * assumes gc left it queued there (because sched was cleared
	 * above) — confirm against uk_sched_thread_gc() semantics.
	 */
	UK_TAILQ_REMOVE(&s->exited_threads, main_thread, thread_list);

	/* After gc the collected thread must not show up in any of the
	 * scheduler's thread lists.
	 */
	uk_sched_foreach_thread_safe(s, t, tmp) {
		UK_TEST_EXPECT_SNUM_NQ(t, main_thread);
	}
}


/* Verify that uk_thread_release() marks the thread exited and clears
 * its stack/TLS memory references.
 */
UK_TESTCASE(uksched, test_thread_release)
{
	struct uk_sched *s = uk_sched_current();
	uintptr_t tlsp;

	tlsp = ukplat_tlsp_get();
	struct uk_thread *main_thread = uk_thread_create_bare(
		s->a, 0x0, 0x0, 0x0, tlsp, !(!tlsp),
		false, "init", NULL, NULL);
	uk_thread_release(main_thread);
	/* NOTE(review): if uk_thread_release() frees the thread
	 * container itself, the reads below are use-after-free —
	 * confirm the container's allocator/ownership for bare threads.
	 */
	UK_TEST_EXPECT_SNUM_EQ(uk_thread_is_exited(main_thread),
		UK_THREADF_EXITED);
	UK_TEST_EXPECT_NULL(main_thread->_mem.stack_a);
	UK_TEST_EXPECT_NULL(main_thread->_mem.uktls);
	UK_TEST_EXPECT_NULL(main_thread->_mem.uktls_a);
}

/* Verify that blocking clears the runnable state, waking restores it,
 * and a block without timeout leaves no wakeup deadline set.
 */
UK_TESTCASE(uksched, test_thread_block_no_timeout_then_wake)
{
	struct uk_sched *s = uk_sched_current();
	uintptr_t tlsp;

	tlsp = ukplat_tlsp_get();
	struct uk_thread *main_thread = uk_thread_create_bare(
		s->a, 0x0, 0x0, 0x0, tlsp, !(!tlsp),
		false, "init", NULL, NULL);
	uk_thread_set_runnable(main_thread);
	uk_thread_block(main_thread);
	UK_TEST_EXPECT_SNUM_EQ(uk_thread_is_runnable(main_thread), 0);
	uk_thread_wake(main_thread);
	/* TODO: add test for irq-safe operation. */
	/* Compare against 0 rather than 1: uk_thread_is_runnable() may
	 * return a flag mask rather than a normalized boolean (the
	 * exited-state test below compares against UK_THREADF_EXITED).
	 */
	UK_TEST_EXPECT_SNUM_NQ(uk_thread_is_runnable(main_thread), 0);
	UK_TEST_EXPECT_SNUM_EQ(main_thread->wakeup_time, 0LL);
}

/* Verify that uk_thread_set_exited() raises the EXITED flag. */
UK_TESTCASE(uksched, test_thread_set_exited)
{
	struct uk_sched *s = uk_sched_current();
	uintptr_t tlsp;

	/* Bare thread reusing the current TLS pointer; the !(!tlsp)
	 * argument marks the TLS as present only when tlsp is non-zero.
	 */
	tlsp = ukplat_tlsp_get();
	struct uk_thread *main_thread = uk_thread_create_bare(
		s->a, 0x0, 0x0, 0x0, tlsp, !(!tlsp),
		false, "init", NULL, NULL);
	uk_thread_set_exited(main_thread);
	UK_TEST_EXPECT_SNUM_EQ(uk_thread_is_exited(main_thread),
		UK_THREADF_EXITED);
}


/* Verify that uk_sched_thread_sleep() actually lets time pass as
 * measured by the monotonic clock.
 */
UK_TESTCASE(uksched, test_sched_thread_sleep)
{
	/* Return value of uk_sched_thread_add() */
	int ret;
	struct uk_sched *s = uk_sched_current();
	uintptr_t tlsp;

	tlsp = ukplat_tlsp_get();

	struct uk_thread *main_thread = uk_thread_create_bare(
		s->a, 0x0, 0x0, 0x0, tlsp, !(!tlsp),
		false, "test_sleep", NULL, NULL);
	ret = uk_sched_thread_add(s, main_thread);
	UK_TEST_EXPECT_SNUM_EQ(ret, 0);

	time_t begin = ukplat_monotonic_clock();

	/* Sleep for 10000 ns; any sleep must advance the clock */
	uk_sched_thread_sleep(10000);

	time_t end = ukplat_monotonic_clock();

	UK_TEST_EXPECT_SNUM_NQ(end - begin, 0);
}

static __noreturn void idle_thread_fn(void *argp)
{}

/* Verify that uk_thread_init_fn1() succeeds when initializing a thread
 * embedded in a scheduler-private structure (mirrors schedcoop's
 * internal layout to exercise the in-place idle-thread setup).
 */
UK_TESTCASE(uksched, test_thread_init)
{
	struct uk_alloc *a = uk_alloc_get_default();
	/* Local mirror of the private struct schedcoop layout */
	struct schedcoop {
		struct uk_sched sched;
		struct uk_thread_list run_queue;
		struct uk_thread_list sleep_queue;

		struct uk_thread idle;
		__nsec idle_return_time;
	};
	struct schedcoop *c = NULL;
	int rc;

	uk_pr_info("Initializing cooperative scheduler\n");
	c = uk_zalloc(a, sizeof(struct schedcoop));
	/* Guard against allocation failure before dereferencing c */
	UK_TEST_EXPECT_NOT_NULL(c);
	if (!c)
		return;

	UK_TAILQ_INIT(&c->run_queue);
	UK_TAILQ_INIT(&c->sleep_queue);

	/* Create idle thread in place */
	rc = uk_thread_init_fn1(&c->idle, idle_thread_fn, (void *)c, a,
		STACK_SIZE, a, STACK_SIZE, a,
		false, NULL, "idle", NULL, NULL);
	UK_TEST_EXPECT_SNUM_EQ(rc, 0);
}


static __noreturn void entry_fn(void *arg)
{
}

/* Verify that every thread-creation variant (0/1/2 entry arguments,
 * both the scheduler-attached and the raw allocator API) yields a
 * non-NULL thread.
 */
UK_TESTCASE(uksched, test_uk_sched_thread_create)
{
	struct uk_alloc *a = uk_alloc_get_default();
	struct uk_sched *s = uk_sched_current();
	/* Threads created through the scheduler API */
	struct uk_thread *thread0, *thread1, *thread2;
	/* Threads created directly through the thread API */
	struct uk_thread *thread3, *thread4, *thread5;

	thread0 = uk_sched_thread_create_fn0(s, entry_fn,
		0x0, 0x0, 0, 0, "Waiter", NULL, NULL);
	thread1 = uk_sched_thread_create_fn1(s, entry_fn, NULL,
		0x0, 0x0, 0, 0, "Waiter", NULL, NULL);
	thread2 = uk_sched_thread_create_fn2(s, entry_fn, NULL, NULL,
		0x0, 0x0, 0, 0, "Waiter", NULL, NULL);

	thread3 = uk_thread_create_fn0(s->a, entry_fn,
		s->a_stack, 0x0, s->a_stack, 0x0, s->a_uktls, 0,
		"Waiter", NULL, NULL);
	thread4 = uk_thread_create_fn1(s->a, entry_fn, NULL,
		s->a_stack, 0x0, s->a_stack, 0x0, s->a_uktls, 0,
		"Waiter", NULL, NULL);
	thread5 = uk_thread_create_fn2(s->a, entry_fn, NULL, NULL,
		s->a_stack, 0x0, s->a_stack, 0x0, s->a_uktls, 0,
		"Waiter", NULL, NULL);

	UK_TEST_EXPECT_NOT_NULL(thread0);
	UK_TEST_EXPECT_NOT_NULL(thread1);
	UK_TEST_EXPECT_NOT_NULL(thread2);
	UK_TEST_EXPECT_NOT_NULL(thread3);
	UK_TEST_EXPECT_NOT_NULL(thread4);
	UK_TEST_EXPECT_NOT_NULL(thread5);
}

UK_TESTCASE(uksched, test_uk_thread_create_and_init)
{
	struct uk_alloc *a = uk_alloc_get_default();
	struct uk_sched *s = uk_sched_current();
	struct uk_thread *t0 = uk_thread_create_container(a, s->a_stack, 0x0,
		s->a_stack, 0x0, s->a_uktls, 0,
		("Waiter"), ((void *)0), ((void *)0));
	struct uk_thread *t1 = uk_thread_create_container(a, s->a_stack, 0x0,
		s->a_stack, 0x0, s->a_uktls, 0,
		("Waiter"), ((void *)0), ((void *)0));
	struct uk_thread *t2 = uk_thread_create_container(a, s->a_stack, 0x0,
		s->a_stack, 0x0, s->a_uktls, 0,
		("Waiter"), ((void *)0), ((void *)0));
	struct uk_thread *t3 = uk_thread_create_container2(a, 1, 1, 1, 0, 0,
		("Waiter"), ((void *)0), ((void *)0));
	UK_TEST_EXPECT_NOT_NULL(t0);
	uk_thread_container_init_fn1(t1, entry_fn, (void *)(((void *)0)));
	UK_TEST_EXPECT_SNUM_NQ(uk_thread_is_runnable(t1), 0);
	uk_thread_container_init_fn0(t0,
		entry_fn);
	UK_TEST_EXPECT_SNUM_NQ(uk_thread_is_runnable(t0), 0);
	uk_thread_container_init_fn2(t2,
		entry_fn, (void *)(((void *)0)), (void *)(((void *)0)));
	UK_TEST_EXPECT_SNUM_NQ(uk_thread_is_runnable(t2), 0);
	uk_thread_container_init_bare(t3, 1);
	UK_TEST_EXPECT_SNUM_NQ(uk_thread_is_runnable(t3), 0);
}


/* Register all uksched test cases defined above with the test framework */
uk_testsuite_register(uksched, NULL);
