#include "atom.h"
#include <pthread.h>    
#include "syslog.h"
#include "debug.h"
#undef OK
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"

/* 1 = "interrupts enabled" (mutex free); 0 = critical section active */
static int _enable_int_cpu = 1;
pthread_mutex_t _interrupt_mutex = PTHREAD_MUTEX_INITIALIZER;  /* interrupt-emulation mutex */

/**
 * \b get_cc
 *
 * Emulate "disable interrupts" for this port by taking the global
 * interrupt mutex.  Returns the previous state so nested critical
 * sections can be unwound by a matching set_cc() call.
 *
 * NOTE(review): _enable_int_cpu is read outside the mutex, so nesting
 * detection only works within a single execution context.  A second
 * thread can observe _enable_int_cpu == 0, receive 1, and proceed
 * without ever taking the lock — confirm all callers share one context.
 *
 * @return 0 if this call acquired the mutex (release with set_cc(0)),
 *         1 if a critical section was already active (set_cc(1) is a no-op)
 */
int get_cc(void)
{
	if (_enable_int_cpu == 1) {
        pthread_mutex_lock(&_interrupt_mutex);
		_enable_int_cpu = 0;
		return 0;
	}else {
		return 1;
	}
}

/**
 * \b set_cc
 *
 * Restore the "interrupt" state captured by a matching get_cc() call.
 * Only the outermost caller (the one that received 0) releases the mutex.
 *
 * Fix: mark the section as free (_enable_int_cpu = 1) BEFORE unlocking.
 * With the old order there was a window after pthread_mutex_unlock() in
 * which another caller could still observe _enable_int_cpu == 0, conclude
 * a critical section was already active, and enter its own critical
 * section without ever taking the mutex.
 *
 * @param[in] ccr Value returned by the matching get_cc() call
 */
void set_cc(int ccr)
{
	if (ccr == 0) {
		_enable_int_cpu = 1;
        pthread_mutex_unlock(&_interrupt_mutex);
	}
}

#ifdef ATOM_USE_IDLE_HOOK

#ifndef ATOM_IDLE_HOOK_LIST_SIZE
#define ATOM_IDLE_HOOK_LIST_SIZE (1)
#endif


static void (*idle_hook_list[ATOM_IDLE_HOOK_LIST_SIZE])(void);

/**
 * \b atomIdleSetHook
 *
 * Register a hook function to be called repeatedly from the idle thread.
 *
 * Fix: reject a NULL hook.  Previously storing NULL found the first free
 * slot, "registered" nothing, and still returned ATOM_OK — and a NULL
 * entry is indistinguishable from a free slot in the list.
 *
 * @param[in] hook Function to register (must not be NULL)
 *
 * @retval ATOM_OK    Hook registered
 * @retval ATOM_ERROR Hook was NULL or the hook list is full
 */
uint8_t atomIdleSetHook(void (*hook)(void))
{
    uint8_t ret = ATOM_ERROR;
    /* disable interrupt */
    CRITICAL_STORE;

    if (hook == NULL)
    {
        return ret;
    }

    CRITICAL_START();

    /* Store the hook in the first free slot, if any */
    for (int i = 0; i < ATOM_IDLE_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == NULL)
        {
            idle_hook_list[i] = hook;
            ret = ATOM_OK;
            break;
        }
    }

    CRITICAL_END();

    return ret;
}

#endif

/**
 * \b atomIdleThread
 *
 * Entry point for idle thread.
 *
 * This thread must always be present, and will be the thread executed when
 * no other threads are ready to run. It must not call any library routines
 * which would cause it to block.
 *
 * @param[in] param Unused (optional thread entry parameter)
 *
 * @return None
 */
static void atomIdleThread (void* param)
{
    /* Entry parameter is unused; consume it to keep the compiler quiet */
    (void)param;

    /* Run forever: invoke every registered idle hook, then hand one
     * tick back to the FreeRTOS scheduler. */
    for (;;)
    {
#ifdef ATOM_USE_IDLE_HOOK
        int slot;
        for (slot = 0; slot < ATOM_IDLE_HOOK_LIST_SIZE; slot++)
        {
            void (*fn)(void) = idle_hook_list[slot];
            if (fn != NULL)
            {
                fn();
            }
        }
#endif
        vTaskDelay(1);
    }
}

/** Set to TRUE when OS is started and running threads */
uint8_t atomOSStarted = FALSE;

/** Number of nested interrupts */
static int atomIntCnt = 0;

/** Storage for the idle thread's TCB */
#ifdef ATOM_STACK_CHECKING
ATOM_TCB idle_tcb = DEFAULT_TCB("idle");
#else
/* Fix: removed the stray second ';' — an empty file-scope declaration
 * is invalid in strict C and triggers -pedantic warnings. */
static ATOM_TCB idle_tcb = DEFAULT_TCB("idle");
#endif

/**
 * \b atomOSInit
 *
 * Initialise the OS shim: reset the started flag and create the idle
 * thread (at the lowest priority) on top of FreeRTOS.
 *
 * @param[in] idle_thread_stack_bottom Base of the idle thread's stack area
 * @param[in] idle_thread_stack_size   Size of the stack area in bytes
 * @param[in] idle_thread_stack_check  TRUE to enable stack checking
 *
 * @return Status from atomThreadCreate() (ATOM_OK on success)
 */
extern uint8_t atomOSInit(void* idle_thread_stack_bottom, uint32_t idle_thread_stack_size, uint8_t idle_thread_stack_check)
{
    uint8_t status;

    /* Initialise data */
    atomOSStarted = FALSE;

    /* Create the idle thread */
    status = atomThreadCreate(&idle_tcb,
                 ATOM_PROIORITY_MAX + 1,
                 atomIdleThread,
                 0,
                 idle_thread_stack_bottom,
                 idle_thread_stack_size,
                 idle_thread_stack_check);

    /* Fix: removed the unreachable 'return ATOM_OK;' that followed this
     * return statement. */
    return (status);
}

/**
 * \b atomOSStart
 *
 * Mark the OS as started; actual thread scheduling is delegated to
 * FreeRTOS in this port.
 */
void atomOSStart(void)
{
    atomOSStarted = TRUE;
}

/**
 * \b atomIntEnter
 *
 * Interrupt handler entry routine.
 *
 * Must be called at the start of any interrupt handlers that may
 * call an OS primitive and make a thread ready.
 *
 * @return None
 */
void atomIntEnter(void)
{
	/* Track one more level of interrupt nesting */
	++atomIntCnt;
}

/**
 * \b atomSched
 *
 * This is an internal function not for use by application code.
 *
 * This is the main scheduler routine. It is called by the various OS
 * library routines to check if any threads should be scheduled in now.
 * If so, the context will be switched from the current thread to the
 * new one.
 *
 * The scheduler is priority-based with round-robin performed on threads
 * with the same priority. Round-robin is only performed on timer ticks
 * however. During reschedules caused by an OS operation (e.g. after
 * giving or taking a semaphore) we only allow the scheduling in of
 * threads with higher priority than current priority. On timer ticks we
 * also allow the scheduling of same-priority threads - in that case we
 * schedule in the head of the ready list for that priority and put the
 * current thread at the tail.
 *
 * @param[in] timer_tick Should be TRUE when called from the system tick
 *
 * @return None
 */
/**
 * \b atomSched
 *
 * Deliberate no-op in this port: thread scheduling is delegated entirely
 * to FreeRTOS, so the atomthreads scheduler has nothing to do.
 *
 * @param[in] timer_tick TRUE when called from the system tick (ignored)
 */
void atomSched(uint8_t timer_tick)
{
    /* Consume the parameter to avoid an unused-parameter warning */
    (void)timer_tick;
}

/**
 * \b atomIntExit
 *
 * Interrupt handler exit routine.
 *
 * Must be called at the end of any interrupt handlers that may
 * call an OS primitive and make a thread ready.
 *
 * This is responsible for calling the scheduler at the end of
 * interrupt handlers to determine whether a new thread has now
 * been made ready and should be scheduled in.
 *
 * @param timer_tick TRUE if this is a timer tick
 *
 * @return None
 */
void atomIntExit(uint8_t timer_tick)
{
    /* One level of interrupt nesting has finished */
    --atomIntCnt;

    /* Give the scheduler a chance to run (a no-op in this port) */
    atomSched(timer_tick);
}


typedef void* (*START_ROUTINE) (void*);

/* Singly-linked list of every TCB registered via atomThreadCreate() */
static SLIST_HEAD(KernelList, KernelNode) _kernelTcbList;

/* Insert a TCB's embedded node at the head of the kernel TCB list.
 * Fix: wrapped in do { } while (0) so the macro behaves as a single
 * statement — the old expansion carried its own trailing ';', which
 * breaks use in an unbraced if/else.  Argument is parenthesized. */
#define KERNEL_TCB_INSERT(tcb) \
    do { SLIST_INSERT_HEAD(&_kernelTcbList, (KernelNode*)&(tcb)->node, node); } while (0)

/* Recover the owning ATOM_TCB from a pointer to its embedded list node */
#define KERNEL_TCB_NODE_TCB(item) \
    ((ATOM_TCB*)((uint8_t*)(item) - offsetof(ATOM_TCB, node)))

/* Thread state values (informational; FreeRTOS owns real scheduling) */
enum ThreadStatus {
    TS_READY = 0,
    TS_RUN,
    TS_SUSP,
    TS_TERM
};

/** This is a pointer to the TCB for the currently-running thread */
/**
 * \b atomCurrentContext
 *
 * Map the currently running FreeRTOS task back to its ATOM_TCB by
 * scanning the kernel TCB list for a matching saved task handle.
 *
 * Fix: the original returned straight out of the loop on a match,
 * skipping CRITICAL_END() and leaking the critical section.  The result
 * is now captured and the critical section is always closed.
 *
 * @return Pointer to the matching TCB, or NULL if the current task was
 *         not created through atomThreadCreate()
 */
ATOM_TCB* atomCurrentContext(void)
{
    KernelNode* item;
    ATOM_TCB* found = NULL;
    CRITICAL_STORE;

    CRITICAL_START();
    TaskHandle_t tid = xTaskGetCurrentTaskHandle();

    SLIST_FOREACH(item, &_kernelTcbList, node) {
        ATOM_TCB* pTcb = KERNEL_TCB_NODE_TCB(item);
        if (pTcb->save_ptr == (uint32_t)tid) {
            found = pTcb;
            break;
        }
    }

    CRITICAL_END();

    return found;
}

#ifdef ATOM_USE_KERNEL_DBG
/**
 * \b atomThreadStatusPrint
 *
 * Debug helper: print a table with one row per registered thread showing
 * name, priority, FreeRTOS task state, current stack position, stack
 * size, percent used and space left.
 *
 * NOTE(review): stack_size is stored in StackType_t words by
 * atomThreadCreate() and uxTaskGetStackHighWaterMark() also reports
 * words, so "size/used/left" are word counts, not bytes — confirm.
 */
void atomThreadStatusPrint(void)
{
    KernelNode* item;
    CRITICAL_STORE;
    int32_t used_bytes, free_bytes, stack_size, stack_bottom;
    uint8_t priority;
    const char* name;
    /* gather per-task information below */

    /* Table header */
	KPrint("thread   pri status  stack      size used left \r\n");
    KPrint("-----------------------------------------------\r\n");

    SLIST_FOREACH(item, &_kernelTcbList, node) {
        /* Snapshot this task's fields inside a critical section so the
         * printed values are mutually consistent. */
        CRITICAL_START();
        ATOM_TCB* pTcb = KERNEL_TCB_NODE_TCB(item);
        
        /* High-water mark: minimum free stack FreeRTOS has ever observed */
        free_bytes = uxTaskGetStackHighWaterMark((TaskHandle_t)pTcb->save_ptr);
        stack_size = pTcb->stack_size;
        used_bytes = stack_size - free_bytes;
        name = pTcb->name;
        priority = pTcb->priority;
        /* NOTE(review): pointer is truncated through int32_t here —
         * fine on 32-bit targets only. */
        stack_bottom = (uint32_t)((char*)pTcb->stack_bottom + used_bytes);
        eTaskState status = eTaskGetState((TaskHandle_t)pTcb->save_ptr);
        CRITICAL_END();

        KPrintf("%-8s %3d", name, (uint32_t)priority);

        /* Translate the FreeRTOS task state into a fixed-width label */
        switch(status){
            case eBlocked:
            KPrint(" blocked");
            break;
            case eReady:
            KPrint(" ready  ");
            break;
            case eRunning:
            KPrint(" running");
            break;
            case eSuspended:
            KPrint(" suspend");
            break;
            case eDeleted:
            KPrint(" deleted");
            break;
            case eInvalid:
            KPrint(" invalid");
            break;
        }

        KPrintf(" 0x%08x %4d  %2d%% %4d \r\n", (uint32_t)stack_bottom, (uint32_t)stack_size, (uint32_t)(used_bytes*100/stack_size), (int32_t)free_bytes);
        
    }
}
#endif

//
/**
 * \b atomThreadCreate
 *
 * Create an atomthreads thread on top of a statically allocated FreeRTOS
 * task.  The first sizeof(StaticTask_t) bytes of the supplied stack area
 * hold the FreeRTOS task control block; the remainder is the task stack.
 *
 * Fixes:
 *  - stack_size must now be strictly larger than sizeof(StaticTask_t);
 *    the old '== 0' check let 'stack_size - sizeof(StaticTask_t)'
 *    underflow for small stacks.
 *  - The TCB is registered on the kernel list only AFTER the task is
 *    created; previously a failed create left a list node whose
 *    save_ptr was indeterminate, which atomCurrentContext() would read.
 *
 * @param[in,out] tcb_ptr  Caller-allocated TCB to fill in and register
 * @param[in] priority     Priority passed straight to FreeRTOS
 * @param[in] entry_point  Thread entry function
 * @param[in] entry_param  Parameter passed to the entry function
 * @param[in] stack_bottom Base of the stack area
 * @param[in] stack_size   Total size of the stack area in bytes
 * @param[in] stack_check  Unused in this port
 *
 * @retval ATOM_OK        Success
 * @retval ATOM_ERR_PARAM Bad parameters (including stack too small)
 * @retval ATOM_ERROR     FreeRTOS task creation failed
 */
uint8_t atomThreadCreate(ATOM_TCB* tcb_ptr, uint8_t priority, void (*entry_point)(void*), void* entry_param, void* stack_bottom, uint32_t stack_size, uint8_t stack_check)
{
    uint8_t ret = ATOM_OK;

    (void)stack_check;  /* stack checking not supported in this port */

    if ((tcb_ptr == NULL) || (entry_point == NULL) || (stack_bottom == NULL)
        || (stack_size <= sizeof(StaticTask_t)))
    {
        /* Bad parameters */
        ret = ATOM_ERR_PARAM;
    }
    else {
        TaskHandle_t xHandle = NULL;

        xHandle = xTaskCreateStatic(
            (TaskFunction_t)entry_point,        /* Task function. */
            tcb_ptr->name,             /* String with the name of the task. */
            ((stack_size - sizeof(StaticTask_t)) / sizeof(StackType_t)), /* Stack size in words (not bytes). */
            (void*)entry_param, /* Parameter passed into the task. */
            priority,           /* Priority at which the task is created. */
            (StackType_t*)((uint8_t*)stack_bottom + sizeof(StaticTask_t)),  /* stack area follows the StaticTask_t header */
            (StaticTask_t*)stack_bottom);       /* StaticTask_t lives at the bottom of the area */

        if (xHandle != NULL) {
            tcb_ptr->priority = priority;
            tcb_ptr->entry_point = entry_point;
            tcb_ptr->entry_param = entry_param;
            tcb_ptr->stack_bottom = ((uint8_t*)stack_bottom + sizeof(StaticTask_t));
            tcb_ptr->stack_size = ((stack_size - sizeof(StaticTask_t)) / sizeof(StackType_t));
            /* Save the FreeRTOS handle so atomCurrentContext() can map
             * the running task back to this TCB. */
            tcb_ptr->save_ptr = (uint32_t)xHandle;

            /* Register only after the task exists (see fix note above) */
            int cc = get_cc();
            KERNEL_TCB_INSERT(tcb_ptr);
            set_cc(cc);
        }
        else {
            ret = ATOM_ERROR;
        }
    }

    return ret;
}




#ifdef ATOM_STACK_CHECKING
/**
 * \b atomThreadStackCheck
 *
 * Not implemented in this port.  Fix: the out-parameters are now zeroed
 * (when non-NULL) instead of being left indeterminate, so callers that
 * read them do not consume garbage.
 *
 * @param[in]  tcb_ptr    Thread to check (ignored)
 * @param[out] used_bytes Set to 0 if non-NULL
 * @param[out] free_bytes Set to 0 if non-NULL
 *
 * @return 0 always
 */
uint8_t atomThreadStackCheck(ATOM_TCB* tcb_ptr, uint32_t* used_bytes, uint32_t* free_bytes)
{
    (void)tcb_ptr;
    if (used_bytes != NULL) {
        *used_bytes = 0;
    }
    if (free_bytes != NULL) {
        *free_bytes = 0;
    }
    SYSLOG(LOG_NOTICE, "atomThreadStackCheck not implemented under vos!");
    return 0;
}

#endif

/* Context switching is owned by FreeRTOS in this port; log and return. */
void archContextSwitch(ATOM_TCB* old_tcb_ptr, ATOM_TCB* new_tcb_ptr)
{
    (void)old_tcb_ptr;
    (void)new_tcb_ptr;
    SYSLOG(LOG_NOTICE, "archContextSwitch not implemented under vos!");
}


/* First-thread restore is owned by FreeRTOS in this port; log and return. */
void archFirstThreadRestore(ATOM_TCB* new_tcb_ptr)
{
    (void)new_tcb_ptr;
    SYSLOG(LOG_NOTICE, "archFirstThreadRestore not implemented under vos!");
}

/**
 * Initialise a threads stack so it can be scheduled in by
 * archFirstThreadRestore or the pend_sv_handler.
 */
void archThreadContextInit(ATOM_TCB* tcb_ptr, void* stack_top,
    void (*entry_point)(void*), void* entry_param)
{
    /* Stack frames are built by FreeRTOS in this port; log and return. */
    (void)tcb_ptr;
    (void)stack_top;
    (void)entry_point;
    (void)entry_param;
    SYSLOG(LOG_NOTICE, "archThreadContextInit not implemented under vos!");
}

