#ifndef _TOS_SMP_H_
#define _TOS_SMP_H_

#define K_SMP_MSG_TYPE_SIMPLE 0x0 // Simple message, bit maps are defined below

#define K_SMP_MSG_SIMPLE_FORCEIDLE 0x1 // Force idle
#define K_SMP_MSG_SIMPLE_SCHREQ 0x2 // Request to schedule

// To PROCPU
#define K_SMP_MSG_TYPE_TASKRDY 0x1 // Task ready to be run on other CPU

// To APPCPU
#define K_SMP_MSG_TYPE_TASKSCH 0x1 // Delivered to run task

typedef void* k_smp_mutex_t;

/**
 * @brief Pack a 2-bit message type and 30 bits of payload into one 32-bit SMP message word.
 *
 * @param type Message type (K_SMP_MSG_TYPE_*); only the low 2 bits are used.
 * @param datah30 Payload carried in bits [31:2]; its low 2 bits are discarded.
 * @return uint32_t Packed word: payload in bits [31:2], type in bits [1:0].
 */
__STATIC__ __KNL__ uint32_t smp_form_msg(uint8_t type, uint32_t datah30)
{
    /* Explicit parentheses: `&` binds tighter than `|`, so the old form was
     * correct by accident but triggered -Wparentheses and obscured intent. */
    return (datah30 & 0xFFFFFFFC) | ((uint32_t)type & 0x3);
}

/**
 * @brief Build an APPCPU message carrying a task pointer to run.
 *
 * @param ptr Task pointer; presumably at least 4-byte aligned, since the low
 *      2 bits are overwritten by the message type — TODO confirm.
 * @return uint32_t Message word with type K_SMP_MSG_TYPE_TASKSCH.
 */
__STATIC__ __KNL__ uint32_t smp_msg_taskptr(void* ptr)
{
    uint32_t payload = (uint32_t)ptr;
    return smp_form_msg(K_SMP_MSG_TYPE_TASKSCH, payload);
}

/**
 * @brief Build a PROCPU message announcing a task ready to run on another CPU.
 *
 * @param ptr Task pointer; presumably at least 4-byte aligned, since the low
 *      2 bits are overwritten by the message type — TODO confirm.
 * @return uint32_t Message word with type K_SMP_MSG_TYPE_TASKRDY.
 */
__STATIC__ __KNL__ uint32_t smp_msg_taskrdy(void* ptr)
{
    uint32_t payload = (uint32_t)ptr;
    return smp_form_msg(K_SMP_MSG_TYPE_TASKRDY, payload);
}

/**
 * @brief Build a simple (payload-free) SMP message.
 *
 * @param type Simple-message sub-type (K_SMP_MSG_SIMPLE_*), stored in bits [31:2].
 * @return uint32_t Message word with type K_SMP_MSG_TYPE_SIMPLE.
 */
__STATIC__ __KNL__ uint32_t smp_msg_simple(uint8_t type)
{
    uint32_t subtype_bits = ((uint32_t)type) << 2;
    return smp_form_msg(K_SMP_MSG_TYPE_SIMPLE, subtype_bits);
}

/**
 * @brief Extract the 2-bit message type from a packed SMP message word.
 *
 * @param msg Packed message word.
 * @return uint8_t One of the K_SMP_MSG_TYPE_* values (bits [1:0]).
 */
__STATIC__ __KNL__ uint8_t smp_get_msg_type(uint32_t msg)
{
    return (uint8_t)(msg & 0x3);
}

/**
 * @brief Extract the pointer payload from a packed SMP message word.
 *
 * @param msg Packed message word.
 * @return void* Payload bits [31:2] reinterpreted as a pointer.
 *      NOTE(review): assumes 32-bit pointers — confirm for the target port.
 */
__STATIC__ __KNL__ void* smp_get_msg_ptr(uint32_t msg)
{
    uint32_t raw = msg & 0xFFFFFFFC;
    return (void*)raw;
}

/**
 * @brief Extract the simple-message sub-type from a packed SMP message word.
 *
 * @param msg Packed message word (must be of type K_SMP_MSG_TYPE_SIMPLE).
 * @return uint8_t Sub-type stored in bits [31:2], truncated to 8 bits.
 */
__STATIC__ __KNL__ uint8_t smp_get_msg_simple_type(uint32_t msg)
{
    return (uint8_t)(msg >> 2);
}

/**
 * @brief Get the index of the current CPU.
 *
 * @return uint32_t  the index of the current CPU.
 */
__STATIC_INLINE__ __KNL__ uint32_t cpu_get_index(void);

/**
 * @brief Try to push message to the CPU with the specified index. The data is fixed to 32 bits.
 * @note Non-blocking
 * @param idx Target CPU index
 * @param data Data to push
 * @return k_err_t Error code. When error the function should return a non-zero value.
 */
__STATIC__ __KNL__ k_err_t smp_try_push_msg(uint8_t idx, uint32_t data);

/**
 * @brief Try to pop message from the CPU with the specified index. The data is fixed to 32 bits.
 * @note Non-blocking
 * @param idx Source CPU index
 * @param data Data pointer to save the popped data
 * @return k_err_t Error code. When error the function should return a non-zero value.
 */
__STATIC__ __KNL__ k_err_t smp_try_pop_msg(uint8_t idx, uint32_t* data);

/**
 * @brief Try to claim global lock with the specified index.
 * @details The function is somewhat different from the app locks since it's critical for kernel.
 *  It should be as fast as possible and interrupt may be disabled!
 * @param idx The global lock index. Only can be TOS_CFG_SMP_GLOCK_ID0 or TOS_CFG_SMP_GLOCK_ID1, 2 locks in total.
 * @return k_err_t Error code. When error the function should return a non-zero value.
 */
__STATIC__ __KNL__ k_err_t smp_global_lock(uint8_t idx);

/**
 * @brief Try to release global lock with the specified index.
 * @details The function is somewhat different from the app locks since it's critical for kernel.
 * It should be as fast as possible and interrupt may be restored!
 * @param idx The global lock index. Only can be TOS_CFG_SMP_GLOCK_ID0 or TOS_CFG_SMP_GLOCK_ID1, 2 locks in total.
 * @return k_err_t Error code. When error the function should return a non-zero value.
 */
__STATIC__ __KNL__ k_err_t smp_global_unlock(uint8_t idx);

/**
 * @brief Test if the global lock with the specified index is locked.
 *
 * @param idx Index of global lock.
 * @return uint8_t 1 if locked, 0 if not.
 */
__STATIC__ __KNL__ uint8_t smp_global_is_locked(uint8_t idx);

/* Global lock guarding scheduler state. */
#define TOS_SMP_GLOCK_SCHED TOS_CFG_SMP_GLOCK_ID0

/* Global lock used by the default platform-mutex implementation. */
#define TOS_SMP_GLOCK_PMUTEX TOS_CFG_SMP_GLOCK_ID1

/* Guard that runs the following statement only on the primary CPU. */
#define TOS_ON_PROCPU if (cpu_get_index() == TOS_CFG_SMP_PROCPU_INDEX)

/* Usage: TOS_SMP_DOEXP_AND_DIS_BEGIN expr TOS_SMP_DOEXP_AND_DIS_END;
 * Disables interrupts and evaluates `expr` once. On K_ERR_NONE the `break`
 * leaves the do/while with interrupts STILL DISABLED; on failure they are
 * re-enabled before falling out.
 * NOTE(review): `while (0)` means `expr` is evaluated exactly once despite the
 * loop-like shape — confirm a retry loop (`while (1)`) was not intended. */
#define TOS_SMP_DOEXP_AND_DIS_BEGIN \
    do {                            \
        TOS_CPU_INT_DISABLE();      \
    if((

#define TOS_SMP_DOEXP_AND_DIS_END \
    ) == K_ERR_NONE){ break; }    \
    TOS_CPU_INT_ENABLE();         \
    }                             \
    while (0)

// SMP Non-critical section

/**
 * @brief Initialize a platform mutex.
 *
 * @param ppMutex Pointer to the mutex object. Note that k_smp_mutex_t is a pointer type,
 *      which stores the pointer to the actual platform mutex object, or the implementation can just use its space for data.
 * @return k_err_t K_ERR_NONE if successful, otherwise an error code.
 */
__STATIC__ __KNL__ k_err_t smp_init_mutex(k_smp_mutex_t* ppMutex);

/**
 * @brief Try to acquire a platform mutex. Non-blocking.
 *
 * @param pMutex mutex object to acquire
 * @return k_err_t K_ERR_NONE if successful, otherwise an error code.
 */
__STATIC__ __KNL__ k_err_t smp_try_acquire_mutex(k_smp_mutex_t* pMutex);

/**
 * @brief Release a platform mutex.
 *
 * @param pMutex mutex object to release
 * @return k_err_t K_ERR_NONE if successful, otherwise an error code.
 */
__STATIC__ __KNL__ k_err_t smp_release_mutex(k_smp_mutex_t* pMutex);

/**
 * @brief Deinitialize a platform mutex.
 *
 * @param pMutex mutex object to deinitialize. Note that if the mutex is locked it should be automatically released.
 * @return k_err_t K_ERR_NONE if successful, otherwise an error code.
 */
__STATIC__ __KNL__ k_err_t smp_deinit_mutex(k_smp_mutex_t* pMutex);

#if TOS_CFG_SMP_EN > 0u

/* SMP build: forward the current CPU index query to the port layer. */
__STATIC_INLINE__ __KNL__ uint32_t cpu_get_index(void)
{
    uint32_t cpu_idx = port_cpu_get_index();
    return cpu_idx;
}

/* SMP build: non-blocking message push, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_try_push_msg(uint8_t idx, uint32_t data)
{
    k_err_t err = port_smp_try_push_msg(idx, data);
    return err;
}

/* SMP build: non-blocking message pop, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_try_pop_msg(uint8_t idx, uint32_t* data)
{
    k_err_t err = port_smp_try_pop_msg(idx, data);
    return err;
}

/* SMP build: global kernel spinlock claim, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_global_lock(uint8_t idx)
{
    k_err_t err = port_smp_global_lock(idx);
    return err;
}

/* SMP build: global kernel spinlock release, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_global_unlock(uint8_t idx)
{
    k_err_t err = port_smp_global_unlock(idx);
    return err;
}

/* SMP build: global lock state query, delegated to the port layer. */
__STATIC__ __KNL__ uint8_t smp_global_is_locked(uint8_t idx)
{
    uint8_t locked = port_smp_global_is_locked(idx);
    return locked;
}

#if TOS_CFG_SMP_PMUTEX_PROVIDED > 0u

/* Port provides PMUTEX: platform mutex init, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_init_mutex(k_smp_mutex_t* ppMutex)
{
    k_err_t err = port_smp_init_mutex(ppMutex);
    return err;
}

/* Port provides PMUTEX: non-blocking acquire, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_try_acquire_mutex(k_smp_mutex_t* pMutex)
{
    k_err_t err = port_smp_try_acquire_mutex(pMutex);
    return err;
}

/* Port provides PMUTEX: release, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_release_mutex(k_smp_mutex_t* pMutex)
{
    k_err_t err = port_smp_release_mutex(pMutex);
    return err;
}

/* Port provides PMUTEX: deinit, delegated to the port layer. */
__STATIC__ __KNL__ k_err_t smp_deinit_mutex(k_smp_mutex_t* pMutex)
{
    k_err_t err = port_smp_deinit_mutex(pMutex);
    return err;
}

#else // No PMUTEX implementation provided; implement it using the secondary global spinlock

/* Fallback platform-mutex state. It is stored directly inside the
 * pointer-sized k_smp_mutex_t slot rather than heap-allocated, so no
 * separate object is needed. */
typedef struct k_default_pmutex_st {
    uint8_t lock;  /* recursive acquisition count; 0 means unlocked */
    uint8_t owner; /* index of the owning CPU; 0xff means unowned */
} k_default_pmutex_t;

/**
 * @brief Initialize the default platform mutex in place.
 * @details The two-byte state lives inside the pointer-sized k_smp_mutex_t
 *  slot itself, so no allocation is required.
 * @param pMutex Pointer to the mutex slot.
 * @return k_err_t Always K_ERR_NONE.
 */
__STATIC__ __KNL__ k_err_t smp_init_mutex(k_smp_mutex_t* pMutex)
{
    k_default_pmutex_t* mutex = (k_default_pmutex_t*)pMutex;
    mutex->lock = 0;
    mutex->owner = 0xff;
    return K_ERR_NONE;
}

/**
 * @brief Non-blocking acquire of the default platform mutex.
 * @details Recursive: the owning CPU may acquire again, bumping the count.
 *  State changes happen under the PMUTEX global spinlock.
 * @param pMutex Pointer to the mutex slot.
 * @return k_err_t K_ERR_NONE on success, K_ERR_MUTEX_NOT_OWNER if another
 *  CPU holds the mutex.
 */
__STATIC__ __KNL__ k_err_t smp_try_acquire_mutex(k_smp_mutex_t* pMutex)
{
    k_default_pmutex_t* mutex = (k_default_pmutex_t*)pMutex;
    k_err_t err = K_ERR_MUTEX_NOT_OWNER;

    smp_global_lock(TOS_SMP_GLOCK_PMUTEX);
    if (mutex->owner == 0xff || mutex->owner == cpu_get_index()) {
        mutex->owner = cpu_get_index();
        mutex->lock++;
        err = K_ERR_NONE;
    }
    smp_global_unlock(TOS_SMP_GLOCK_PMUTEX);
    return err;
}

/**
 * @brief Release the default platform mutex.
 * @details Decrements the recursion count; when it reaches zero the mutex
 *  becomes unowned. State changes happen under the PMUTEX global spinlock.
 * @param pMutex Pointer to the mutex slot.
 * @return k_err_t K_ERR_NONE on success, K_ERR_MUTEX_NOT_OWNER when the
 *  calling CPU does not own the mutex.
 */
__STATIC__ __KNL__ k_err_t smp_release_mutex(k_smp_mutex_t* pMutex)
{
    k_default_pmutex_t* mutex = (k_default_pmutex_t*)pMutex;
    k_err_t err = K_ERR_NONE;

    smp_global_lock(TOS_SMP_GLOCK_PMUTEX);
    if (mutex->owner == cpu_get_index()) {
        mutex->lock--;
        if (mutex->lock == 0) {
            mutex->owner = 0xff;
        }
    } else {
        err = K_ERR_MUTEX_NOT_OWNER;
    }
    smp_global_unlock(TOS_SMP_GLOCK_PMUTEX);
    return err;
}

/**
 * @brief Deinitialize the default platform mutex.
 * @details The contract requires a still-locked mutex to be released on
 *  deinit. The previous code poisoned the slot with -1, which made owner
 *  0xff (unowned) but left lock at a garbage non-zero count: a later
 *  re-acquire would wrap the count (0xff -> 0x00) and the matching release
 *  would underflow it, leaving the mutex permanently owned. Resetting to
 *  the same state smp_init_mutex() produces releases it cleanly.
 * @param pMutex Pointer to the mutex slot.
 * @return k_err_t Always K_ERR_NONE.
 */
__STATIC__ __KNL__ k_err_t smp_deinit_mutex(k_smp_mutex_t* pMutex)
{
    *(k_default_pmutex_t*)pMutex = (k_default_pmutex_t) { .lock = 0, .owner = 0xff };
    return K_ERR_NONE;
}

#endif

#else

/* Single-core build: SMP bring-up is a no-op.
 * NOTE(review): no smp_init() is defined in the SMP-enabled branch above —
 * presumably the port layer provides it there; verify. */
__STATIC_INLINE__ __KNL__ void smp_init(void)
{
}

/* Single-core build: the only CPU is always index 0. */
__STATIC_INLINE__ __KNL__ uint32_t cpu_get_index(void)
{
    return 0;
}

/* Single-core build: no other CPU to message; always reports success. */
__STATIC__ __KNL__ k_err_t smp_try_push_msg(uint8_t idx, uint32_t data)
{
    (void)idx;  /* silence -Wunused-parameter in the stub build */
    (void)data;
    return K_ERR_NONE;
}

/* Single-core build: no message queue; always reports success. */
__STATIC__ __KNL__ k_err_t smp_try_pop_msg(uint8_t idx, uint32_t* data)
{
    (void)idx;  /* silence -Wunused-parameter in the stub build */
    (void)data;
    /* NOTE(review): returns K_ERR_NONE without writing *data — a caller that
     * treats success as "a message was popped" would read an indeterminate
     * value; confirm callers are compiled out when SMP is disabled. */
    return K_ERR_NONE;
}

/* Single-core build: no contention possible; locking always succeeds. */
__STATIC__ __KNL__ k_err_t smp_global_lock(uint8_t idx)
{
    (void)idx;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

/* Single-core build: unlocking always succeeds. */
__STATIC__ __KNL__ k_err_t smp_global_unlock(uint8_t idx)
{
    (void)idx;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

/* Single-core build: global locks are never held; always reports unlocked. */
__STATIC__ __KNL__ uint8_t smp_global_is_locked(uint8_t idx)
{
    (void)idx;  /* silence -Wunused-parameter in the stub build */
    return 0;
}

/* Single-core build: platform mutex is a no-op.
 * Signature normalized from void** to k_smp_mutex_t* (identical type, since
 * k_smp_mutex_t is void*) for consistency with the prototype above. */
__STATIC__ __KNL__ k_err_t smp_init_mutex(k_smp_mutex_t* ppMutex)
{
    (void)ppMutex;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

/* Single-core build: acquisition can never contend; always succeeds.
 * Signature normalized from void** to k_smp_mutex_t* (identical type). */
__STATIC__ __KNL__ k_err_t smp_try_acquire_mutex(k_smp_mutex_t* pMutex)
{
    (void)pMutex;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

/* Single-core build: release is a no-op; always succeeds.
 * Signature normalized from void** to k_smp_mutex_t* (identical type). */
__STATIC__ __KNL__ k_err_t smp_release_mutex(k_smp_mutex_t* pMutex)
{
    (void)pMutex;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

/* Single-core build: deinit is a no-op; always succeeds.
 * Signature normalized from void** to k_smp_mutex_t* (identical type). */
__STATIC__ __KNL__ k_err_t smp_deinit_mutex(k_smp_mutex_t* pMutex)
{
    (void)pMutex;  /* silence -Wunused-parameter in the stub build */
    return K_ERR_NONE;
}

#endif

#endif