/*
 *  Copyright (c) 2022 ZhuHai Jieli Technology Co.,Ltd.
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#ifndef ASM_CPU_H
#define ASM_CPU_H

#include "br23.h"
#include "csfr.h"
#include "los_typedef.h"
#ifndef __ASSEMBLY__

#endif

/* Issue the `trigger` instruction (software trap/breakpoint on the JL DSP
 * core — confirm exact semantics against the core ISA manual). */
#define ___trig        __asm__ volatile ("trigger")

/* Byte-order tags; the values are arbitrary identifiers used only for
 * equality comparison against CPU_ENDIAN. */
#ifndef BIG_ENDIAN
#define BIG_ENDIAN             0x3021
#endif
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN         0x4576
#endif
/* This port runs little-endian. */
#define CPU_ENDIAN             LITTLE_ENDIAN

/* Number of CPU cores; this build targets a single-core configuration. */
#define CPU_CORE_NUM     1
#if CPU_CORE_NUM > 1
/* Multi-core: read the running core's id from hardware (cnum register). */
#define OS_CPU_ID           current_cpu_id()
#else

/* Single-core: the current core id is always 0. */
#define OS_CPU_ID           (0)
/* Core index helper for the single-core build: there is only one core,
 * so this always reports core 0 (presumably the current core index —
 * confirm against multi-core variants of this port). */
static inline int core_num(void)
{
    return 0; /* single-core configuration */
}

#endif

/* Base of the uncached address alias window (0x0400_0000) — presumably the
 * non-CPU / DMA view of RAM; confirm against the chip memory map. */
#define __NONCPU_ADDR__         ((u8 *)0x04000000)

/* Map a cached address into its uncached alias by adding the 0x0400_0000
 * window offset; addresses below 0x0400_0000 pass through unchanged. */
#define NO_CACHE_ADDR(addr) \
    (u32)(((u32)(addr) >= 0x4000000) ? ((u32)(addr) + 0x4000000) : (u32)(addr))

/* Inverse of NO_CACHE_ADDR: map an uncached alias (>= 0x0800_0000) back
 * to the CPU (cached) view by subtracting the window offset. */
#define CPU_ADDR(addr) \
    (u32)(((u32)(addr) >= 0x8000000) ? ((u32)(addr) - 0x4000000) : (u32)(addr))

/* No-op in this port: nothing to clear for a pended task switch. */
#define  CPU_TASK_CLR(a)
/* Request a task switch on core `a` by setting an interrupt-latch bit
 * (bit 3-a in ILAT_SET — confirm the bit mapping for this chip). */
#define  CPU_TASK_SW(a)         \
    do { \
        q32DSP(a)->ILAT_SET |= BIT(3 - a); \
    } while (0)

/* Maximum supported interrupt nesting depth for this port. */
#define  CPU_INT_NESTING     2

#ifndef __ASSEMBLY__

/* Read a 32-bit value from the hardware random-number generator (low word
 * of the R64 register). NOTE(review): the statistical quality of this RNG
 * is hardware-defined — do not assume it is cryptographically secure. */
__attribute__((always_inline))
static inline u32 rand32(void)
{
    return JL_RAND->R64L;
}
/* Return the id of the core executing this code, read from the `cnum`
 * core register. */
__attribute__((always_inline))
static inline int current_cpu_id(void)
{
    unsigned id;
    __asm__ volatile("%0 = cnum" : "=r"(id) ::);
    return id ;
}

extern int cpu_in_irq(void);

/* Return non-zero when interrupts are (fully) disabled on this core.
 * Reads the `icfg` core register and tests mask 0x300 (two enable bits,
 * presumably global + local interrupt enables — confirm against the
 * core manual): if both bits are not set, interrupts count as disabled. */
__attribute__((always_inline))
static inline int cpu_irq_disabled(void)
{
    int flag;
    __asm__ volatile("%0 = icfg" : "=r"(flag));
    return (flag & 0x300) != 0x300;
}

/* Saturate a 32-bit signed value to the signed 16-bit range
 * [-32768, 32767] using the core's `sat16` (signed) instruction. */
__attribute__((always_inline)) static inline int data_sat_s16(int ind)
{
    __asm__ volatile(
        " %0 = sat16(%0)(s)  \t\n"
        : "=&r"(ind)
        : "0"(ind)
        :);
    return ind;
}

/*
 * Byte-reverse a 32-bit word (endianness swap): 0x12345678 -> 0x78563412.
 *
 * Fix: the previous implementation re-read the value through a u8 pointer,
 * which made the result depend on the host's byte order (it produced the
 * intended swap only on little-endian builds such as this target). Pure
 * shifts and masks give the same result on any host and avoid the memory
 * round-trip entirely.
 */
__attribute__((always_inline)) static inline u32 reverse_u32(u32 data32)
{
    return ((data32 & 0x000000FF) << 24) |
           ((data32 & 0x0000FF00) << 8)  |
           ((data32 & 0x00FF0000) >> 8)  |
           ((data32 & 0xFF000000) >> 24);
}

/* Byte-swap a 16-bit value using the `rev8` instruction: the input is
 * shifted into the upper half-word first, so after the full 32-bit byte
 * reversal the swapped 16-bit value lands in the low half-word with the
 * upper half zero (0xAABB -> 0x0000BBAA — assumes rev8 reverses all four
 * bytes; confirm against the core manual). */
__attribute__((always_inline)) static inline u32 reverse_u16(u16 data16)
{
    u32 retv;
    retv = ((u32)data16) << 16;
    __asm__ volatile("%0 = rev8(%0) \t\n" : "=&r"(retv) : "0"(retv) :);
    return retv;
}
#endif

#ifndef __ASSEMBLY__ // assembly

/* Synchronization barrier: issue the `csync` instruction (pipeline/cache
 * sync — confirm exact ordering guarantees against the core manual). */
#define __asm_csync() \
    do { \
        __asm volatile("csync;"); \
    } while (0)

#include "irq.h"

/* Read an atomic counter: barrier first, then a volatile load of the
 * counter field so the compiler cannot reuse a stale cached value. */
#define arch_atomic_read(v)  \
    ({ \
        __asm_csync(); \
        (*(volatile int *)&(v)->counter); \
     })

/* Per-core lock/irq nesting counters; defined in the port's C code. */
extern volatile int cpu_lock_cnt[];
extern volatile int irq_lock_cnt[];

/* Interrupt enable/disable entry points, defined in the port's C code.
 * The "__"-prefixed pair is presumably the raw, non-nesting variant —
 * confirm in the implementation file. */
extern void __local_irq_disable(void);
extern void __local_irq_enable(void);
extern void local_irq_disable(void);
extern void local_irq_enable(void);
extern void sys_local_irq_disable(void);
extern void sys_local_irq_enable(void);

/* Acquire: spin on the `testset` instruction until the lock byte is
 * taken; csync barriers fence the accesses around the critical section. */
#define arch_spin_lock(lock)  \
    do { \
        __asm_csync(); \
        __asm__ volatile ( "1:            \n\t" "testset b[%0] \n\t" "ifeq goto 1b  \n\t" :: "r"(&(lock)->rwlock)); \
        __asm_csync(); \
        } while (0)
/* Release: barrier, then clear the lock word. */
#define arch_spin_unlock(lock) do {__asm_csync(); (lock)->rwlock = 0;} while (0)

/*
 * CPU_SR_ALLOC() historically declared the saved-status local ("int flags")
 * for the critical-section macros; this port keeps interrupt state inside
 * sys_local_irq_disable/enable instead, so it intentionally expands to
 * nothing.
 *
 * Fix: the old definition used a backslash line-continuation into a "//"
 * comment line, silently making that comment part of the macro replacement
 * text — harmless today, but a footgun if code were ever added on the
 * following line. Define it cleanly empty instead.
 */
#define CPU_SR_ALLOC()

/* Enter an OS critical section: disable interrupts (nesting handled
 * inside sys_local_irq_disable). */
#define CPU_CRITICAL_ENTER()  \
    do { \
        sys_local_irq_disable(); \
    } while (0)

/* Leave an OS critical section: re-enable interrupts once the matching
 * number of exits has been reached. */
#define CPU_CRITICAL_EXIT() \
    do { \
        sys_local_irq_enable(); \
    } while (0)

/* Park the CPU in a busy loop; never returns. Presumably the escape path
 * is a watchdog or external reset — confirm against the port's reset
 * strategy. */
__attribute__((always_inline)) static inline void cpu_reset(void)
{
    for (;;) {
        /* spin until an external/watchdog reset takes over */
    }
}

/* Perform a full system reset via the ROM/port routine CORE_SYSTEM_RESET,
 * then spin in case the reset does not take effect immediately.
 * NOTE(review): log_flush is declared here but never called — possibly it
 * was intended to flush pending log output before resetting; confirm and
 * either call it or drop the declaration. */
__attribute__((always_inline)) static inline void system_reset(void)
{
    extern void log_flush();
    extern void CORE_SYSTEM_RESET(void);

    CORE_SYSTEM_RESET();
    while (1) {
    }
}

extern void cpu_assert_debug();
extern const int config_asser;
/*
 * Runtime assertion macro.
 *
 * When config_asser is enabled, a failed condition prints the location and
 * the stringized expression (plus optional printf-style extras) and drops
 * into cpu_assert_debug(); otherwise a failure silently parks the CPU via
 * cpu_reset().
 *
 * Fixes: removed the stray trailing semicolon after "while (0)" — it
 * defeated the do-while(0) idiom and broke uses like
 * "if (x) ASSERT(a); else ...". Corrected the "ASSERT-FAILD" typo in the
 * failure message to "ASSERT-FAILED".
 */
#define ASSERT(a,...)   \
    do { \
        if (config_asser){\
            if (!(a)){ \
                printf("file:%s, line:%d", __FILE__, __LINE__); \
                printf("ASSERT-FAILED: "#a" "__VA_ARGS__); \
                cpu_assert_debug(); \
            } \
        } else {\
            if (!(a)){ \
                cpu_reset(); \
            }\
        }\
    } while (0)

#endif

/* Raw 32-bit hardware random value (low word of the JL_RAND R64 register);
 * same source as rand32() above. */
#define CPU_RAND()    (JL_RAND->R64L)

#endif
