/*
 *  Copyright (c) 2022 ZhuHai Jieli Technology Co.,Ltd.
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#ifndef SYS_SPINLOCK_H
#define SYS_SPINLOCK_H

#include "los_typedef.h"

/*
 * Spinlock object: a single byte-sized flag.
 * 0 = unlocked, non-zero = held (see spin_acquire/spin_release below).
 * "volatile" forces the busy-wait in the uniprocessor spin_acquire()
 * to re-read memory on every loop iteration.
 *
 * NOTE(review): the tag "__spinlock" begins with a double underscore,
 * which is reserved for the implementation (C11 6.4.2.1p7 / 7.1.3);
 * consider renaming if no external code names the tag directly.
 */
struct __spinlock {
    volatile u8 rwlock;  /* lock word: 0 = free, 1 = taken */
};

/* Public handle type; initialize with DEFINE_SPINLOCK or spin_lock_init(). */
typedef struct __spinlock spinlock_t;

/*
 * Preemption control used by spin_lock()/spin_unlock().
 *
 * SMP build (CPU_CORE_NUM > 1) uses the underscored helpers
 * __local_irq_disable()/__local_irq_enable(); the uniprocessor build
 * uses local_irq_disable()/local_irq_enable(). Masking interrupts on
 * the local CPU also prevents the scheduler from preempting us.
 *
 * NOTE(review): both helper pairs are defined elsewhere — confirm they
 * nest/pair correctly, since spin_lock()/spin_unlock() bracket the
 * critical section with exactly one disable/enable each.
 */
#if CPU_CORE_NUM > 1

#define preempt_disable() \
    __local_irq_disable()

#define preempt_enable() \
    __local_irq_enable()

#else

#define preempt_disable() \
    local_irq_disable()

#define preempt_enable() \
    local_irq_enable()

#endif

#if CPU_CORE_NUM > 1

/* SMP: delegate to the architecture's atomic lock primitives. */
#define spin_acquire(lock)     \
    do { \
        arch_spin_lock(lock); \
    } while (0)

#define spin_release(lock) \
    do { \
        arch_spin_unlock(lock); \
    } while (0)

#else

/*
 * Uniprocessor: no other CPU can race us, and callers reach this with
 * preemption/interrupts already disabled (see spin_lock()), so a plain
 * test-then-set flag is sufficient — the two steps cannot be interleaved.
 * NOTE(review): if the lock were somehow already held here, the busy-wait
 * would spin forever with IRQs masked — callers must never self-deadlock
 * by re-acquiring a held lock.
 */
#define spin_acquire(lock)     \
    do { \
        while ((lock)->rwlock); \
        (lock)->rwlock = 1; \
    } while (0)

#define spin_release(lock) \
    do { \
        (lock)->rwlock = 0; \
    } while (0)

#endif

/* Define and statically initialize spinlock `x` in the unlocked state. */
#define DEFINE_SPINLOCK(x) \
    spinlock_t x = { .rwlock = 0 }

/*
 * Put @lock into the unlocked state (rwlock == 0).
 * Equivalent to the static initialization done by DEFINE_SPINLOCK().
 */
__attribute__((always_inline))
static inline void spin_lock_init(spinlock_t *lock)
{
    *lock = (spinlock_t) { .rwlock = 0 };
}

/*
 * Acquire @lock.
 * Disables preemption (masks local interrupts) FIRST, then takes the
 * lock — this order is what makes the uniprocessor spin_acquire()'s
 * non-atomic test-then-set safe. Pair every call with spin_unlock().
 */
__attribute__((always_inline))
static inline void spin_lock(spinlock_t *lock)
{
    preempt_disable();
    spin_acquire(lock);
}

/*
 * Release @lock taken by spin_lock(), then re-enable preemption
 * (unmask local interrupts) — the exact reverse order of spin_lock().
 */
__attribute__((always_inline))
static inline void spin_unlock(spinlock_t *lock)
{
    spin_release(lock);
    preempt_enable();
}

#endif

