#pragma once

#include <linux/types.h>
#include <linux/spinlock_types.h>
#include <linux/lockdep.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <linux/cleanup.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#else

#endif

/*
 * Out-of-line implementations of the low-level lock operations (defined in
 * the accompanying .c file). Callers should use the raw_spin_*()/spin_*()
 * wrappers below rather than invoking these directly.
 */
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key, short inner);
void _raw_spin_lock(raw_spinlock_t *lock);
void _raw_spin_unlock(raw_spinlock_t *lock);
/* Returns the pre-acquire interrupt state, to be handed back on unlock. */
unsigned long _raw_spin_lock_irqsave(raw_spinlock_t *lock);
void _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags);
void _raw_spin_lock_irq(raw_spinlock_t *lock);
void _raw_spin_unlock_irq(raw_spinlock_t *lock);
/* Non-blocking acquire; non-zero on success (used as a truth value below). */
int _raw_spin_trylock(raw_spinlock_t *lock);

/* Release a raw spinlock taken with raw_spin_lock(). */
#define raw_spin_unlock(lock) _raw_spin_unlock(lock)

/*
 * Disable local interrupts, save the previous interrupt state in 'flags'
 * and acquire the lock. A macro (not a function) so that the caller's
 * plain 'unsigned long flags' lvalue can be assigned from the helper's
 * return value.
 */
#define raw_spin_lock_irqsave(lock, flags)    \
    do                                        \
    {                                         \
        flags = _raw_spin_lock_irqsave(lock); \
    } while (0)

/*
 * Release the lock and restore the interrupt state saved by the matching
 * raw_spin_lock_irqsave().
 */
#define raw_spin_unlock_irqrestore(lock, flags)   \
    do                                            \
    {                                             \
        _raw_spin_unlock_irqrestore(lock, flags); \
    } while (0)

/*
 * Initialize a raw spinlock. The static __key gives every init call site
 * its own lock class for lockdep tracking; LD_WAIT_SPIN marks this as a
 * spinning (non-sleeping) lock for wait-type checking.
 */
#define raw_spin_lock_init(lock)                                   \
    do                                                             \
    {                                                              \
        static struct lock_class_key __key;                        \
                                                                   \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    } while (0)

/* Spin until the raw lock is acquired. */
#define raw_spin_lock(lock) _raw_spin_lock(lock)

/*
 * _irq variants: unconditionally disable/enable local interrupts rather
 * than saving and restoring the previous state (compare *_irqsave above).
 */
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)

/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 *
 * The comma expression evaluates (and discards) 'subclass' before the
 * lock pointer, matching the upstream non-lockdep definition; the
 * previous body dropped 'subclass' entirely, defeating the stated intent.
 */
#define raw_spin_lock_nested(lock, subclass) \
    _raw_spin_lock(((void)(subclass), (lock)))

/*
 * Map a spinlock_t to its embedded raw_spinlock_t. Because it takes a
 * typed pointer, this also serves as a compile-time check that callers
 * really passed a spinlock_t.
 */
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
    return &lock->rlock;
}

/* Acquire @lock by taking the embedded raw spinlock. */
static inline void spin_lock(spinlock_t *lock)
{
    raw_spin_lock(&lock->rlock);
}

/* Release @lock by dropping the embedded raw spinlock. */
static inline void spin_unlock(spinlock_t *lock)
{
    raw_spin_unlock(&lock->rlock);
}

#define raw_spin_trylock(lock) _raw_spin_trylock(lock)

#define spin_lock_init(lock) __raw_spin_lock_init(&((lock)->rlock), #lock, NULL, 0)

/*
 * spinlock_t flavour of raw_spin_lock_irqsave(): disable local interrupts,
 * save their previous state in 'flags' and take the embedded raw lock.
 * Must be a macro so the inner macro can assign to the caller's 'flags'.
 */
#define spin_lock_irqsave(lock, flags)                  \
    do                                                  \
    {                                                   \
        raw_spin_lock_irqsave(&((lock)->rlock), flags); \
    } while (0)

/*
 * Release @lock and restore the interrupt state previously saved by the
 * matching spin_lock_irqsave().
 */
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
    raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

/*
 * Disable local interrupts, then try to take the lock. Evaluates to 1 on
 * success (interrupts remain disabled); on failure the saved interrupt
 * state is restored and the expression evaluates to 0. 'flags' is written,
 * never read, by this macro.
 */
#define raw_spin_trylock_irqsave(lock, flags) \
    ({                                        \
        local_irq_save(flags);                \
        raw_spin_trylock(lock) ? 1 : ({ local_irq_restore(flags); 0; });   \
    })

/*
 * spinlock_t flavour of raw_spin_trylock_irqsave(); spinlock_check() both
 * type-checks the argument and extracts the embedded raw lock.
 */
#define spin_trylock_irqsave(lock, flags)                      \
    ({                                                         \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
    })

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
/*
 * Generic fallback: expands to nothing. Per the comment above, most
 * architectures' lock-ACQUIRE already provides the required ordering;
 * an architecture with a weaker ACQUIRE overrides this in its
 * <asm/spinlock.h> before this default is seen.
 */
#define smp_mb__after_spinlock()
#endif
