#ifndef __ARCH_SPINLOCK_H__
#define __ARCH_SPINLOCK_H__

#include "compiler_gcc.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef union {
    /**
     * Ticket-lock word, readable either as one 32-bit word (for a
     * single-load snapshot via ACCESS_ONCE) or as two 16-bit halves:
     *
     * bits  0..15 : serving_now -- ticket currently being served (queue head)
     * bits 16..31 : ticket      -- next ticket to hand out (queue tail)
     *
     * The lock is free exactly when serving_now == ticket.
     *
     * NOTE(review): the bit assignment above matches the struct layout
     * only on little-endian targets -- confirm for big-endian builds.
     */
    unsigned int lock;
    struct {
        unsigned short serving_now;
        unsigned short ticket;
    } h;
} arch_spinlock_t;

/* Static initializer: serving_now == ticket == 0, i.e. unlocked. */
#define __ARCH_SPIN_LOCK_UNLOCKED   { .lock = 0 }

/*
 * Read-write lock word.  0 means unlocked; see the rwlock operations
 * below for how readers and the writer manipulate it.
 */
typedef struct {
    volatile unsigned int lock;
} arch_rwlock_t;

/* Static initializer: unlocked. */
#define __ARCH_RW_LOCK_UNLOCKED     { 0 }


/**
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are at the top of this file)
 */


/**
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
    /*
     * Snapshot the whole lock word with a single load, then compare
     * the two 16-bit halves: the lock is held while ticket (high half)
     * differs from serving_now (low half).
     */
    unsigned int snap = ACCESS_ONCE(lock->lock);
    unsigned int delta = snap ^ (snap >> 16);

    return delta & 0xffff;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
    /* Unlocked exactly when no ticket is outstanding ahead of serving_now. */
    return !(lock.h.ticket ^ lock.h.serving_now);
}

/* IRQ-flags variant: this implementation ignores the saved flags. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/* Busy-wait (politely, via cpu_relax()) until the lock is seen unlocked. */
#define arch_spin_unlock_wait(x) \
    while (arch_spin_is_locked(x)) { cpu_relax(); }

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
    /*
     * Contended when more than one ticket separates the tail (ticket)
     * from the head (serving_now): the holder plus at least one waiter.
     */
    unsigned int snap = ACCESS_ONCE(lock->lock);
    unsigned int queued = ((snap >> 16) - snap) & 0xffff;

    return queued > 1;
}
#define arch_spin_is_contended  arch_spin_is_contended

/*
 * Acquire the spinlock.  Body intentionally empty in this port.
 * NOTE(review): presumably this build is uniprocessor-only (or mutual
 * exclusion is guaranteed elsewhere), making acquisition a no-op --
 * confirm against the platform configuration before relying on it.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{

}

/*
 * Release the spinlock.  No-op stub, matching the empty arch_spin_lock()
 * in this port.  NOTE(review): confirm this is a deliberate uniprocessor
 * stub and not a missing implementation.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{

}

/*
 * Try to acquire the spinlock without blocking.
 *
 * Returns non-zero on success, 0 on failure.
 *
 * Fix: the original returned an uninitialized local (`tmp`), which is
 * undefined behavior.  In this port arch_spin_lock() is a no-op stub,
 * so the lock is always immediately available -- report success.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
    (void)lock;     /* unused in the stub implementation */
    return 1;
}

/**
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 *
 * Fix: @rw->lock is unsigned, so the original `(rw)->lock >= 0` was
 * always true.  The test is clearly meant as a sign check (writer held
 * when the top bit is set, per the usual rwlock convention -- TODO
 * confirm once the write-lock path is implemented), so cast to int
 * before comparing.
 */
#define arch_read_can_lock(rw)  ((int)(rw)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 *
 * A writer can take the lock only when no reader or writer holds it,
 * i.e. the whole lock word is zero.
 */
#define arch_write_can_lock(rw) ((rw)->lock == 0)

/*
 * Acquire @rw for reading.  No-op stub in this port.
 * NOTE(review): presumably a uniprocessor build -- confirm, see the
 * spinlock stubs above.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{

}

/* Release a read hold on @rw.  No-op stub, matching arch_read_lock(). */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{

}

/*
 * Acquire @rw for writing (exclusive).  No-op stub in this port.
 * NOTE(review): confirm this is intentional and not a missing
 * implementation -- arch_read_can_lock() above assumes writers mark
 * the lock word.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{

}

/* Release the write hold on @rw.  No-op stub, matching arch_write_lock(). */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{

}

/*
 * Try to acquire @rw for reading without blocking.
 *
 * Returns non-zero on success, 0 on failure.
 *
 * Fix: the original returned an uninitialized local (`ret`), which is
 * undefined behavior.  arch_read_lock() is a no-op stub in this port,
 * so the read lock is always available -- report success.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
    (void)rw;       /* unused in the stub implementation */
    return 1;
}

/*
 * Try to acquire @rw for writing without blocking.
 *
 * Returns non-zero on success, 0 on failure.
 *
 * Fix: the original returned an uninitialized local (`ret`), which is
 * undefined behavior.  arch_write_lock() is a no-op stub in this port,
 * so the write lock is always available -- report success.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
    (void)rw;       /* unused in the stub implementation */
    return 1;
}

#ifdef __cplusplus
}
#endif

#endif

