#pragma once

#include <linux/lockdep.h>
#include <linux/seqlock_types.h>

#include <asm/barrier.h>

/*
 * __seqcount_init() - runtime initializer for seqcount_t
 * @s:    Pointer to the seqcount_t instance
 * @name: lockdep class name; unused here (lockdep annotation not wired up)
 * @key:  lockdep class key; unused here (lockdep annotation not wired up)
 *
 * Resets the counter to 0, i.e. the even "no writer active" state.
 */
static inline void __seqcount_init(seqcount_t *s, const char *name,
                                   struct lock_class_key *key)
{
    /* Silence -Wunused-parameter: lockdep hooks are not implemented here. */
    (void)name;
    (void)key;

    s->sequence = 0;
}

/* seqcount_init() - initialize a seqcount_t with no lockdep class. */
#define seqcount_init(s) __seqcount_init(s, NULL, NULL)

/*
 * __seqprop_sequence() - read the counter of a plain seqcount_t
 * @s: Pointer to the seqcount_t
 *
 * Uses an acquire load so that reads inside the critical section cannot
 * be reordered before the counter read.
 */
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
    unsigned seq = smp_load_acquire(&s->sequence);

    return seq;
}

/*
 * __seqprop_raw_spinlock_sequence() - read the counter of a
 * seqcount_raw_spinlock_t
 * @s: Pointer to the seqcount_raw_spinlock_t
 *
 * Acquire-loads the embedded seqcount's counter.
 */
static inline unsigned __seqprop_raw_spinlock_sequence(const seqcount_raw_spinlock_t *s)
{
    return smp_load_acquire(&s->seqcount.sequence);
}

/*
 * __seqprop_spinlock_sequence() - read the counter of a seqcount_spinlock_t
 * @s: Pointer to the seqcount_spinlock_t
 *
 * Acquire-loads the embedded seqcount's counter.
 */
static inline unsigned __seqprop_spinlock_sequence(const seqcount_spinlock_t *s)
{
    return smp_load_acquire(&s->seqcount.sequence);
}

/*
 * __seqprop_case() - one _Generic association for a lock-backed variant:
 * maps type seqcount_<lockname>_t to helper __seqprop_<lockname>_<prop>.
 */
#define __seqprop_case(s, lockname, prop) \
    seqcount_##lockname##_t : __seqprop_##lockname##_##prop

/*
 * __seqprop() - select the right property accessor for the static type
 * of *(s): plain seqcount_t uses __seqprop_<prop>; the raw_spinlock and
 * spinlock variants dispatch to their type-specific helpers.
 */
#define __seqprop(s, prop) _Generic(*(s),    \
    seqcount_t: __seqprop_##prop,            \
    __seqprop_case((s), raw_spinlock, prop), \
    __seqprop_case((s), spinlock, prop))

/* seqprop_sequence() - load the current sequence count of @s. */
#define seqprop_sequence(s) __seqprop(s, sequence)(s)

/*
 * seqprop_const_ptr() - get a const seqcount_t pointer for @s.
 * NOTE(review): no __seqprop_const_ptr / __seqprop_*_const_ptr helpers
 * are visible in this file — confirm they are defined elsewhere before
 * relying on this macro.
 */
#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)

/**
 * __read_seqcount_begin() - begin a seqcount_t read section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Spins (with cpu_relax()) while the counter is odd, i.e. while a writer
 * is active, and returns only once a stable even count is observed.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define __read_seqcount_begin(s)             \
    ({                                       \
        unsigned __rsb_seq;                  \
                                             \
        for (;;) {                           \
            __rsb_seq = seqprop_sequence(s); \
            if (!(__rsb_seq & 1))            \
                break;                       \
            cpu_relax();                     \
        }                                    \
                                             \
        __rsb_seq;                           \
    })

/*
 * do_read_seqcount_retry() - decide whether a read section must be retried
 * @s:     Pointer to the underlying seqcount_t
 * @start: Count returned when the read section was opened
 *
 * The smp_rmb() orders the critical-section loads before the re-read of
 * the counter; a changed counter means a writer raced with the reader.
 *
 * Return: non-zero if the section must be retried, 0 otherwise
 */
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
    unsigned current_seq;

    smp_rmb();
    current_seq = READ_ONCE(s->sequence);

    return current_seq != start;
}

/**
 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Plain alias for __read_seqcount_begin(); in this build there is no
 * lockdep instrumentation to skip, so the two are identical.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)

/**
 * read_seqcount_retry() - end a seqcount_t read critical section
 * @s: Pointer to a seqcount_LOCKNAME_t variant (any type embedding a
 *     ->seqcount member)
 * @start: count, from read_seqcount_begin()
 *
 * read_seqcount_retry closes the read critical section of given
 * seqcount_t.  If the critical section was invalid, it must be ignored
 * (and typically retried).
 *
 * NOTE(review): the expansion dereferences (s)->seqcount, so a plain
 * seqcount_t pointer will not compile here — confirm no callers pass
 * a bare seqcount_t (the upstream variant dispatches via seqprop).
 *
 * Return: true if a read section retry is required, else false
 */
#define read_seqcount_retry(s, start) \
    do_read_seqcount_retry(&((s)->seqcount), start)

/*
 * DEFINE_SEQLOCK() - define and zero-initialize a seqlock_t
 * @sl: Name of the seqlock_t variable
 *
 * NOTE(review): relies on all-zero being a valid unlocked state for both
 * the embedded seqcount and the spinlock — confirm against the type
 * definitions in seqlock_types.h.
 */
#define DEFINE_SEQLOCK(sl) \
    seqlock_t sl = {}

/*
 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
 * @s:		Pointer to the seqcount_LOCKNAME_t instance
 * @lock:	Pointer to the associated lock
 *
 * Evaluates @s exactly once through a local pointer, zeroes the embedded
 * counter, and records the lock association.
 */
#define seqcount_LOCKNAME_init(s, _lock, lockname)    \
    do {                                              \
        seqcount_##lockname##_t *__s_ptr = (s);       \
                                                      \
        seqcount_init(&__s_ptr->seqcount);            \
        __s_ptr->lock = (_lock);                      \
    } while (0)

/* Type-specific convenience wrappers around seqcount_LOCKNAME_init(). */
#define seqcount_raw_spinlock_init(s, lock) \
    seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock) \
    seqcount_LOCKNAME_init(s, lock, spinlock)

/**
 * raw_read_seqcount() - read the raw seqcount_t counter value
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount_t, without any lockdep checking, and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_read_seqcount(s) seqprop_sequence(s)

/**
 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
 *                        lockdep and w/o counter stabilization
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * raw_seqcount_begin opens a read critical section of the given
 * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
 * for the count to stabilize. If a writer is active when it begins, it
 * will fail the read_seqcount_retry() at the end of the read critical
 * section instead of stabilizing at the beginning of it.
 *
 * Use this only in special kernel hot paths where the read section is
 * small and has a high probability of success through other external
 * means. It will save a single branching instruction.
 *
 * Clearing the LSB (& ~1) turns an odd (writer-active) count into the
 * preceding even value, which guarantees the final retry check fails.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define raw_seqcount_begin(s) (raw_read_seqcount(s) & ~1)

/**
 * read_seqlock_excl() - begin a seqlock_t locking reader section
 * @sl:	Pointer to seqlock_t
 *
 * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
 * locking reader exclusively locks out *both* other writers *and* other
 * locking readers, but it does not update the embedded sequence number.
 *
 * Locking readers act like a normal spin_lock()/spin_unlock().
 *
 * Context: if the seqlock_t write section, *or other read sections*, can
 * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
 * variant of this function instead.
 *
 * The opened read section must be closed with read_sequnlock_excl().
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
    /* Only the lock is taken; sl->seqcount is deliberately untouched. */
    spin_lock(&sl->lock);
}

/**
 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
 * @sl: Pointer to seqlock_t
 *
 * Releases the lock taken by read_seqlock_excl(); the sequence counter
 * is not modified.
 */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
    /* Indentation normalized to the file's 4-space convention. */
    spin_unlock(&sl->lock);
}

/**
 * read_seqcount_begin() - begin a seqcount_t read critical section
 * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
 *
 * Straight alias for raw_read_seqcount_begin(); no additional checking
 * is performed in this build.
 *
 * Return: count to be passed to read_seqcount_retry()
 */
#define read_seqcount_begin(s) raw_read_seqcount_begin(s)

/**
 * read_seqretry() - end a seqlock_t read side section
 * @sl: Pointer to seqlock_t
 * @start: count, from read_seqbegin()
 *
 * read_seqretry closes the read side critical section of given seqlock_t.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 *
 * Return: true if a read section retry is required, else false
 */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
    /*
     * Delegates to read_seqcount_retry(); this expands through
     * (&sl->seqcount)->seqcount, so sl->seqcount must be a
     * seqcount_LOCKNAME_t — presumably seqcount_spinlock_t per
     * seqlock_types.h; confirm there.
     */
    return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * do_raw_write_seqcount_barrier() - bump the counter around a write barrier
 * @s: Pointer to the underlying seqcount_t
 *
 * First increment makes the count odd so concurrent readers will retry;
 * smp_wmb() orders the writer's stores; the second increment restores an
 * even count.  Net effect: the count advances by 2.
 *
 * NOTE(review): the increments are plain, non-atomic read-modify-writes —
 * this assumes writers are serialized externally; confirm callers hold
 * the associated lock.
 */
static inline void do_raw_write_seqcount_barrier(seqcount_t *s)
{
    s->sequence++;
    smp_wmb();
    s->sequence++;
}

/**
 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
 * @s: Pointer to a seqcount_LOCKNAME_t variant (a type with an embedded
 *     ->seqcount member)
 *
 * This can be used to provide an ordering guarantee instead of the usual
 * consistency guarantee. It is one wmb cheaper, because it can collapse
 * the two back-to-back wmb()s.
 *
 * Note that writes surrounding the barrier should be declared atomic (e.g.
 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
 * atomically, avoiding compiler optimizations; b) to document which writes are
 * meant to propagate to the reader critical section. This is necessary because
 * neither writes before nor after the barrier are enclosed in a seq-writer
 * critical section that would ensure readers are aware of ongoing writes::
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			int s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *		WRITE_ONCE(Y, true);
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		WRITE_ONCE(X, false);
 *      }
 *
 * NOTE(review): the example above uses a plain seqcount_t, but the macro
 * below dereferences (s)->seqcount, which only a seqcount_LOCKNAME_t
 * variant (or seqlock_t-style wrapper) provides — the example would not
 * compile as written against this macro; confirm the intended argument
 * types.
 */
#define raw_write_seqcount_barrier(s)					\
    do_raw_write_seqcount_barrier(&((s)->seqcount))
