#pragma once

#include <asm/cmpxchg.h>

/*
 * Release fence: RISCV_RELEASE_BARRIER orders all prior memory accesses
 * before subsequent stores.
 *
 * No trailing semicolon in the expansion: callers already write
 * __atomic_release_fence(); and a stray ';' here would expand to two
 * statements, breaking constructs like "if (c) __atomic_release_fence();
 * else ...".
 */
#define __atomic_release_fence() \
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")

/*
 * Atomically load the counter: a plain once-read with no ordering
 * implied beyond the compiler barrier in READ_ONCE().
 */
static inline int arch_atomic_read(const atomic_t *v)
{
	int val = READ_ONCE(v->counter);

	return val;
}

/*
 * Atomically store @i into the counter: a plain once-write with no
 * ordering implied beyond the compiler barrier in WRITE_ONCE().
 */
static inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

/*
 * Atomically add @i to @v.  The AMO also hands back the value the
 * counter held before the addition (amoadd.w writes the previous
 * memory value to its destination register).  No acquire/release
 * ordering is implied — the AMO carries no .aq/.rl suffix.
 */
static inline int arch_atomic_add(int i, atomic_t *v)
{
	int prev;

	__asm__ __volatile__(
		"amoadd.w %1, %2, %0"
		: "+A"(v->counter), "=r"(prev)
		: "r"(i)
		: "memory");

	return prev;
}

/*
 * Atomically add @i to @v and return the *new* counter value, with
 * relaxed ordering (no acquire/release semantics).
 *
 * amoadd.w places the value the memory location held *before* the add
 * into the destination register, so the updated value is that fetch
 * result plus @i.  Returning the bare fetch result would silently give
 * fetch_add semantics instead of add_return — hence the "+ i" below.
 */
static inline int arch_atomic_add_return_relaxed(int i, atomic_t *v)
{
	register int ret;

	__asm__ __volatile__(
		" amoadd.w %1, %2, %0"
		: "+A"(v->counter), "=r"(ret)
		: "r"(i)
		: "memory");

	return ret + i;
}

/*
 * Fully ordered atomic fetch-and-add: add @i to @v and return the
 * value @v held before the addition.  The .aqrl suffix gives the AMO
 * both acquire and release semantics.
 */
static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	int old;

	__asm__ __volatile__(
		"	amoadd.w.aqrl  %1, %2, %0"
		: "+A"(v->counter), "=r"(old)
		: "r"(i)
		: "memory");

	return old;
}

/*
 * Fully ordered add-and-return-new-value, built on the fully ordered
 * fetch-add AMO: new value = old value + @i.
 */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	int old = arch_atomic_fetch_add(i, v);

	return old + i;
}

/*
 * Fully ordered subtract-and-return-new-value.
 *
 * Implemented as a fetch-add of the *negated* operand: the AMO adds
 * -@i (so the counter is actually decremented) and returns the old
 * value, making old - i the updated counter.  Passing +@i here would
 * increment the counter while claiming to subtract — the same
 * negation the arch_atomic_fetch_sub/arch_atomic_sub macros below use.
 */
static inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_fetch_add(-i, v) - i;
}

/*
 * Self-referential defines: presumably these advertise the natively
 * implemented operations to the generic atomic fallback machinery so
 * it does not generate its own versions — TODO confirm against the
 * including header.
 */
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return
#define arch_atomic_fetch_add arch_atomic_fetch_add
/* Subtract variants: fetch/add of the negated operand. */
#define arch_atomic_fetch_sub(i, v) arch_atomic_fetch_add(-(i), v)

#define arch_atomic_sub(i, v) arch_atomic_add(-(i), v)
