/*
 * DIM-SUM操作系统 - 锁
 *
 * Copyright (C) 2022 国科础石(重庆)软件有限公司
 *
 * 作者: Dong Peng <w-pengdong@kernelsoft.com>
 *
 * License terms: GNU General Public License (GPL) version 3
 *
 */

#ifndef __ASM_SMP_LOCK_H
#define __ASM_SMP_LOCK_H

#include <linux/compiler.h>
#include <asm/fence.h>
#include <asm/barrier.h>

#ifdef CONFIG_TICKET_SPINLOCK
#define TICKET_SHIFT	16

/*
 * Ticket spinlock word (32 bits total):
 *   owner - ticket currently being served (low halfword of the word
 *           the asm below operates on; assumes little-endian, which
 *           is the standard RISC-V configuration).
 *   next  - next ticket to be handed out (high halfword).
 * The lock is free when owner == next.
 */
struct arch_smp_lock {
	u16 owner;	/* ticket being served */
	u16 next;	/* next ticket to hand out */
} __attribute__((aligned(4)));

/* owner == next == 0: unlocked. */
#define __ARCH_SMP_LOCK_UNLOCKED {0, 0}

/*
 * Return non-zero when the given lock snapshot is unlocked, i.e. the
 * serving ticket has caught up with the ticket dispenser.
 */
static inline int arch_smp_lock_value_unlocked(struct arch_smp_lock lock)
{
	return !(lock.owner ^ lock.next);
}

static inline int arch_smp_lock_is_locked(struct arch_smp_lock *lock)
{
	return !arch_smp_lock_value_unlocked(READ_ONCE(*lock));
}

/*
 * Release the ticket lock: advance 'owner' (low halfword) by one,
 * modulo 2^16, while preserving the 'next' field.
 *
 * Fixes over the previous version:
 *  - 'tmp' was an *input* operand yet written by sc.w (and read
 *    uninitialized); it is now a proper earlyclobber output.
 *  - the mask input register was shifted in place, so any sc.w retry
 *    used a corrupted mask (0xffff0000, then 0) and trashed the lock
 *    word; immediate shifts on scratch registers are used instead.
 *  - a release barrier is issued before the update so critical-section
 *    accesses cannot be reordered past the unlock.
 */
static inline void arch_smp_unlock(struct arch_smp_lock *lock)
{
	u32 tmp, newval, lockval;

	asm volatile (
		RISCV_RELEASE_BARRIER
		"1:  lr.w	%[lockval], %[lock] \n"
		/* newval = (owner + 1) & 0xffff */
		"    addi	%[newval], %[lockval], 1 \n"
		"    slli	%[newval], %[newval], 16 \n"
		"    srli	%[newval], %[newval], 16 \n"
		/* keep the untouched 'next' halfword */
		"    srli	%[tmp], %[lockval], 16 \n"
		"    slli	%[tmp], %[tmp], 16 \n"
		"    or  	%[newval], %[newval], %[tmp] \n"
		"    sc.w	%[tmp], %[newval], %[lock] \n"
		"    bnez	%[tmp], 1b \n"
		: [lock]"+A" (*lock), [lockval]"=&r" (lockval),
		  [newval]"=&r" (newval), [tmp]"=&r" (tmp)
		:
		: "memory"
	);
}

/*
 * Acquire the ticket lock: atomically take a ticket (next++), then
 * wait until 'owner' reaches our ticket number.
 *
 * Fixes over the previous version:
 *  - the spin loop compared the *current* 'next' field against the
 *    *stale* 'owner' from the original load, which is not the ticket
 *    protocol at all; it now compares the current 'owner' against the
 *    ticket we drew (the pre-increment 'next').
 *  - the uncontended fast path branched to label 3 *past*
 *    RISCV_ACQUIRE_BARRIER, acquiring the lock with no ordering; the
 *    barrier now sits after the label so both paths execute it.
 */
static inline void arch_smp_lock(struct arch_smp_lock *lock)
{
	u32 ticket, curr, tmp, res, lockval;

	asm volatile (
		/* draw a ticket: next++ (high halfword) */
		"1:	lr.w	%[lockval], %[lock]\n"
		"	add	%[tmp], %[lockval], %[shift]\n"
		"	sc.w	%[res], %[tmp], %[lock]\n"
		"	bnez	%[res], 1b\n"
		/* our ticket is the pre-increment 'next' field */
		"	srl	%[ticket], %[lockval], 16\n"
		"	and	%[ticket], %[ticket], %[mask]\n"
		/* fast path: owner already equals our ticket */
		"	and	%[curr], %[lockval], %[mask]\n"
		"	xor	%[res], %[curr], %[ticket]\n"
		"	beqz	%[res], 3f\n"
		/* slow path: spin until current owner == our ticket */
		"2:	lr.w	%[tmp], %[lock]\n"
		"	and	%[curr], %[tmp], %[mask]\n"
		"	xor	%[res], %[curr], %[ticket]\n"
		"	bnez	%[res], 2b\n"
		"3:\n"
		RISCV_ACQUIRE_BARRIER
		: [lock]"+A" (*lock), [lockval]"=&r" (lockval),
		  [tmp]"=&r" (tmp), [res]"=&r" (res),
		  [curr]"=&r" (curr), [ticket]"=&r" (ticket)
		: [shift]"r" (1 << TICKET_SHIFT), [mask]"r" (0xffff)
		: "memory"
	);
}

static inline int arch_smp_trylock(struct arch_smp_lock *lock)
{
	u32 owner, next, tmp, res, lockval;
	__asm__ __volatile__(
		"1:	lr.w	%[lockval], %[lock]\n"
		"	srl 	%[next], %[lockval], 16 \n"
		"   and 	%[next], %[next], %[mask] \n"
		"   and 	%[owner], %[lockval], %[mask] \n"
		"   xor 	%[res], %[owner], %[next] \n"
		"	bnez	%[res], 2f\n"
		"	add		%[lockval], %[lockval], %[shift]\n"
		"	sc.w 	%[res], %[lockval], %[lock]\n"
		"	bnez	%[res], 1b\n"
		"2: \n"
		: [lock]"+A" (*lock), [lockval]"=&r" (lockval), [tmp]"=&r" (tmp), 
		  [res]"=&r" (res), [owner]"=&r" (owner), [next]"=&r" (next) 
		: [shift]"r" (1 << TICKET_SHIFT), [mask]"r" (0xffff)
		: "memory"
	);

	return !res;
}

#else

/*
 * Simple test-and-set spinlock (non-ticket fallback):
 * lock == 0 means free, non-zero means held.  Unfair under
 * contention, unlike the ticket variant above.
 */
struct arch_smp_lock {
	unsigned int lock;	/* 0 = unlocked, non-zero = held */
} __attribute__((aligned(4)));

/* Initializer for an unlocked lock. */
#define __ARCH_SMP_LOCK_UNLOCKED {0}

/*
 * Return non-zero when the given lock snapshot is in the unlocked
 * (zero) state.
 */
static inline int arch_smp_lock_value_unlocked(struct arch_smp_lock lock)
{
	unsigned int raw = READ_ONCE(lock.lock);

	return raw == 0;
}

static inline int arch_smp_lock_is_locked(struct arch_smp_lock *lock)
{
	return !arch_smp_lock_value_unlocked(READ_ONCE(*lock));
}

/*
 * Release the lock.  The store-release orders all critical-section
 * accesses before the zero-store that frees the lock.
 */
static inline void arch_smp_unlock(struct arch_smp_lock *lock)
{
	__smp_store_release(&lock->lock, 0);
}

/*
 * Try to acquire the lock without spinning: atomically swap 1 into the
 * lock word.  'busy' receives the previous value, so busy == 0 means
 * we took the lock.  The acquire barrier orders the critical section
 * after a successful swap.  Returns non-zero on success, 0 if held.
 */
static inline int arch_smp_trylock(struct arch_smp_lock *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

/*
 * Acquire the lock, spinning until it is free.  Test-and-test-and-set:
 * spin on plain reads and attempt the atomic swap only when the lock
 * looks free, which keeps the cache line shared while waiting.
 */
static inline void arch_smp_lock(struct arch_smp_lock *lock)
{
	do {
		while (arch_smp_lock_is_locked(lock))
			;
	} while (!arch_smp_trylock(lock));
}
#endif

#endif /* __ASM_SMP_LOCK_H */
