#pragma once
#ifndef CLIB_SPINLOCK_H
#define CLIB_SPINLOCK_H

#include "clib_type.h"
#include <stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Simple test-and-set spinlock: lock == 0 means free, lock == 1 means held.
 * All runtime accesses go through GCC __atomic_* builtins (see the
 * lock/trylock/unlock functions below), which already prevent the compiler
 * from caching or reordering the accesses.
 * NOTE(review): `volatile` is therefore redundant for the atomic paths;
 * it only affects the plain store in clib_spinlock_init — confirm before
 * removing, since dropping it changes the struct's declared type.
 */
typedef struct {
	volatile i32_t lock;
} clib_spinlock_t;

// static inline void
// clib_spinlock_lock(clib_spinlock_t *sl)
// {
// 	int lock_val = 1;
// 	asm volatile (
// 			"1:\n"
// 			"xchg %[locked], %[lv]\n"
// 			"test %[lv], %[lv]\n"
// 			"jz 3f\n"
// 			"2:\n"
// 			"pause\n"
// 			"cmpl $0, %[locked]\n"
// 			"jnz 2b\n"
// 			"jmp 1b\n"
// 			"3:\n"
// 			: [locked] "=m" (sl->lock), [lv] "=q" (lock_val)
// 			: "[lv]" (lock_val)
// 			: "memory");
// }

// static inline void
// clib_spinlock_unlock (clib_spinlock_t *sl)
// {
// 	int unlock_val = 0;
// 	asm volatile (
// 			"xchg %[locked], %[ulv]\n"
// 			: [locked] "=m" (sl->lock), [ulv] "=q" (unlock_val)
// 			: "[ulv]" (unlock_val)
// 			: "memory");
// }


/*
 * Initialize a spinlock to the unlocked state (0).
 * Must be called before the lock is shared with other threads: this is a
 * plain (non-atomic) store, so it must not race with lock/unlock calls.
 */
static inline void
clib_spinlock_init(clib_spinlock_t* lk)
{
    lk->lock = 0;
}

/*
 * Acquire the spinlock, busy-waiting until it becomes available.
 *
 * Implements test-and-test-and-set: a CAS is attempted only when the lock
 * reads 0; while it is held the waiter spins on a relaxed atomic load.
 * This keeps the contended cache line in the shared state instead of
 * bouncing it between cores with repeated read-modify-write attempts,
 * which is what spinning directly on the CAS did.
 *
 * The successful CAS uses acquire semantics so nothing in the critical
 * section can be reordered before the acquisition; the failure side needs
 * no ordering (we learn nothing we rely on from a failed attempt).  The
 * previous version issued atomic_thread_fence(__ATOMIC_ACQUIRE) on failure,
 * mixing a C11 function with a GCC builtin constant — portable-looking but
 * GCC-specific, and the fence served no purpose there.
 */
static inline void
clib_spinlock_lock(clib_spinlock_t* lk)
{
    for (;;) {
        i32_t expected = 0;

        if (__atomic_compare_exchange_n(&lk->lock, &expected, 1, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
            return;
        }
        /* Lock is held: wait with cheap read-only spins until it looks free,
         * then loop back and retry the CAS. */
        while (__atomic_load_n(&lk->lock, __ATOMIC_RELAXED) != 0) {
            /* busy-wait */
        }
    }
}

/*
 * Attempt to acquire the spinlock without blocking.
 *
 * Performs a single strong compare-and-swap of 0 -> 1 on the lock word.
 * Returns nonzero (1) when the lock was free and is now held by the
 * caller, 0 when it was already held by someone else.
 */
static inline i32_t
clib_spinlock_trylock(clib_spinlock_t* lk)
{
    i32_t expected = 0;
    i32_t acquired;

    acquired = __atomic_compare_exchange_n(&lk->lock, &expected, 1, 0,
                                           __ATOMIC_SEQ_CST,
                                           __ATOMIC_SEQ_CST);
    return acquired;
}

/*
 * Release the spinlock.
 *
 * Publishes a zero to the lock word with release semantics, so every
 * write performed inside the critical section becomes visible to the
 * thread that acquires the lock next.  Only the current lock holder may
 * call this.
 */
static inline void
clib_spinlock_unlock(clib_spinlock_t* lk)
{
    i32_t unlocked = 0;

    __atomic_store(&lk->lock, &unlocked, __ATOMIC_RELEASE);
}

#ifdef __cplusplus
}
#endif

#endif