#include "znx_rwlock.h"

/*
 * Acquire the write (exclusive) lock.
 *
 * Two phases.  Phase 1: win the unique-writer flag with a CAS on
 * lock->write.  Phase 2: publish intent by adding the ZNX_RWLOCK_WLOCK
 * bias to the reader counter (presumably a large negative value, so
 * znx_rwlock_rlock's `counter >= 0` gate keeps new readers out -- see
 * that function), then spin until every in-flight reader has drained
 * and the counter reads exactly ZNX_RWLOCK_WLOCK.  Each phase spins
 * ZNX_RWLOCK_SPIN times before it starts yielding between attempts.
 */
void
znx_rwlock_wlock(znx_rwlock_t *lock)
{
    int64_t acquire_spins = 0;

    /* Phase 1: become the one and only writer. */
    while (!znx_atomic_int64_cas(&lock->write, 0, 1)) {
        acquire_spins++;
        if (acquire_spins > ZNX_RWLOCK_SPIN) {
            znx_sched_yield();
        }
    }

    /* Phase 2: bias the counter negative, then wait for readers to leave. */
    znx_atomic_int64_add_fetch(&lock->counter, ZNX_RWLOCK_WLOCK);

    int64_t drain_spins = 0;
    while (znx_atomic_int64_load(&lock->counter) != ZNX_RWLOCK_WLOCK) {
        drain_spins++;
        if (drain_spins > ZNX_RWLOCK_SPIN) {
            znx_sched_yield();
        }
    }
}


/*
 * Acquire a read (shared) lock.
 *
 * Readers may enter only while the counter is non-negative; a pending
 * or active writer drives it negative with the ZNX_RWLOCK_WLOCK bias
 * (see znx_rwlock_wlock), which keeps new readers out.  Entry is a
 * load + CAS increment so concurrent readers never lose an update.
 * After ZNX_RWLOCK_SPIN failed attempts the loop yields between
 * retries.
 */
void
znx_rwlock_rlock(znx_rwlock_t *lock)
{
    int64_t spins = 0;

    for ( ;; ) {
        int64_t seen = znx_atomic_int64_load(&lock->counter);

        /* seen < 0 means a writer holds or is draining the lock. */
        if (seen >= 0 &&
            znx_atomic_int64_cas(&lock->counter, seen, seen + 1)) {
            return;
        }

        spins++;
        if (spins > ZNX_RWLOCK_SPIN) {
            znx_sched_yield();
        }
    }
}


/*
 * Release a read (shared) lock by decrementing the reader counter.
 *
 * Contract: the caller must hold a read lock taken via
 * znx_rwlock_rlock(), so at least one reader is accounted for in the
 * counter.  Two states prove that contract was violated:
 *   - counter == 0:                nobody holds the lock at all;
 *   - counter == ZNX_RWLOCK_WLOCK: a writer holds it with zero readers.
 * Decrementing from either state would wedge the lock permanently
 * (readers would see a bogus negative counter, and a draining writer
 * would never observe exactly ZNX_RWLOCK_WLOCK), so fail fast instead.
 * Note that a plain `counter < 0` check would be wrong: while a writer
 * is draining, counter == ZNX_RWLOCK_WLOCK + readers is negative and
 * those readers must still be able to unlock.
 *
 * The CAS retry loop needs no yield: a failed CAS means another thread
 * just changed the counter, and the next attempt sees the fresh value.
 */
void
znx_rwlock_runlock(znx_rwlock_t *lock)
{
    int64_t counter = 0;
    for ( ;; ) {
        counter = znx_atomic_int64_load(&lock->counter);

        // No read lock outstanding in either state -- unpaired unlock.
        if (counter == 0 || counter == ZNX_RWLOCK_WLOCK) {
            ZNX_ASSERT_FATAL("unexpected state");
        }

        if (znx_atomic_int64_cas(&lock->counter, counter, counter - 1)) {
            return;
        }

        // no need schedule
    }
}
