// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <atomic>
#include <cerrno>                                         // errno, EINVAL, EWOULDBLOCK, EINTR
#include <cstddef>                                        // offsetof
#include <ctime>                                          // struct timespec
#include <kthread/internal/kutex.h>                       // kutex_*
#include <kthread/internal/types.h>                       // kthread_cond_t

namespace kthread {
// Typed mirror of the public kthread_cond_t layout. The public struct is
// reinterpret_cast to this one so the implementation can use std::atomic
// operations on its fields.
struct CondInternal {
    // Mutex bound to this cond. Starts null; bound lazily by the first
    // wait and never rebound (waiting with a different mutex is EINVAL).
    std::atomic<kthread_mutex_t*> m;
    // Sequence word waiters block on via kutex_wait; incremented by
    // signal/broadcast to release them.
    std::atomic<int>* seq;
};

    // The reinterpret_cast in the functions below is only sound if the two
    // layouts agree exactly — lock that in at compile time.
    static_assert(sizeof(CondInternal) == sizeof(kthread_cond_t),
              "sizeof_innercond_must_equal_cond");
    static_assert(offsetof(CondInternal, m) == offsetof(kthread_cond_t, m),
              "offsetof_cond_mutex_must_equal");
    static_assert(offsetof(CondInternal, seq) ==
              offsetof(kthread_cond_t, seq),
              "offsetof_cond_seq_must_equal");
}

extern "C" {

extern int kthread_mutex_unlock(kthread_mutex_t*);
extern int kthread_mutex_lock_contended(kthread_mutex_t*);

int kthread_cond_init(kthread_cond_t* __restrict c,
                      const kthread_condattr_t*) {
    // Attributes are ignored. The cond starts unbound: no mutex is
    // associated with it until the first wait.
    c->m = nullptr;
    // Allocate the kutex-backed sequence word and start the count at zero.
    auto* const seq_word = kthread::kutex_create_checked<int>();
    *seq_word = 0;
    c->seq = seq_word;
    return 0;
}

int kthread_cond_destroy(kthread_cond_t* c) {
    // Free the sequence word and null the pointer so use-after-destroy is
    // easier to catch. Callers must ensure no thread is still waiting.
    auto* const seq_word = c->seq;
    c->seq = nullptr;
    kthread::kutex_destroy(seq_word);
    return 0;
}

int kthread_cond_signal(kthread_cond_t* c) {
    kthread::CondInternal* const internal =
        reinterpret_cast<kthread::CondInternal*>(c);
    // A woken waiter may destroy the cond as soon as the sequence word
    // changes, so copy every field we need out of `internal` first and do
    // not dereference `internal` after the increment below.
    std::atomic<int>* const seq_word = internal->seq;
    seq_word->fetch_add(1, std::memory_order_release);
    kthread::kutex_wake(seq_word);
    return 0;
}

int kthread_cond_broadcast(kthread_cond_t* c) {
    kthread::CondInternal* ic = reinterpret_cast<kthread::CondInternal*>(c);
    kthread_mutex_t* m = ic->m.load(std::memory_order_relaxed);
    // Save needed fields up front: once the sequence word is bumped, a woken
    // waiter may destroy the cond, so neither `ic` nor `m` may be
    // dereferenced after the fetch_add below (same rule as in
    // kthread_cond_signal).
    std::atomic<int>* const saved_seq = ic->seq;
    if (!m) {
        // No waiter has ever bound a mutex to this cond — nothing to wake.
        return 0;
    }
    void* const saved_kutex = m->kutex;
    // Bump the sequence through the saved pointer (the original read it via
    // ic->seq, leaving the saved copy unused and touching ic at the last
    // moment), then wake one waiter and requeue the rest onto the mutex's
    // kutex so they queue for the lock instead of stampeding.
    saved_seq->fetch_add(1, std::memory_order_release);
    kthread::kutex_requeue(saved_seq, saved_kutex);
    return 0;
}

int kthread_cond_wait(kthread_cond_t* __restrict c,
                      kthread_mutex_t* __restrict m) {
    kthread::CondInternal* const internal =
        reinterpret_cast<kthread::CondInternal*>(c);
    // Snapshot the sequence word BEFORE releasing the mutex: a signal that
    // fires after the unlock bumps the word, so kutex_wait below returns
    // immediately instead of missing the wakeup.
    const int seq_snapshot = internal->seq->load(std::memory_order_relaxed);
    if (internal->m.load(std::memory_order_relaxed) != m) {
        // First wait on this cond: bind `m` to it. Only one mutex may ever
        // be bound; waiting with a different one is a caller error.
        kthread_mutex_t* unbound = nullptr;
        if (!internal->m.compare_exchange_strong(
                unbound, m, std::memory_order_relaxed)) {
            return EINVAL;
        }
    }
    kthread_mutex_unlock(m);
    int wait_rc = 0;
    if (kthread::kutex_wait(internal->seq, seq_snapshot, nullptr) < 0) {
        const int saved_errno = errno;
        // EWOULDBLOCK: the word already changed (wakeup beat us to the
        // wait). EINTR: pthread's cond_*wait never reports interruption;
        // treating it as a spurious wakeup lets callers re-check their
        // predicates promptly, as in:
        //   mutex.lock();
        //   while (!stop && other-predicates) {
        //     cond_wait(&mutex);
        //   }
        //   mutex.unlock();
        // After an interruption the code above wakes soon and re-tests
        // `stop` and the other predicates.
        if (saved_errno != EWOULDBLOCK && saved_errno != EINTR) {
            wait_rc = saved_errno;
        }
    }
    // Always re-acquire the mutex before returning; a lock failure takes
    // precedence over any wait error.
    const int lock_rc = kthread_mutex_lock_contended(m);
    return lock_rc ? lock_rc : wait_rc;
}

int kthread_cond_timedwait(kthread_cond_t* __restrict c,
                           kthread_mutex_t* __restrict m,
                           const struct timespec* __restrict abstime) {
    kthread::CondInternal* const internal =
        reinterpret_cast<kthread::CondInternal*>(c);
    // Read the sequence word before releasing the mutex so a concurrent
    // signal cannot be missed: it would change the word and make the wait
    // below return at once.
    const int seq_snapshot = internal->seq->load(std::memory_order_relaxed);
    if (internal->m.load(std::memory_order_relaxed) != m) {
        // Lazily bind `m` to this cond; a second, different mutex is
        // rejected as a caller error.
        kthread_mutex_t* unbound = nullptr;
        if (!internal->m.compare_exchange_strong(
                unbound, m, std::memory_order_relaxed)) {
            return EINVAL;
        }
    }
    kthread_mutex_unlock(m);
    int wait_rc = 0;
    if (kthread::kutex_wait(internal->seq, seq_snapshot, abstime) < 0) {
        const int saved_errno = errno;
        // EWOULDBLOCK (already signaled) and EINTR (treated as a spurious
        // wakeup — see kthread_cond_wait) are swallowed; anything else,
        // including ETIMEDOUT, is reported to the caller.
        if (saved_errno != EWOULDBLOCK && saved_errno != EINTR) {
            wait_rc = saved_errno;
        }
    }
    // Always re-take the mutex; its failure code wins over the wait's.
    const int lock_rc = kthread_mutex_lock_contended(m);
    return lock_rc ? lock_rc : wait_rc;
}

}  // extern "C"
