// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <ktest/ktest.h>
#include <turbo/base/compat.h>
#include <turbo/times/time.h>
#include <turbo/base/macros.h>
#include <turbo/strings/str_format.h>
#include <turbo/log/logging.h>
#include <kthread/internal/kthread.h>
#include <kthread/internal/kutex.h>
#include <kthread/internal/task_control.h>
#include <kthread/internal/mutex.h>
#include <gperftools/profiler.h>
#include <turbo/base/class_name.h>
#include <atomic>
#include <cinttypes>
#include <mutex>
#include <vector>

namespace {
// Exposes the raw kutex word backing a kthread mutex so tests can
// inspect its lock/contention state directly.
inline unsigned* get_kutex(kthread_mutex_t& m) {
    return m.kutex;
}

// Wall-clock origin (ms) used by locker() to print relative timestamps.
long start_time = turbo::Time::current_milliseconds();
// Number of times locker() has entered the critical section; incremented
// while the mutex is held, so no extra synchronization is needed.
int c = 0;
void* locker(void* arg) {
    kthread_mutex_t* m = (kthread_mutex_t*)arg;
    kthread_mutex_lock(m);
    printf("[%" PRIu64 "] I'm here, %d, %" PRId64 "ms\n", 
           pthread_numeric_id(), ++c, turbo::Time::current_milliseconds() - start_time);
    kthread_usleep(10000);
    kthread_mutex_unlock(m);
    return nullptr;
}

// Verifies the raw kutex word tracks lock state: 0 when free, 1 when
// held uncontended, and a larger value once a waiter is queued.
TEST(MutexTest, sanity) {
    kthread_mutex_t m;
    ASSERT_EQ(0, kthread_mutex_init(&m, nullptr));
    ASSERT_EQ(0u, *get_kutex(m));           // freshly initialized: unlocked
    ASSERT_EQ(0, kthread_mutex_lock(&m));
    ASSERT_EQ(1u, *get_kutex(m));           // locked, no waiters
    kthread_t th1;
    ASSERT_EQ(0, kthread_start_urgent(&th1, nullptr, locker, &m));
    usleep(5000); // wait for locker to run.
    // 257 == 0x101: presumably the locked bit plus a recorded waiter —
    // NOTE(review): confirm against the kutex bit layout in kutex.h.
    ASSERT_EQ(257u, *get_kutex(m)); // contention
    ASSERT_EQ(0, kthread_mutex_unlock(&m)); // wakes locker
    ASSERT_EQ(0, kthread_join(th1, nullptr));
    ASSERT_EQ(0u, *get_kutex(m));           // fully released again
    ASSERT_EQ(0, kthread_mutex_destroy(&m));
}

// A kthread mutex must also work when locked from plain pthreads.
TEST(MutexTest, used_in_pthread) {
    kthread_mutex_t m;
    ASSERT_EQ(0, kthread_mutex_init(&m, nullptr));
    pthread_t workers[8];
    for (auto& w : workers) {
        ASSERT_EQ(0, pthread_create(&w, nullptr, locker, &m));
    }
    for (auto& w : workers) {
        pthread_join(w, nullptr);
    }
    ASSERT_EQ(0u, *get_kutex(m)); // no lock bit, no stale waiters
    ASSERT_EQ(0, kthread_mutex_destroy(&m));
}

void* do_locks(void *arg) {
    struct timespec t = { -2, 0 };
    EXPECT_EQ(ETIMEDOUT, kthread_mutex_timedlock((kthread_mutex_t*)arg, &t));
    return nullptr;
}

// kthread_mutex_timedlock() (via do_locks) and kthread_cond_timedwait()
// must both return ETIMEDOUT when given an already-expired absolute
// deadline while the lock/condition cannot be acquired.
TEST(MutexTest, timedlock) {
    kthread_cond_t c;
    kthread_mutex_t m1;
    kthread_mutex_t m2;
    ASSERT_EQ(0, kthread_cond_init(&c, nullptr));
    ASSERT_EQ(0, kthread_mutex_init(&m1, nullptr));
    ASSERT_EQ(0, kthread_mutex_init(&m2, nullptr));

    // Deadline far in the past: any wait must time out immediately.
    struct timespec t = { -2, 0 };

    kthread_mutex_lock (&m1);
    kthread_mutex_lock (&m2);
    kthread_t pth;
    ASSERT_EQ(0, kthread_start_urgent(&pth, nullptr, do_locks, &m1));
    ASSERT_EQ(ETIMEDOUT, kthread_cond_timedwait(&c, &m2, &t));
    ASSERT_EQ(0, kthread_join(pth, nullptr));
    kthread_mutex_unlock(&m1);
    kthread_mutex_unlock(&m2);
    kthread_mutex_destroy(&m1);
    kthread_mutex_destroy(&m2);
    // FIX: the condition variable was initialized but never destroyed,
    // leaking its underlying state.
    kthread_cond_destroy(&c);
}

// Exercises kthread::Mutex through its C++ member functions and through
// std::unique_lock — both on the wrapper itself and on the raw
// kthread_mutex_t handle it exposes.
TEST(MutexTest, cpp_wrapper) {
    kthread::Mutex mutex;

    // Direct calls on the wrapper.
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();
    mutex.lock();
    mutex.unlock();

    // Scoped RAII ownership of the wrapper.
    {
        std::unique_lock lk(mutex);
    }
    {
        std::unique_lock<kthread::Mutex> owner1;
        std::unique_lock<kthread::Mutex> owner2(mutex);
        owner1.swap(owner2);
        owner1.unlock();
        owner1.lock();
    }
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();

    // Same drill against the underlying native handle.
    {
        std::unique_lock lk(*mutex.native_handler());
    }
    {
        std::unique_lock<kthread_mutex_t> owner1;
        std::unique_lock<kthread_mutex_t> owner2(*mutex.native_handler());
        owner1.swap(owner2);
        owner1.unlock();
        owner1.lock();
    }
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();
}

// Start/stop signals shared by the benchmark and stress tests below.
// FIX: these flags are written by the driving thread and polled
// concurrently by worker pthreads/kthreads; plain bools were a data
// race (UB). std::atomic<bool> is a drop-in replacement for all
// existing reads (`!g_stopped`) and writes (`g_stopped = true`).
std::atomic<bool> g_started(false);
std::atomic<bool> g_stopped(false);

template <typename Mutex>
struct TURBO_CACHELINE_ALIGNED PerfArgs {
    Mutex* mutex;
    int64_t counter;
    int64_t elapse_ns;
    bool ready;

    PerfArgs() : mutex(nullptr), counter(0), elapse_ns(0), ready(false) {}
};

template <typename Mutex>
void* add_with_mutex(void* void_arg) {
    PerfArgs<Mutex>* args = (PerfArgs<Mutex>*)void_arg;
    args->ready = true;
    turbo::TimeCost t;
    while (!g_stopped) {
        if (g_started) {
            break;
        }
        kthread_usleep(1000);
    }
    t.reset();
    while (!g_stopped) {
        std::unique_lock lk(*args->mutex);
        ++args->counter;
    }
    t.stop();
    args->elapse_ns = t.n_elapsed();
    return nullptr;
}

// Monotonic suffix so each PerfTest() run writes a distinct .prof file.
int g_prof_name_counter = 0;

// Runs |thread_num| workers (created/joined via the supplied functions)
// that hammer |mutex|, profiles the 500ms contention window with
// gperftools, then logs the total iteration count and average ns per
// lock/unlock cycle. |dummy| only selects the thread-id type.
template <typename Mutex, typename ThreadId,
          typename ThreadCreateFn, typename ThreadJoinFn>
void PerfTest(Mutex* mutex,
              ThreadId* /*dummy*/,
              int thread_num,
              const ThreadCreateFn& create_fn,
              const ThreadJoinFn& join_fn) {
    g_started = false;
    g_stopped = false;
    // FIX: `ThreadId threads[thread_num]` was a variable-length array,
    // a non-standard GCC extension in C++; use std::vector instead.
    std::vector<ThreadId> threads(thread_num);
    std::vector<PerfArgs<Mutex> > args(thread_num);
    for (int i = 0; i < thread_num; ++i) {
        args[i].mutex = mutex;
        create_fn(&threads[i], nullptr, add_with_mutex<Mutex>, &args[i]);
    }
    // Block until every worker has reached its spin-wait.
    while (true) {
        bool all_ready = true;
        for (int i = 0; i < thread_num; ++i) {
            if (!args[i].ready) {
                all_ready = false;
                break;
            }
        }
        if (all_ready) {
            break;
        }
        usleep(1000);
    }
    g_started = true;
    char prof_name[32];
    snprintf(prof_name, sizeof(prof_name), "mutex_perf_%d.prof", ++g_prof_name_counter);
    ProfilerStart(prof_name);
    usleep(500 * 1000);  // profile 500ms of contention
    ProfilerStop();
    g_stopped = true;
    int64_t wait_time = 0;
    int64_t count = 0;
    for (int i = 0; i < thread_num; ++i) {
        join_fn(threads[i], nullptr);
        wait_time += args[i].elapse_ns;
        count += args[i].counter;
    }
    KLOG(INFO) << turbo::class_name<Mutex>() << " in "
              << ((void*)create_fn == (void*)pthread_create ? "pthread" : "kthread")
              << " thread_num=" << thread_num
              << " count=" << count
              // FIX: avoid 0/0 if the workers never got a single lock in.
              << " average_time=" << (count ? wait_time / (double)count : 0.0);
}

// Compares raw lock throughput of std::mutex vs kthread::Mutex, each
// driven by both pthread and kthread workers.
TEST(MutexTest, performance) {
    const int kThreadNum = 12;
    std::mutex std_mutex;
    PerfTest(&std_mutex, (pthread_t*)nullptr, kThreadNum, pthread_create, pthread_join);
    PerfTest(&std_mutex, (kthread_t*)nullptr, kThreadNum, kthread_start_background, kthread_join);
    kthread::Mutex kth_mutex;
    PerfTest(&kth_mutex, (pthread_t*)nullptr, kThreadNum, pthread_create, pthread_join);
    PerfTest(&kth_mutex, (kthread_t*)nullptr, kThreadNum, kthread_start_background, kthread_join);
}

// Stress worker: repeatedly acquires the Mutex passed via |arg| and
// naps briefly while holding it, until g_stopped is raised.
template <typename Mutex>
void* loop_until_stopped(void* arg) {
    auto* mu = static_cast<Mutex*>(arg);
    while (!g_stopped) {
        std::unique_lock<Mutex> guard(*mu);
        kthread_usleep(20);
    }
    return nullptr;
}

// A kthread::Mutex must be safely sharable among plain pthreads,
// regular kthreads and KTHREAD_ATTR_PTHREAD kthreads all at once.
TEST(MutexTest, mix_thread_types) {
    g_stopped = false;
    const int kPthreadNum = 16;
    const int kKthreadNum = kPthreadNum * 2;
    kthread::Mutex m;
    pthread_t pthreads[kPthreadNum];
    kthread_t kthreads[kKthreadNum];
    // Reserve enough workers for the test. This is a must since we have
    // KTHREAD_ATTR_PTHREAD kthreads which may cause deadlocks (the
    // kthread_usleep below can't be scheduled and g_stopped is never
    // true, thus loop_until_stopped spins forever).
    kthread_setconcurrency(kKthreadNum);
    for (auto& pth : pthreads) {
        ASSERT_EQ(0, pthread_create(&pth, nullptr, loop_until_stopped<kthread::Mutex>, &m));
    }
    for (int i = 0; i < kKthreadNum; ++i) {
        // Alternate between worker-kthreads and pthread-backed kthreads.
        const kthread_attr_t* attr = (i % 2 != 0) ? nullptr : &KTHREAD_ATTR_PTHREAD;
        ASSERT_EQ(0, kthread_start_urgent(&kthreads[i], attr, loop_until_stopped<kthread::Mutex>, &m));
    }
    kthread_usleep(1000L * 1000);  // let everything contend for 1s
    g_stopped = true;
    for (auto& kth : kthreads) {
        kthread_join(kth, nullptr);
    }
    for (auto& pth : pthreads) {
        pthread_join(pth, nullptr);
    }
}

// Exercises kthread::FastPthreadMutex directly, via std::unique_lock,
// and under contention from plain pthreads.
TEST(MutexTest, fast_pthread_mutex) {
    kthread::FastPthreadMutex mutex;
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();
    mutex.lock();
    mutex.unlock();
    {
        std::unique_lock lk(mutex);
    }
    {
        std::unique_lock<kthread::FastPthreadMutex> lck1;
        std::unique_lock<kthread::FastPthreadMutex> lck2(mutex);
        lck1.swap(lck2);
        lck1.unlock();
        lck1.lock();
    }
    ASSERT_TRUE(mutex.try_lock());
    mutex.unlock();

    // FIX: reset the stop flag before spawning workers. The preceding
    // test (mix_thread_types) leaves g_stopped == true, so every
    // loop_until_stopped worker below used to exit immediately and the
    // contention phase never actually ran.
    g_stopped = false;
    const int N = 16;
    pthread_t pthreads[N];
    for (int i = 0; i < N; ++i) {
        ASSERT_EQ(0, pthread_create(&pthreads[i], nullptr,
            loop_until_stopped<kthread::FastPthreadMutex>, &mutex));
    }
    kthread_usleep(1000L * 1000);  // let the pthreads contend for 1s
    g_stopped = true;
    for (int i = 0; i < N; ++i) {
        pthread_join(pthreads[i], nullptr);
    }
}

} // namespace
