// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <turbo/log/logging.h>
#include <kthread/internal/task_group.h>                // TaskGroup
#include <kthread/internal/task_control.h>              // TaskControl
#include <kthread/internal/timer_thread.h>
#include <kthread/internal/list_of_abafree_id.h>
#include <kthread/internal/kthread.h>
#include <kthread/internal/config.h>

namespace kthread {

    // Forward declarations of the flag validators defined later in this file.
    // The (const char*, int32_t) pair follows the legacy gflags validator
    // signature; the (string_view, string*) pair follows turbo's on_validate.

    static bool validate_kthread_concurrency(const char *, int32_t val);

    static bool validate_kthread_min_concurrency(const char *, int32_t val);

    static bool validate_kthread_current_tag(std::string_view vstr, std::string *err) noexcept;

    static bool validate_kthread_concurrency_by_tag(std::string_view vstr, std::string *err) noexcept;

}  // namespace kthread

// Total number of pthread workers. The validator may apply the new value at
// runtime through kthread_setconcurrency() when the scheduler already exists.
TURBO_FLAG(int32_t, kthread_concurrency, 8 + KTHREAD_EPOLL_THREAD_NUM + KTHREAD_TASK_EXECUTOR_THREAD_NUM,
           "Number of pthread workers").on_validate([](std::string_view value, std::string *error) noexcept -> bool {
            int32_t num;
            auto r = turbo::parse_flag(value, &num, error);
            if (!r) {
                return r;
            }
            r = kthread::validate_kthread_concurrency(nullptr, num);
            return r;
        })
        .on_update([]() noexcept {
            // Fix: add the missing space between "to" and the value, matching
            // the kthread_min_concurrency log message style.
            VKLOG(300) << "set kthread_concurrency to " << turbo::get_flag(FLAGS_kthread_concurrency);
        });

// Initial number of workers when lazy worker creation is enabled; validated
// and logged through the hooks below.
TURBO_FLAG(int32_t, kthread_min_concurrency, 0,
           "Initial number of pthread workers which will be added on-demand."
           " The laziness is disabled when this value is non-positive,"
           " and workers will be created eagerly according to -kthread_concurrency and kthread_setconcurrency(). ")
        .on_validate([](std::string_view value, std::string *error) noexcept -> bool {
            int32_t parsed;
            if (!turbo::parse_flag(value, &parsed, error)) {
                return false;
            }
            const bool ok = kthread::validate_kthread_min_concurrency(nullptr, parsed);
            VKLOG(300) << "set kthread_min_concurrency on_validate num: " << parsed << " result: " << ok;
            return ok;
        })
        .on_update([]() noexcept {
            const auto cur = turbo::get_flag(FLAGS_kthread_min_concurrency);
            VKLOG(300) << "set kthread_min_concurrency to " << cur;
        });

// Selects which tag a subsequent kthread_concurrency_by_tag update applies to.
// The validator also refreshes FLAGS_kthread_concurrency_by_tag to the tag's
// current concurrency (see validate_kthread_current_tag below).
TURBO_FLAG(int32_t, kthread_current_tag, KTHREAD_TAG_DEFAULT, "Set kthread concurrency for this tag").on_validate(
        kthread::validate_kthread_current_tag);

// Worker count for the tag currently selected by FLAGS_kthread_current_tag;
// setting it goes through kthread_setconcurrency_by_tag() in the validator.
TURBO_FLAG(int32_t, kthread_concurrency_by_tag, 0,
           "Number of pthread workers of FLAGS_kthread_current_tag").on_validate(
        kthread::validate_kthread_concurrency_by_tag);

namespace kthread {


    // True until the corresponding concurrency setter runs for the first
    // time; lets the first explicit call override the flag default.
    static bool never_set_kthread_concurrency = true;
    static bool never_set_kthread_concurrency_by_tag = true;

    // Guards creation of g_task_control and concurrency adjustments.
    std::mutex g_task_control_mutex;
    // Referenced in rpc, needs to be extern.
    // Notice that we can't declare the variable as atomic<TaskControl*> which
    // are not constructed before main().
    TaskControl *g_task_control = nullptr;

    // Worker-thread-local task group and hooks, defined elsewhere in kthread.
    extern TURBO_THREAD_LOCAL TaskGroup *tls_task_group;

    extern void (*g_worker_startfn)();

    extern void (*g_tagged_worker_startfn)(kthread_tag_t);

    extern void *(*g_create_span_func)();

    // Returns the singleton TaskControl, or nullptr if none was created yet.
    inline TaskControl *get_task_control() {
        return g_task_control;
    }

    bool validate_kthread_concurrency(const char *, int32_t val) {
        // kthread_setconcurrency sets the flag on success path which should
        // not be strictly in a validator. But it's OK for a int flag.
        if (!kthread::get_task_control()) {
            return true;
        }
        return kthread_setconcurrency(val) == 0;
    }

    // g_task_control is read through an atomic alias below; the sizes must
    // match for that aliasing to be representable.
    static_assert(sizeof(TaskControl *) == sizeof(std::atomic<TaskControl *>), "atomic size match");

    // Returns the singleton TaskControl, creating and initializing it on
    // first use (double-checked locking over g_task_control_mutex).
    // Returns nullptr if allocation or init() fails.
    inline TaskControl *get_or_new_task_control() {
        // Fast path: lock-free read through an atomic view of the plain global.
        std::atomic<TaskControl *> *p = (std::atomic<TaskControl *> *) &g_task_control;
        TaskControl *c = p->load(std::memory_order_consume);
        if (c != nullptr) {
            return c;
        }
        // Slow path: serialize creation, then re-check under the lock.
        std::unique_lock lk(g_task_control_mutex);
        c = p->load(std::memory_order_consume);
        if (c != nullptr) {
            return c;
        }
        c = new(std::nothrow) TaskControl;
        if (nullptr == c) {
            return nullptr;
        }
        // When laziness is enabled (min_concurrency > 0), start with the
        // smaller worker count; more workers are added on demand.
        int concurrency = turbo::get_flag(FLAGS_kthread_min_concurrency) > 0 ?
                          turbo::get_flag(FLAGS_kthread_min_concurrency) :
                          turbo::get_flag(FLAGS_kthread_concurrency);
        if (c->init(concurrency) != 0) {
            KLOG(ERROR) << "Fail to init g_task_control";
            delete c;
            return nullptr;
        }
        // Publish only after full initialization.
        p->store(c, std::memory_order_release);
        return c;
    }

    // Adds `num` workers, distributed round-robin across all tags.
    // Returns the number of workers actually added.
    // Precondition: get_task_control() is non-null (all callers in this file
    // check for, or have just created, the TaskControl).
    static int add_workers_for_each_tag(int num) {
        int added = 0;
        auto c = get_task_control();
        // Hoist the loop-invariant flag read out of the loop; previously the
        // flag was re-fetched on every iteration.
        const int ntags = turbo::get_flag(FLAGS_task_group_ntags);
        for (auto i = 0; i < num; ++i) {
            added += c->add_workers(1, i % ntags);
        }
        return added;
    }

    static bool validate_kthread_min_concurrency(const char *, int32_t val) {
        if (val <= 0) {
            return true;
        }
        if (val < KTHREAD_MIN_CONCURRENCY || val > turbo::get_flag(FLAGS_kthread_concurrency)) {
            return false;
        }
        TaskControl *c = get_task_control();
        if (!c) {
            return true;
        }
        std::unique_lock lk(g_task_control_mutex);
        int concurrency = c->concurrency();
        if (val > concurrency) {
            int added = kthread::add_workers_for_each_tag(val - concurrency);
            return added == (val - concurrency);
        } else {
            return true;
        }
    }

    // Validates the tag and mirrors that tag's current concurrency into
    // FLAGS_kthread_concurrency_by_tag (0 when no scheduler exists yet).
    bool validate_kthread_current_tag(std::string_view vstr, std::string *err) noexcept {
        int tag = 0;
        if (!::turbo::parse_flag(vstr, &tag, err)) {
            return false;
        }
        // The tag must lie in [KTHREAD_TAG_DEFAULT, task_group_ntags).
        if (tag < KTHREAD_TAG_DEFAULT || tag >= turbo::get_flag(FLAGS_task_group_ntags)) {
            return false;
        }
        std::unique_lock lk(kthread::g_task_control_mutex);
        auto ctl = kthread::get_task_control();
        const int cur = (ctl == nullptr) ? 0 : ctl->concurrency(tag);
        turbo::set_flag(&FLAGS_kthread_concurrency_by_tag, cur);
        return true;
    }

    // Parses the value and applies it to the tag currently selected by
    // FLAGS_kthread_current_tag.
    bool validate_kthread_concurrency_by_tag(std::string_view vstr, std::string *err) noexcept {
        int num = 0;
        if (!::turbo::parse_flag(vstr, &num, err)) {
            return false;
        }
        const auto tag = turbo::get_flag(FLAGS_kthread_current_tag);
        return 0 == kthread_setconcurrency_by_tag(num, tag);
    }

    // Non-worker threads remember here which TaskGroup received their
    // NOSIGNAL tasks so kthread_flush() can flush the right group.
    __thread TaskGroup *tls_task_group_nosignal = nullptr;

    // Starts a kthread from a thread that is not a kthread worker.
    // Returns 0 on success, ENOMEM if the TaskControl cannot be created.
    int start_from_non_worker(kthread_t *__restrict tid,
                              const kthread_attr_t *__restrict attr,
                              void *(*fn)(void *),
                              void *__restrict arg) {
        TaskControl *ctl = get_or_new_task_control();
        if (ctl == nullptr) {
            return ENOMEM;
        }
        auto tag = KTHREAD_TAG_DEFAULT;
        const bool has_attr = (attr != nullptr);
        if (has_attr && attr->tag != KTHREAD_TAG_INVALID) {
            tag = attr->tag;
        }
        if (has_attr && (attr->flags & KTHREAD_NOSIGNAL)) {
            // Remember the TaskGroup to insert NOSIGNAL tasks for 2 reasons:
            // 1. NOSIGNAL is often for creating many kthreads in batch,
            //    inserting into the same TaskGroup maximizes the batch.
            // 2. kthread_flush() needs to know which TaskGroup to flush.
            TaskGroup *g = tls_task_group_nosignal;
            if (g == nullptr) {
                g = ctl->choose_one_group(tag);
                tls_task_group_nosignal = g;
            }
            return g->start_background<true>(tid, attr, fn, arg);
        }
        TaskGroup *chosen = ctl->choose_one_group(tag);
        return chosen->start_background<true>(tid, attr, fn, arg);
    }

    // A task may run on the calling worker's local TaskGroup when any of:
    //   - attr is nullptr,
    //   - attr->tag is KTHREAD_TAG_INVALID,
    //   - attr->tag matches the local group's tag.
    bool can_run_thread_local(const kthread_attr_t *__restrict attr) {
        if (attr == nullptr) {
            return true;
        }
        return attr->tag == KTHREAD_TAG_INVALID ||
               attr->tag == kthread::tls_task_group->tag();
    }

    // Traits plugged into ListOfABAFreeId: sizing constants, the sentinel
    // initial id, and the liveness predicate used for garbage collection.
    struct TidTraits {
        static const size_t BLOCK_SIZE = 63;
        static const size_t MAX_ENTRIES = 65536;
        static const size_t INIT_GC_SIZE = 65536;
        static const kthread_t ID_INIT;

        // An id is "live" while its kthread still exists.
        static bool exists(kthread_t id) { return kthread::TaskGroup::exists(id); }
    };

    const kthread_t TidTraits::ID_INIT = INVALID_KTHREAD;

    // Container behind kthread_list_t. Modernized: `using` alias instead of
    // `typedef`, consistent with the C++17 features this file already uses.
    using TidList = ListOfABAFreeId<kthread_t, TidTraits>;

    // Functor applied by kthread_list_stop(): requests stop of each id.
    struct TidStopper {
        void operator()(kthread_t id) const { kthread_stop(id); }
    };

    // Functor applied by kthread_list_join(): joins each id and resets the
    // slot so it is not joined again.
    struct TidJoiner {
        void operator()(kthread_t &id) const {
            kthread_join(id, nullptr);
            id = INVALID_KTHREAD;
        }
    };

}  // namespace kthread

extern "C" {

// Starts a kthread and, when called from a worker whose tag is compatible,
// runs it immediately in the foreground of the local TaskGroup.
int kthread_start_urgent(kthread_t *__restrict tid,
                         const kthread_attr_t *__restrict attr,
                         void *(*fn)(void *),
                         void *__restrict arg) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g != nullptr && kthread::can_run_thread_local(attr)) {
        return kthread::TaskGroup::start_foreground(&g, tid, attr, fn, arg);
    }
    // Non-worker thread, or a tag that needs a different group.
    return kthread::start_from_non_worker(tid, attr, fn, arg);
}

// Starts a kthread without preempting the caller; queued on the local
// TaskGroup when the caller is a worker with a compatible tag.
int kthread_start_background(kthread_t *__restrict tid,
                             const kthread_attr_t *__restrict attr,
                             void *(*fn)(void *),
                             void *__restrict arg) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g != nullptr && kthread::can_run_thread_local(attr)) {
        return g->start_background<false>(tid, attr, fn, arg);
    }
    // Non-worker thread, or a tag that needs a different group.
    return kthread::start_from_non_worker(tid, attr, fn, arg);
}

// Flushes NOSIGNAL tasks accumulated by this thread, whether it is a worker
// or a non-worker that created NOSIGNAL kthreads.
void kthread_flush() {
    if (kthread::TaskGroup *g = kthread::tls_task_group) {
        g->flush_nosignal_tasks();
        return;
    }
    if (kthread::TaskGroup *g = kthread::tls_task_group_nosignal) {
        // NOSIGNAL tasks were created in this non-worker.
        kthread::tls_task_group_nosignal = nullptr;
        g->flush_nosignal_tasks_remote();
    }
}

// Interrupts a (possibly sleeping/blocked) kthread identified by tid,
// delegating to TaskGroup::interrupt with the global TaskControl (which may
// be nullptr when no scheduler was created).
int kthread_interrupt(kthread_t tid, kthread_tag_t tag) {
    return kthread::TaskGroup::interrupt(tid, kthread::get_task_control(), tag);
}

// Marks the kthread as stopped, then interrupts it so a blocked kthread can
// observe the stop request.
int kthread_stop(kthread_t tid) {
    kthread::TaskGroup::set_stopped(tid);
    return kthread_interrupt(tid);
}

// Returns non-zero if kthread_stop() was called on tid.
int kthread_stopped(kthread_t tid) {
    // Named cast instead of the previous C-style cast; same value.
    return static_cast<int>(kthread::TaskGroup::is_stopped(tid));
}

kthread_t kthread_self(void) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    // note: return 0 for main tasks now, which include main thread and
    // all work threads. So that we can identify main tasks from logs
    // more easily. This is probably questionable in future.
    if (g == nullptr || g->is_current_main_task()) {
        return INVALID_KTHREAD;
    }
    return g->current_tid();
}

// Compares two kthread ids; non-zero when equal (mirrors pthread_equal).
int kthread_equal(kthread_t t1, kthread_t t2) {
    return t1 == t2;
}

// Terminates the calling kthread with `retval`. Inside a real kthread this
// unwinds via ExitException so the worker can clean up; in the main task or
// a plain pthread it falls back to pthread_exit.
void kthread_exit(void *retval) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g == nullptr || g->is_current_main_task()) {
        pthread_exit(retval);
    } else {
        throw kthread::ExitException(retval);
    }
}

// Waits for the kthread to finish; *thread_return receives its result when
// non-null.
int kthread_join(kthread_t tid, void **thread_return) {
    return kthread::TaskGroup::join(tid, thread_return);
}

// Initializes *a to the default attribute set. Always succeeds.
int kthread_attr_init(kthread_attr_t *a) {
    *a = KTHREAD_ATTR_NORMAL;
    return 0;
}

// No-op: kthread_attr_t owns no resources. Kept for pthread API symmetry.
int kthread_attr_destroy(kthread_attr_t *) {
    return 0;
}

// Copies the attributes the kthread was created with into *attr.
int kthread_getattr(kthread_t tid, kthread_attr_t *attr) {
    return kthread::TaskGroup::get_attr(tid, attr);
}

// Returns the configured worker concurrency (the flag value, not a live
// count of running workers).
int kthread_getconcurrency(void) {
    return turbo::get_flag(FLAGS_kthread_concurrency);
}

// Sets the target worker concurrency.
// Returns 0 on success, EINVAL for out-of-range values, EPERM when the pool
// cannot be shrunk or could not be grown to the requested size.
int kthread_setconcurrency(int num) {
    if (num < KTHREAD_MIN_CONCURRENCY || num > KTHREAD_MAX_CONCURRENCY) {
        KLOG(ERROR) << "Invalid concurrency=" << num;
        return EINVAL;
    }
    if (turbo::get_flag(FLAGS_kthread_min_concurrency) > 0) {
        // Lazy mode: workers are added on demand, so only record the target
        // in the flag; it must not drop below min_concurrency.
        if (num < turbo::get_flag(FLAGS_kthread_min_concurrency)) {
            return EINVAL;
        }
        if (kthread::never_set_kthread_concurrency) {
            kthread::never_set_kthread_concurrency = false;
        }
        turbo::set_flag(&FLAGS_kthread_concurrency, num);
        return 0;
    }
    kthread::TaskControl *c = kthread::get_task_control();
    if (c != nullptr) {
        // Shrinking a running pool is unsupported; equal is a no-op.
        if (num < c->concurrency()) {
            return EPERM;
        } else if (num == c->concurrency()) {
            return 0;
        }
    }
    std::unique_lock lk(kthread::g_task_control_mutex);
    // Re-read under the lock: another thread may have created the control.
    c = kthread::get_task_control();
    if (c == nullptr) {
        // No workers yet: the first explicit call may set any valid value;
        // later calls may only raise the flag.
        if (kthread::never_set_kthread_concurrency) {
            kthread::never_set_kthread_concurrency = false;
            turbo::set_flag(&FLAGS_kthread_concurrency, num);
        } else if (num > turbo::get_flag(FLAGS_kthread_concurrency)) {
            turbo::set_flag(&FLAGS_kthread_concurrency, num);
        }
        return 0;
    }
    // Re-sync the flag with the actual worker count before growing.
    if (turbo::get_flag(FLAGS_kthread_concurrency) != c->concurrency()) {
        KLOG(ERROR) << "KCHECK failed: kthread_concurrency="
                   << turbo::get_flag(FLAGS_kthread_concurrency)
                   << " != tc_concurrency=" << c->concurrency();
        turbo::set_flag(&FLAGS_kthread_concurrency, c->concurrency());
    }
    if (num > turbo::get_flag(FLAGS_kthread_concurrency)) {
        // Create more workers if needed.
        auto added = kthread::add_workers_for_each_tag(num - turbo::get_flag(FLAGS_kthread_concurrency));
        auto n = turbo::get_flag(FLAGS_kthread_concurrency);
        turbo::set_flag(&FLAGS_kthread_concurrency, added + n);
    }
    // EPERM when fewer workers were added than requested.
    return (num == turbo::get_flag(FLAGS_kthread_concurrency) ? 0 : EPERM);
}

// Returns the worker count of `tag`, or EPERM when no scheduler exists.
// NOTE(review): EPERM doubles as an error sentinel although the normal
// return is a count — callers must distinguish by context.
int kthread_getconcurrency_by_tag(kthread_tag_t tag) {
    std::unique_lock lk(kthread::g_task_control_mutex);
    auto *ctl = kthread::get_task_control();
    if (ctl == nullptr) {
        return EPERM;
    }
    return ctl->concurrency(tag);
}

// Sets the worker count for one tag. Growth only; the total across tags may
// not exceed FLAGS_kthread_concurrency.
// Returns 0 on success, EPERM on invalid tag / over-budget / shrink attempts,
// ENOMEM when the TaskControl cannot be created.
int kthread_setconcurrency_by_tag(int num, kthread_tag_t tag) {
    // NOTE(review): the very first call is deliberately swallowed —
    // presumably to skip the flag's initial validation pass; confirm.
    if (kthread::never_set_kthread_concurrency_by_tag) {
        kthread::never_set_kthread_concurrency_by_tag = false;
        return 0;
    }
    if (tag < KTHREAD_TAG_DEFAULT || tag >= turbo::get_flag(FLAGS_task_group_ntags)) {
        return EPERM;
    }
    auto c = kthread::get_or_new_task_control();
    if (c == nullptr) {
        // Fix: previously c was dereferenced unconditionally although
        // get_or_new_task_control() returns nullptr on alloc/init failure.
        return ENOMEM;
    }
    std::unique_lock lk(kthread::g_task_control_mutex);
    auto ngroup = c->concurrency();
    auto tag_ngroup = c->concurrency(tag);
    auto add = num - tag_ngroup;
    // The sum over all tags must stay within the global concurrency budget.
    if (ngroup + add > turbo::get_flag(FLAGS_kthread_concurrency)) {
        KLOG(ERROR) << "Fail to set concurrency by tag " << tag
                   << ", Total concurrency larger than kthread_concurrency";
        return EPERM;
    }
    auto added = 0;
    if (add > 0) {
        added = c->add_workers(add, tag);
        return (add == added ? 0 : EPERM);
    }
    // add <= 0: shrinking is unsupported, succeed only if already at num.
    return (num == tag_ngroup ? 0 : EPERM);
}

int kthread_about_to_quit() {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g != nullptr) {
        kthread::TaskMeta *current_task = g->current_task();
        if (!(current_task->attr.flags & KTHREAD_NEVER_QUIT)) {
            current_task->about_to_quit = true;
        }
        return 0;
    }
    return EPERM;
}

// Schedules on_timer(arg) to run at `abstime`, writing the timer id to *id.
// Returns 0 on success, ENOMEM if scheduler/timer-thread creation fails,
// ESTOP when scheduling is rejected.
int kthread_timer_add(kthread_timer_t *id, timespec abstime,
                      void (*on_timer)(void *), void *arg) {
    if (kthread::get_or_new_task_control() == nullptr) {
        return ENOMEM;
    }
    kthread::TimerThread *tt = kthread::get_or_create_global_timer_thread();
    if (tt == nullptr) {
        return ENOMEM;
    }
    const kthread_timer_t timer_id = tt->schedule(on_timer, arg, abstime);
    if (timer_id == 0) {
        // schedule() returned 0 — presumably the timer thread is stopping.
        return ESTOP;
    }
    *id = timer_id;
    return 0;
}

// Cancels a timer added by kthread_timer_add().
// Returns unschedule()'s non-negative state, or EINVAL when no scheduler or
// timer thread exists (or unschedule reports a negative state).
int kthread_timer_del(kthread_timer_t id) {
    // Without a TaskControl no timer could have been added.
    if (kthread::get_task_control() == nullptr) {
        return EINVAL;
    }
    kthread::TimerThread *tt = kthread::get_global_timer_thread();
    if (tt == nullptr) {
        return EINVAL;
    }
    const int state = tt->unschedule(id);
    return state >= 0 ? state : EINVAL;
}

// Sleeps for `microseconds`: suspends only the calling kthread inside a
// worker, otherwise blocks the whole pthread via ::usleep.
int kthread_usleep(uint64_t microseconds) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g == nullptr || g->is_current_pthread_task()) {
        return ::usleep(microseconds);
    }
    return kthread::TaskGroup::usleep(&g, microseconds);
}

// Yields the processor: reschedules the current kthread inside a worker,
// otherwise yields the pthread.
int kthread_yield(void) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    const bool in_kthread = (g != nullptr) && !g->is_current_pthread_task();
    if (!in_kthread) {
        // pthread_yield is not available on MAC
        return sched_yield();
    }
    kthread::TaskGroup::yield(&g);
    return 0;
}

// Registers the global worker start hook; null is rejected with EINVAL.
int kthread_set_worker_startfn(void (*start_fn)()) {
    if (nullptr == start_fn) {
        return EINVAL;
    }
    kthread::g_worker_startfn = start_fn;
    return 0;
}

// Registers the tagged worker start hook; null is rejected with EINVAL.
int kthread_set_tagged_worker_startfn(void (*start_fn)(kthread_tag_t)) {
    if (nullptr == start_fn) {
        return EINVAL;
    }
    kthread::g_tagged_worker_startfn = start_fn;
    return 0;
}

// Registers the span-creation hook; null is rejected with EINVAL.
int kthread_set_create_span_func(void *(*func)()) {
    if (nullptr == func) {
        return EINVAL;
    }
    kthread::g_create_span_func = func;
    return 0;
}

void kthread_stop_world() {
    kthread::TaskControl *c = kthread::get_task_control();
    if (c != nullptr) {
        c->stop_and_join();
    }
}

// Initializes a kthread list; the size hints are ignored by this
// implementation. Returns ENOMEM when allocation fails.
int kthread_list_init(kthread_list_t *list,
                      unsigned /*size*/,
                      unsigned /*conflict_size*/) {
    auto *impl = new(std::nothrow) kthread::TidList;
    if (impl == nullptr) {
        return ENOMEM;
    }
    list->impl = impl;
    // Set unused fields to zero as well.
    list->head = 0;
    list->size = 0;
    list->conflict_head = 0;
    list->conflict_size = 0;
    return 0;
}

// Frees the list's backing storage and clears the impl pointer.
void kthread_list_destroy(kthread_list_t *list) {
    auto *impl = static_cast<kthread::TidList *>(list->impl);
    list->impl = nullptr;
    delete impl;
}

// Appends a kthread id to the list; EINVAL on an uninitialized list.
int kthread_list_add(kthread_list_t *list, kthread_t id) {
    auto *impl = static_cast<kthread::TidList *>(list->impl);
    if (impl == nullptr) {
        return EINVAL;
    }
    return impl->add(id);
}

// Requests stop of every kthread in the list; EINVAL on an uninitialized
// list.
int kthread_list_stop(kthread_list_t *list) {
    auto *impl = static_cast<kthread::TidList *>(list->impl);
    if (impl == nullptr) {
        return EINVAL;
    }
    impl->apply(kthread::TidStopper());
    return 0;
}

// Joins every kthread in the list; EINVAL on an uninitialized list.
int kthread_list_join(kthread_list_t *list) {
    auto *impl = static_cast<kthread::TidList *>(list->impl);
    if (impl == nullptr) {
        return EINVAL;
    }
    impl->apply(kthread::TidJoiner());
    return 0;
}

// Returns the tag of the calling worker's group; non-workers report the
// default tag.
kthread_tag_t kthread_self_tag(void) {
    kthread::TaskGroup *g = kthread::tls_task_group;
    if (g == nullptr) {
        return KTHREAD_TAG_DEFAULT;
    }
    return g->tag();
}

}  // extern "C"
