// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <pthread.h>

#include <atomic>
#include <mutex>
#include <new>

#include <turbo/base/macros.h>
#include <turbo/threading/thread_key.h>
#include <tally/tally.h>
#include <kthread/internal/errno.h>                       // EAGAIN
#include <kthread/internal/task_group.h>                  // TaskGroup
// Implement kthread_key_t related functions

namespace kthread {

    class KeyTable;

    // Defined in task_group.cpp.
    extern __thread TaskGroup *tls_task_group;
    extern __thread LocalStorage tls_bls;
    // Set once per pthread when kthread_setspecific registers the
    // cleanup_pthread atexit hook, so the hook is installed at most once.
    static __thread bool tls_ever_created_keytable = false;

    // We keep thread specific data in a two-level array. The top-level array
    // contains at most KEY_1STLEVEL_SIZE pointers to dynamically allocated
    // arrays of at most KEY_2NDLEVEL_SIZE data pointers. Many applications
    // may just occupy one or two second level arrays, thus this mechanism keeps
    // memory footprint smaller and we can change KEY_1STLEVEL_SIZE to a
    // bigger number more freely. The tradeoff is an additional memory indirection:
    // negligible at most time.
    static const uint32_t KEY_2NDLEVEL_SIZE = 32;

    // Notice that we're trying to make the memory of second level and first
    // level both 256 bytes to make memory allocator happier.
    static const uint32_t KEY_1STLEVEL_SIZE = 31;

    // Max TLS slots in one thread, currently the value is 992 (31 * 32) which
    // should be enough for most projects.
    static const uint32_t KEYS_MAX = KEY_2NDLEVEL_SIZE * KEY_1STLEVEL_SIZE;

    // Per-key metadata: destructor (with its bound argument) and a version
    // counter used to invalidate stale stored values after the slot is
    // recycled by kthread_key_delete.
    struct KeyInfo {
        // Bumped on every kthread_key_delete; 0 is reserved for slots that
        // were never used (kthread_key_create2 skips version 0).
        uint32_t version;

        // Destructor run on stored values; nullptr when the key has no dtor.
        void (*dtor)(void *, const void *);

        // Opaque argument forwarded as the second parameter of dtor.
        const void *dtor_args;
    };

    static KeyInfo s_key_info[KEYS_MAX] = {};

    // For allocating keys. Guards nfreekey, nkey and s_free_keys below.
    static std::mutex s_key_mutex;
    // Number of recycled indices currently stacked in s_free_keys.
    static size_t nfreekey = 0;
    // High-water mark of ever-allocated key indices.
    static size_t nkey = 0;
    // Stack of recycled key indices.
    static uint32_t s_free_keys[KEYS_MAX];

    // Stats, exported through the tally gauges at the bottom of this file.
    static std::atomic<size_t> nkeytable = 0;
    static std::atomic<size_t> nsubkeytable = 0;

    // The second-level array.
    // Align with cacheline to avoid false sharing.
    class TURBO_CACHELINE_ALIGNED SubKeyTable {
    public:
        SubKeyTable() {
            memset(_data, 0, sizeof(_data));
            nsubkeytable.fetch_add(1, std::memory_order_relaxed);
        }

        // NOTE: Call clear first.
        ~SubKeyTable() {
            nsubkeytable.fetch_sub(1, std::memory_order_relaxed);
        }

        void clear(uint32_t offset) {
            for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
                void *p = _data[i].ptr;
                if (p) {
                    // Set the position to nullptr before calling dtor which may set
                    // the position again.
                    _data[i].ptr = nullptr;

                    KeyInfo info = kthread::s_key_info[offset + i];
                    if (info.dtor && _data[i].version == info.version) {
                        info.dtor(p, info.dtor_args);
                    }
                }
            }
        }

        bool cleared() const {
            // We need to iterate again to check if every slot is empty. An
            // alternative is remember if set_data() was called during clear.
            for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
                if (_data[i].ptr) {
                    return false;
                }
            }
            return true;
        }

        inline void *get_data(uint32_t index, uint32_t version) const {
            if (_data[index].version == version) {
                return _data[index].ptr;
            }
            return nullptr;
        }

        inline void set_data(uint32_t index, uint32_t version, void *data) {
            _data[index].version = version;
            _data[index].ptr = data;
        }

    private:
        struct Data {
            uint32_t version;
            void *ptr;
        };
        Data _data[KEY_2NDLEVEL_SIZE];
    };

    // The first-level array.
    // Align with cacheline to avoid false sharing.
    class TURBO_CACHELINE_ALIGNED KeyTable {
    public:
        KeyTable() : next(nullptr) {
            memset(_subs, 0, sizeof(_subs));
            nkeytable.fetch_add(1, std::memory_order_relaxed);
        }

        ~KeyTable() {
            nkeytable.fetch_sub(1, std::memory_order_relaxed);
            for (int ntry = 0; ntry < PTHREAD_DESTRUCTOR_ITERATIONS; ++ntry) {
                for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                    if (_subs[i]) {
                        _subs[i]->clear(i * KEY_2NDLEVEL_SIZE);
                    }
                }
                bool all_cleared = true;
                for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                    if (_subs[i] != nullptr && !_subs[i]->cleared()) {
                        all_cleared = false;
                        break;
                    }
                }
                if (all_cleared) {
                    for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                        delete _subs[i];
                    }
                    return;
                }
            }
            KLOG(ERROR) << "Fail to destroy all objects in KeyTable[" << this << ']';
        }

        inline void *get_data(kthread_key_t key) const {
            const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
            if (subidx < KEY_1STLEVEL_SIZE) {
                const SubKeyTable *sub_kt = _subs[subidx];
                if (sub_kt) {
                    return sub_kt->get_data(
                            key.index - subidx * KEY_2NDLEVEL_SIZE, key.version);
                }
            }
            return nullptr;
        }

        inline int set_data(kthread_key_t key, void *data) {
            const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
            if (subidx < KEY_1STLEVEL_SIZE &&
                key.version == s_key_info[key.index].version) {
                SubKeyTable *sub_kt = _subs[subidx];
                if (sub_kt == nullptr) {
                    sub_kt = new(std::nothrow) SubKeyTable;
                    if (nullptr == sub_kt) {
                        return ENOMEM;
                    }
                    _subs[subidx] = sub_kt;
                }
                sub_kt->set_data(key.index - subidx * KEY_2NDLEVEL_SIZE,
                                 key.version, data);
                return 0;
            }
            DKCHECK(false) << "kthread_setspecific is called on invalid " << key;
            return EINVAL;
        }

    public:
        KeyTable *next;
    private:
        SubKeyTable *_subs[KEY_1STLEVEL_SIZE];
    };

    // Per-pthread list of KeyTables cached for a keytable pool.
    struct KeyTableList {
        KeyTableList() {
            keytable = nullptr;
        }

        ~KeyTableList() {
            // Destroy every cached KeyTable. While each one is deleted, point
            // the thread-local (and current task's) keytable at it so that
            // get/setspecific calls made by key destructors resolve to the
            // table being destroyed (same "cheat get/setspecific" trick as
            // kthread_keytable_pool_destroy). Restore the previous table
            // afterwards.
            kthread::TaskGroup *g = kthread::tls_task_group;
            kthread::KeyTable *old_kt = kthread::tls_bls.keytable;
            while (keytable) {
                kthread::KeyTable *kt = keytable;
                keytable = kt->next;
                kthread::tls_bls.keytable = kt;
                if (g) {
                    g->current_task()->local_storage.keytable = kt;
                }
                delete kt;
                // Never restore a table that was just destroyed.
                if (old_kt == kt) {
                    old_kt = nullptr;
                }
                // NOTE(review): re-read -- deletion may apparently change
                // tls_task_group; confirm against TaskGroup internals.
                g = kthread::tls_task_group;
            }
            kthread::tls_bls.keytable = old_kt;
            if (g) {
                g->current_task()->local_storage.keytable = old_kt;
            }
        }

        // Head of a singly-linked list threaded through KeyTable::next.
        KeyTable *keytable;
    };

    // Takes one KeyTable from `pool': first from the calling thread's cached
    // KeyTableList, then from the shared free_keytables stack. Returns
    // nullptr when `pool' is null or has nothing to lend.
    static KeyTable *borrow_keytable(kthread_keytable_pool_t *pool) {
        // Unlocked peek; both sources are re-checked under the lock below.
        if (pool != nullptr && (pool->list || pool->free_keytables)) {
            KeyTable *p;
            // rdlock guards against concurrent pool destruction. The list is
            // per-thread (turbo::ThreadLocal), so mutating this thread's
            // entry under a shared lock is presumably safe -- confirm against
            // turbo::ThreadLocal's contract.
            pthread_rwlock_rdlock(&pool->rwlock);
            auto list = (turbo::ThreadLocal<kthread::KeyTableList> *) pool->list;
            if (list && list->get()->keytable) {
                p = list->get()->keytable;
                list->get()->keytable = p->next;
                pthread_rwlock_unlock(&pool->rwlock);
                return p;
            }
            pthread_rwlock_unlock(&pool->rwlock);
            if (pool->free_keytables) {
                // The shared stack is popped under the write lock.
                pthread_rwlock_wrlock(&pool->rwlock);
                p = (KeyTable *) pool->free_keytables;
                if (p) {
                    pool->free_keytables = p->next;
                    pthread_rwlock_unlock(&pool->rwlock);
                    return p;
                }
                pthread_rwlock_unlock(&pool->rwlock);
            }
        }
        return nullptr;
    }

    // Referenced in task_group.cpp, must be extern.
    // Caller of this function must hold the KeyTable.
    // Hands `kt' back to the calling thread's cache list of `pool'; deletes
    // it immediately when `pool' is null or already destroyed.
    void return_keytable(kthread_keytable_pool_t *pool, KeyTable *kt) {
        if (nullptr == kt) {
            return;
        }
        if (pool == nullptr) {
            delete kt;
            return;
        }
        pthread_rwlock_rdlock(&pool->rwlock);
        if (pool->destroyed) {
            // Pool is gone; nothing can cache the table anymore.
            pthread_rwlock_unlock(&pool->rwlock);
            delete kt;
            return;
        }
        // Push onto this thread's cache list; the list is per-thread
        // (turbo::ThreadLocal), so the read lock is presumably sufficient --
        // see the matching note in borrow_keytable.
        auto list = (turbo::ThreadLocal<kthread::KeyTableList> *) pool->list;
        kt->next = list->get()->keytable;
        list->get()->keytable = kt;
        pthread_rwlock_unlock(&pool->rwlock);
    }

    // thread_atexit hook for plain pthreads that created a KeyTable through
    // kthread_setspecific: destroys the table when the pthread exits.
    static void cleanup_pthread(void *arg) {
        auto *kt = static_cast<KeyTable *>(arg);
        if (kt == nullptr) {
            return;
        }
        delete kt;
        // Key destructors may repopulate the TLS slot while the table is
        // being destroyed; clear it only after deletion.
        tls_bls.keytable = nullptr;
    }

    // Adapter that lets a single-argument pthread-style destructor be stored
    // as the two-argument KeyInfo dtor: `arg' carries the real destructor.
    static void arg_as_dtor(void *data, const void *arg) {
        using KeyDtor = void (*)(void *);
        reinterpret_cast<KeyDtor>(const_cast<void *>(arg))(data);
    }

    // Exported gauges.
    // Number of keys currently allocated (ever-created minus recycled).
    static tally::FuncGauge<int> s_kthread_key_count(
            "kthread_key_count", "help", []() {
                std::unique_lock lk(kthread::s_key_mutex);
                return (int) nkey - (int) nfreekey;
            });
    // Number of live KeyTables across all threads.
    static tally::FuncGauge<size_t> s_kthread_keytable_count(
            "kthread_keytable_count", "help", []() {
                return nkeytable.load(std::memory_order_relaxed);
            });
    // Approximate bytes held by all KeyTables plus their SubKeyTables.
    static tally::FuncGauge<size_t> s_kthread_keytable_memory(
            "kthread_keytable_memory", "help", []() {
                const size_t n = nkeytable.load(std::memory_order_relaxed);
                const size_t nsub = nsubkeytable.load(std::memory_order_relaxed);
                return n * sizeof(KeyTable) + nsub * sizeof(SubKeyTable);
            });

}  // namespace kthread

extern "C" {

// Initializes `pool': its rwlock, an empty per-thread cache list and an
// empty shared free list. Returns EINVAL on null `pool', ENOMEM when the
// cache list cannot be allocated, 0 on success.
int kthread_keytable_pool_init(kthread_keytable_pool_t *pool) {
    if (pool == nullptr) {
        KLOG(ERROR) << "Param[pool] is nullptr";
        return EINVAL;
    }
    pthread_rwlock_init(&pool->rwlock, nullptr);
    // Use nothrow new for consistency with the rest of this file: report
    // allocation failure via an errno-style return instead of throwing
    // through this C-linkage function.
    auto *list = new(std::nothrow) turbo::ThreadLocal<kthread::KeyTableList>();
    if (list == nullptr) {
        KLOG(ERROR) << "Fail to create KeyTableList for pool=" << pool;
        pthread_rwlock_destroy(&pool->rwlock);
        return ENOMEM;
    }
    pool->list = list;
    pool->free_keytables = nullptr;
    pool->destroyed = 0;
    return 0;
}

// Marks `pool' destroyed, deletes its ThreadLocal cache-list object and
// destroys every KeyTable parked on the shared free list. Returns EINVAL
// on null `pool', 0 otherwise.
int kthread_keytable_pool_destroy(kthread_keytable_pool_t *pool) {
    if (pool == nullptr) {
        KLOG(ERROR) << "Param[pool] is nullptr";
        return EINVAL;
    }
    kthread::KeyTable *saved_free_keytables = nullptr;
    // Detach everything under the write lock so that concurrent
    // borrow/return calls observe the destroyed state afterwards.
    pthread_rwlock_wrlock(&pool->rwlock);
    pool->destroyed = 1;
    delete (turbo::ThreadLocal<kthread::KeyTableList> *) pool->list;
    saved_free_keytables = (kthread::KeyTable *) pool->free_keytables;
    pool->list = nullptr;
    pool->free_keytables = nullptr;
    pthread_rwlock_unlock(&pool->rwlock);

    // Cheat get/setspecific and destroy the keytables: while each table is
    // deleted, point the thread-local (and current task's) keytable at it so
    // key dtors that call get/setspecific resolve to the dying table.
    kthread::TaskGroup *g = kthread::tls_task_group;
    kthread::KeyTable *old_kt = kthread::tls_bls.keytable;
    while (saved_free_keytables) {
        kthread::KeyTable *kt = saved_free_keytables;
        saved_free_keytables = kt->next;
        kthread::tls_bls.keytable = kt;
        if (g) {
            g->current_task()->local_storage.keytable = kt;
        }
        delete kt;
        // NOTE(review): re-read -- deletion may apparently change
        // tls_task_group; confirm against TaskGroup internals.
        g = kthread::tls_task_group;
    }
    // Restore whatever table was installed before this call.
    kthread::tls_bls.keytable = old_kt;
    if (g) {
        g->current_task()->local_storage.keytable = old_kt;
    }
    // TODO: return_keytable may race with this function, we don't destroy
    // the mutex right now.
    // pthread_mutex_destroy(&pool->mutex);
    return 0;
}

// Fills `stat' with the number of KeyTables currently parked on `pool's
// shared free list. Returns EINVAL on null arguments, 0 on success.
int kthread_keytable_pool_getstat(kthread_keytable_pool_t *pool,
                                  kthread_keytable_pool_stat_t *stat) {
    if (pool == nullptr || stat == nullptr) {
        KLOG(ERROR) << "Param[pool] or Param[stat] is nullptr";
        return EINVAL;
    }
    pthread_rwlock_rdlock(&pool->rwlock);
    // Walk the free list under the read lock to count its length.
    size_t nfree = 0;
    for (auto *kt = (kthread::KeyTable *) pool->free_keytables;
         kt != nullptr; kt = kt->next) {
        ++nfree;
    }
    stat->nfree = nfree;
    pthread_rwlock_unlock(&pool->rwlock);
    return 0;
}

// TODO: this is not strict `reserve' because we only check #free.
// Currently there's no way to track KeyTables that may be returned
// to the pool in future.
// Tops up `pool's shared free list until it holds `nfree' KeyTables. Each
// new table is pre-populated with ctor(ctor_args) stored under `key'; a
// table is still appended when the ctor yields nullptr, but the loop stops
// after the first such failure.
void kthread_keytable_pool_reserve(kthread_keytable_pool_t *pool,
                                   size_t nfree,
                                   kthread_key_t key,
                                   void *ctor(const void *),
                                   const void *ctor_args) {
    if (pool == nullptr) {
        KLOG(ERROR) << "Param[pool] is nullptr";
        return;
    }
    kthread_keytable_pool_stat_t stat;
    if (kthread_keytable_pool_getstat(pool, &stat) != 0) {
        KLOG(ERROR) << "Fail to getstat of pool=" << pool;
        return;
    }
    for (size_t i = stat.nfree; i < nfree; ++i) {
        kthread::KeyTable *kt = new(std::nothrow) kthread::KeyTable;
        if (kt == nullptr) {
            break;
        }
        void *data = ctor(ctor_args);
        if (data) {
            kt->set_data(key, data);
        }  // else append kt w/o data.

        pthread_rwlock_wrlock(&pool->rwlock);
        if (pool->destroyed) {
            // Pool destroyed concurrently; stop reserving.
            pthread_rwlock_unlock(&pool->rwlock);
            delete kt;
            break;
        }
        // Push onto the shared free list under the write lock.
        kt->next = (kthread::KeyTable *) pool->free_keytables;
        pool->free_keytables = kt;
        pthread_rwlock_unlock(&pool->rwlock);
        if (data == nullptr) {
            break;
        }
    }
}

// Allocates a key slot (preferring recycled indices), records `dtor' and
// `dtor_args' in s_key_info, and fills `key' with the slot index and its
// current version. Returns EAGAIN when all KEYS_MAX slots are in use.
// Version 0 is reserved: zero-initialized SubKeyTable slots carry version 0,
// so a live key must never use it.
int kthread_key_create2(kthread_key_t *key,
                        void (*dtor)(void *, const void *),
                        const void *dtor_args) {
    uint32_t index = 0;
    {
        std::unique_lock lk(kthread::s_key_mutex);
        if (kthread::nfreekey > 0) {
            // Reuse a previously deleted slot.
            index = kthread::s_free_keys[--kthread::nfreekey];
        } else if (kthread::nkey < kthread::KEYS_MAX) {
            index = kthread::nkey++;
        } else {
            return EAGAIN;  // what pthread_key_create returns in this case.
        }
    }
    kthread::s_key_info[index].dtor = dtor;
    kthread::s_key_info[index].dtor_args = dtor_args;
    key->index = index;
    key->version = kthread::s_key_info[index].version;
    // Skip the reserved version 0 (only seen for a never-used slot).
    if (key->version == 0) {
        ++kthread::s_key_info[index].version;
        ++key->version;
    }
    return 0;
}

// Single-argument flavour of key creation: wraps `dtor' (when present)
// through kthread::arg_as_dtor so it fits the two-argument KeyInfo dtor.
int kthread_key_create(kthread_key_t *key, void (*dtor)(void *)) {
    return dtor == nullptr
           ? kthread_key_create2(key, nullptr, nullptr)
           : kthread_key_create2(key, kthread::arg_as_dtor, (const void *) dtor);
}

// Deletes `key' by bumping its slot's version (skipping the reserved 0) and
// recycling the index. Values already stored under the old version become
// unreachable and are NOT passed to the dtor (SubKeyTable::clear skips
// version mismatches) -- same contract as pthread_key_delete. Returns
// EINVAL for an unknown or already-deleted key.
int kthread_key_delete(kthread_key_t key) {
    // Unlocked fast pre-check; revalidated under the mutex below.
    if (key.index < kthread::KEYS_MAX &&
        key.version == kthread::s_key_info[key.index].version) {
        std::unique_lock lk(kthread::s_key_mutex);
        if (key.version == kthread::s_key_info[key.index].version) {
            // Wrap the version past 0, which is reserved for unused slots.
            if (++kthread::s_key_info[key.index].version == 0) {
                ++kthread::s_key_info[key.index].version;
            }
            kthread::s_key_info[key.index].dtor = nullptr;
            kthread::s_key_info[key.index].dtor_args = nullptr;
            kthread::s_free_keys[kthread::nfreekey++] = key.index;
            return 0;
        }
    }
    KCHECK(false) << "kthread_key_delete is called on invalid " << key;
    return EINVAL;
}

// NOTE: Can't borrow_keytable in kthread_setspecific, otherwise following
// memory leak may occur:
//  -> kthread_getspecific fails to borrow_keytable and returns nullptr.
//  -> kthread_setspecific succeeds to borrow_keytable and overwrites old data
//     at the position with newly created data, the old data is leaked.
// Stores `data' under `key' for the calling (k)thread, creating this
// thread's KeyTable on first use. Returns 0 on success, ENOMEM when the
// table cannot be allocated, EINVAL for an invalid/deleted key.
int kthread_setspecific(kthread_key_t key, void *data) {
    kthread::KeyTable *kt = kthread::tls_bls.keytable;
    if (nullptr == kt) {
        kt = new(std::nothrow) kthread::KeyTable;
        if (nullptr == kt) {
            return ENOMEM;
        }
        kthread::tls_bls.keytable = kt;
        kthread::TaskGroup *const g = kthread::tls_task_group;
        if (g) {
            // Running inside a kthread: also publish the table on the task.
            g->current_task()->local_storage.keytable = kt;
        } else {
            // Only cleanup keytable created by pthread.
            // keytable created by kthread will be deleted
            // in `return_keytable' or `kthread_keytable_pool_destroy'.
            if (!kthread::tls_ever_created_keytable) {
                kthread::tls_ever_created_keytable = true;
                KCHECK_EQ(0, turbo::thread_atexit(kthread::cleanup_pthread, kt));
            }
        }
    }
    return kt->set_data(key, data);
}

// Reads the value stored under `key' for the current (k)thread, or nullptr.
// Creation of a missing KeyTable is deliberately left to kthread_setspecific
// (it would leak here, see the NOTE above kthread_setspecific); this function
// only tries to borrow an already-populated table from the task's pool.
void *kthread_getspecific(kthread_key_t key) {
    // Fast path: this thread already has a KeyTable installed.
    if (kthread::KeyTable *const kt = kthread::tls_bls.keytable) {
        return kt->get_data(key);
    }
    kthread::TaskGroup *const g = kthread::tls_task_group;
    if (g == nullptr) {
        return nullptr;
    }
    // Inside a kthread: borrow a cached table from the task's pool.
    kthread::TaskMeta *const task = g->current_task();
    kthread::KeyTable *const borrowed =
            kthread::borrow_keytable(task->attr.keytable_pool);
    if (borrowed == nullptr) {
        return nullptr;
    }
    g->current_task()->local_storage.keytable = borrowed;
    kthread::tls_bls.keytable = borrowed;
    return borrowed->get_data(key);
}

// Attaches an opaque pointer to the current thread's local storage; read it
// back with kthread_get_assigned_data.
void kthread_assign_data(void *data) {
    kthread::tls_bls.assigned_data = data;
}

// Returns the pointer last set by kthread_assign_data on this thread.
void *kthread_get_assigned_data() {
    return kthread::tls_bls.assigned_data;
}

}  // extern "C"
