// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <turbo/base/compat.h>
#include <new>                                   // std::nothrow
#include <algorithm>                             // std::fill
#include <cerrno>                                // errno
#include <cstring>                               // memset
#include <mutex>                                 // std::mutex, std::lock_guard
#include <unistd.h>                              // close(), pipe()
#include <sys/poll.h>                            // poll()
#if defined(OS_MACOSX)
#include <sys/types.h>                           // struct kevent
#include <sys/event.h>                           // kevent(), kqueue()
#endif
#include <atomic>
#include <turbo/times/time.h>
#include <turbo/base/fd_utility.h>
#include <turbo/log/logging.h>
#include <turbo/hash/m3.h>
#include <turbo/memory/scope_guard.h>
#include <kthread/internal/kutex.h>                       // kutex_*
#include <kthread/internal/task_group.h>                  // TaskGroup
#include <kthread/internal/kthread.h>                             // kthread_start_urgent
#if defined(OS_LINUX)
#include <sys/epoll.h>
#elif defined(OS_MACOSX)
#endif
namespace kutil {
#if defined(OS_LINUX)

    // Converts a set of EPOLL* flags into the matching POLL* flags.
    // Most POLL* and EPOLL* constants share the same numeric values, so
    // this is a plain mask; the debug check fires if the caller passed an
    // event outside the mapped set (which would be silently dropped).
    static short epoll_to_poll_events(uint32_t epoll_events) {
        constexpr uint32_t kMappedEvents =
                EPOLLIN | EPOLLPRI | EPOLLOUT |
                EPOLLRDNORM | EPOLLRDBAND |
                EPOLLWRNORM | EPOLLWRBAND |
                EPOLLMSG | EPOLLERR | EPOLLHUP;
        const short poll_events = static_cast<short>(epoll_events & kMappedEvents);
        DKCHECK_EQ((uint32_t) poll_events, epoll_events);
        return poll_events;
    }

#elif defined(OS_MACOSX)
    // Maps a kqueue filter id to the poll() event it corresponds to.
    // Unlike epoll flags, kqueue filters are plain enumerators (not bit
    // masks), hence the per-value dispatch instead of masking.
    short kqueue_to_poll_events(int kqueue_events) {
        //TODO: add more values?
        switch (kqueue_events) {
        case EVFILT_READ:
            return POLLIN;
        case EVFILT_WRITE:
            return POLLOUT;
        default:
            return 0;
        }
    }
#endif
    // Blocks the calling pthread with poll() until `fd` reports one of
    // `events` (encoded as epoll flags on Linux, a kqueue filter on
    // macOS) or `abstime` expires (nullptr = wait forever).
    // Returns 0 when the fd is ready; -1 with errno set otherwise:
    // ETIMEDOUT on expiry, EBADF on an invalid fd, EINVAL when `events`
    // maps to no poll() event.
    int pthread_fd_wait(int fd, unsigned events,
                        const timespec *abstime) {
        int timeout_ms = -1;  // -1 makes poll() block indefinitely
        if (abstime) {
            const auto now = turbo::Time::current_time();
            const auto deadline = turbo::Time::from_timespec(*abstime);
            if (deadline <= now) {
                errno = ETIMEDOUT;
                return -1;
            }
            // Round the remaining time up to a whole millisecond so we
            // never wake before the deadline.
            timeout_ms = turbo::Duration::to_milliseconds(
                    deadline - now + turbo::Duration::microseconds(999L));
        }
#if defined(OS_LINUX)
        const short poll_events = epoll_to_poll_events(events);
#elif defined(OS_MACOSX)
        const short poll_events = kqueue_to_poll_events(events);
#endif
        if (poll_events == 0) {
            errno = EINVAL;
            return -1;
        }
        pollfd request = {fd, poll_events, 0};
        const int nready = poll(&request, 1, timeout_ms);
        if (nready < 0) {
            return -1;
        }
        if (nready == 0) {
            errno = ETIMEDOUT;
            return -1;
        }
        if (request.revents & POLLNVAL) {
            errno = EBADF;
            return -1;
        }
        return 0;
    }
}

// Implement kthread functions on file descriptors

namespace kthread {

extern TURBO_THREAD_LOCAL TaskGroup* tls_task_group;

// A two-level array of std::atomic<T> slots whose second-level blocks are
// allocated on demand. Lookups are lock-free; blocks are published with a
// CAS and never freed, so returned slot pointers remain valid for the
// lifetime of the array. Addresses NBLOCK * BLOCK_SIZE items in total.
template <typename T, size_t NBLOCK, size_t BLOCK_SIZE>
class LazyArray {
    struct Block {
        std::atomic<T> items[BLOCK_SIZE];
    };

public:
    LazyArray() {
        // Store nullptr into each slot explicitly. The previous code
        // memset() the std::atomic objects, which is formally undefined
        // behavior (atomics are not guaranteed trivially copyable).
        for (size_t i = 0; i < NBLOCK; ++i) {
            _blocks[i].store(nullptr, std::memory_order_relaxed);
        }
    }

    // Returns the slot for `index`, allocating its block on first use.
    // Returns nullptr when `index` is out of range or allocation fails.
    std::atomic<T>* get_or_new(size_t index) {
        const size_t block_index = index / BLOCK_SIZE;
        if (block_index >= NBLOCK) {
            return nullptr;
        }
        const size_t block_offset = index - block_index * BLOCK_SIZE;
        Block* b = _blocks[block_index].load(std::memory_order_consume);
        if (b != nullptr) {
            return b->items + block_offset;
        }
        b = new (std::nothrow) Block;
        if (nullptr == b) {
            // Allocation failed; a concurrent caller may have installed
            // the block meanwhile, so re-check before giving up.
            b = _blocks[block_index].load(std::memory_order_consume);
            return (b ? b->items + block_offset : nullptr);
        }
        // Set items to default value of T.
        std::fill(b->items, b->items + BLOCK_SIZE, T());
        Block* expected = nullptr;
        if (_blocks[block_index].compare_exchange_strong(
                expected, b, std::memory_order_release,
                std::memory_order_consume)) {
            return b->items + block_offset;
        }
        // Lost the publication race: use the winner's block.
        delete b;
        return expected->items + block_offset;
    }

    // Returns the slot for `index`, or nullptr when the index is out of
    // range or the block has not been created yet. Never allocates.
    std::atomic<T>* get(size_t index) const {
        const size_t block_index = index / BLOCK_SIZE;
        if (__builtin_expect(block_index < NBLOCK, 1)) {
            const size_t block_offset = index - block_index * BLOCK_SIZE;
            Block* const b = _blocks[block_index].load(std::memory_order_consume);
            if (__builtin_expect(b != nullptr, 1)) {
                return b->items + block_offset;
            }
        }
        return nullptr;
    }

private:
    std::atomic<Block*> _blocks[NBLOCK];
};

// One kutex (futex-like wait word) per file descriptor; kthreads park on
// it in EpollThread::fd_wait() and are woken when the fd becomes ready.
typedef std::atomic<int> EpollButex;

// Sentinel stored into an fd's slot while kthread_close() is in progress;
// fd_wait() spins (sched_yield) until it is replaced, and a second
// concurrent close detects it and fails with EBADF.
static EpollButex* const CLOSING_GUARD = (EpollButex*)(intptr_t)-1L;

#ifndef NDEBUG
    // Debug-only counter of EINTR wakeups seen by the epoll loop.
    std::atomic<int> break_nums = 0;
#endif

// Able to address 67108864 file descriptors, should be enough.
LazyArray<EpollButex*, 262144/*NBLOCK*/, 256/*BLOCK_SIZE*/> fd_kutexes;

// Size hint handed to epoll_create(); modern kernels ignore the value.
static const int KTHREAD_DEFAULT_EPOLL_SIZE = 65536;

// Runs one epoll (Linux) / kqueue (macOS) instance on a background
// kthread. fd_wait() registers <fd, kutex> pairs; when a registered fd
// becomes ready, run() increments the kutex and wakes every kthread
// parked on it via kutex_wake_all().
class EpollThread {
public:
    EpollThread()
        : _epfd(-1)
        , _stop(false)
        , _tid(0) {
    }

    // Lazily creates the epoll/kqueue fd and starts the polling kthread.
    // `epoll_size` is only a sizing hint for epoll_create(). Returns 0 on
    // success, -1 when already started or on failure.
    int start(int epoll_size) {
        if (started()) {
            return -1;
        }
        {
            // RAII lock instead of manual lock()/unlock(): released on
            // every path out of this scope.
            std::lock_guard<std::mutex> lock(_start_mutex);
            // Double check under the lock.
            if (started()) {
                return -1;
            }
#if defined(OS_LINUX)
            _epfd = epoll_create(epoll_size);
#elif defined(OS_MACOSX)
            _epfd = kqueue();
#endif
        }
        if (_epfd < 0) {
            PKLOG(FATAL) << "Fail to epoll_create/kqueue";
            return -1;
        }
        if (kthread_start_background(
                &_tid, nullptr, EpollThread::run_this, this) != 0) {
            close(_epfd);
            _epfd = -1;
            KLOG(FATAL) << "Fail to create epoll kthread";
            return -1;
        }
        return 0;
    }

    // Note: This function does not wake up suspended fd_wait. This is fine
    // since stop_and_join is only called on program's termination
    // (g_task_control.stop()), suspended kthreads do not block quit of
    // worker pthreads and completion of g_task_control.stop().
    int stop_and_join() {
        if (!started()) {
            return 0;
        }
        // No matter what this function returns, _epfd will be set to -1
        // (making started() false) to avoid latter stop_and_join() to
        // enter again.
        const int saved_epfd = _epfd;
        _epfd = -1;

        // epoll_wait cannot be woken up by closing _epfd. We wake up
        // epoll_wait by inserting a fd continuously triggering EPOLLOUT.
        // Visibility of _stop: constant EPOLLOUT forces epoll_wait to see
        // _stop (to be true) finally.
        _stop = true;
        int closing_epoll_pipe[2];
        if (pipe(closing_epoll_pipe)) {
            PKLOG(FATAL) << "Fail to create closing_epoll_pipe";
            return -1;
        }
#if defined(OS_LINUX)
        epoll_event evt = { EPOLLOUT, { nullptr } };
        if (epoll_ctl(saved_epfd, EPOLL_CTL_ADD,
                      closing_epoll_pipe[1], &evt) < 0) {
#elif defined(OS_MACOSX)
        struct kevent kqueue_event;
        EV_SET(&kqueue_event, closing_epoll_pipe[1], EVFILT_WRITE, EV_ADD | EV_ENABLE,
                0, 0, nullptr);
        if (kevent(saved_epfd, &kqueue_event, 1, nullptr, 0, nullptr) < 0) {
#endif
            PKLOG(FATAL) << "Fail to add closing_epoll_pipe into epfd="
                        << saved_epfd;
            // The pipe was never registered, so closing it here cannot
            // affect the polling thread. (Previously both ends leaked on
            // this path.)
            close(closing_epoll_pipe[0]);
            close(closing_epoll_pipe[1]);
            return -1;
        }

        const int rc = kthread_join(_tid, nullptr);
        if (rc) {
            KLOG(FATAL) << "Fail to join EpollThread, " << km_error(rc);
            // Intentionally keep the pipe open here: the unjoined polling
            // thread may still be watching its write end.
            return -1;
        }
        close(closing_epoll_pipe[0]);
        close(closing_epoll_pipe[1]);
        close(saved_epfd);
        return 0;
    }

    // Suspends the calling kthread until `fd` reports one of `events`
    // (epoll flags on Linux, a kqueue filter on macOS) or `abstime`
    // expires (nullptr = no deadline). Returns 0 on wakeup, -1 with
    // errno set on failure.
    int fd_wait(int fd, unsigned events, const timespec* abstime) {
        std::atomic<EpollButex*>* p = fd_kutexes.get_or_new(fd);
        if (nullptr == p) {
            errno = ENOMEM;
            return -1;
        }

        EpollButex* kutex = p->load(std::memory_order_consume);
        if (nullptr == kutex) {
            // It is rare to wait on one file descriptor from multiple threads
            // simultaneously. Creating singleton by optimistic locking here
            // saves mutexes for each kutex.
            kutex = kutex_create_checked<EpollButex>();
            kutex->store(0, std::memory_order_relaxed);
            EpollButex* expected = nullptr;
            if (!p->compare_exchange_strong(expected, kutex,
                                            std::memory_order_release,
                                            std::memory_order_consume)) {
                kutex_destroy(kutex);
                kutex = expected;
            }
        }

        while (kutex == CLOSING_GUARD) {  // kthread_close() is running.
            if (sched_yield() < 0) {
                return -1;
            }
            kutex = p->load(std::memory_order_consume);
        }
        // Save value of kutex before adding to epoll because the kutex may
        // be changed before kutex_wait. No memory fence because EPOLL_CTL_MOD
        // and EPOLL_CTL_ADD shall have release fence.
        const int expected_val = kutex->load(std::memory_order_relaxed);

#if defined(OS_LINUX)
# ifdef BAIDU_KERNEL_FIXED_EPOLLONESHOT_BUG
        epoll_event evt = { events | EPOLLONESHOT, { kutex } };
        if (epoll_ctl(_epfd, EPOLL_CTL_MOD, fd, &evt) < 0) {
            if (epoll_ctl(_epfd, EPOLL_CTL_ADD, fd, &evt) < 0 &&
                    errno != EEXIST) {
                PKLOG(FATAL) << "Fail to add fd=" << fd << " into epfd=" << _epfd;
                return -1;
            }
        }
# else
        epoll_event evt;
        evt.events = events;
        evt.data.fd = fd;
        if (epoll_ctl(_epfd, EPOLL_CTL_ADD, fd, &evt) < 0 &&
            errno != EEXIST) {
            PKLOG(ERROR) << "Fail to add fd=" << fd << " into epfd=" << _epfd;
            return -1;
        }
# endif
#elif defined(OS_MACOSX)
        struct kevent kqueue_event;
        EV_SET(&kqueue_event, fd, events, EV_ADD | EV_ENABLE | EV_ONESHOT,
                0, 0, kutex);
        if (kevent(_epfd, &kqueue_event, 1, nullptr, 0, nullptr) < 0) {
            PKLOG(FATAL) << "Fail to add fd=" << fd << " into kqueuefd=" << _epfd;
            return -1;
        }
#endif
        // EWOULDBLOCK means the kutex changed before sleeping (already
        // woken); EINTR means interrupted — both count as normal wakeups.
        if (kutex_wait(kutex, expected_val, abstime) < 0 &&
            errno != EWOULDBLOCK && errno != EINTR) {
            return -1;
        }
        return 0;
    }

    // Closes `fd`, first waking every kthread suspended in fd_wait() on
    // it. CLOSING_GUARD is parked in the fd's slot for the duration so
    // concurrent fd_wait()/fd_close() calls can detect the close.
    int fd_close(int fd) {
        if (fd < 0) {
            // what close(-1) returns
            errno = EBADF;
            return -1;
        }
        std::atomic<EpollButex*>* pkutex = kthread::fd_kutexes.get(fd);
        if (nullptr == pkutex) {
            // Did not call kthread_fd functions, close directly.
            return close(fd);
        }
        EpollButex* kutex = pkutex->exchange(
            CLOSING_GUARD, std::memory_order_relaxed);
        if (kutex == CLOSING_GUARD) {
            // concurrent double close detected.
            errno = EBADF;
            return -1;
        }
        if (kutex != nullptr) {
            kutex->fetch_add(1, std::memory_order_relaxed);
            kutex_wake_all(kutex);
        }
#if defined(OS_LINUX)
        epoll_ctl(_epfd, EPOLL_CTL_DEL, fd, nullptr);
#elif defined(OS_MACOSX)
        struct kevent evt;
        EV_SET(&evt, fd, EVFILT_WRITE, EV_DELETE, 0, 0, nullptr);
        kevent(_epfd, &evt, 1, nullptr, 0, nullptr);
        EV_SET(&evt, fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr);
        kevent(_epfd, &evt, 1, nullptr, 0, nullptr);
#endif
        const int rc = close(fd);
        // Restore the kutex pointer, lifting the CLOSING_GUARD.
        pkutex->exchange(kutex, std::memory_order_relaxed);
        return rc;
    }

    bool started() const {
        return _epfd >= 0;
    }

private:
    // Trampoline matching the kthread_start_background signature.
    static void* run_this(void* arg) {
        return static_cast<EpollThread*>(arg)->run();
    }

    // The polling loop: waits for ready fds and wakes the kutex attached
    // to each. Exits once _stop is observed true.
    void* run() {
        const int initial_epfd = _epfd;
        const size_t MAX_EVENTS = 32;
#if defined(OS_LINUX)
        epoll_event* e = new (std::nothrow) epoll_event[MAX_EVENTS];
#elif defined(OS_MACOSX)
        typedef struct kevent KEVENT;
        struct kevent* e = new (std::nothrow) KEVENT[MAX_EVENTS];
#endif
        if (nullptr == e) {
            KLOG(FATAL) << "Fail to new epoll_event";
            return nullptr;
        }

#if defined(OS_LINUX)
# ifndef BAIDU_KERNEL_FIXED_EPOLLONESHOT_BUG
        DKLOG(INFO) << "Use DEL+ADD instead of EPOLLONESHOT+MOD due to kernel bug. Performance will be much lower.";
# endif
#endif
        while (!_stop) {
            const int epfd = _epfd;
#if defined(OS_LINUX)
            const int n = epoll_wait(epfd, e, MAX_EVENTS, -1);
#elif defined(OS_MACOSX)
            const int n = kevent(epfd, nullptr, 0, e, MAX_EVENTS, nullptr);
#endif
            if (_stop) {
                break;
            }

            if (n < 0) {
                if (errno == EINTR) {
#ifndef NDEBUG
                    break_nums.fetch_add(1, std::memory_order_relaxed);
                    int* p = &errno;
                    const char* b = km_error();
                    const char* b2 = km_error(errno);
                    DKLOG(FATAL) << "Fail to epoll epfd=" << epfd << ", "
                                << errno << " " << p << " " <<  b << " " <<  b2;
#endif
                    continue;
                }

                PKLOG(INFO) << "Fail to epoll epfd=" << epfd;
                break;
            }

#if defined(OS_LINUX)
# ifndef BAIDU_KERNEL_FIXED_EPOLLONESHOT_BUG
            // Without working EPOLLONESHOT, deregister ready fds manually
            // so a level-triggered fd does not spin the loop.
            for (int i = 0; i < n; ++i) {
                epoll_ctl(epfd, EPOLL_CTL_DEL, e[i].data.fd, nullptr);
            }
# endif
#endif
            for (int i = 0; i < n; ++i) {
#if defined(OS_LINUX)
# ifdef BAIDU_KERNEL_FIXED_EPOLLONESHOT_BUG
                EpollButex* kutex = static_cast<EpollButex*>(e[i].data.ptr);
# else
                std::atomic<EpollButex*>* pkutex = fd_kutexes.get(e[i].data.fd);
                EpollButex* kutex = pkutex ?
                    pkutex->load(std::memory_order_consume) : nullptr;
# endif
#elif defined(OS_MACOSX)
                EpollButex* kutex = static_cast<EpollButex*>(e[i].udata);
#endif
                if (kutex != nullptr && kutex != CLOSING_GUARD) {
                    kutex->fetch_add(1, std::memory_order_relaxed);
                    kutex_wake_all(kutex);
                }
            }
        }

        delete [] e;
        DKLOG(INFO) << "EpollThread=" << _tid << "(epfd="
                   << initial_epfd << ") is about to stop";
        return nullptr;
    }

    int _epfd;
    // NOTE(review): plain bool written by stop_and_join() and read by
    // run() without synchronization — formally a data race; the
    // pipe-based EPOLLOUT wakeup makes it converge in practice, but
    // std::atomic<bool> would be strictly correct. Confirm before changing.
    bool _stop;
    kthread_t _tid;
    std::mutex _start_mutex;
};

EpollThread epoll_thread[KTHREAD_EPOLL_THREAD_NUM];

// Returns the EpollThread shard owning `fd`, starting it lazily.
// start() is a no-op (returns -1) when the shard is already running.
// The single-shard case skips hashing entirely.
static inline EpollThread& get_epoll_thread(int fd) {
    const size_t shard = (KTHREAD_EPOLL_THREAD_NUM == 1UL)
            ? 0
            : (turbo::fmix32(fd) % KTHREAD_EPOLL_THREAD_NUM);
    EpollThread& et = epoll_thread[shard];
    et.start(KTHREAD_DEFAULT_EPOLL_SIZE);
    return et;
}

//TODO(zhujiashun): change name
// Stops and joins every epoll thread.
// Returns -1 if any epoll thread failed to stop.
int stop_and_join_epoll_threads() {
    int rc = 0;
    for (size_t i = 0; i < KTHREAD_EPOLL_THREAD_NUM; ++i) {
        rc = (epoll_thread[i].stop_and_join() < 0) ? -1 : rc;
    }
    return rc;
}

// For pthreads.
// Thin forwarder to the poll()-based implementation in kutil, exposed in
// namespace kthread so callers need not reference kutil directly.
int pthread_fd_wait(int fd, unsigned events,
                    const timespec* abstime) {
    return kutil::pthread_fd_wait(fd, events, abstime);
}

}  // namespace kthread

extern "C" {

// Blocks until `fd` reports one of `events` (no deadline).
// A worker kthread parks on the shared EpollThread; a plain pthread (or
// the pthread-task context) falls back to a poll()-based wait.
int kthread_fd_wait(int fd, unsigned events) {
    if (fd < 0) {
        errno = EINVAL;
        return -1;
    }
    kthread::TaskGroup* g = kthread::tls_task_group;
    const bool in_worker = (nullptr != g) && !g->is_current_pthread_task();
    if (in_worker) {
        return kthread::get_epoll_thread(fd).fd_wait(fd, events, nullptr);
    }
    return kthread::pthread_fd_wait(fd, events, nullptr);
}

// Blocks until `fd` reports one of `events` or the absolute deadline
// `abstime` passes. A nullptr deadline degenerates to kthread_fd_wait().
int kthread_fd_timedwait(int fd, unsigned events,
                         const timespec* abstime) {
    if (nullptr == abstime) {
        return kthread_fd_wait(fd, events);
    }
    if (fd < 0) {
        errno = EINVAL;
        return -1;
    }
    kthread::TaskGroup* g = kthread::tls_task_group;
    const bool in_worker = (nullptr != g) && !g->is_current_pthread_task();
    return in_worker
            ? kthread::get_epoll_thread(fd).fd_wait(fd, events, abstime)
            : kthread::pthread_fd_wait(fd, events, abstime);
}

int kthread_connect(int sockfd, const sockaddr* serv_addr,
                    socklen_t addrlen) {
    kthread::TaskGroup* g = kthread::tls_task_group;
    if (nullptr == g || g->is_current_pthread_task()) {
        return ::connect(sockfd, serv_addr, addrlen);
    }

    bool is_blocking = turbo::is_blocking(sockfd);
    if (is_blocking) {
        turbo::make_non_blocking(sockfd);
    }
    // Scoped non-blocking.
    auto guard = turbo::MakeScopeGuard([is_blocking, sockfd]() {
        if (is_blocking) {
            turbo::make_blocking(sockfd);
        }
    });

    const int rc = ::connect(sockfd, serv_addr, addrlen);
    if (rc == 0 || errno != EINPROGRESS) {
        return rc;
    }
#if defined(OS_LINUX)
    if (kthread_fd_wait(sockfd, EPOLLOUT) < 0) {
#elif defined(OS_MACOSX)
    if (kthread_fd_wait(sockfd, EVFILT_WRITE) < 0) {
#endif
        return -1;
    }
    int err;
    socklen_t errlen = sizeof(err);
    if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &err, &errlen) < 0) {
        PKLOG(FATAL) << "Fail to getsockopt";
        return -1;
    }
    if (err != 0) {
        KCHECK(err != EINPROGRESS);
        errno = err;
        return -1;
    }
    return 0;
}

int kthread_timed_connect(int sockfd, const struct sockaddr* serv_addr,
                          socklen_t addrlen, const timespec* abstime) {
    if (!abstime) {
        return kthread_connect(sockfd, serv_addr, addrlen);
    }

    bool is_blocking = turbo::is_blocking(sockfd);
    if (is_blocking) {
        turbo::make_non_blocking(sockfd);
    }
    // Scoped non-blocking.
    auto guard = turbo::MakeScopeGuard([is_blocking, sockfd]() {
        if (is_blocking) {
            turbo::make_blocking(sockfd);
        }
    });

    const int rc = ::connect(sockfd, serv_addr, addrlen);
    if (rc == 0 || errno != EINPROGRESS) {
        return rc;
    }
#if defined(OS_LINUX)
    if (kthread_fd_timedwait(sockfd, EPOLLOUT, abstime) < 0) {
#elif defined(OS_MACOSX)
    if (kthread_fd_timedwait(sockfd, EVFILT_WRITE, abstime) < 0) {
#endif
        return -1;
    }

    int err;
    socklen_t errlen = sizeof(err);
    if (getsockopt(sockfd, SOL_SOCKET, SO_ERROR, &err, &errlen) < 0) {
        PKLOG(FATAL) << "Fail to getsockopt";
        return -1;
    }
    if (err != 0) {
        KCHECK(err != EINPROGRESS);
        errno = err;
        return -1;
    }
    return 0;
}

// This does not wake pthreads calling kthread_fd_*wait.
// Routes the close through the EpollThread shard owning `fd` so that
// kthreads suspended in fd_wait() on this fd are woken (and the fd is
// removed from the epoll/kqueue set) before the underlying close().
int kthread_close(int fd) {
    return kthread::get_epoll_thread(fd).fd_close(fd);
}

}  // extern "C"
