#include <Common/setThreadName.h>
#include <Common/threadPoolCallbackRunner.h>

#include <Common/futex.h>

namespace DB
{

namespace ErrorCodes
{
    /// Thrown when the runner is used before initThreadPool() (see operator() and bulkSchedule).
    extern const int LOGICAL_ERROR;
}

ThreadPoolCallbackRunnerFast::ThreadPoolCallbackRunnerFast() = default;

/// Switches the runner into Mode::ThreadPool, attaching it to an externally owned ThreadPool.
/// Must be called at most once (enforced by the assert on `pool` still being null) and before
/// any tasks are scheduled.
void ThreadPoolCallbackRunnerFast::initThreadPool(ThreadPool & pool_, size_t max_threads_, ThreadName thread_name_, ThreadGroupPtr thread_group_)
{
    chassert(!pool);

    pool = &pool_;
    thread_name = thread_name_;
    thread_group = thread_group_;
    max_threads = max_threads_;
    mode = Mode::ThreadPool;
}

/// Constructs the runner in a non-thread-pool mode. Mode::ThreadPool must be set up
/// via initThreadPool() instead, so it is rejected here.
ThreadPoolCallbackRunnerFast::ThreadPoolCallbackRunnerFast(Mode mode_) : mode(mode_)
{
    chassert(mode != Mode::ThreadPool);
}

/// Stops all worker threads before the members they use are destroyed.
/// Tasks still sitting in the queue at this point are never executed (see shutdown()).
ThreadPoolCallbackRunnerFast::~ThreadPoolCallbackRunnerFast()
{
    shutdown();
}

/// Sets the shutdown flag, wakes every sleeping worker, and blocks until all workers exit.
/// Remaining queued tasks are left unexecuted.
void ThreadPoolCallbackRunnerFast::shutdown()
{
    /// May be called multiple times.
    std::unique_lock lock(mutex);
    shutdown_requested = true;
#ifdef OS_LINUX
    /// Bump the futex word by a huge amount so every worker's "queue empty" check
    /// (queue_size == 0) fails and all sleepers wake up; under the mutex they then
    /// observe shutdown_requested and exit. UINT32_MAX / 4 presumably leaves headroom
    /// so concurrent increments can't wrap the 32-bit counter.
    const UInt32 a_lot = UINT32_MAX / 4;
    queue_size += a_lot;
    futexWake(&queue_size, a_lot);
#else
    queue_cv.notify_all();
#endif
    /// The wait releases `mutex`, letting workers take it to decrement `threads`.
    shutdown_cv.wait(lock, [&] { return threads == 0; });

    /// Sanity check: every task still counted as active must still be in the queue,
    /// i.e. no worker exited while holding a task.
    if (mode == Mode::ThreadPool)
        chassert(active_tasks.load() == queue.size());
}

/// Spawns worker threads until there is one per pending task, capped at `max_threads`.
/// Must be called with `mutex` held — witnessed by the (unused) lock argument.
/// scheduleOrThrow may throw; `threads` counts only successfully scheduled workers.
void ThreadPoolCallbackRunnerFast::startMoreThreadsIfNeeded(size_t active_tasks_, std::unique_lock<std::mutex> &)
{
    for (;;)
    {
        const bool need_another_thread = threads < max_threads && threads < active_tasks_ && !shutdown_requested;
        if (!need_another_thread)
            break;

        pool->scheduleOrThrow([this] { threadFunction(); });
        /// Incremented only after a successful schedule so an exception leaves `threads` accurate.
        ++threads;
    }
}

/// Enqueues one task, starting an extra worker thread if useful.
/// Throws LOGICAL_ERROR if the runner was never initialized.
void ThreadPoolCallbackRunnerFast::operator()(std::function<void()> f)
{
    if (mode == Mode::Disabled)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread pool runner is not initialized");

    /// Count the task up front so startMoreThreadsIfNeeded sees an up-to-date total.
    size_t active_tasks_ = 1 + active_tasks.fetch_add(1, std::memory_order_relaxed);

    {
        std::unique_lock lock(mutex);
        queue.push_back(std::move(f));

        startMoreThreadsIfNeeded(active_tasks_, lock);
    }

    if (mode == Mode::ThreadPool)
    {
#ifdef OS_LINUX
        /// The release pairs with the acquire CAS in threadFunction.
        UInt32 prev_size = queue_size.fetch_add(1, std::memory_order_release);
        /// If the counter was already >= max_threads, all workers presumably have
        /// pending work to pick up anyway, so the futex syscall can be skipped.
        if (prev_size < max_threads)
            futexWake(&queue_size, 1);
#else
        queue_cv.notify_one();
#endif
    }
}

/// Enqueues a batch of tasks in one lock acquisition and one futex operation.
/// Throws LOGICAL_ERROR if the runner was never initialized; on thread-start failure
/// the batch is rolled back and the exception rethrown.
void ThreadPoolCallbackRunnerFast::bulkSchedule(std::vector<std::function<void()>> fs)
{
    if (fs.empty())
        return;

    if (mode == Mode::Disabled)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Thread pool runner is not initialized");

    size_t n = fs.size();
    /// Count the tasks up front so startMoreThreadsIfNeeded sees an up-to-date total.
    size_t active_tasks_ = n + active_tasks.fetch_add(n, std::memory_order_relaxed);

    {
        std::unique_lock lock(mutex);
        queue.insert(queue.end(), std::move_iterator(fs.begin()), std::move_iterator(fs.end()));

        try
        {
            startMoreThreadsIfNeeded(active_tasks_, lock);
        }
        catch (...)
        {
            /// Keep `queue` consistent with `queue_size`.
            /// Roll back our batch: the last n elements are still ours because the mutex
            /// is held, so no worker can have popped them. queue_size was not yet bumped.
            queue.erase(queue.end() - n, queue.end());
            active_tasks.fetch_sub(n, std::memory_order_relaxed);
            throw;
        }
    }

    if (mode == Mode::ThreadPool)
    {
#ifdef OS_LINUX
        /// The release pairs with the acquire CAS in threadFunction.
        UInt32 prev_size = queue_size.fetch_add(n, std::memory_order_release);
        /// Skip the syscall when workers presumably already have work to pick up.
        if (prev_size < max_threads)
            futexWake(&queue_size, n);
#else
        if (n < 4)
            for (size_t i = 0; i < n; ++i)
                queue_cv.notify_one();
        else
            queue_cv.notify_all();
#endif
    }
}

/// Pops one task (if any) and executes it on the calling thread, outside the mutex.
/// Returns false when the queue was empty, true when a task was run.
bool ThreadPoolCallbackRunnerFast::runTaskInline()
{
    std::function<void()> task;

    {
        std::lock_guard lock(mutex);
        if (queue.empty())
            return false;
        task = std::move(queue.front());
        queue.pop_front();
    }

    /// Run with the mutex released so the task may schedule more work.
    task();
    active_tasks.fetch_sub(1, std::memory_order_relaxed);
    return true;
}

/// Worker thread body: repeatedly claims one unit from `queue_size` (Linux: futex;
/// elsewhere: condition variable), pops a task under the mutex, and runs it.
/// Exits after THREAD_IDLE_TIMEOUT_NS of idleness or when shutdown is requested.
void ThreadPoolCallbackRunnerFast::threadFunction()
{
    /// optional<> so the switcher can be destroyed *before* `threads` is decremented; see below.
    std::optional<ThreadGroupSwitcher> switcher;
    switcher.emplace(thread_group, thread_name);

    while (true)
    {
        bool timed_out = false;

#ifdef OS_LINUX
        UInt32 x = queue_size.load(std::memory_order_relaxed);
        while (true)
        {
            if (x == 0)
            {
                /// Sleep until the counter leaves zero or the idle timeout expires.
                Int64 waited = futexTimedWait(&queue_size, 0, THREAD_IDLE_TIMEOUT_NS);
                x = queue_size.load(std::memory_order_relaxed);

                if (waited < 0 && errno == ETIMEDOUT && x == 0)
                {
                    timed_out = true;
                    break;
                }
            }
            else if (queue_size.compare_exchange_weak(
                        x, x - 1, std::memory_order_acquire, std::memory_order_relaxed))
                /// Claimed one queued unit; the acquire pairs with the producers' release fetch_add.
                break;
        }
#endif

        std::function<void()> f;
        {
            std::unique_lock lock(mutex);

#ifdef OS_LINUX
            /// Important to never stop the last thread if queue is not empty (checked under the
            /// same `lock` as decrementing `threads`). Otherwise we'll deadlock like this:
            ///  0. `threads` == 1, queue is empty.
            ///  1. The worker thread times out; it didn't lock mutex or decrement `threads` yet.
            ///  2. A manager thread enqueues a task. It sees active_tasks == 1 and `threads` == 1,
            ///     so it doesn't start another thread.
            ///  3. The worker thread exits.
            ///  4. There are no threads, but the queue is not empty, oops.
            if (timed_out && !queue.empty() && !shutdown_requested)
                /// We can't just proceed to `queue.pop_front()` here because we haven't
                /// decremented queue_size.
                continue;
#else
            timed_out = !queue_cv.wait_for(
                lock, std::chrono::nanoseconds(THREAD_IDLE_TIMEOUT_NS),
                [&] { return shutdown_requested || !queue.empty(); });
#endif

            if (shutdown_requested || timed_out)
            {
                /// Important that we destroy the `ThreadGroupSwitcher` before decrementing `threads`.
                /// Otherwise ~ThreadGroupSwitcher may access global Context after the query is
                /// finished, which may race with mutating Context (specifically, Settings) at the
                /// start of next query.
                switcher.reset();

                threads -= 1;
                if (threads == 0)
                    shutdown_cv.notify_all();

                return;
            }

            chassert(!queue.empty());

            f = std::move(queue.front());
            queue.pop_front();
        }

        try
        {
            f();

            CurrentThread::updatePerformanceCountersIfNeeded();
        }
        catch (...)
        {
            /// Tasks are not expected to throw; in release builds log and keep the worker alive.
            tryLogCurrentException("FastThreadPool");
            chassert(false);
        }

        /// Decremented only after the task completed, matching the increments in the schedulers.
        active_tasks.fetch_sub(1, std::memory_order_relaxed);
    }

    /// Unreachable: the loop only exits via `return` above.
    chassert(false);
}

bool ShutdownHelper::try_lock_shared()
{
    Int64 n = val.fetch_add(1, std::memory_order_acquire) + 1;
    chassert(n != SHUTDOWN_START);
    if (n >= SHUTDOWN_START)
    {
        unlock_shared();
        return false;
    }
    return true;
}

/// Releases a shared lock. The last holder to release after shutdown began
/// (observing n == SHUTDOWN_START) publishes SHUTDOWN_END and wakes the waiters.
void ShutdownHelper::unlock_shared()
{
    Int64 n = val.fetch_sub(1, std::memory_order_release) - 1;
    chassert(n >= 0);
    if (n == SHUTDOWN_START)
    {
        /// We're the last completed task. Add SHUTDOWN_END to indicate that no further waiting
        /// or cv notifying is needed, even though `val` can get briefly bumped up and down by
        /// unsuccessful try_lock_shared() calls.
        val.fetch_add(SHUTDOWN_END);
        {
            /// Lock and unlock the mutex. This may look weird, but this is usually (always?)
            /// required to avoid race conditions when combining condition_variable with atomics.
            ///
            /// In this case, the prevented race condition is:
            ///  1. unlock_shared() sees n == SHUTDOWN_START,
            ///  2. shutdown thread enters cv.wait(lock, [&] { return val.load() >= SHUTDOWN_END; });
            ///     the callback does val.load(), gets SHUTDOWN_START, and is about
            ///     to return false; at this point, the cv.wait call is not monitoring
            ///     condition_variable notifications (remember that cv.wait with callback is
            ///     equivalent to a wait without callback in a loop),
            ///  3. the unlock_shared() assigns `val` and calls cv.notify_all(), which does
            ///     nothing because no thread is blocked on the condition variable,
            ///  4. the cv.wait callback returns false; the wait goes back to sleep and never
            ///     wakes up.
            std::unique_lock lock(mutex);
        }
        cv.notify_all();
    }
}

bool ShutdownHelper::shutdown_requested()
{
    return val.load(std::memory_order_relaxed) >= SHUTDOWN_START;
}

/// Initiates shutdown; returns true for the first caller, false if shutdown was
/// already requested. If no shared locks are in flight at that moment, immediately
/// publishes SHUTDOWN_END (otherwise the last unlock_shared() does it).
bool ShutdownHelper::begin_shutdown()
{
    Int64 n = val.fetch_add(SHUTDOWN_START) + SHUTDOWN_START;
    bool already_called = n >= SHUTDOWN_START * 2;
    if (already_called)
        /// Another caller already added SHUTDOWN_START; undo our duplicate contribution.
        n = val.fetch_sub(SHUTDOWN_START) - SHUTDOWN_START;
    if (n == SHUTDOWN_START)
    {
        /// No shared locks held: we are responsible for signalling completion.
        val.fetch_add(SHUTDOWN_END);
        {
            /// Lock+unlock to avoid the cv race described in unlock_shared().
            std::unique_lock lock(mutex);
        }
        cv.notify_all();
    }
    return !already_called;
}

/// Blocks until SHUTDOWN_END has been published, i.e. until every shared lock
/// taken before begin_shutdown() has been released.
void ShutdownHelper::wait_shutdown()
{
    std::unique_lock lock(mutex);
    cv.wait(lock, [&] { return val.load() >= SHUTDOWN_END; });
}

/// Convenience wrapper: request shutdown and wait for all in-flight shared locks to drain.
void ShutdownHelper::shutdown()
{
    begin_shutdown();
    wait_shutdown();
}

/// Explicit instantiations for the common `void` result type, so other translation units
/// can link against them without instantiating the templates themselves.
template ThreadPoolCallbackRunnerUnsafe<void> threadPoolCallbackRunnerUnsafe<void>(ThreadPool & thread_pool, ThreadName thread);
template class ThreadPoolCallbackRunnerLocal<void>;

}
