//
// Created by liyinbin on 2021/4/4.
//


#include "abel/fiber/internal/stack_allocator.h"

//#include <malloc.h>  // `memalign`.
#include <immintrin.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <memory>

#include "abel/base/annotation.h"
#include "abel/log/logging.h"
#include "abel/functional/function.h"
#include "abel/memory/non_destroy.h"
#include "abel/memory/object_pool.h"

using namespace std::literals;

namespace abel {
    namespace fiber_internal {

        // Size of a memory page, queried once at static-initialization time. Used
        // for the guard page size and for alignment checks below.
        const auto kPageSize = getpagesize();

        // Diagnostic emitted when creating the stack mapping or its guard page
        // fails — most likely because the process hit the kernel's limit on the
        // number of memory regions (`vm.max_map_count`).
        constexpr auto kOutOfMemoryError =
                "Cannot create guard page below fiber stack. Check `/proc/[pid]/maps` to "
                "see if there are too many memory regions. There's a limit at around 64K "
                "by default. If you reached the limit, try either disabling guard page or "
                "increasing `vm.max_map_count` (suggested).";

        // All stacks (whether system stack or user stack) are registered here. This is
        // necessary for our GDB plugin to find all the stacks.
        //
        // Only _actual_ stack allocation / deallocation needs to touch this. For
        // allocations / deallocations covered by our object pool, they're irrelevant
        // here.
        //
        // Registration / deregistration can be slow. But that's okay, as _actually_
        // creating / destroying stacks is already slow: those operations incur
        // heavy VMA operations.
        struct StackRegistry {
            // Listed as public as they're our "public" interfaces to GDB plugin.
            //
            // Code in this TU should use methods below instead of touching these fields.
            void **stacks = nullptr;  // Leaked on exit. Doesn't matter.
            std::size_t used = 0;     // Number of non-null entries in `stacks`.
            std::size_t capacity = 0; // Number of slots in `stacks`.

            // Register a newly-allocated stack.
            //
            // `ptr` should point to stack bottom (i.e. one byte past the stack region).
            // That's where our fiber control block (GDB plugin needs it) resides.
            void RegisterStack(void *ptr) {
                std::scoped_lock _(lock_);  // It's slow, so be it.

                ++used;
                if (auto slot = UnsafeFindSlotOf(nullptr)) {
                    *slot = ptr;
                    return;
                }

                // No free slot left, grow the registry first.
                UnsafeResizeRegistry();
                *UnsafeFindSlotOf(nullptr) = ptr;  // Must succeed this time.
            }

            // Deregister a going-to-be-freed stack. `ptr` points to stack bottom.
            void DeregisterStack(void *ptr) {
                std::scoped_lock _(lock_);

                if (auto slot = UnsafeFindSlotOf(ptr)) {
                    *slot = nullptr;
                    // Decrement only on a successful lookup, so `used` stays
                    // consistent with the number of non-null slots even if a bogus
                    // pointer is ever passed in.
                    --used;
                } else {
                    DCHECK(false, "Deregistering a stack that was never registered.");
                    return;
                }

                // If `stacks` is too large we should consider shrinking it.
                if (capacity > 1024 && capacity / 2 > used) {
                    UnsafeShrinkRegistry();
                }
            }

        private:
            // Linear scan for the slot containing `ptr`. Passing `nullptr` finds a
            // free slot. Returns `nullptr` if no matching slot exists. Caller must
            // hold `lock_`.
            void **UnsafeFindSlotOf(void *ptr) {
                for (std::size_t i = 0; i != capacity; ++i) {
                    if (stacks[i] == ptr) {
                        return &stacks[i];
                    }
                }
                return nullptr;
            }

            // Halve the registry's capacity, compacting live entries to the front.
            // Caller must hold `lock_` and guarantee `used <= capacity / 2`.
            void UnsafeShrinkRegistry() {
                auto new_capacity = capacity / 2;
                DCHECK(new_capacity);
                auto new_stacks = new void *[new_capacity];
                std::size_t copied = 0;

                memset(new_stacks, 0, new_capacity * sizeof(void *));
                for (std::size_t i = 0; i != capacity; ++i) {
                    if (stacks[i]) {
                        new_stacks[copied++] = stacks[i];
                    }
                }

                DCHECK_EQ(copied, used);
                DCHECK_LE(copied, new_capacity);
                capacity = new_capacity;
                delete[] std::exchange(stacks, new_stacks);
            }

            // Double the registry's capacity (or create the initial 8-slot array on
            // first use). New slots are zero-filled. Caller must hold `lock_`.
            void UnsafeResizeRegistry() {
                if (capacity == 0) {  // We haven't been initialized yet.
                    capacity = 8;
                    stacks = new void *[capacity];
                    memset(stacks, 0, sizeof(void *) * capacity);
                } else {
                    auto new_capacity = capacity * 2;
                    auto new_stacks = new void *[new_capacity];
                    memset(new_stacks, 0, new_capacity * sizeof(void *));
                    memcpy(new_stacks, stacks, capacity * sizeof(void *));
                    capacity = new_capacity;
                    delete[] std::exchange(stacks, new_stacks);
                }
            }

        private:
            std::mutex lock_;
        } stack_registry;  // Using global variable here. This makes looking up this
        // variable easy in GDB plugin.

        // Offset (in bytes) of the usable stack region within the raw mapping.
        // One page when the guard page is enabled, zero otherwise.
        inline std::size_t GetBias() {
            if (fiber_config::get_global_fiber_config().fiber_stack_enable_guard_page) {
                return kPageSize;
            }
            return 0;
        }

        // Total bytes to `mmap` for one user stack: the configured stack size plus
        // room for the (optional) guard page.
        inline std::size_t GetAllocationSize() {
            const auto stack_size = fiber_config::get_global_fiber_config().fiber_stack_size;

            // The stack must cover whole pages, as the guard page is carved out via
            // `mprotect` at page granularity.
            DCHECK(stack_size % kPageSize == 0,
                        "user_stack size ({}) must be a multiple of page size ({}).",
                       stack_size, kPageSize);

            return stack_size + GetBias();
        }

        // Allocates a fresh user-fiber stack via `mmap`, optionally protected by a
        // guard page at its lowest address, and registers it for the GDB plugin.
        // Returns a pointer to the start (lowest usable address) of the stack.
        user_stack *create_user_stack_impl() {
            const auto alloc_size = GetAllocationSize();

            // POSIX requires `fd` to be -1 when `MAP_ANONYMOUS` is specified (Linux
            // merely tolerates other values).
            auto p = mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS /*| MAP_STACK*/, -1, 0);
            // On failure `mmap` returns `MAP_FAILED` ((void*)-1), never `nullptr` —
            // testing against `nullptr` would silently miss the error.
            DLOG_CRITICAL_IF(p == MAP_FAILED, "{}", kOutOfMemoryError);
            DCHECK_EQ(reinterpret_cast<std::uintptr_t>(p) % kPageSize, 0ul);
            if (fiber_config::get_global_fiber_config().fiber_stack_enable_guard_page) {
                // Make the lowest page inaccessible so that stack overflow faults
                // immediately instead of corrupting adjacent memory.
                DLOG_CRITICAL_IF(mprotect(p, kPageSize, PROT_NONE) != 0, "{}",
                                   kOutOfMemoryError);
            }

            // Actual start (lowest address) of the stack, past the guard page.
            auto stack = reinterpret_cast<char *>(p) + GetBias();
            // One byte past the stack region.
            auto stack_bottom = stack + fiber_config::get_global_fiber_config().fiber_stack_size;

            // Register the stack (by its bottom, where the fiber control block lives).
            stack_registry.RegisterStack(stack_bottom);

            // Give it back to the caller.
            return reinterpret_cast<user_stack *>(stack);
        }

        // Releases a user-fiber stack previously returned by
        // `create_user_stack_impl`: deregisters it and unmaps the whole region
        // (guard page included).
        void destroy_user_stack_impl(user_stack *ptr) {
            DCHECK(reinterpret_cast<std::uintptr_t>(ptr) % kPageSize == 0);

            // Remove the stack from our registry.
            auto stack_bottom =
                    reinterpret_cast<char *>(ptr) + fiber_config::get_global_fiber_config().fiber_stack_size;
            stack_registry.DeregisterStack(stack_bottom);

            // Actually release the mapping. `ptr` points past the guard page, so
            // step back by the bias to unmap from the start of the allocation.
            // Without this, every destroyed stack leaks its VMA and the process
            // eventually hits `vm.max_map_count`.
            auto rc = munmap(reinterpret_cast<char *>(ptr) - GetBias(),
                             GetAllocationSize());
            (void) rc;  // Only examined in debug builds.
            DCHECK(rc == 0, "Failed to unmap fiber stack.");
        }

        // Allocates a 64-byte-aligned system stack from the heap and registers it
        // for the GDB plugin. Returns a pointer to the start of the stack.
        system_stack *create_system_stack_impl() {
            // Rather simple.. Memory allocator should handle it well. We don't even
            // have to make it aligned to page boundary.

            // We'd still like to keep the resulting pointer 64-byte aligned. This is
            // not mandatory for us though, as `make_context` itself adjusts the
            // alignment as it sees fit anyway.
            //
            // C++17 `aligned_alloc` requires the requested size to be a multiple of
            // the alignment, otherwise the behavior is undefined.
            static_assert(kSystemStackSize % 64 == 0,
                          "`kSystemStackSize` must be a multiple of 64 for `aligned_alloc`.");
            auto stack = aligned_alloc(64, kSystemStackSize);
            // `aligned_alloc` returns `nullptr` on failure; don't touch the memory
            // before checking.
            DCHECK(stack, "Failed to allocate system stack.");
            DCHECK(reinterpret_cast<std::uintptr_t>(stack) % 64 == 0);
            auto stack_bottom = reinterpret_cast<char *>(stack) + kSystemStackSize;

            // Register it and return.
            stack_registry.RegisterStack(stack_bottom);
            return reinterpret_cast<system_stack *>(stack);
        }

        // Releases a system stack previously returned by
        // `create_system_stack_impl`: deregisters it, then frees the heap block.
        void destroy_system_stack_impl(system_stack *ptr) {
            auto bottom = reinterpret_cast<char *>(ptr) + kSystemStackSize;
            stack_registry.DeregisterStack(bottom);
            free(ptr);  // Let it go.
        }
    }  // namespace fiber_internal
}  // namespace abel
