// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <errno.h>                                // errno
#include <stdlib.h>                               // malloc, free
#include <unistd.h>                               // getpagesize
#include <sys/mman.h>                             // mmap, munmap, mprotect
#include <algorithm>                              // std::max
#include <turbo/memory/leaky_singleton.h>
#include <tally/tally.h>
#include <kthread/internal/types.h>                        // KTHREAD_STACKTYPE_*
#include <kthread/internal/stack.h>
#include <kthread/internal/config.h>

namespace kthread {
    // The public KTHREAD_STACKTYPE_* constants must stay numerically in sync
    // with the internal STACK_TYPE_* values: code converts between the two by
    // plain assignment/cast, so a mismatch would silently pick the wrong
    // stack class.
    static_assert(KTHREAD_STACKTYPE_PTHREAD == STACK_TYPE_PTHREAD, "must match");
    static_assert(KTHREAD_STACKTYPE_SMALL == STACK_TYPE_SMALL, "must match");
    static_assert(KTHREAD_STACKTYPE_NORMAL == STACK_TYPE_NORMAL, "must match");
    static_assert(KTHREAD_STACKTYPE_LARGE == STACK_TYPE_LARGE, "must match");
    static_assert(STACK_TYPE_MAIN == 0, "must match 0");

    // Number of currently live stacks (both malloc- and mmap-backed),
    // maintained by allocate_stack_storage()/deallocate_stack_storage().
    // Relaxed ordering is sufficient: this is a statistic only, no other
    // data is published through it.
    static std::atomic<int64_t> s_stack_count = 0;

    // Exports the live stack count as the "kthread_stack_count" gauge.
    // NOTE(review): the former file-local helper get_stack_count(void*) was
    // never referenced (the gauge reads the counter via this lambda) and has
    // been removed as dead code.
    static tally::FuncGauge<int64_t> kvar_stack_count(
        "kthread_stack_count", "help", []() {
            return s_stack_count.load(std::memory_order_relaxed);
        });

    // Allocates the memory backing one kthread stack and fills `*s`.
    //
    // stacksize_in: requested stack size in bytes; rounded up to at least two
    //     pages and to a multiple of the page size.
    // guardsize_in: requested guard-area size. <= 0 disables the guard and the
    //     stack comes from malloc(); otherwise the whole region is mmap-ed and
    //     the lowest `guardsize` bytes are made PROT_NONE so a stack overflow
    //     faults instead of corrupting adjacent memory (stacks grow downward,
    //     `s->bottom` is the highest address).
    // Returns 0 on success, -1 on failure (allocation or mprotect error).
    int allocate_stack_storage(StackStorage *s, int stacksize_in, int guardsize_in) {
        const static int PAGESIZE = getpagesize();
        const int PAGESIZE_M1 = PAGESIZE - 1;
        const int MIN_STACKSIZE = PAGESIZE * 2;
        const int MIN_GUARDSIZE = PAGESIZE;

        // Round the stack size up to a whole number of pages (>= 2 pages).
        const int stacksize =
                (std::max(stacksize_in, MIN_STACKSIZE) + PAGESIZE_M1) &
                ~PAGESIZE_M1;

        if (guardsize_in <= 0) {
            // No guard requested: plain heap allocation is cheaper than mmap
            // and does not consume a kernel VMA entry.
            void *mem = malloc(stacksize);
            if (nullptr == mem) {
                PKLOG_EVERY_N_SEC(ERROR, 1) << "Fail to malloc (size="
                                         << stacksize << ")";
                return -1;
            }
            s_stack_count.fetch_add(1, std::memory_order_relaxed);
            s->bottom = (char *) mem + stacksize;  // highest address of the stack
            s->stacksize = stacksize;
            s->guardsize = 0;
            s->valgrind_stack_id = 0;
            return 0;
        } else {
            // Round the guard size up to a whole number of pages as well.
            const int guardsize =
                    (std::max(guardsize_in, MIN_GUARDSIZE) + PAGESIZE_M1) &
                    ~PAGESIZE_M1;

            const int memsize = stacksize + guardsize;
            void *const mem = mmap(nullptr, memsize, (PROT_READ | PROT_WRITE),
                                   (MAP_PRIVATE | MAP_ANONYMOUS), -1, 0);

            if (MAP_FAILED == mem) {
                PKLOG_EVERY_N_SEC(ERROR, 1)
                        << "Fail to mmap size=" << memsize << " stack_count="
                        << s_stack_count.load(std::memory_order_relaxed)
                        << ", possibly limited by /proc/sys/vm/max_map_count";
                // may fail due to limit of max_map_count (65536 in default)
                return -1;
            }

            // mmap is specified to return page-aligned addresses; this
            // re-alignment is purely defensive for non-conforming platforms.
            void *aligned_mem = (void *) (((intptr_t) mem + PAGESIZE_M1) & ~PAGESIZE_M1);
            if (aligned_mem != mem) {
                KLOG_FIRST_N(ERROR, 1) << "addr=" << mem << " returned by mmap is not "
                                                     "aligned by pagesize=" << PAGESIZE;
            }
            const int offset = (char *) aligned_mem - (char *) mem;
            if (guardsize <= offset ||
                mprotect(aligned_mem, guardsize - offset, PROT_NONE) != 0) {
                // Preserve mprotect's errno across the munmap cleanup so the
                // log below (P-prefix presumably reports errno — confirm
                // against the PKLOG macro) and the caller see the original
                // failure reason, not munmap's.
                const int saved_errno = errno;
                munmap(mem, memsize);
                errno = saved_errno;
                PKLOG_EVERY_N_SEC(ERROR, 1)
                        << "Fail to mprotect " << (void *) aligned_mem << " length="
                        << guardsize - offset;
                return -1;
            }

            s_stack_count.fetch_add(1, std::memory_order_relaxed);
            s->bottom = (char *) mem + memsize;  // stack usable above the guard pages
            s->stacksize = stacksize;
            s->guardsize = guardsize;
            s->valgrind_stack_id = 0;
            return 0;
        }
    }

    // Releases memory previously obtained from allocate_stack_storage().
    // The allocation spans stacksize + guardsize bytes ending at `bottom`;
    // guardsize == 0 marks a malloc-ed stack, anything else an mmap-ed one.
    void deallocate_stack_storage(StackStorage *s) {
        const int total = s->stacksize + s->guardsize;
        // Sanity guard: a `bottom` no larger than the region size cannot
        // point at a real allocation, so refuse to free it.
        if ((uintptr_t) s->bottom <= (uintptr_t) total) {
            return;
        }
        s_stack_count.fetch_sub(1, std::memory_order_relaxed);
        char *const base = (char *) s->bottom - total;
        if (s->guardsize > 0) {
            munmap(base, total);
        } else {
            free(base);
        }
    }

    // Per-class stack sizes are runtime-configurable through turbo flags;
    // these accessors re-read the flag on every call so changes take effect
    // for subsequently allocated stacks.
    // (Fixed: stray ';' after the NormalStackClass definition — an empty
    // declaration flagged by -Wextra-semi and inconsistent with its siblings.)
    int SmallStackClass::stack_size_flag() { return turbo::get_flag(FLAGS_stack_size_small); }

    int NormalStackClass::stack_size_flag() { return turbo::get_flag(FLAGS_stack_size_normal); }

    int LargeStackClass::stack_size_flag() { return turbo::get_flag(FLAGS_stack_size_large); }
} // namespace kthread
