// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <atomic>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>


#include <nebula/types/type_fwd.h>
#include <turbo/base/macros.h>
#include <turbo/utility/status.h>

namespace nebula {

    namespace internal {

        ///////////////////////////////////////////////////////////////////////
        // Helper tracking memory statistics

        /// \brief Thread-safe memory pool statistics.
        ///
        /// 64-byte aligned so that all atomic counters share a single cache line.
        class alignas(64) MemoryPoolStats {
        private:
            // Counters follow Acquire-Release ordering.
            // https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
            //
            // max_memory_, total_allocated_bytes_ and num_allocs_ are monotonically
            // increasing, which permits a relaxed pre-load before the CAS loop below.
            std::atomic<int64_t> max_memory_{0};
            std::atomic<int64_t> bytes_allocated_{0};
            std::atomic<int64_t> total_allocated_bytes_{0};
            std::atomic<int64_t> num_allocs_{0};

        public:
            /// Peak number of outstanding bytes observed so far.
            [[nodiscard]] int64_t max_memory() const {
                return max_memory_.load(std::memory_order_acquire);
            }

            /// Bytes currently outstanding (allocations minus frees).
            [[nodiscard]] int64_t bytes_allocated() const {
                return bytes_allocated_.load(std::memory_order_acquire);
            }

            /// Cumulative number of bytes ever recorded as allocated.
            [[nodiscard]] int64_t total_bytes_allocated() const {
                return total_allocated_bytes_.load(std::memory_order_acquire);
            }

            /// Number of allocation (including growing reallocation) events recorded.
            [[nodiscard]] int64_t num_allocations() const {
                return num_allocs_.load(std::memory_order_acquire);
            }

            /// Record an allocation of `size` bytes and update the high-water mark.
            inline void DidAllocateBytes(int64_t size) {
                // max_memory_ only grows, so a relaxed load suffices as the initial
                // "expected" value for the compare-exchange loop further down.
                auto observed_max = max_memory_.load(std::memory_order_relaxed);
                const auto previous_bytes =
                        bytes_allocated_.fetch_add(size, std::memory_order_acq_rel);
                // Update the counters we no longer need right away; this keeps
                // observed_max/previous_bytes likely live in registers and groups
                // the atomic read-modify-writes together in the instruction stream.
                total_allocated_bytes_.fetch_add(size, std::memory_order_acq_rel);
                num_allocs_.fetch_add(1, std::memory_order_acq_rel);

                const auto outstanding = previous_bytes + size;
                // Raise the high-water mark. On CAS failure observed_max is
                // refreshed with the competing value; if another thread already
                // published something >= ours, the loop condition ends the loop.
                while (observed_max < outstanding) {
                    if (max_memory_.compare_exchange_weak(
                            /*expected=*/observed_max, /*desired=*/outstanding,
                            std::memory_order_acq_rel)) {
                        break;
                    }
                }
            }

            /// Record a resize from `old_size` to `new_size` bytes.
            inline void DidReallocateBytes(int64_t old_size, int64_t new_size) {
                if (new_size > old_size) {
                    DidAllocateBytes(new_size - old_size);
                } else {
                    DidFreeBytes(old_size - new_size);
                }
            }

            /// Record a deallocation of `size` bytes.
            inline void DidFreeBytes(int64_t size) {
                bytes_allocated_.fetch_sub(size, std::memory_order_acq_rel);
            }
        };

    }  // namespace internal

    /// \brief Base class for memory allocation on the CPU.
    ///
    /// Besides tracking the number of allocated bytes, implementations must
    /// honor the requested alignment; the single-argument convenience overloads
    /// below use kDefaultBufferAlignment (64 bytes per the class comment).
    class TURBO_EXPORT MemoryPool {
    public:
        virtual ~MemoryPool() = default;

        /// \brief EXPERIMENTAL. Create a new instance of the default MemoryPool.
        static std::unique_ptr<MemoryPool> CreateDefault();

        /// \brief Allocate a new memory region of at least `size` bytes.
        ///
        /// Delegates to the aligned overload with kDefaultBufferAlignment.
        /// \param size number of bytes to allocate
        /// \param[out] out receives the start of the allocated region
        turbo::Status allocate(int64_t size, uint8_t **out) {
            return allocate(size, kDefaultBufferAlignment, out);
        }

        /// \brief Allocate a new memory region of at least `size` bytes aligned
        /// to `alignment`.
        virtual turbo::Status allocate(int64_t size, int64_t alignment, uint8_t **out) = 0;

        /// \brief Resize an already allocated memory section.
        ///
        /// As most platform allocators do not support aligned reallocation,
        /// this call may involve a copy of the underlying data.
        /// \param[in,out] ptr points to the region on entry; may be updated to a
        ///   new location on return
        virtual turbo::Status reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                         uint8_t **ptr) = 0;

        /// \brief Resize with the default alignment (kDefaultBufferAlignment).
        turbo::Status reallocate(int64_t old_size, int64_t new_size, uint8_t **ptr) {
            return reallocate(old_size, new_size, kDefaultBufferAlignment, ptr);
        }

        /// \brief Free an allocated region.
        ///
        /// \param buffer Pointer to the start of the allocated memory region
        /// \param size Allocated size located at buffer. An allocator implementation
        ///   may use this for tracking the amount of allocated bytes as well as for
        ///   faster deallocation if supported by its backend.
        /// \param alignment The alignment the region was allocated with.
        virtual void free(uint8_t *buffer, int64_t size, int64_t alignment) = 0;

        /// \brief Free a region allocated with the default alignment.
        void free(uint8_t *buffer, int64_t size) {
            free(buffer, size, kDefaultBufferAlignment);
        }

        /// \brief Return unused memory to the OS.
        ///
        /// Only applies to allocators that hold onto unused memory. This is
        /// best effort: a memory pool may not implement this feature or may be
        /// unable to fulfill the request due to fragmentation. Default: no-op.
        virtual void ReleaseUnused() {}

        /// \brief The number of bytes allocated and not yet freed through
        /// this allocator.
        [[nodiscard]] virtual int64_t bytes_allocated() const = 0;

        /// \brief Return peak memory allocation in this memory pool.
        ///
        /// \return Maximum bytes allocated. If not known (or not implemented),
        /// returns -1 (see the default implementation, defined elsewhere).
        [[nodiscard]] virtual int64_t max_memory() const;

        /// \brief The cumulative number of bytes ever allocated.
        [[nodiscard]] virtual int64_t total_bytes_allocated() const = 0;

        /// \brief The number of allocations or reallocations that were requested.
        [[nodiscard]] virtual int64_t num_allocations() const = 0;

        /// \brief The name of the backend used by this MemoryPool
        /// (e.g. "system" or "jemalloc").
        [[nodiscard]] virtual std::string backend_name() const = 0;

    protected:
        // Only constructible through derived classes.
        MemoryPool() = default;
    };

    /// \brief MemoryPool decorator that wraps another pool.
    ///
    /// All operations are forwarded to `pool_`; the name suggests each call is
    /// also logged, but the definitions live in the implementation file —
    /// NOTE(review): confirm logging behavior there.
    class TURBO_EXPORT LoggingMemoryPool : public MemoryPool {
    public:
        /// \param pool the wrapped pool; not owned, must outlive this object.
        explicit LoggingMemoryPool(MemoryPool *pool);

        ~LoggingMemoryPool() override = default;

        // Re-expose the default-alignment convenience overloads, which would
        // otherwise be hidden by the overrides below.
        using MemoryPool::allocate;
        using MemoryPool::free;
        using MemoryPool::reallocate;

        turbo::Status allocate(int64_t size, int64_t alignment, uint8_t **out) override;

        turbo::Status reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                 uint8_t **ptr) override;

        void free(uint8_t *buffer, int64_t size, int64_t alignment) override;

        [[nodiscard]] int64_t bytes_allocated() const override;

        [[nodiscard]] int64_t max_memory() const override;

        [[nodiscard]] int64_t total_bytes_allocated() const override;

        [[nodiscard]] int64_t num_allocations() const override;

        [[nodiscard]] std::string backend_name() const override;

    private:
        // Non-owning pointer to the wrapped pool.
        MemoryPool *pool_;
    };

    /// \brief MemoryPool decorator that tracks its own statistics.
    ///
    /// Tracks the number of bytes and maximum memory allocated through its
    /// direct calls. Actual allocation is delegated to the wrapped MemoryPool.
    class TURBO_EXPORT ProxyMemoryPool : public MemoryPool {
    public:
        /// \param pool the pool that performs the actual allocations; not owned,
        ///   must outlive this object.
        explicit ProxyMemoryPool(MemoryPool *pool);

        ~ProxyMemoryPool() override;

        // Re-expose the default-alignment convenience overloads, which would
        // otherwise be hidden by the overrides below.
        using MemoryPool::allocate;
        using MemoryPool::free;
        using MemoryPool::reallocate;

        turbo::Status allocate(int64_t size, int64_t alignment, uint8_t **out) override;

        turbo::Status reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                 uint8_t **ptr) override;

        void free(uint8_t *buffer, int64_t size, int64_t alignment) override;

        [[nodiscard]] int64_t bytes_allocated() const override;

        [[nodiscard]] int64_t max_memory() const override;

        [[nodiscard]] int64_t total_bytes_allocated() const override;

        [[nodiscard]] int64_t num_allocations() const override;

        [[nodiscard]] std::string backend_name() const override;

    private:
        class ProxyMemoryPoolImpl;

        // Pimpl idiom: implementation details live in the source file.
        std::unique_ptr<ProxyMemoryPoolImpl> impl_;
    };

    /// \brief Return a process-wide memory pool based on the system allocator.
    ///
    /// The returned pointer is not owned by the caller (defined elsewhere).
    TURBO_EXPORT MemoryPool *system_memory_pool();



    /// \brief Return the names of the backends supported by this Nebula build.
    ///
    /// Candidate values match backend_name(), e.g. "system" or "jemalloc".
    /// NOTE(review): the exact set depends on build configuration — not
    /// visible from this header.
    TURBO_EXPORT std::vector<std::string> supported_memory_backend_names();

}  // namespace nebula
