// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/core/memory_pool_internal.h>

#include <algorithm>  // IWYU pragma: keep
#include <atomic>
#include <cstdlib>   // IWYU pragma: keep
#include <cstring>   // IWYU pragma: keep
#include <iostream>  // IWYU pragma: keep
#include <limits>
#include <memory>
#include <mutex>
#include <optional>

#if defined(sun) || defined(__sun)
#include <stdlib.h>
#endif

#include <nebula/core/buffer.h>
#include <nebula/io/util_internal.h>

#include <turbo/utility/status.h>
#include <nebula/bits/bit_util.h>
#include <nebula/version.h>
#include <turbo/base/debug.h>
#include <nebula/numeric/int_util_overflow.h>
#include <turbo/utility/environment.h>
#include <turbo/log/logging.h>  // IWYU pragma: keep
#include <nebula/util/string.h>
#include <nebula/future/thread_pool.h>
#include <turbo/base/ubsan.h>

#ifdef __GLIBC__

#include <malloc.h>

#endif

namespace nebula {

    namespace memory_pool {

        namespace internal {

            // Backing storage for zero-size allocations (presumably what
            // kZeroSizeArea, declared in memory_pool_internal.h, points at —
            // confirm against the header).  Initializing the single int64_t to
            // kDebugXorSuffix makes it a valid DebugAllocator guard suffix for
            // size 0 (since 0 ^ kDebugXorSuffix == kDebugXorSuffix), so the
            // debug allocator's CheckAllocatedArea passes on this area.
            alignas(kDefaultBufferAlignment) int64_t zero_size_area[1] = {kDebugXorSuffix};

        }  // namespace internal

    }  // namespace memory_pool

    namespace {

        // Environment variables controlling the default pool selection and the
        // optional debug (guard-suffix) allocator wrapper.
        constexpr char kDefaultBackendEnvVar[] = "NEBULA_DEFAULT_MEMORY_POOL";
        constexpr char kDebugMemoryEnvVar[] = "NEBULA_DEBUG_MEMORY_POOL";

        // Identifiers for the allocators allocations may be routed to.  Only
        // System is registered in this build; Jemalloc/Mimalloc are reserved
        // for optional backends.
        enum class MemoryPoolBackend : uint8_t {
            System, Jemalloc, Mimalloc
        };

        // Pairs a user-facing backend name (as accepted by
        // NEBULA_DEFAULT_MEMORY_POOL) with its enum value.
        struct SupportedBackend {
            const char *name;
            MemoryPoolBackend backend;
        };
        // Backends compiled into this build.  The first entry is the fallback
        // default (see DefaultBackend()).
        const std::vector<SupportedBackend> &SupportedBackends() {
            static std::vector<SupportedBackend> backends = {
                    {"system", MemoryPoolBackend::System}
            };
            return backends;
        }

        // Return the MemoryPoolBackend selected by the user through the
        // NEBULA_DEFAULT_MEMORY_POOL environment variable, if any.
        // Return the MemoryPoolBackend selected by the user through the
        // NEBULA_DEFAULT_MEMORY_POOL environment variable, if any.
        // The environment is consulted once; the result is cached for the
        // process lifetime.  An unset or empty variable yields std::nullopt;
        // an unrecognized name logs a warning and also yields std::nullopt.
        std::optional<MemoryPoolBackend> UserSelectedBackend() {
            static auto user_selected_backend = []() -> std::optional<MemoryPoolBackend> {
                // Warn about an unrecognized name, listing the valid ones.
                auto unsupported_backend = [](const std::string &name) {
                    std::vector<std::string> supported;
                    // By reference: avoid copying each SupportedBackend entry
                    // (clang-tidy performance-for-range-copy).
                    for (const auto &backend: SupportedBackends()) {
                        supported.push_back(std::string("'") + backend.name + "'");
                    }
                    KLOG(WARNING) << "Unsupported backend '" << name << "' specified in "
                                        << kDefaultBackendEnvVar << " (supported backends are "
                                        << internal::JoinStrings(supported, ", ") << ")";
                };

                auto maybe_name = turbo::get_env_string(kDefaultBackendEnvVar);
                if (!maybe_name.ok()) {
                    // Variable not set (or not readable): no user selection.
                    return {};
                }
                const auto name = *std::move(maybe_name);
                if (name.empty()) {
                    // An empty environment variable is considered missing
                    return {};
                }
                const auto found = std::find_if(
                        SupportedBackends().begin(), SupportedBackends().end(),
                        [&](const SupportedBackend &backend) { return name == backend.name; });
                if (found != SupportedBackends().end()) {
                    return found->backend;
                }
                unsupported_backend(name);
                return {};
            }();

            return user_selected_backend;
        }

        // Backend the process should actually use: the user's (valid)
        // environment selection if present, otherwise the first registered
        // backend.
        MemoryPoolBackend DefaultBackend() {
            const auto user_choice = UserSelectedBackend();
            // Fall back to the head of the registration list only when the
            // user did not pick a backend.
            return user_choice.has_value() ? *user_choice
                                           : SupportedBackends().front().backend;
        }

        // Callback invoked when the debug allocator detects a clobbered guard
        // area: receives the buffer, its logical size, and a status describing
        // the size mismatch.
        using MemoryDebugHandler = std::function<void(uint8_t *ptr, int64_t size, const turbo::Status &)>;

        // Process-wide holder of the debug-memory handler.  All access is
        // serialized by an internal mutex; the handler is invoked while the
        // mutex is held.
        struct DebugState {
            // Call the installed handler (if any) for a detected corruption.
            void Invoke(uint8_t *ptr, int64_t size, const turbo::Status &st) {
                std::lock_guard<std::mutex> lock(mutex_);
                if (handler_) {
                    handler_(ptr, size, st);
                }
            }

            // Install or replace the handler.
            void SetHandler(MemoryDebugHandler handler) {
                std::lock_guard<std::mutex> lock(mutex_);
                handler_ = std::move(handler);
            }

            static DebugState *Instance() {
                // Instance is constructed on-demand. If it was a global static variable,
                // it could be constructed after being used.
                static DebugState instance;
                return &instance;
            }

        private:
            DebugState() = default;

            TURBO_DISALLOW_COPY_AND_ASSIGN(DebugState);

            std::mutex mutex_;
            MemoryDebugHandler handler_;
        };

        // Built-in handlers for NEBULA_DEBUG_MEMORY_POOL=abort|trap|warn.
        // Abort the process with the corruption status.
        void DebugAbort(uint8_t *ptr, int64_t size, const turbo::Status &st) { st.abort(); }

        // Log the error and break into the debugger (debug trap instruction).
        void DebugTrap(uint8_t *ptr, int64_t size, const turbo::Status &st) {
            KLOG(ERROR) << st.to_string();
            turbo::debug_trap();
        }

        // Log a warning and continue running.
        void DebugWarn(uint8_t *ptr, int64_t size, const turbo::Status &st) {
            KLOG(WARNING) << st.to_string();
        }

        // Whether the guard-suffix debug allocator is enabled, as selected by
        // the NEBULA_DEBUG_MEMORY_POOL environment variable.  The variable is
        // read once per process; as a side effect of the first call, the
        // matching corruption handler ('abort', 'trap' or 'warn') is installed
        // into DebugState.  Unset, empty, 'none', or any invalid value (after
        // a warning) disables debugging.
        bool IsDebugEnabled() {
            static const bool is_enabled = []() {
                auto maybe_env_value = turbo::get_env_string(kDebugMemoryEnvVar);
                if (!maybe_env_value.ok()) {
                    return false;
                }
                auto env_value = *std::move(maybe_env_value);
                if (env_value.empty() || env_value == "none") {
                    return false;
                }
                auto debug_state = DebugState::Instance();
                if (env_value == "abort") {
                    debug_state->SetHandler(DebugAbort);
                    return true;
                }
                if (env_value == "trap") {
                    debug_state->SetHandler(DebugTrap);
                    return true;
                }
                if (env_value == "warn") {
                    debug_state->SetHandler(DebugWarn);
                    return true;
                }
                KLOG(WARNING) << "Invalid value for " << kDebugMemoryEnvVar << ": '" << env_value
                                    << "'. Valid values are 'abort', 'trap', 'warn', 'none'.";
                return false;
            }();

            return is_enabled;
        }

        // An allocator wrapper that adds a suffix at the end of allocation to check
        // for writes beyond the allocated area.
        //
        // Layout: [user area of `size` bytes][int64_t guard], where the guard
        // stores `size ^ kDebugXorSuffix`.  The guard is validated on every
        // reallocation and deallocation; a mismatch is reported through
        // DebugState (abort/trap/warn depending on configuration).
        template<typename WrappedAllocator>
        class DebugAllocator {
        public:
            static turbo::Status AllocateAligned(int64_t size, int64_t alignment, uint8_t **out) {
                if (size == 0) {
                    // Zero-size allocations share the static kZeroSizeArea,
                    // whose content is already a valid guard for size 0.
                    *out = memory_pool::internal::kZeroSizeArea;
                } else {
                    TURBO_MOVE_OR_RAISE(int64_t raw_size, RawSize(size));
                            DKCHECK(raw_size > size) << "bug in raw size computation: " << raw_size
                                                           << " for size " << size;
                    TURBO_RETURN_NOT_OK(WrappedAllocator::AllocateAligned(raw_size, alignment, out));
                    InitAllocatedArea(*out, size);
                }
                return turbo::OkStatus();
            }

            static void ReleaseUnused() { WrappedAllocator::ReleaseUnused(); }

            static turbo::Status ReallocateAligned(int64_t old_size, int64_t new_size, int64_t alignment,
                                                   uint8_t **ptr) {
                // Validate the old area's guard before touching it.
                CheckAllocatedArea(*ptr, old_size, "reallocation");
                if (*ptr == memory_pool::internal::kZeroSizeArea) {
                    return AllocateAligned(new_size, alignment, ptr);
                }
                if (new_size == 0) {
                    // Note that an overflow check isn't needed as `old_size` is supposed to have
                    // been successfully passed to AllocateAligned() before.
                    WrappedAllocator::DeallocateAligned(*ptr, old_size + kOverhead, alignment);
                    *ptr = memory_pool::internal::kZeroSizeArea;
                    return turbo::OkStatus();
                }
                TURBO_MOVE_OR_RAISE(int64_t raw_new_size, RawSize(new_size));
                        DKCHECK(raw_new_size > new_size)
                        << "bug in raw size computation: " << raw_new_size << " for size " << new_size;
                TURBO_RETURN_NOT_OK(WrappedAllocator::ReallocateAligned(old_size + kOverhead, raw_new_size,
                                                                        alignment, ptr));
                InitAllocatedArea(*ptr, new_size);
                return turbo::OkStatus();
            }

            static void DeallocateAligned(uint8_t *ptr, int64_t size, int64_t alignment) {
                CheckAllocatedArea(ptr, size, "deallocation");
                if (ptr != memory_pool::internal::kZeroSizeArea) {
                    WrappedAllocator::DeallocateAligned(ptr, size + kOverhead, alignment);
                }
            }

        private:
            // Total size to request from the wrapped allocator: user size plus
            // guard; fails when the addition would overflow int64_t.
            static turbo::Result<int64_t> RawSize(int64_t size) {
                if (TURBO_UNLIKELY(internal::AddWithOverflow(size, kOverhead, &size))) {
                    return turbo::resource_exhausted_error("Memory allocation size too large");
                }
                return size;
            }

            // Write the guard value right after the user area.
            static void InitAllocatedArea(uint8_t *ptr, int64_t size) {
                        DKCHECK_NE(size, 0);
                turbo::safe_store(ptr + size, size ^ memory_pool::internal::kDebugXorSuffix);
            }

            static void CheckAllocatedArea(uint8_t *ptr, int64_t size, const char *context) {
                // Check that memory wasn't clobbered at the end of the allocated area.
                int64_t stored_size =
                        memory_pool::internal::kDebugXorSuffix ^ turbo::safe_load_as<int64_t>(ptr + size);
                if (TURBO_UNLIKELY(stored_size != size)) {
                    auto st = turbo::invalid_argument_error("Wrong size on ", context, ": given size = ", size,
                                                     ", actual size = ", stored_size);
                    DebugState::Instance()->Invoke(ptr, size, st);
                }
            }

            // Size of the int64_t guard appended to every allocation.
            static constexpr int64_t kOverhead = sizeof(int64_t);
        };

        // Helper class directing allocations to the standard system allocator.
        // Zero-size requests are served from the shared static kZeroSizeArea so
        // callers always receive a non-null pointer.
        class SystemAllocator {
        public:
            // Allocate memory according to the alignment requirements for Nebula
            // (as of May 2016 64 bytes)
            static turbo::Status AllocateAligned(int64_t size, int64_t alignment, uint8_t **out) {
                if (size == 0) {
                    *out = memory_pool::internal::kZeroSizeArea;
                    return turbo::OkStatus();
                }
#ifdef _WIN32
                // Special code path for Windows
                *out = reinterpret_cast<uint8_t*>(
                    _aligned_malloc(static_cast<size_t>(size), static_cast<size_t>(alignment)));
                if (!*out) {
                  return turbo::resource_exhausted_error("malloc of size ", size, " failed");
                }
#elif defined(sun) || defined(__sun)
                // Solaris path: use memalign from <stdlib.h>.
                *out = reinterpret_cast<uint8_t*>(
                    memalign(static_cast<size_t>(alignment), static_cast<size_t>(size)));
                if (!*out) {
                  return turbo::resource_exhausted_error("malloc of size ", size, " failed");
                }
#else
                // POSIX: posix_memalign returns 0 on success, or EINVAL/ENOMEM
                // (it reports errors via its return value, not errno).
                const int result =
                        posix_memalign(reinterpret_cast<void **>(out), static_cast<size_t>(alignment),
                                       static_cast<size_t>(size));
                if (result == ENOMEM) {
                    return turbo::resource_exhausted_error("malloc of size ", size, " failed");
                }

                if (result == EINVAL) {
                    // Alignment was not a power-of-two multiple of sizeof(void*).
                    return turbo::invalid_argument_error("invalid alignment parameter: ",
                                                  static_cast<size_t>(alignment));
                }
#endif
                return turbo::OkStatus();
            }

            static turbo::Status ReallocateAligned(int64_t old_size, int64_t new_size, int64_t alignment,
                                                   uint8_t **ptr) {
                uint8_t *previous_ptr = *ptr;
                if (previous_ptr == memory_pool::internal::kZeroSizeArea) {
                            DKCHECK_EQ(old_size, 0);
                    return AllocateAligned(new_size, alignment, ptr);
                }
                if (new_size == 0) {
                    DeallocateAligned(previous_ptr, old_size, alignment);
                    *ptr = memory_pool::internal::kZeroSizeArea;
                    return turbo::OkStatus();
                }
                // Note: We cannot use realloc() here as it doesn't guarantee alignment.

                // Allocate new chunk
                uint8_t *out = nullptr;
                TURBO_RETURN_NOT_OK(AllocateAligned(new_size, alignment, &out));
                        DKCHECK(out);
                // Copy contents and release old memory chunk
                memcpy(out, *ptr, static_cast<size_t>(std::min(new_size, old_size)));
#ifdef _WIN32
                _aligned_free(*ptr);
#else
                free(*ptr);
#endif  // defined(_WIN32)
                *ptr = out;
                return turbo::OkStatus();
            }

            static void DeallocateAligned(uint8_t *ptr, int64_t size, int64_t /*alignment*/) {
                if (ptr == memory_pool::internal::kZeroSizeArea) {
                            DKCHECK_EQ(size, 0);
                } else {
#ifdef _WIN32
                    _aligned_free(ptr);
#else
                    free(ptr);
#endif
                }
            }

            // Best-effort hint to return unused heap memory to the OS.
            static void ReleaseUnused() {
#ifdef __GLIBC__
                // The return value of malloc_trim is not an error but to inform
                // you if memory was actually released or not, which we do not care about here
                TURBO_UNUSED(malloc_trim(0));
#endif
            }
        };

    }  // namespace

    // Base implementation: -1 signals that peak-memory tracking is unsupported.
    int64_t MemoryPool::max_memory() const { return -1; }

    ///////////////////////////////////////////////////////////////////////
    // MemoryPool implementation that delegates its core duty
    // to an Allocator class.

#ifndef NDEBUG
    // Distinct byte patterns written into the first/last byte of an area on
    // allocate/reallocate/free (debug builds only), to make use of
    // uninitialized or freed memory more detectable.
    static constexpr uint8_t kAllocPoison = 0xBC;
    static constexpr uint8_t kReallocPoison = 0xBD;
    static constexpr uint8_t kDeallocPoison = 0xBE;
#endif

    // Generic MemoryPool built over a static Allocator policy
    // (SystemAllocator, optionally wrapped in DebugAllocator).  Validates
    // sizes, delegates raw memory operations to the Allocator, maintains
    // allocation statistics, and in debug builds poisons the edges of every
    // touched area.
    template<typename Allocator>
    class BaseMemoryPoolImpl : public MemoryPool {
    public:
        ~BaseMemoryPoolImpl() override {}

        // Allocate `size` bytes aligned to `alignment`; rejects negative
        // sizes and sizes that do not fit in size_t.
        turbo::Status allocate(int64_t size, int64_t alignment, uint8_t **out) override {
            if (size < 0) {
                return turbo::invalid_argument_error("negative malloc size");
            }
            if (static_cast<uint64_t>(size) >= std::numeric_limits<size_t>::max()) {
                return turbo::resource_exhausted_error("malloc size overflows size_t");
            }
            TURBO_RETURN_NOT_OK(Allocator::AllocateAligned(size, alignment, out));
#ifndef NDEBUG
            // Poison data
            if (size > 0) {
                        DKCHECK_NE(*out, nullptr);
                (*out)[0] = kAllocPoison;
                (*out)[size - 1] = kAllocPoison;
            }
#endif

            stats_.DidAllocateBytes(size);
            return turbo::OkStatus();
        }

        // Resize an existing area; contents up to min(old_size, new_size)
        // are preserved by the Allocator.
        turbo::Status reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                 uint8_t **ptr) override {
            if (new_size < 0) {
                return turbo::invalid_argument_error("negative realloc size");
            }
            if (static_cast<uint64_t>(new_size) >= std::numeric_limits<size_t>::max()) {
                return turbo::resource_exhausted_error("realloc overflows size_t");
            }
            TURBO_RETURN_NOT_OK(Allocator::ReallocateAligned(old_size, new_size, alignment, ptr));
#ifndef NDEBUG
            // Poison data (edges of the newly grown region only)
            if (new_size > old_size) {
                        DKCHECK_NE(*ptr, nullptr);
                (*ptr)[old_size] = kReallocPoison;
                (*ptr)[new_size - 1] = kReallocPoison;
            }
#endif

            stats_.DidReallocateBytes(old_size, new_size);
            return turbo::OkStatus();
        }

        // Return `buffer` (previously obtained from allocate/reallocate with
        // the same size and alignment) to the allocator.
        void free(uint8_t *buffer, int64_t size, int64_t alignment) override {
#ifndef NDEBUG
            // Poison data
            if (size > 0) {
                        DKCHECK_NE(buffer, nullptr);
                buffer[0] = kDeallocPoison;
                buffer[size - 1] = kDeallocPoison;
            }
#endif
            Allocator::DeallocateAligned(buffer, size, alignment);

            stats_.DidFreeBytes(size);
        }

        // Hint the allocator to return unused memory to the OS.
        void ReleaseUnused() override { Allocator::ReleaseUnused(); }

        // Statistics accessors, all backed by stats_.
        int64_t bytes_allocated() const override { return stats_.bytes_allocated(); }

        int64_t max_memory() const override { return stats_.max_memory(); }

        int64_t total_bytes_allocated() const override {
            return stats_.total_bytes_allocated();
        }

        int64_t num_allocations() const override { return stats_.num_allocations(); }

    protected:
        internal::MemoryPoolStats stats_;
    };

    // Concrete pool over the plain system allocator.
    class SystemMemoryPool : public BaseMemoryPoolImpl<SystemAllocator> {
    public:
        std::string backend_name() const override { return "system"; }
    };

    // Same backend, with the guard-suffix debug wrapper enabled.  Reports the
    // same backend name so callers cannot distinguish the two.
    class SystemDebugMemoryPool : public BaseMemoryPoolImpl<DebugAllocator<SystemAllocator>> {
    public:
        std::string backend_name() const override { return "system"; }
    };

    std::unique_ptr<MemoryPool> MemoryPool::CreateDefault() {
        auto backend = DefaultBackend();
        switch (backend) {
            case MemoryPoolBackend::System:
                return IsDebugEnabled() ? std::unique_ptr<MemoryPool>(new SystemDebugMemoryPool)
                                        : std::unique_ptr<MemoryPool>(new SystemMemoryPool);
            default:
                KLOG(FATAL) << "Internal error: cannot create default memory pool";
                return nullptr;
        }
    }

    // Holds the process-wide pools together with a flag that tells buffer
    // destructors whether static destruction has begun (in which case the
    // pools below may already be destroyed and must not be used).
    static struct GlobalState {
        ~GlobalState() { finalizing_.store(true, std::memory_order_relaxed); }

        // True once static destruction of this object has started.
        bool is_finalizing() const { return finalizing_.load(std::memory_order_relaxed); }

        // Returns the debug-wrapped pool when NEBULA_DEBUG_MEMORY_POOL selects
        // a debug mode, the plain system pool otherwise.
        MemoryPool *system_memory_pool() {
            if (IsDebugEnabled()) {
                return &system_debug_pool_;
            } else {
                return &system_pool_;
            }
        }

    private:
        std::atomic<bool> finalizing_{false};  // constructed first, destroyed last

        SystemMemoryPool system_pool_;
        SystemDebugMemoryPool system_debug_pool_;
    } global_state;

    // Accessor for the process-wide system-allocator pool.
    MemoryPool *system_memory_pool() { return global_state.system_memory_pool(); }


    // Accessor for the process-wide default pool, honoring the backend chosen
    // via NEBULA_DEFAULT_MEMORY_POOL.
    MemoryPool *default_memory_pool() {
        auto backend = DefaultBackend();
        switch (backend) {
            case MemoryPoolBackend::System:
                return global_state.system_memory_pool();
            default:
                // No other backend is compiled into this build.
                KLOG(FATAL) << "Internal error: cannot create default memory pool";
                return nullptr;
        }
    }


    ///////////////////////////////////////////////////////////////////////
    // LoggingMemoryPool implementation

    // Decorator that forwards every call to `pool` and traces it on stdout.
    // `pool` must outlive this object (non-owning).
    LoggingMemoryPool::LoggingMemoryPool(MemoryPool *pool) : pool_(pool) {}

    // Forward the allocation, then log its parameters (even on failure).
    turbo::Status LoggingMemoryPool::allocate(int64_t size, int64_t alignment, uint8_t **out) {
        turbo::Status s = pool_->allocate(size, alignment, out);
        std::cout << "Allocate: size = " << size << ", alignment = " << alignment << std::endl;
        return s;
    }

    // Forward the reallocation, then log its parameters (even on failure).
    // Fix: the `alignment` argument is now forwarded to the wrapped pool; it
    // was previously dropped, so the underlying pool reallocated with its
    // default alignment while this wrapper logged the caller's alignment.
    turbo::Status LoggingMemoryPool::reallocate(int64_t old_size, int64_t new_size,
                                                int64_t alignment, uint8_t **ptr) {
        turbo::Status s = pool_->reallocate(old_size, new_size, alignment, ptr);
        std::cout << "reallocate: old_size = " << old_size << ", new_size = " << new_size
                  << ", alignment = " << alignment << std::endl;
        return s;
    }

    // Forward the deallocation, then log its parameters.
    void LoggingMemoryPool::free(uint8_t *buffer, int64_t size, int64_t alignment) {
        pool_->free(buffer, size, alignment);
        std::cout << "Free: size = " << size << ", alignment = " << alignment << std::endl;
    }

    // The statistics accessors below query the wrapped pool and log the value.
    int64_t LoggingMemoryPool::bytes_allocated() const {
        int64_t nb_bytes = pool_->bytes_allocated();
        std::cout << "bytes_allocated: " << nb_bytes << std::endl;
        return nb_bytes;
    }

    int64_t LoggingMemoryPool::max_memory() const {
        int64_t mem = pool_->max_memory();
        std::cout << "max_memory: " << mem << std::endl;
        return mem;
    }

    int64_t LoggingMemoryPool::total_bytes_allocated() const {
        int64_t mem = pool_->total_bytes_allocated();
        std::cout << "total_bytes_allocated: " << mem << std::endl;
        return mem;
    }

    int64_t LoggingMemoryPool::num_allocations() const {
        int64_t mem = pool_->num_allocations();
        std::cout << "num_allocations: " << mem << std::endl;
        return mem;
    }

    // Name reporting is delegated verbatim (not logged).
    std::string LoggingMemoryPool::backend_name() const { return pool_->backend_name(); }

    ///////////////////////////////////////////////////////////////////////
    // ProxyMemoryPool implementation

    // Pimpl for ProxyMemoryPool: forwards every call to the wrapped pool while
    // keeping an independent set of statistics for allocations made through
    // this proxy (the wrapped pool's own stats keep accumulating too).
    class ProxyMemoryPool::ProxyMemoryPoolImpl {
    public:
        // `pool` must outlive this object (non-owning).
        explicit ProxyMemoryPoolImpl(MemoryPool *pool) : pool_(pool) {}

        turbo::Status allocate(int64_t size, int64_t alignment, uint8_t **out) {
            TURBO_RETURN_NOT_OK(pool_->allocate(size, alignment, out));
            stats_.DidAllocateBytes(size);
            return turbo::OkStatus();
        }

        turbo::Status reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                 uint8_t **ptr) {
            TURBO_RETURN_NOT_OK(pool_->reallocate(old_size, new_size, alignment, ptr));
            stats_.DidReallocateBytes(old_size, new_size);
            return turbo::OkStatus();
        }

        void free(uint8_t *buffer, int64_t size, int64_t alignment) {
            pool_->free(buffer, size, alignment);
            stats_.DidFreeBytes(size);
        }

        // Statistics below reflect only traffic routed through this proxy.
        int64_t bytes_allocated() const { return stats_.bytes_allocated(); }

        int64_t max_memory() const { return stats_.max_memory(); }

        int64_t total_bytes_allocated() const { return stats_.total_bytes_allocated(); }

        int64_t num_allocations() const { return stats_.num_allocations(); }

        std::string backend_name() const { return pool_->backend_name(); }

    private:
        MemoryPool *pool_;
        internal::MemoryPoolStats stats_;
    };

    // Public ProxyMemoryPool surface: thin delegation to the pimpl above.
    ProxyMemoryPool::ProxyMemoryPool(MemoryPool *pool) {
        impl_.reset(new ProxyMemoryPoolImpl(pool));
    }

    // Out-of-line so the unique_ptr<Impl> destructor sees the complete type.
    ProxyMemoryPool::~ProxyMemoryPool() {}

    turbo::Status ProxyMemoryPool::allocate(int64_t size, int64_t alignment, uint8_t **out) {
        return impl_->allocate(size, alignment, out);
    }

    turbo::Status ProxyMemoryPool::reallocate(int64_t old_size, int64_t new_size, int64_t alignment,
                                              uint8_t **ptr) {
        return impl_->reallocate(old_size, new_size, alignment, ptr);
    }

    void ProxyMemoryPool::free(uint8_t *buffer, int64_t size, int64_t alignment) {
        return impl_->free(buffer, size, alignment);
    }

    int64_t ProxyMemoryPool::bytes_allocated() const { return impl_->bytes_allocated(); }

    int64_t ProxyMemoryPool::max_memory() const { return impl_->max_memory(); }

    int64_t ProxyMemoryPool::total_bytes_allocated() const {
        return impl_->total_bytes_allocated();
    }

    int64_t ProxyMemoryPool::num_allocations() const { return impl_->num_allocations(); }

    std::string ProxyMemoryPool::backend_name() const { return impl_->backend_name(); }

    // Names of all memory pool backends compiled into this build, in
    // registration order.
    std::vector<std::string> supported_memory_backend_names() {
        const auto &backends = SupportedBackends();
        std::vector<std::string> supported;
        supported.reserve(backends.size());  // one allocation, no regrowth
        // By reference: avoid copying each entry (performance-for-range-copy).
        for (const auto &backend: backends) {
            supported.push_back(backend.name);
        }
        return supported;
    }

    // -----------------------------------------------------------------------
    // Pool buffer and allocation

    /// A Buffer whose lifetime is tied to a particular MemoryPool
    class PoolBuffer final : public ResizableBuffer {
    public:
        // `pool` must outlive this buffer except during process shutdown
        // (see the destructor).
        explicit PoolBuffer(std::shared_ptr<MemoryManager> mm, MemoryPool *pool,
                            int64_t alignment)
                : ResizableBuffer(nullptr, 0, std::move(mm)), pool_(pool), alignment_(alignment) {}

        ~PoolBuffer() override {
            // Avoid calling pool_->Free if the global pools are destroyed
            // (XXX this will not work with user-defined pools)

            // This can happen if a Future is destructing on one thread while or
            // after memory pools are destructed on the main thread (as there is
            // no guarantee of destructor order between thread/memory pools)
            uint8_t *ptr = mutable_data();
            if (ptr && !global_state.is_finalizing()) {
                pool_->free(ptr, capacity_, alignment_);
            }
        }

        // Ensure capacity for at least `capacity` bytes, rounding the actual
        // capacity up to a multiple of 64.  Never shrinks; existing contents
        // are preserved via pool_->reallocate.
        turbo::Status Reserve(const int64_t capacity) override {
            if (capacity < 0) {
                return turbo::invalid_argument_error("Negative buffer capacity: ", capacity);
            }
            uint8_t *ptr = mutable_data();
            if (!ptr || capacity > capacity_) {
                int64_t new_capacity = bit_util::RoundUpToMultipleOf64(capacity);
                if (ptr) {
                    TURBO_RETURN_NOT_OK(pool_->reallocate(capacity_, new_capacity, alignment_, &ptr));
                } else {
                    TURBO_RETURN_NOT_OK(pool_->allocate(new_capacity, alignment_, &ptr));
                }
                data_ = ptr;
                capacity_ = new_capacity;
            }
            return turbo::OkStatus();
        }

        // Set the logical size.  When `shrink_to_fit` is true and the buffer
        // is not growing, the underlying capacity is reduced to the rounded-up
        // new size as well.
        turbo::Status resize(const int64_t new_size, bool shrink_to_fit = true) override {
            if (TURBO_UNLIKELY(new_size < 0)) {
                return turbo::invalid_argument_error("Negative buffer resize: ", new_size);
            }
            uint8_t *ptr = mutable_data();
            if (ptr && shrink_to_fit && new_size <= size_) {
                // Buffer is non-null and is not growing, so shrink to the requested size without
                // excess space.
                int64_t new_capacity = bit_util::RoundUpToMultipleOf64(new_size);
                if (capacity_ != new_capacity) {
                    // Buffer hasn't got yet the requested size.
                    TURBO_RETURN_NOT_OK(pool_->reallocate(capacity_, new_capacity, alignment_, &ptr));
                    data_ = ptr;
                    capacity_ = new_capacity;
                }
            } else {
                TURBO_RETURN_NOT_OK(Reserve(new_size));
            }
            size_ = new_size;

            return turbo::OkStatus();
        }

        // Factory helpers: a null pool resolves to the process default pool
        // and the default CPU memory manager; otherwise the pool is paired
        // with its CPU memory manager.
        static std::shared_ptr<PoolBuffer> MakeShared(MemoryPool *pool, int64_t alignment) {
            std::shared_ptr<MemoryManager> mm;
            if (pool == nullptr) {
                pool = default_memory_pool();
                mm = default_cpu_memory_manager();
            } else {
                mm = CPUDevice::memory_manager(pool);
            }
            return std::make_shared<PoolBuffer>(std::move(mm), pool, alignment);
        }

        static std::unique_ptr<PoolBuffer> MakeUnique(MemoryPool *pool, int64_t alignment) {
            std::shared_ptr<MemoryManager> mm;
            if (pool == nullptr) {
                pool = default_memory_pool();
                mm = default_cpu_memory_manager();
            } else {
                mm = CPUDevice::memory_manager(pool);
            }
            return std::make_unique<PoolBuffer>(std::move(mm), pool, alignment);
        }

    private:
        MemoryPool *pool_;
        int64_t alignment_;
    };

    namespace {
        // A utility that does most of the work of the `allocate_buffer` and
        // `allocate_resizable_buffer` methods. The argument `buffer` should be a smart pointer to
        // a PoolBuffer.  Resizes to `size`, zeroes the padding beyond the
        // logical size, and converts the pointer to the requested type.
        template<typename BufferPtr, typename PoolBufferPtr>
        inline turbo::Result<BufferPtr> ResizePoolBuffer(PoolBufferPtr &&buffer, const int64_t size) {
            TURBO_RETURN_NOT_OK(buffer->resize(size));
            buffer->zero_padding();
            // std::move is required: `buffer` is a named parameter being
            // converted into the (differently-typed) Result payload.
            return std::move(buffer);
        }

    }  // namespace

    // Convenience overload using the default buffer alignment.
    turbo::Result<std::unique_ptr<Buffer>> allocate_buffer(const int64_t size, MemoryPool *pool) {
        return allocate_buffer(size, kDefaultBufferAlignment, pool);
    }

    // Allocate a fixed-size buffer of `size` bytes from `pool` (the default
    // pool when null), aligned to `alignment`, with zeroed padding.
    turbo::Result<std::unique_ptr<Buffer>> allocate_buffer(const int64_t size,
                                                          const int64_t alignment,
                                                          MemoryPool *pool) {
        return ResizePoolBuffer<std::unique_ptr<Buffer>>(
                PoolBuffer::MakeUnique(pool, alignment), size);
    }

    // Convenience overload using the default buffer alignment.
    turbo::Result<std::unique_ptr<ResizableBuffer>> allocate_resizable_buffer(const int64_t size,
                                                                            MemoryPool *pool) {
        return allocate_resizable_buffer(size, kDefaultBufferAlignment, pool);
    }

    // Same as allocate_buffer, but the result can later be resized.
    turbo::Result<std::unique_ptr<ResizableBuffer>> allocate_resizable_buffer(const int64_t size,
                                                                            const int64_t alignment,
                                                                            MemoryPool *pool) {
        return ResizePoolBuffer<std::unique_ptr<ResizableBuffer>>(
                PoolBuffer::MakeUnique(pool, alignment), size);
    }

}  // namespace nebula
