/*!
 * \file memory_pool.hpp
 * \project AI_CTP
 * 
 * \brief 高性能内存池实现，专为CTP数据结构优化
 *        减少内存分配开销，避免内存碎片
 */
#pragma once

#include <algorithm>      // std::max
#include <atomic>
#include <cstdlib>        // std::aligned_alloc, std::free
#include <cstring>        // std::memset
#include <memory>
#include <mutex>
#include <new>            // std::bad_alloc, placement new
#include <shared_mutex>   // std::shared_mutex (MemoryPoolManager)
#include <stack>
#include <thread>
#include <type_traits>    // std::is_default_constructible
#include <typeindex>      // std::type_index (MemoryPoolManager)
#include <unordered_map>
#include <vector>

namespace ctp_api {
namespace performance {

// 线程本地对象池
template<typename T>
class ThreadLocalObjectPool {
private:
    static constexpr size_t kDefaultBlockSize = 1024;
    
    struct Block {
        std::unique_ptr<T[]> objects;
        std::vector<T*> available;
        size_t capacity;
        
        explicit Block(size_t cap) : capacity(cap) {
            objects = std::make_unique<T[]>(capacity);
            available.reserve(capacity);
            for (size_t i = 0; i < capacity; ++i) {
                available.push_back(&objects[i]);
            }
        }
    };
    
    thread_local static std::vector<std::unique_ptr<Block>> blocks_;
    thread_local static std::stack<T*> available_objects_;
    
    static void ensure_available() {
        if (available_objects_.empty()) {
            auto block = std::make_unique<Block>(kDefaultBlockSize);
            for (auto* obj : block->available) {
                available_objects_.push(obj);
            }
            blocks_.push_back(std::move(block));
        }
    }

public:
    static T* acquire() {
        ensure_available();
        T* obj = available_objects_.top();
        available_objects_.pop();
        return obj;
    }
    
    static void release(T* obj) {
        if (obj) {
            obj->~T();
            new(obj) T();
            available_objects_.push(obj);
        }
    }
    
    // RAII包装器
    class UniquePtr {
    private:
        T* ptr_;
        
    public:
        explicit UniquePtr(T* p = nullptr) : ptr_(p) {}
        
        ~UniquePtr() {
            if (ptr_) {
                ThreadLocalObjectPool<T>::release(ptr_);
            }
        }
        
        UniquePtr(const UniquePtr&) = delete;
        UniquePtr& operator=(const UniquePtr&) = delete;
        
        UniquePtr(UniquePtr&& other) noexcept : ptr_(other.ptr_) {
            other.ptr_ = nullptr;
        }
        
        UniquePtr& operator=(UniquePtr&& other) noexcept {
            if (this != &other) {
                if (ptr_) {
                    ThreadLocalObjectPool<T>::release(ptr_);
                }
                ptr_ = other.ptr_;
                other.ptr_ = nullptr;
            }
            return *this;
        }
        
        T* get() { return ptr_; }
        const T* get() const { return ptr_; }
        T& operator*() { return *ptr_; }
        const T& operator*() const { return *ptr_; }
        T* operator->() { return ptr_; }
        const T* operator->() const { return ptr_; }
        
        T* release() {
            T* tmp = ptr_;
            ptr_ = nullptr;
            return tmp;
        }
        
        void reset(T* p = nullptr) {
            if (ptr_) {
                ThreadLocalObjectPool<T>::release(ptr_);
            }
            ptr_ = p;
        }
        
        explicit operator bool() const { return ptr_ != nullptr; }
    };
    
    static UniquePtr make_unique() {
        return UniquePtr(acquire());
    }
};

template<typename T>
thread_local std::vector<std::unique_ptr<typename ThreadLocalObjectPool<T>::Block>> 
ThreadLocalObjectPool<T>::blocks_;

template<typename T>
thread_local std::stack<T*> ThreadLocalObjectPool<T>::available_objects_;

// 高性能内存分配器
class HighPerformanceAllocator {
private:
    struct Chunk {
        char* data;
        size_t size;
        size_t offset;
        Chunk* next;
        
        Chunk(size_t sz) : size(sz), offset(0), next(nullptr) {
            // 使用对齐内存分配，提高缓存效率
            data = static_cast<char*>(std::aligned_alloc(64, sz));
            if (!data) {
                throw std::bad_alloc();
            }
            std::memset(data, 0, sz);
        }
        
        ~Chunk() {
            std::free(data);
        }
        
        void* allocate(size_t bytes, size_t alignment) {
            // 确保对齐
            size_t aligned_offset = (offset + alignment - 1) & ~(alignment - 1);
            if (aligned_offset + bytes > size) {
                return nullptr;
            }
            
            void* ptr = data + aligned_offset;
            offset = aligned_offset + bytes;
            return ptr;
        }
        
        void reset() {
            offset = 0;
        }
        
        size_t remaining() const {
            return size - offset;
        }
    };
    
    Chunk* current_chunk_;
    size_t default_chunk_size_;
    mutable std::mutex mutex_;
    
public:
    explicit HighPerformanceAllocator(size_t chunk_size = 1024 * 1024) 
        : current_chunk_(nullptr), default_chunk_size_(chunk_size) {}
    
    ~HighPerformanceAllocator() {
        std::lock_guard<std::mutex> lock(mutex_);
        Chunk* chunk = current_chunk_;
        while (chunk) {
            Chunk* next = chunk->next;
            delete chunk;
            chunk = next;
        }
    }
    
    void* allocate(size_t bytes, size_t alignment = 8) {
        std::lock_guard<std::mutex> lock(mutex_);
        
        if (!current_chunk_) {
            size_t chunk_size = std::max(bytes * 2, default_chunk_size_);
            current_chunk_ = new Chunk(chunk_size);
        }
        
        void* ptr = current_chunk_->allocate(bytes, alignment);
        if (!ptr) {
            // 当前chunk空间不足，分配新chunk
            size_t chunk_size = std::max(bytes * 2, default_chunk_size_);
            Chunk* new_chunk = new Chunk(chunk_size);
            new_chunk->next = current_chunk_;
            current_chunk_ = new_chunk;
            
            ptr = current_chunk_->allocate(bytes, alignment);
            if (!ptr) {
                throw std::bad_alloc();
            }
        }
        
        return ptr;
    }
    
    template<typename T, typename... Args>
    T* construct(Args&&... args) {
        void* ptr = allocate(sizeof(T), alignof(T));
        return new(ptr) T(std::forward<Args>(args)...);
    }
    
    // 重置所有chunk（不释放内存，复用内存块）
    void reset() {
        std::lock_guard<std::mutex> lock(mutex_);
        Chunk* chunk = current_chunk_;
        while (chunk) {
            chunk->reset();
            chunk = chunk->next;
        }
    }
    
    // 获取内存使用统计
    struct MemoryStats {
        size_t total_allocated = 0;
        size_t total_used = 0;
        size_t chunk_count = 0;
        double utilization = 0.0;
    };
    
    MemoryStats get_stats() const {
        std::lock_guard<std::mutex> lock(mutex_);
        MemoryStats stats;
        
        Chunk* chunk = current_chunk_;
        while (chunk) {
            stats.total_allocated += chunk->size;
            stats.total_used += chunk->offset;
            stats.chunk_count++;
            chunk = chunk->next;
        }
        
        if (stats.total_allocated > 0) {
            stats.utilization = static_cast<double>(stats.total_used) / stats.total_allocated;
        }
        
        return stats;
    }
};

// 专用于CTP数据结构的内存池
template<typename T>
class CtpDataPool {
private:
    ThreadLocalObjectPool<T> pool_;
    
public:
    using UniquePtr = typename ThreadLocalObjectPool<T>::UniquePtr;
    
    UniquePtr acquire() {
        return pool_.make_unique();
    }
    
    // 批量获取对象
    std::vector<UniquePtr> acquire_batch(size_t count) {
        std::vector<UniquePtr> result;
        result.reserve(count);
        
        for (size_t i = 0; i < count; ++i) {
            result.push_back(pool_.make_unique());
        }
        
        return result;
    }
};

// 全局内存池管理器
class MemoryPoolManager {
private:
    static std::unique_ptr<MemoryPoolManager> instance_;
    static std::once_flag init_flag_;
    
    HighPerformanceAllocator allocator_;
    std::unordered_map<std::type_index, std::unique_ptr<void, void(*)(void*)>> pools_;
    mutable std::shared_mutex pools_mutex_;
    
    MemoryPoolManager() = default;
    
public:
    static MemoryPoolManager& getInstance() {
        std::call_once(init_flag_, []() {
            instance_ = std::unique_ptr<MemoryPoolManager>(new MemoryPoolManager());
        });
        return *instance_;
    }
    
    template<typename T>
    CtpDataPool<T>& getPool() {
        std::type_index type_id = std::type_index(typeid(T));
        
        {
            std::shared_lock<std::shared_mutex> lock(pools_mutex_);
            auto it = pools_.find(type_id);
            if (it != pools_.end()) {
                return *static_cast<CtpDataPool<T>*>(it->second.get());
            }
        }
        
        std::unique_lock<std::shared_mutex> lock(pools_mutex_);
        auto pool = std::make_unique<CtpDataPool<T>>();
        auto* pool_ptr = pool.get();
        
        pools_[type_id] = std::unique_ptr<void, void(*)(void*)>(
            pool.release(),
            [](void* p) { delete static_cast<CtpDataPool<T>*>(p); }
        );
        
        return *pool_ptr;
    }
    
    // 获取全局内存统计
    HighPerformanceAllocator::MemoryStats getMemoryStats() {
        return allocator_.get_stats();
    }
};

std::unique_ptr<MemoryPoolManager> MemoryPoolManager::instance_;
std::once_flag MemoryPoolManager::init_flag_;

} // namespace performance
} // namespace ctp_api 