#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iostream>
#include <memory>
#include <new>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

// ==================== Cross-platform aligned memory allocation ====================
// Windows: _aligned_malloc/_aligned_free (note the swapped argument order).
// POSIX:   C11 aligned_alloc/free.
// NOTE: C11 aligned_alloc requires `size` to be a multiple of `alignment`;
// callers must round their sizes up accordingly.
// Macro arguments are parenthesized to avoid operator-precedence surprises
// when callers pass expressions like `a + b`.
#ifdef _WIN32
#include <malloc.h>
#define ALIGNED_ALLOC(alignment, size) _aligned_malloc((size), (alignment))
#define ALIGNED_FREE(ptr) _aligned_free((ptr))
#else
#include <stdlib.h>
#define ALIGNED_ALLOC(alignment, size) aligned_alloc((alignment), (size))
#define ALIGNED_FREE(ptr) free((ptr))
#endif

// ==================== Basic data structures ====================
// A contiguous slab of memory carved out of the arena by bump allocation.
// Default member initializers guarantee a default-constructed block is a
// well-defined "empty" block instead of carrying indeterminate values.
struct MemoryBlock {
    void* base_ptr = nullptr;  // start of the region (not owned by this struct)
    size_t total_size = 0;     // capacity in bytes
    size_t used_size = 0;      // bytes handed out so far (bump-pointer offset)
    std::string name;          // human-readable label for diagnostics
};

// Per-layer memory bookkeeping: the three fixed regions a decoder layer owns,
// plus any ad-hoc intermediate blocks. layer_id defaults to -1 so an unbound
// region is distinguishable from layer 0.
struct LayerMemoryRegion {
    int layer_id = -1;         // -1 until the region is bound to a layer
    std::string layer_name;
    MemoryBlock kv_cache;      // persistent KV-cache storage
    MemoryBlock workspace;     // scratch space, reset between executions
    MemoryBlock shortcuts;     // residual-connection storage
    std::vector<MemoryBlock> intermediates; // intermediate results
};

// Lightweight view of a tensor placed inside a MemoryBlock. Does not own
// data_ptr. Default member initializers avoid indeterminate scalar fields
// in a default-constructed descriptor.
struct TensorDescriptor {
    std::string name;
    void* data_ptr = nullptr;      // location inside the owning MemoryBlock
    std::vector<size_t> shape;
    size_t element_size = 0;       // bytes per element
    size_t total_size = 0;         // aligned byte footprint of the tensor
    int layer_id = -1;             // owning layer, -1 if unassigned
    bool is_persistent = false;    // true: lives in KV cache, survives workspace resets
};

// ==================== 层级化内存分配器 ====================
class LayerAwareMemoryAllocator {
private:
    std::vector<LayerMemoryRegion> layers_;
    MemoryBlock weights_region_;
    MemoryBlock io_region_;
    MemoryBlock global_workspace_;
    void* base_memory_ptr_;
    
    // 内存分配策略
    const size_t kAlignment = 64; // 64字节对齐
    
public:
    LayerAwareMemoryAllocator(size_t total_memory = 800 * 1024 * 1024) 
        : base_memory_ptr_(nullptr) {
        initialize_memory_regions(total_memory);
    }
    
    ~LayerAwareMemoryAllocator() {
        if (base_memory_ptr_) {
            ALIGNED_FREE(base_memory_ptr_);
        }
    }
    
    void initialize_memory_regions(size_t total_memory) {
        // 使用跨平台对齐分配
        base_memory_ptr_ = ALIGNED_ALLOC(kAlignment, total_memory);
        if (!base_memory_ptr_) throw std::bad_alloc();
        
        // 区域划分
        uint8_t* current_ptr = static_cast<uint8_t*>(base_memory_ptr_);
        
        // 1. 权重区域 (150MB)
        weights_region_ = {current_ptr, 157260000, 0, "Weights"};
        current_ptr += weights_region_.total_size;
        
        // 2. IO区域 (8MB)
        io_region_ = {current_ptr, 8388608, 0, "IO_Buffer"};
        current_ptr += io_region_.total_size;
        
        // 3. 全局工作区 (剩余内存)
        size_t remaining = total_memory - (weights_region_.total_size + io_region_.total_size);
        global_workspace_ = {current_ptr, remaining, 0, "Global_Workspace"};
        
        std::cout << "Memory initialized: " << total_memory / (1024*1024) << "MB total\n";
        std::cout << " - Weights: " << weights_region_.total_size / (1024*1024) << "MB\n";
        std::cout << " - IO: " << io_region_.total_size / (1024*1024) << "MB\n";
        std::cout << " - Workspace: " << global_workspace_.total_size / (1024*1024) << "MB\n";
    }
    
    // 为每一层分配精确的内存
    void allocate_layer_memory(int layer_id, const std::string& layer_name) {
        LayerMemoryRegion layer;
        layer.layer_id = layer_id;
        layer.layer_name = layer_name;
        
        // 从全局工作区分配层内存
        size_t layer_kv_size = calculate_kv_cache_size(layer_id);
        size_t layer_workspace_size = calculate_workspace_size(layer_id);
        size_t layer_shortcut_size = calculate_shortcut_size(layer_id);
        
        // KV Cache分配（持久化）
        layer.kv_cache = {
            allocate_from_global(layer_kv_size, true),
            layer_kv_size,
            0,
            "Layer_" + std::to_string(layer_id) + "_KV_Cache"
        };
        
        // 工作区分配（临时）
        layer.workspace = {
            allocate_from_global(layer_workspace_size, false),
            layer_workspace_size,
            0,
            "Layer_" + std::to_string(layer_id) + "_Workspace"
        };
        
        // 残差连接存储
        layer.shortcuts = {
            allocate_from_global(layer_shortcut_size, true),
            layer_shortcut_size,
            0,
            "Layer_" + std::to_string(layer_id) + "_Shortcuts"
        };
        
        layers_.push_back(layer);
        
        std::cout << "Allocated memory for layer " << layer_id << " (" << layer_name << "):\n";
        std::cout << "  KV Cache: " << layer_kv_size / 1024 << "KB\n";
        std::cout << "  Workspace: " << layer_workspace_size / 1024 << "KB\n";
        std::cout << "  Shortcuts: " << layer_shortcut_size / 1024 << "KB\n";
    }
    
    // 为特定层分配张量
    TensorDescriptor allocate_tensor(int layer_id, const std::string& name, 
                                   const std::vector<size_t>& shape, 
                                   size_t element_size = sizeof(float),
                                   bool persistent = false) {
        LayerMemoryRegion& layer = get_layer(layer_id);
        MemoryBlock& target_block = persistent ? layer.kv_cache : layer.workspace;
        
        size_t total_size = calculate_tensor_size(shape, element_size);
        total_size = align_size(total_size);
        
        if (target_block.used_size + total_size > target_block.total_size) {
            throw std::runtime_error("Layer " + std::to_string(layer_id) + 
                                   " memory exhausted for tensor: " + name);
        }
        
        TensorDescriptor desc;
        desc.name = name;
        desc.data_ptr = static_cast<uint8_t*>(target_block.base_ptr) + target_block.used_size;
        desc.shape = shape;
        desc.element_size = element_size;
        desc.total_size = total_size;
        desc.layer_id = layer_id;
        desc.is_persistent = persistent;
        
        target_block.used_size += total_size;
        
        std::cout << "  Tensor '" << name << "' allocated in layer " << layer_id 
                  << " at offset " << target_block.used_size - total_size 
                  << ", size: " << total_size << " bytes\n";
        
        return desc;
    }
    
    // 获取层内存信息
    LayerMemoryRegion& get_layer(int layer_id) {
        for (auto& layer : layers_) {
            if (layer.layer_id == layer_id) return layer;
        }
        throw std::runtime_error("Layer " + std::to_string(layer_id) + " not found");
    }
    
    // 重置层工作区（保留KV Cache）
    void reset_layer_workspace(int layer_id) {
        LayerMemoryRegion& layer = get_layer(layer_id);
        layer.workspace.used_size = 0;
        layer.intermediates.clear();
        std::cout << "Reset workspace for layer " << layer_id << "\n";
    }
    
    // 获取残差连接存储
    void* get_shortcut_storage(int layer_id, size_t required_size) {
        LayerMemoryRegion& layer = get_layer(layer_id);
        required_size = align_size(required_size);
        
        if (layer.shortcuts.used_size + required_size > layer.shortcuts.total_size) {
            throw std::runtime_error("Shortcut storage exhausted in layer " + std::to_string(layer_id));
        }
        
        void* ptr = static_cast<uint8_t*>(layer.shortcuts.base_ptr) + layer.shortcuts.used_size;
        layer.shortcuts.used_size += required_size;
        return ptr;
    }
    
    // 获取权重指针
    void* get_weights_ptr() const { return weights_region_.base_ptr; }
    
    // 获取IO缓冲区
    void* get_io_buffer() const { return io_region_.base_ptr; }
    
    // 打印内存状态
    void print_memory_status() const {
        std::cout << "\n=== Global Memory Status ===\n";
        std::cout << "Global workspace used: " << global_workspace_.used_size / 1024 
                  << "KB / " << global_workspace_.total_size / 1024 << "KB\n";
        
        for (const auto& layer : layers_) {
            std::cout << "Layer " << layer.layer_id << ":\n";
            std::cout << "  KV Cache: " << layer.kv_cache.used_size / 1024 
                      << "KB / " << layer.kv_cache.total_size / 1024 << "KB\n";
            std::cout << "  Workspace: " << layer.workspace.used_size / 1024 
                      << "KB / " << layer.workspace.total_size / 1024 << "KB\n";
            std::cout << "  Shortcuts: " << layer.shortcuts.used_size / 1024 
                      << "KB / " << layer.shortcuts.total_size / 1024 << "KB\n";
        }
    }
    
private:
    void* allocate_from_global(size_t size, bool persistent) {
        size = align_size(size);
        
        if (global_workspace_.used_size + size > global_workspace_.total_size) {
            throw std::runtime_error("Global workspace exhausted");
        }
        
        void* ptr = static_cast<uint8_t*>(global_workspace_.base_ptr) + global_workspace_.used_size;
        global_workspace_.used_size += size;
        
        return ptr;
    }
    
    size_t calculate_kv_cache_size(int layer_id) const {
        // 每层KV Cache: 2 * batch * heads * seq_len * head_dim
        return 2 * 1 * 32 * 2048 * 128 * sizeof(int8_t); // 16MB per layer
    }
    
    size_t calculate_workspace_size(int layer_id) const {
        // 根据层类型分配不同大小的workspace
        if (layer_id == 0) return 8 * 1024 * 1024; // 第一层需要更多空间
        return 6 * 1024 * 1024; // 其他层
    }
    
    size_t calculate_shortcut_size(int layer_id) const {
        // 残差连接存储
        return 2 * 1024 * 1024; // 2MB per layer
    }
    
    size_t calculate_tensor_size(const std::vector<size_t>& shape, size_t element_size) const {
        size_t total = element_size;
        for (auto dim : shape) total *= dim;
        return total;
    }
    
    size_t align_size(size_t size) const {
        return (size + kAlignment - 1) & ~(kAlignment - 1);
    }
};

// ==================== Decoder layer ====================
// One simulated transformer decoder layer. Registers its per-layer memory
// regions with the allocator at construction time; IO tensors are placed in
// setup_memory(), and per-run scratch is dropped again in cleanup().
class DecoderLayer {
private:
    int layer_id_;
    std::string layer_name_;
    LayerAwareMemoryAllocator& memory_allocator_;
    
    // Tensors that live for the whole layer lifetime.
    TensorDescriptor input_tensor_;
    TensorDescriptor output_tensor_;
    TensorDescriptor residual_shortcut_;
    
public:
    DecoderLayer(int id, const std::string& name, LayerAwareMemoryAllocator& allocator)
        : layer_id_(id), layer_name_(name), memory_allocator_(allocator) {
        // Reserve this layer's KV-cache / workspace / shortcut regions up front.
        memory_allocator_.allocate_layer_memory(layer_id_, layer_name_);
    }
    
    // Place input/output in scratch space and the residual persistently.
    void setup_memory() {
        std::cout << "\n=== Setting up memory for " << layer_name_ << " ===\n";
        
        const std::vector<size_t> hidden_shape{1, 1, 4096};
        
        input_tensor_ = memory_allocator_.allocate_tensor(
            layer_id_, "input", hidden_shape, sizeof(int8_t), false);
        output_tensor_ = memory_allocator_.allocate_tensor(
            layer_id_, "output", hidden_shape, sizeof(int8_t), false);
        residual_shortcut_ = memory_allocator_.allocate_tensor(
            layer_id_, "residual", hidden_shape, sizeof(int8_t), true);
    }
    
    // Run the simulated attention -> FFN -> residual pipeline.
    void execute() {
        std::cout << "Executing " << layer_name_ << "\n";
        simulate_attention();
        simulate_ffn();
        simulate_residual();
    }
    
    // Drop scratch allocations; persistent tensors survive.
    void cleanup() {
        memory_allocator_.reset_layer_workspace(layer_id_);
    }
    
    TensorDescriptor get_output() const { return output_tensor_; }
    TensorDescriptor get_residual() const { return residual_shortcut_; }
    int get_layer_id() const { return layer_id_; }
    
private:
    void simulate_attention() {
        const std::vector<size_t> head_shape{1, 32, 128};
        
        auto query = memory_allocator_.allocate_tensor(
            layer_id_, "Q", head_shape, sizeof(int8_t), false);
        // K and V are persisted into the layer's KV cache.
        auto key = memory_allocator_.allocate_tensor(
            layer_id_, "K", head_shape, sizeof(int8_t), true);
        auto value = memory_allocator_.allocate_tensor(
            layer_id_, "V", head_shape, sizeof(int8_t), true);
        auto attention_scores = memory_allocator_.allocate_tensor(
            layer_id_, "scores", {1, 32, 2048}, sizeof(float), false);
        
        std::cout << "  Attention computed with Q@" << query.data_ptr 
                  << ", K@" << key.data_ptr << "\n";
    }
    
    void simulate_ffn() {
        auto hidden = memory_allocator_.allocate_tensor(
            layer_id_, "ffn_inter", {1, 1, 11008}, sizeof(int8_t), false);
        
        std::cout << "  FFN computed with intermediate@" << hidden.data_ptr << "\n";
    }
    
    void simulate_residual() {
        std::cout << "  Residual connection stored@" << residual_shortcut_.data_ptr << "\n";
    }
};

// ==================== 完整的Decoder图 ====================
class LLaMADecoderGraph {
private:
    LayerAwareMemoryAllocator memory_allocator_;
    std::vector<std::unique_ptr<DecoderLayer>> layers_;
    
public:
    LLaMADecoderGraph() : memory_allocator_(800 * 1024 * 1024) {
        build_decoder_graph();
    }
    
    void build_decoder_graph() {
        std::cout << "=== Building LLaMA Decoder Graph ===\n";
        
        // 创建32个decoder层
        for (int i = 0; i < 32; i++) {
            std::string layer_name = "Decoder_Layer_" + std::to_string(i);
            layers_.push_back(std::make_unique<DecoderLayer>(i, layer_name, memory_allocator_));
        }
        
        // 设置每层内存
        for (auto& layer : layers_) {
            layer->setup_memory();
        }
    }
    
    void execute() {
        std::cout << "\n=== Executing Decoder Graph ===\n";
        
        for (size_t i = 0; i < layers_.size(); i++) {
            std::cout << "\n--- Processing Layer " << i << " ---\n";
            
            // 执行当前层
            layers_[i]->execute();
            
            // 处理层间连接
            if (i > 0) {
                std::cout << "  Layer connection: " << (i-1) << " → " << i << "\n";
            }
            
            // 清理工作内存
            layers_[i]->cleanup();
        }
        
        std::cout << "\n✅ Decoder execution completed!\n";
    }
    
    void print_memory_usage() {
        std::cout << "\n=== Memory Usage Summary ===\n";
        std::cout << "Total layers: " << layers_.size() << "\n";
        
        memory_allocator_.print_memory_status();
    }
};

// ==================== Demo driver ====================
int main() {
    try {
        std::cout << "LLaMA 2 Decoder with Layer-wise Memory Allocation\n";
        std::cout << "=================================================\n";
        
        LLaMADecoderGraph decoder;
        decoder.execute();
        decoder.print_memory_usage();
        
        std::cout << "\n🎯 Layer-aware memory allocation successful!\n";
        return 0;
    } catch (const std::exception& e) {
        // Any allocation or runtime failure from the graph surfaces here.
        std::cerr << " Error: " << e.what() << std::endl;
        return 1;
    }
}
