#ifndef MLP_DCU_H
#define MLP_DCU_H

#include <hip/hip_runtime.h>

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstring>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include <string>
#include <vector>

// Error-checking macro for HIP runtime calls.
// The argument is evaluated exactly once — the previous version expanded it
// twice, which re-executed the HIP call on the error path when invoked as
// HIP_CHECK(hipMalloc(...)). The do/while(0) wrapper makes the macro behave
// as a single statement inside if/else chains.
#define HIP_CHECK(status) do { \
    const hipError_t hip_check_err_ = (status); \
    if (hip_check_err_ != hipSuccess) { \
        std::cerr << "HIP错误: " << hipGetErrorString(hip_check_err_) << " at line " << __LINE__ << std::endl; \
        exit(EXIT_FAILURE); \
    } \
} while (0)

// Lightweight stopwatch built on std::chrono::high_resolution_clock.
// Usage: start(), then optionally stop(). elapsedMilliseconds() reports the
// time since start() — up to "now" while the timer is still running, or up
// to the stop() instant once stopped.
class Timer {
private:
    using Clock = std::chrono::high_resolution_clock;

    Clock::time_point begin_{};
    Clock::time_point finish_{};
    bool ticking_ = false;

public:
    Timer() = default;

    // Record the starting instant and mark the timer as running.
    void start() {
        begin_ = Clock::now();
        ticking_ = true;
    }

    // Record the stopping instant; elapsedMilliseconds() becomes fixed.
    void stop() {
        finish_ = Clock::now();
        ticking_ = false;
    }

    // Elapsed wall time in (fractional) milliseconds.
    double elapsedMilliseconds() const {
        const auto last = ticking_ ? Clock::now() : finish_;
        return std::chrono::duration<double, std::milli>(last - begin_).count();
    }
};

// Matrix of doubles with paired host and device (GPU) buffers.
// Owns both allocations (RAII) and implements the rule of five with deep
// copies and ownership-transferring moves.
// NOTE(review): member-initializer lists now follow declaration order
// (rows, cols, h_data, d_data, device_allocated, host_allocated); the
// previous ordering triggered -Wreorder warnings.
class Matrix {
private:
    int rows;
    int cols;
    double* h_data;  // host-side buffer
    double* d_data;  // device-side buffer
    bool device_allocated;
    bool host_allocated;

    // Payload size in bytes, widened to size_t BEFORE multiplying so large
    // dimensions cannot overflow int arithmetic.
    size_t byteCount() const {
        return static_cast<size_t>(rows) * static_cast<size_t>(cols) * sizeof(double);
    }

public:
    // Construct a rows x cols matrix; no memory is allocated until
    // allocateHost()/allocateDevice() (or a copy) is requested.
    Matrix(int rows, int cols) : rows(rows), cols(cols),
                                h_data(nullptr), d_data(nullptr),
                                device_allocated(false),
                                host_allocated(false) {}

    // Destructor — releases both buffers.
    ~Matrix() {
        freeMemory();
    }

    // Copy constructor — deep-copies whichever buffers `other` holds.
    Matrix(const Matrix& other) : rows(other.rows), cols(other.cols),
                                 h_data(nullptr), d_data(nullptr),
                                 device_allocated(false),
                                 host_allocated(false) {
        // Copy host data if present
        if (other.host_allocated) {
            allocateHost();
            std::memcpy(h_data, other.h_data, byteCount());
        }

        // Copy device data if present
        if (other.device_allocated) {
            allocateDevice();
            HIP_CHECK(hipMemcpy(d_data, other.d_data, byteCount(), hipMemcpyDeviceToDevice));
        }
    }

    // Move constructor — steals other's buffers and leaves it empty so its
    // destructor frees nothing.
    Matrix(Matrix&& other) noexcept : rows(other.rows), cols(other.cols),
                                     h_data(other.h_data), d_data(other.d_data),
                                     device_allocated(other.device_allocated),
                                     host_allocated(other.host_allocated) {
        other.h_data = nullptr;
        other.d_data = nullptr;
        other.device_allocated = false;
        other.host_allocated = false;
        other.rows = 0;
        other.cols = 0;
    }

    // Copy assignment — frees current buffers, then deep-copies.
    Matrix& operator=(const Matrix& other) {
        if (this != &other) {
            freeMemory();

            rows = other.rows;
            cols = other.cols;

            if (other.host_allocated) {
                allocateHost();
                std::memcpy(h_data, other.h_data, byteCount());
            }

            if (other.device_allocated) {
                allocateDevice();
                HIP_CHECK(hipMemcpy(d_data, other.d_data, byteCount(), hipMemcpyDeviceToDevice));
            }
        }
        return *this;
    }

    // Move assignment — frees current buffers, then takes other's.
    Matrix& operator=(Matrix&& other) noexcept {
        if (this != &other) {
            freeMemory();

            rows = other.rows;
            cols = other.cols;
            h_data = other.h_data;
            d_data = other.d_data;
            device_allocated = other.device_allocated;
            host_allocated = other.host_allocated;

            // Reset the source so its destructor frees nothing.
            other.h_data = nullptr;
            other.d_data = nullptr;
            other.device_allocated = false;
            other.host_allocated = false;
            other.rows = 0;
            other.cols = 0;
        }
        return *this;
    }

    // Dimension accessors.
    int getRows() const { return rows; }
    int getCols() const { return cols; }
    int size() const { return rows * cols; }  // element count, not bytes

    // Lazily allocate the zero-initialized host buffer (idempotent).
    void allocateHost() {
        if (!host_allocated) {
            h_data = new double[static_cast<size_t>(rows) * static_cast<size_t>(cols)]();
            host_allocated = true;
        }
    }

    // Lazily allocate the device buffer (idempotent).
    void allocateDevice() {
        if (!device_allocated) {
            HIP_CHECK(hipMalloc(&d_data, byteCount()));
            device_allocated = true;
        }
    }

    // Release both buffers; safe to call repeatedly.
    void freeMemory() {
        if (host_allocated && h_data != nullptr) {
            delete[] h_data;
            h_data = nullptr;
            host_allocated = false;
        }
        if (device_allocated && d_data != nullptr) {
            HIP_CHECK(hipFree(d_data));
            d_data = nullptr;
            device_allocated = false;
        }
    }

    // Fill the host buffer with uniform random values in [min, max].
    // Does NOT touch the device buffer — call copyToDevice() afterwards.
    void randomInit(double min = -1.0, double max = 1.0) {
        allocateHost();  // no-op if already allocated

        std::random_device rd;
        std::mt19937 gen(rd());
        std::uniform_real_distribution<double> dist(min, max);

        for (int i = 0; i < rows * cols; ++i) {
            h_data[i] = dist(gen);
        }
    }

    // Host -> device transfer (allocates the device buffer on demand).
    void copyToDevice() {
        if (!device_allocated) {
            allocateDevice();
        }
        if (host_allocated) {
            HIP_CHECK(hipMemcpy(d_data, h_data, byteCount(), hipMemcpyHostToDevice));
        }
    }

    // Device -> host transfer (allocates the host buffer on demand).
    void copyToHost() {
        if (!host_allocated) {
            allocateHost();
        }
        if (device_allocated) {
            HIP_CHECK(hipMemcpy(h_data, d_data, byteCount(), hipMemcpyDeviceToHost));
        }
    }

    // Raw device pointer (may be nullptr if never allocated).
    double* getDeviceData() const {
        return d_data;
    }

    // Raw host pointer (may be nullptr if never allocated).
    double* getHostData() const {
        return h_data;
    }

    // Print the top-left corner of the matrix (debugging aid).
    void print(int max_rows = 5, int max_cols = 5) const {
        if (!host_allocated) {
            std::cout << "矩阵未在主机内存中分配" << std::endl;
            return;
        }

        int r = std::min(max_rows, rows);
        int c = std::min(max_cols, cols);

        for (int i = 0; i < r; ++i) {
            for (int j = 0; j < c; ++j) {
                std::cout << h_data[i * cols + j] << " ";
            }
            if (c < cols) std::cout << "...";
            std::cout << std::endl;
        }
        if (r < rows) std::cout << "..." << std::endl;
    }
};

// Abstract interface every network layer implements.
class Layer {
public:
    // Virtual destructor so derived layers are destroyed correctly through
    // a base-class pointer.
    virtual ~Layer() = default;

    // Compute this layer's result for `input`, writing it into `output`.
    virtual void forward(const Matrix& input, Matrix& output) = 0;

    // Human-readable description, e.g. "Linear(784, 128)".
    virtual std::string getName() const = 0;
};

// 全连接层
class LinearLayer : public Layer {
private:
    Matrix weights;
    Matrix bias;
    int input_dim;
    int output_dim;
    
    // 矩阵乘法核函数声明（实现在.cpp文件中）
    void matrixMultiply(const Matrix& A, const Matrix& B, Matrix& C);
    void addBias(Matrix& output, const Matrix& bias);

public:
    LinearLayer(int input_dim, int output_dim) 
        : weights(input_dim, output_dim), 
          bias(1, output_dim),
          input_dim(input_dim), 
          output_dim(output_dim) {
        
        // 初始化权重和偏置
        weights.allocateHost();
        weights.randomInit(-0.1, 0.1);  // Xavier初始化的简化版
        weights.copyToDevice();
        
        bias.allocateHost();
        bias.randomInit(-0.1, 0.1);
        bias.copyToDevice();
    }

    ~LinearLayer() {}

    void forward(const Matrix& input, Matrix& output) override;

    std::string getName() const override {
        return "Linear(" + std::to_string(input_dim) + ", " + std::to_string(output_dim) + ")";
    }

    // 获取权重和偏置
    const Matrix& getWeights() const { return weights; }
    const Matrix& getBias() const { return bias; }
};

// ReLU激活层
class ReLULayer : public Layer {
private:
    void applyReLU(Matrix& data);

public:
    ReLULayer() {}
    ~ReLULayer() {}

    void forward(const Matrix& input, Matrix& output) override;

    std::string getName() const override {
        return "ReLU";
    }
};

// Multi-layer perceptron: an ordered stack of layers executed sequentially.
class MLP {
private:
    std::vector<std::shared_ptr<Layer>> layers;
    std::vector<Matrix> intermediate_outputs;  // per-layer activations
    Timer forward_timer;                       // times the latest forward pass

public:
    MLP() = default;
    ~MLP() = default;

    // Append a layer. A placeholder output slot is created alongside it;
    // its real dimensions are established during forward().
    void addLayer(std::shared_ptr<Layer> layer) {
        layers.push_back(std::move(layer));
        intermediate_outputs.push_back(Matrix(0, 0));
    }

    // Run the network on `input`, timing the pass.
    // Returns a deep copy of the final layer's output; throws if the
    // network is empty.
    Matrix forward(const Matrix& input) {
        if (layers.empty()) {
            throw std::runtime_error("网络中没有层");
        }

        forward_timer.start();

        // Keep one output slot per layer.
        if (intermediate_outputs.size() != layers.size()) {
            intermediate_outputs.resize(layers.size(), Matrix(0, 0));
        }

        // Feed each layer's output into the next.
        const Matrix* cursor = &input;
        for (size_t idx = 0; idx < layers.size(); ++idx) {
            layers[idx]->forward(*cursor, intermediate_outputs[idx]);
            cursor = &intermediate_outputs[idx];
        }

        forward_timer.stop();

        // Deep copy, since the pointed-to activation stays owned by the MLP.
        return Matrix(*cursor);
    }

    // Dump a one-line description of every layer.
    void printArchitecture() const {
        std::cout << "MLP架构:" << std::endl;
        for (size_t i = 0; i < layers.size(); ++i) {
            std::cout << "  Layer " << i << ": " << layers[i]->getName() << std::endl;
        }
    }

    // Duration of the most recent forward() call, in milliseconds.
    double getForwardTime() const {
        return forward_timer.elapsedMilliseconds();
    }
};

// Benchmarks repeated MLP forward passes and aggregates timing statistics.
// printResults() now delegates to the accessor methods so each statistic is
// computed in exactly one place, and getMaxTime() is added to complete the
// min/avg/max/total accessor set.
class PerformanceTester {
private:
    int num_runs;                   // measured runs (excludes warm-up)
    Timer total_timer;              // wall time across the whole benchmark
    std::vector<double> run_times;  // per-run forward times in ms

public:
    PerformanceTester(int num_runs = 10) : num_runs(num_runs) {}

    // Time `num_runs` forward passes of `mlp` on `input`.
    // One warm-up pass runs first so one-time costs (allocations, lazy
    // initialization) do not skew the numbers; the device is synchronized
    // around every run so only completed GPU work is measured.
    void testMLPForward(MLP& mlp, const Matrix& input) {
        run_times.clear();

        // Warm-up pass (result discarded); copyToHost forces completion.
        Matrix warmup_output = mlp.forward(input);
        warmup_output.copyToHost();

        HIP_CHECK(hipDeviceSynchronize());

        total_timer.start();

        for (int i = 0; i < num_runs; ++i) {
            // Start each run from an idle device.
            HIP_CHECK(hipDeviceSynchronize());

            Matrix output = mlp.forward(input);

            // Ensure the pass has fully finished before reading the timer.
            output.copyToHost();
            HIP_CHECK(hipDeviceSynchronize());

            run_times.push_back(mlp.getForwardTime());

            // Progress report every 5 runs.
            if (i % 5 == 0) {
                std::cout << "完成第 " << i+1 << "/" << num_runs << " 次运行" << std::endl;
            }
        }

        total_timer.stop();
    }

    // Print aggregate statistics for the collected runs.
    void printResults() const {
        if (run_times.empty()) {
            std::cout << "没有性能测试数据" << std::endl;
            return;
        }

        std::cout << "性能测试结果 (" << run_times.size() << " 次运行):" << std::endl;
        std::cout << "  平均时间: " << getAverageTime() << " ms" << std::endl;
        std::cout << "  最小时间: " << getMinTime() << " ms" << std::endl;
        std::cout << "  最大时间: " << getMaxTime() << " ms" << std::endl;
        std::cout << "  总运行时间: " << total_timer.elapsedMilliseconds() << " ms" << std::endl;
    }

    // Copy of the raw per-run timings (ms).
    std::vector<double> getRunTimes() const {
        return run_times;
    }

    // Mean per-run time in ms; 0.0 when no data has been collected.
    double getAverageTime() const {
        if (run_times.empty()) return 0.0;

        double sum = 0.0;
        for (double time : run_times) {
            sum += time;
        }
        return sum / run_times.size();
    }

    // Fastest run in ms; 0.0 when no data has been collected.
    double getMinTime() const {
        if (run_times.empty()) return 0.0;

        double min_time = run_times[0];
        for (double time : run_times) {
            min_time = std::min(min_time, time);
        }
        return min_time;
    }

    // Slowest run in ms; 0.0 when no data has been collected.
    double getMaxTime() const {
        if (run_times.empty()) return 0.0;

        double max_time = run_times[0];
        for (double time : run_times) {
            max_time = std::max(max_time, time);
        }
        return max_time;
    }

    // Total benchmark wall time in ms (includes synchronization overhead).
    double getTotalTime() const {
        return total_timer.elapsedMilliseconds();
    }
};

#endif // MLP_DCU_H
