#include "mlp_dcu.h"

#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <vector>

// Main program: benchmarks MLP forward-pass performance on a Sugon DCU
// (HIP device). Runs a fixed-size benchmark, then sweeps batch size and
// hidden-layer width, writing timing data to three CSV files.
// Returns 0 on success, -1 if no HIP device is found or the final output
// matrix has no device buffer.
int main() {
    std::cout << "曙光DCU前馈神经网络性能测试" << std::endl;
    std::cout << "================================" << std::endl;

    // Probe for HIP devices; bail out early if none is present.
    int deviceCount;
    HIP_CHECK(hipGetDeviceCount(&deviceCount));
    std::cout << "检测到 " << deviceCount << " 个HIP设备" << std::endl;

    if (deviceCount == 0) {
        std::cerr << "错误: 未找到HIP设备" << std::endl;
        return -1;
    }

    // Use the first available device and report its properties.
    HIP_CHECK(hipSetDevice(0));

    hipDeviceProp_t deviceProp;
    HIP_CHECK(hipGetDeviceProperties(&deviceProp, 0));
    std::cout << "使用设备: " << deviceProp.name << std::endl;
    std::cout << "计算能力: " << deviceProp.major << "." << deviceProp.minor << std::endl;
    std::cout << "多处理器数量: " << deviceProp.multiProcessorCount << std::endl;
    std::cout << "全局内存: " << deviceProp.totalGlobalMem / (1024 * 1024) << " MB" << std::endl;
    std::cout << "================================" << std::endl;

    // Network dimensions for the baseline benchmark.
    const int BATCH_SIZE = 1024;
    const int INPUT_DIM = 10;
    const int HIDDEN_DIM = 20;
    const int OUTPUT_DIM = 5;

    // Create a random input batch and upload it to the device.
    Matrix input(BATCH_SIZE, INPUT_DIM);
    input.allocateHost();
    input.randomInit(-1.0, 1.0);
    input.copyToDevice();

    std::cout << "输入矩阵大小: " << BATCH_SIZE << " x " << INPUT_DIM << std::endl;
    std::cout << "输入矩阵示例:" << std::endl;
    input.print(3, 5);

    // Build the MLP: Linear -> ReLU -> Linear.
    MLP mlp;
    mlp.addLayer(std::make_shared<LinearLayer>(INPUT_DIM, HIDDEN_DIM));
    mlp.addLayer(std::make_shared<ReLULayer>());
    mlp.addLayer(std::make_shared<LinearLayer>(HIDDEN_DIM, OUTPUT_DIM));

    mlp.printArchitecture();

    // Baseline forward-pass benchmark.
    std::cout << "\n开始性能测试..." << std::endl;

    const int NUM_RUNS = 100;  // more runs -> more stable timing statistics
    PerformanceTester tester(NUM_RUNS);
    tester.testMLPForward(mlp, input);
    tester.printResults();

    // One final forward pass so we can inspect the actual output values.
    Matrix output = mlp.forward(input);

    // Ensure all queued GPU work has completed before touching the result.
    HIP_CHECK(hipDeviceSynchronize());

    if (!output.getDeviceData()) {
        std::cerr << "错误：输出矩阵设备内存未分配" << std::endl;
        return -1;
    }

    output.copyToHost();

    std::cout << "\n输出矩阵大小: " << output.getRows() << " x " << output.getCols() << std::endl;
    std::cout << "输出矩阵示例:" << std::endl;
    output.print(5, 5);

    // Save per-run timings as CSV. A failed open is reported but not fatal:
    // the benchmark results were already printed to the console.
    std::ofstream perf_file("performance_data.csv");
    if (!perf_file) {
        std::cerr << "警告: 无法打开 performance_data.csv" << std::endl;
    } else {
        perf_file << "Run,Time(ms)" << std::endl;

        // Bind by const reference: no need to copy the timing vector.
        const auto& run_times = tester.getRunTimes();
        for (size_t i = 0; i < run_times.size(); ++i) {
            perf_file << i + 1 << "," << run_times[i] << '\n';
        }

        perf_file.close();
        std::cout << "性能数据已保存到 performance_data.csv" << std::endl;
    }

    // Sweep batch size with the network fixed, 10 timed runs per size.
    std::cout << "\n测试不同批次大小的性能..." << std::endl;
    std::ofstream batch_perf_file("batch_performance.csv");
    if (!batch_perf_file) {
        std::cerr << "警告: 无法打开 batch_performance.csv" << std::endl;
    }
    batch_perf_file << "BatchSize,Time(ms)" << std::endl;

    constexpr int batch_sizes[] = {32, 64, 128, 256, 512, 1024, 2048, 4096};
    for (int batch_size : batch_sizes) {
        std::cout << "测试批次大小: " << batch_size << std::endl;

        // Fresh random input of the requested batch size.
        Matrix batch_input(batch_size, INPUT_DIM);
        batch_input.allocateHost();
        batch_input.randomInit(-1.0, 1.0);
        batch_input.copyToDevice();

        // Make sure the upload is finished before timing starts.
        HIP_CHECK(hipDeviceSynchronize());

        PerformanceTester batch_tester(10);  // 10 timed runs per batch size
        batch_tester.testMLPForward(mlp, batch_input);

        double avg_time = batch_tester.getAverageTime();
        std::cout << "  平均时间: " << avg_time << " ms" << std::endl;

        batch_perf_file << batch_size << "," << avg_time << '\n';
    }

    batch_perf_file.close();
    std::cout << "批次性能数据已保存到 batch_performance.csv" << std::endl;

    // Sweep hidden-layer width with the input fixed, 10 timed runs per size.
    std::cout << "\n测试不同隐藏层大小的性能..." << std::endl;
    std::ofstream hidden_perf_file("hidden_performance.csv");
    if (!hidden_perf_file) {
        std::cerr << "警告: 无法打开 hidden_performance.csv" << std::endl;
    }
    hidden_perf_file << "HiddenSize,Time(ms)" << std::endl;

    constexpr int hidden_sizes[] = {10, 20, 50, 100, 200, 500};
    for (int hidden_size : hidden_sizes) {
        std::cout << "测试隐藏层大小: " << hidden_size << std::endl;

        // Build a fresh MLP with the requested hidden-layer width.
        MLP hidden_mlp;
        hidden_mlp.addLayer(std::make_shared<LinearLayer>(INPUT_DIM, hidden_size));
        hidden_mlp.addLayer(std::make_shared<ReLULayer>());
        hidden_mlp.addLayer(std::make_shared<LinearLayer>(hidden_size, OUTPUT_DIM));

        // Make sure construction-related device work is finished before timing.
        HIP_CHECK(hipDeviceSynchronize());

        PerformanceTester hidden_tester(10);  // 10 timed runs per hidden size
        hidden_tester.testMLPForward(hidden_mlp, input);

        double avg_time = hidden_tester.getAverageTime();
        std::cout << "  平均时间: " << avg_time << " ms" << std::endl;

        hidden_perf_file << hidden_size << "," << avg_time << '\n';
    }

    hidden_perf_file.close();
    std::cout << "隐藏层性能数据已保存到 hidden_performance.csv" << std::endl;

    std::cout << "\n性能测试完成!" << std::endl;

    return 0;
}
