#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <memory>
#include <thread>
#include <atomic>
#include <chrono>
#include <future>
#include <optional>
#include "matrix.h"
#include "cpu_kernel.h"
#include "dcu_kernel.h"
#include "utils.h"
#include <stdexcept>
#include <cstring>

// Heterogeneous matrix-multiply front end: selects the compute device(s)
// for C = A * B based on matrix size and the caller's settings.
//
// Parameters:
//   A, B        - inputs (A: MxK, B: KxN)
//   C           - output (MxN), overwritten with the product
//   useCPU      - caller requests CPU execution
//   useDCU      - caller requests DCU execution
//   forcedRatio - optional forced CPU row fraction for hybrid mode; when
//                 std::nullopt a size-based heuristic picks the split
//
// If neither device flag is set, both devices are enabled. Any DCU failure
// falls back to the optimized CPU implementation, so a result is always
// produced.
void heterogeneousMatrixMultiply(const Matrix& A, const Matrix& B, Matrix& C, 
                                bool useCPU, bool useDCU,
                                std::optional<double> forcedRatio = std::nullopt) {
    int M = A.getRows();
    int K = A.getCols();
    int N = B.getCols();
    size_t matrixSize = static_cast<size_t>(M) * K * N; // size_t to avoid int overflow

    // Decide the compute strategy.
    bool shouldUseCPU = useCPU;
    bool shouldUseDCU = useDCU;

    // If no device was specified, enable both by default.
    if (!useCPU && !useDCU) {
        shouldUseCPU = true;
        shouldUseDCU = true;
    }

    // Strategy 1: CPU only.
    if (shouldUseCPU && !shouldUseDCU) {
        std::cout << "策略：仅使用优化的CPU计算 (OpenMP Blocked)" << std::endl;
        matrixMultiplyOptimized(A, B, C);
    }
    // Strategy 2: DCU only (falls back to the CPU path on failure).
    else if (!shouldUseCPU && shouldUseDCU) {
         std::cout << "策略：仅使用DCU计算" << std::endl;
        try {
            dcuMatrixMultiply(A, B, C);
        } catch (const std::exception& e) {
            std::cerr << "DCU 计算失败: " << e.what() << std::endl;
            std::cerr << "回退到优化的CPU实现 (OpenMP Blocked)" << std::endl;
            matrixMultiplyOptimized(A, B, C);
        }
    }
    // Strategy 3: hybrid (CPU + DCU) when the problem is large enough.
    else if (shouldUseCPU && shouldUseDCU && matrixSize >= 1000000) { // hybrid-mode threshold
        std::cout << "策略：混合模式 (CPU + DCU)" << std::endl;
        try {
            // --- Dynamic load balancing: choose the CPU share of the M rows ---
            double cpuRatio = 0.5; // default initial value

            if (forcedRatio.has_value()) { // forced ratio overrides the heuristic
                cpuRatio = forcedRatio.value();
                std::cout << "Info: 使用强制CPU比例: " << cpuRatio << std::endl;
            } else {
                std::cout << "Info: 动态计算CPU比例..." << std::endl;
                // Rough working-set size: elements of A + B + C.
                const long long total_elements_involved = (long long)M * K + (long long)K * N + (long long)M * N;
                const long long small_threshold = 512LL * 512LL * 3LL;   // "small" problems (~512x512)
                const long long large_threshold = 2048LL * 2048LL * 3LL; // "large" problems (~2048x2048 and up)

                if (M <= 64) {
                    // Very few rows: keep almost everything on the CPU; the DCU
                    // launch overhead dominates for tiny row counts.
                    cpuRatio = 0.95;
                } else if (total_elements_involved <= small_threshold) {
                    cpuRatio = 0.30;
                } else if (total_elements_involved >= large_threshold) {
                    cpuRatio = 0.01; // large problems: DCU does nearly all the work
                } else {
                    cpuRatio = 0.10; // mid-range default
                }
            }

            // Clamp after forcing/heuristic so the split is always valid.
            cpuRatio = std::max(0.0, std::min(1.0, cpuRatio));
            std::cout << "Info: 最终使用的CPU比例 (调整前): " << cpuRatio << std::endl;

            int cpuRows = static_cast<int>(M * cpuRatio);
            // Give each device a reasonable minimum chunk when M allows it.
            const int min_chunk_size = 32;
            if (M > min_chunk_size * 2) { // only if M can hold two minimal chunks
                if (cpuRows == 0) cpuRows = min_chunk_size;
                if (cpuRows > M - min_chunk_size) cpuRows = M - min_chunk_size;
            }
            // Edge cases for very small M: keep at least one row on the DCU.
            if (M > 0 && cpuRows == M) cpuRows = M - 1;
            if (cpuRows < 0) cpuRows = 0; // safety net

            int dcuRows = M - cpuRows;
            if (dcuRows <= 0 && M > 0) { // DCU ended up with no rows: rebalance
                cpuRows = M - 1;
                dcuRows = 1;
                if (cpuRows < 0) { cpuRows = 0; dcuRows = M; } // handle M == 1
            }
            if (cpuRows == 0 && dcuRows == 0 && M > 0) { // safety for a 0/0 split
                dcuRows = M;
            }

            std::cout << "Hybrid Split: CPU Rows = " << cpuRows << ", DCU Rows = " << dcuRows 
                      << " (Ratio Used: " << (double)cpuRows / M << ")" << std::endl;

            // --- Memory allocation and data splitting ---
            // CPU part: A_cpu is cpuRows x K, C_cpu is cpuRows x N.
            Matrix A_cpu(cpuRows, K);
            Matrix C_cpu(cpuRows, N);

            // Copy the top cpuRows rows of A (row-major). Byte counts are
            // widened to size_t BEFORE multiplying so large matrices cannot
            // overflow the intermediate int product.
            const float* a_data = A.getData();
            float* a_cpu_data = A_cpu.getData();
            memcpy(a_cpu_data, a_data, static_cast<size_t>(cpuRows) * K * sizeof(float));

            // Launch the CPU part asynchronously; the DCU part runs on this
            // thread. Note: if dcuMatrixMultiply throws below, the std::async
            // future's destructor blocks until this task finishes during stack
            // unwinding, so the CPU fallback in the catch block never races
            // with this lambda.
            auto cpuFuture = std::async(std::launch::async, [&]() {
                std::cout << "CPU异步任务开始计算 (Optimized)..." << std::endl;
                matrixMultiplyOptimized(A_cpu, B, C_cpu);
                std::cout << "CPU异步任务计算完成。" << std::endl;
            });

            // DCU part: A_dcu is dcuRows x K, C_dcu is dcuRows x N.
            Matrix A_dcu(dcuRows, K);
            Matrix C_dcu(dcuRows, N);
            // Copy the bottom dcuRows rows of A.
            float* a_dcu_data = A_dcu.getData();
            memcpy(a_dcu_data, a_data + static_cast<size_t>(cpuRows) * K,
                   static_cast<size_t>(dcuRows) * K * sizeof(float));

            // dcuMatrixMultiply selects its best kernel internally for the
            // (dcuRows x K) * (K x N) shape.
            std::cout << "DCU开始计算 (尺寸: " << dcuRows << "x" << K << " * " << K << "x" << N << ")..." << std::endl;
            dcuMatrixMultiply(A_dcu, B, C_dcu);
            std::cout << "DCU计算完成。" << std::endl;

            // --- Synchronization and merging ---
            // Wait for the CPU task; get() rethrows any exception it raised.
            cpuFuture.get();
            std::cout << "CPU异步任务已完成。" << std::endl;

            // Merge both partial results into C (row-major, CPU rows first).
            float* c_data = C.getData();
            const float* c_cpu_data = C_cpu.getData();
            const float* c_dcu_data = C_dcu.getData();
            memcpy(c_data, c_cpu_data, static_cast<size_t>(cpuRows) * N * sizeof(float));
            memcpy(c_data + static_cast<size_t>(cpuRows) * N, c_dcu_data,
                   static_cast<size_t>(dcuRows) * N * sizeof(float));
            std::cout << "结果合并完成。" << std::endl;

        } catch (const std::exception& e) {
            std::cerr << "混合模式执行失败: " << e.what() << std::endl;
            std::cerr << "回退到优化的CPU实现 (OpenMP Blocked)" << std::endl;
            matrixMultiplyOptimized(A, B, C);
        }
    }
    // Strategy 4: default fallback (e.g. a small matrix in hybrid mode).
    else {
        std::cout << "策略：默认回退到优化的CPU计算 (OpenMP Blocked, 矩阵大小: " << matrixSize << ")" << std::endl;
        matrixMultiplyOptimized(A, B, C);
    }
}

// Validate a matrix-multiply result against a serial reference computation.
//
// Parameters:
//   A, B    - the inputs that produced C
//   C       - the result to check
//   epsilon - element-wise tolerance for the approximate comparison
//             (float literal 1e-5f avoids the silent double->float narrowing
//             of the previous default)
// Returns true when C matches the serial reference within epsilon.
bool validateResult(const Matrix& A, const Matrix& B, const Matrix& C, float epsilon = 1e-5f) {
    // Serial multiplication is the trusted reference implementation.
    Matrix refC(C.getRows(), C.getCols());
    matrixMultiplySerial(A, B, refC);

    // Element-wise approximate comparison.
    return C.isApproxEqual(refC, epsilon);
}

// Run a single matrix-multiply test: allocate the matrices, randomize the
// inputs, execute the heterogeneous multiply, time it, report GFLOPS, and
// optionally validate the result against the serial implementation.
//
// Parameters:
//   M, K, N     - matrix dimensions (A=MxK, B=KxN, C=MxN)
//   useCPU      - enable CPU execution
//   useDCU      - enable DCU execution
//   validate    - when true, compare C against the serial reference
//   forcedRatio - optional forced CPU row fraction forwarded to the
//                 heterogeneous multiply (hybrid mode only)
void runTest(int M, int K, int N, bool useCPU, bool useDCU, bool validate,
             std::optional<double> forcedRatio = std::nullopt) {
    // Create the operand and result matrices.
    Matrix A(M, K);
    Matrix B(K, N);
    Matrix C(M, N);
    
    // Randomly initialize the input matrices in [-1, 1].
    A.randomInit(-1.0f, 1.0f);
    B.randomInit(-1.0f, 1.0f);
    
    // Print the test configuration.
    std::cout << "矩阵大小: A(" << M << "x" << K << "), B(" << K << "x" << N 
              << "), C(" << M << "x" << N << ")" << std::endl;
    std::cout << "使用设备: " << (useCPU ? "CPU" : "") 
              << (useCPU && useDCU ? "+" : "") 
              << (useDCU ? "DCU" : "") << std::endl;
    
    // Timer brackets only the multiply call itself.
    Timer timer("矩阵乘法");
    
    // Execute the multiplication with the selected device strategy.
    timer.start();
    heterogeneousMatrixMultiply(A, B, C, useCPU, useDCU, forcedRatio);
    timer.stop();
    
    // Report elapsed time.
    // NOTE(review): the "RESULT_TIME_MS:" line looks machine-parsed by an
    // external script — confirm before changing its format.
    timer.printElapsed();
    std::cout << "RESULT_TIME_MS: " << timer.elapsedMilliseconds() << std::endl;
    
    // Compute throughput in GFLOPS from the measured wall-clock seconds.
    double seconds = timer.elapsedSeconds();
    double gflops = ReportUtils::calculateGflops(M, N, K, seconds);
    std::cout << "性能: " << gflops << " GFLOPS" << std::endl;
    
    // Optionally verify correctness against the serial implementation.
    if (validate) {
        bool isCorrect = validateResult(A, B, C);
        std::cout << "验证结果: " << (isCorrect ? "正确" : "错误") << std::endl;
        
        if (!isCorrect) {
            // For small matrices, dump operands and both results for debugging.
            if (M <= 10 && N <= 10) {
                Matrix refC(M, N);
                matrixMultiplySerial(A, B, refC);
                
                std::cout << "输入矩阵:" << std::endl;
                A.print("A");
                B.print("B");
                
                std::cout << "计算结果:" << std::endl;
                C.print("C");
                
                std::cout << "参考结果:" << std::endl;
                refC.print("refC");
            }
        }
    }
    
    // Visual separator between consecutive test runs.
    std::cout << std::string(50, '-') << std::endl;
}

// DCU matrix-multiply performance sweep over a fixed set of square sizes.
// For each size it runs dcuMatrixMultiply once and prints the wall-clock
// time in milliseconds and the achieved GFLOPS.
void performanceTest() {
    std::cout << "\n====== DCU矩阵乘法性能测试 ======\n" << std::endl;

    // Square matrix sizes to benchmark.
    const std::vector<int> sizes = {512, 1024, 2048, 4096};

    for (int size : sizes) {
        std::cout << "测试矩阵大小: " << size << "x" << size << std::endl;

        // Create and randomize the operand matrices.
        Matrix A(size, size);
        Matrix B(size, size);
        Matrix C(size, size);

        A.randomize();
        B.randomize();

        // Measure the execution time of the multiply alone.
        auto startTime = std::chrono::high_resolution_clock::now();

        // The DCU implementation selects its best kernel internally.
        dcuMatrixMultiply(A, B, C);

        auto endTime = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(endTime - startTime);

        std::cout << "执行时间: " << duration.count() << " 毫秒" << std::endl;

        // FLOP count for matrix multiplication: 2 * M * N * K.
        double flops = 2.0 * size * size * size;
        // Compute GFLOPS from a double-precision duration and guard against a
        // zero reading, so a very fast run cannot divide by zero (the previous
        // code divided by a truncated millisecond count that could be 0).
        double seconds = std::chrono::duration<double>(endTime - startTime).count();
        double gflops = seconds > 0.0 ? (flops / seconds) / 1e9 : 0.0;

        std::cout << "性能: " << gflops << " GFLOPS" << std::endl;
        std::cout << "----------------------------------------" << std::endl;
    }
}

// Print usage information to stderr.
// Note: this text must match main()'s actual behavior — the matrix
// dimensions M, K, N are read from STANDARD INPUT, not from argv, and the
// --force-cpu-ratio option is supported (the previous text described a
// stale positional M K N [numThreads] interface and omitted the option).
void printUsage(const char* progName) {
    std::cerr << "Usage: " << progName << " [options]  (dimensions M K N are read from standard input)" << std::endl;
    std::cerr << "Options:" << std::endl;
    std::cerr << "  -c                  : Use CPU only for computation." << std::endl;
    std::cerr << "  -d                  : Use DCU only for computation." << std::endl;
    std::cerr << "  -h                  : Use hybrid mode (CPU + DCU). Overrides -c and -d if present." << std::endl;
    std::cerr << "  -v                  : Validate the result against serial computation." << std::endl;
    std::cerr << "  -p                  : Run performance test suite instead of single test." << std::endl;
    std::cerr << "  --force-cpu-ratio R : Force the CPU row fraction in hybrid mode (R in [0.0, 1.0])." << std::endl;
    std::cerr << "  --help              : Display this help message." << std::endl;
    std::cerr << "Input:" << std::endl;
    std::cerr << "  M K N               : Matrix dimensions (A=MxK, B=KxN, C=MxN), read from stdin." << std::endl;
}
    
// Program entry point: parses command-line flags, reads the matrix
// dimensions M K N from standard input, resolves the device selection,
// initializes/releases the DCU as needed, and runs a single timed test
// (or the performance suite with -p).
int main(int argc, char** argv) {
    // --- Manual Argument Parsing --- 
    bool useCPU = false;
    bool useDCU = false;
    bool validate = false;
    bool runPerfTest = false;
    bool showHelp = false;
    bool useHybridExplicit = false; // Flag specifically for -h (hybrid mode, NOT help)
    double forcedCpuRatio = -1.0; // Forced CPU ratio for hybrid mode; -1.0 means "not set"
    std::vector<std::string> dimArgs;

    for (int i = 1; i < argc; ++i) {
        if (strcmp(argv[i], "--help") == 0) {
            showHelp = true;
            break; // No need to parse further
        } else if (strcmp(argv[i], "-p") == 0) {
            runPerfTest = true;
        } else if (strcmp(argv[i], "-h") == 0) {
            useHybridExplicit = true;
            // Don't set useCPU/useDCU yet; flag priority is resolved after parsing
        } else if (strcmp(argv[i], "-c") == 0) {
            useCPU = true;
        } else if (strcmp(argv[i], "-d") == 0) {
            useDCU = true;
        } else if (strcmp(argv[i], "-v") == 0) {
            validate = true;
        } else if (strcmp(argv[i], "--force-cpu-ratio") == 0) { // Consumes one value argument
            if (i + 1 < argc) {
                try {
                    forcedCpuRatio = std::stod(argv[i + 1]);
                    if (forcedCpuRatio < 0.0 || forcedCpuRatio > 1.0) {
                         std::cerr << "警告：--force-cpu-ratio 的值必须在 [0.0, 1.0] 范围内。忽略该参数。" << std::endl;
                         forcedCpuRatio = -1.0; // Reset if outside [0, 1]
                    }
                    i++; // Skip the value argument
                } catch (const std::invalid_argument& e) {
                    std::cerr << "警告：无效的 --force-cpu-ratio 值：" << argv[i+1] << "。忽略该参数。" << std::endl;
                    forcedCpuRatio = -1.0;
                    i++; // Skip the invalid value argument
                } catch (const std::out_of_range& e) {
                     std::cerr << "警告：--force-cpu-ratio 值超出范围：" << argv[i+1] << "。忽略该参数。" << std::endl;
                     forcedCpuRatio = -1.0;
                     i++; // Skip the out-of-range value argument
                }
            } else {
                std::cerr << "警告：--force-cpu-ratio 参数需要一个值。忽略该参数。" << std::endl;
            }
        } else {
            // Anything unrecognized is collected as a dimension argument.
            // NOTE(review): dimArgs is never read afterwards — dimensions come
            // from stdin below; confirm whether this collection is still needed.
            dimArgs.push_back(argv[i]);
        }
    }

    // Handle help option: print usage and exit successfully.
    if (showHelp) {
        printUsage(argv[0]);
        return 0;
    }

    // Handle performance test option: runs the suite and exits, bypassing
    // the single-test path below.
    if (runPerfTest) {
        // Ensure DCU init/release brackets the perf test, which uses the DCU.
        if (!DCUMatrixMultiply::initDCU()) {
             std::cerr << "Warning: DCU init failed for perf test, results might be invalid." << std::endl;
             // Decide if perf test should proceed without DCU or exit
        }
        performanceTest();
        DCUMatrixMultiply::releaseDCU(); // Release after test
        return 0;
    }
    
    // --- Read dimensions M, K, N from Standard Input --- 
    int M = 0, K = 0, N = 0;
    std::cout << "Info: Attempting to read dimensions M, K, N from standard input..." << std::endl;
    if (!(std::cin >> M >> K >> N)) { // Read from stdin
        std::cerr << "Error: Failed to read dimensions M, K, N from standard input." << std::endl;
        // Print usage because input method is different from expected manual input
        // but error might stem from incorrect piping or file content in script.
        printUsage(argv[0]); 
        return 1;
    }
    std::cout << "Info: Read dimensions M=" << M << ", K=" << K << ", N=" << N << " from stdin." << std::endl;

    // Basic validation for dimensions read from stdin.
    if (M <= 0 || K <= 0 || N <= 0) {
        std::cerr << "Error: Matrix dimensions M, K, N read from stdin must be positive." << std::endl;
        return 1;
    }

    // Note: numThreads is no longer processed or needed by runTest

    // --- Determine final device usage based on flags --- 
    // Priority: -h forces hybrid; otherwise -c/-d combine; no flags -> CPU only.
    bool finalUseCPU = false;
    bool finalUseDCU = false;

    if (useHybridExplicit) { // -h takes precedence
        std::cout << "Info: -h flag detected, forcing hybrid mode (CPU + DCU)." << std::endl;
        finalUseCPU = true;
        finalUseDCU = true;
        } else {
        if (useCPU && useDCU) { // -c and -d together means hybrid
             std::cout << "Info: Both -c and -d flags detected, enabling hybrid mode." << std::endl;
             finalUseCPU = true;
             finalUseDCU = true;
        } else if (useCPU) { // Only -c
             std::cout << "Info: -c flag detected, enabling CPU only mode." << std::endl;
             finalUseCPU = true;
             finalUseDCU = false;
        } else if (useDCU) { // Only -d
             std::cout << "Info: -d flag detected, enabling DCU only mode." << std::endl;
             finalUseCPU = false;
             finalUseDCU = true;
        } else { // No flags (-c, -d, -h) provided
             std::cout << "Info: No device flag specified (-c, -d, -h). Defaulting to CPU only." << std::endl;
             finalUseCPU = true; // Default to CPU
             finalUseDCU = false;
        }
    }

    // Wrap the main logic in try-catch so runtime errors are reported and the
    // DCU can be released on the error path.
    try {
        // Initialize the DCU only when the final selection requires it.
        bool dcuActuallyNeeded = finalUseDCU;
        bool dcuInitializedSuccessfully = false;
        if (dcuActuallyNeeded) {
            if (DCUMatrixMultiply::initDCU()) {
                 dcuInitializedSuccessfully = true;
                 std::cout << "DCU信息:" << std::endl;
                 DCUMatrixMultiply::printDeviceInfo();
                 std::cout << std::string(50, '-') << std::endl;
            } else {
                 std::cerr << "警告：DCU初始化失败，将只使用CPU计算" << std::endl;
                 finalUseDCU = false; // Disable DCU usage if init failed
                 // Ensure CPU is enabled if DCU fails and it wasn't originally
                 if (!finalUseCPU) finalUseCPU = true;
            }
        }

        // Print report header (depends on the final device choice above).
        ReportUtils::printReportHeader();

        // Execute a single test with the final device settings.
        // Wrap the forced ratio in an optional; a negative value means unset.
        std::optional<double> ratioOpt;
        if (forcedCpuRatio >= 0.0) {
            ratioOpt = forcedCpuRatio;
        }
        runTest(M, K, N, finalUseCPU, finalUseDCU, validate, ratioOpt);

        // Release the DCU if it was initialized on the success path.
        if (dcuInitializedSuccessfully) {
            DCUMatrixMultiply::releaseDCU();
        }

    } catch (const std::exception& e) {
        std::cerr << "运行时错误: " << e.what() << std::endl;
        // Best-effort cleanup: finalUseDCU is true only if init succeeded
        // (it is cleared on init failure above), so releasing here is safe.
        if (finalUseDCU) { // Check if DCU *should* have been initialized
             DCUMatrixMultiply::releaseDCU();
        }
        return 1;
    }
    
    return 0;
} 