// PCG (Preconditioned Conjugate Gradient) Solver Implementation
// Fixed-iteration PCG solver for real-time hybrid testing.
// Performance target: 50 iterations in ~750 ms (about 33% faster than Cholesky at 1122 ms).

#include "GPUSolver.h"
#include <cusparse.h>
#include <cuda_runtime.h>
#include <chrono>
#include <cmath>
#include <cstdio>

// C-style GPU vector operation declarations (implemented in FastVectorOps.cu).
// All d_* pointers are device pointers; `result` is a host pointer.
// The int return values appear to be status codes (presumably 0 on success —
// confirm against FastVectorOps.cu; call sites below do not check them).
extern "C" {
    // Dot product: *result = x . y  (used below as rho = r^T z, pAp = p^T Ap)
    int gpuVectorDot(int n, const double* d_x, const double* d_y, double* result);
    // BLAS-style axpy: y = alpha * x + y  (used below for x/r/p updates)
    int gpuVectorAxpy(int n, double alpha, const double* d_x, double* d_y);
    // Copy: y = x  (used below for p0 = z0)
    int gpuVectorCopy(int n, const double* d_x, double* d_y);
    // In-place scale: x = alpha * x  (used below in the p = z + beta*p update)
    int gpuVectorScal(int n, double alpha, double* d_x);
    // Euclidean norm: *result = ||x||_2 — not used in this file; verify semantics in FastVectorOps.cu
    int gpuVectorNorm2(int n, const double* d_x, double* result);
    // Jacobi preconditioner application: z = M_inv .* r (element-wise)
    void jacobiPrecondition(int n, const double* M_inv, const double* r, double* z);
}

// CUDA error checking macros.
// NOTE: both macros expand to `return -1;` on failure, so they may only be
// used inside functions returning int (and they skip any local cleanup —
// callers that hold resources must check errors manually instead).
#define CHECK_CUDA_PCG(func) \
    do { \
        cudaError_t e = (func); \
        if (e != cudaSuccess) { \
            fprintf(stderr, "[PCG CUDA ERROR] %s:%d %s failed: %s\n", \
                    __FILE__, __LINE__, #func, cudaGetErrorString(e)); \
            return -1; \
        } \
    } while(0)

// Same pattern for cuSPARSE status codes.
#define CHECK_CUSPARSE_PCG(func) \
    do { \
        cusparseStatus_t status = (func); \
        if (status != CUSPARSE_STATUS_SUCCESS) { \
            fprintf(stderr, "[PCG CUSPARSE ERROR] %s:%d %s failed: code %d\n", \
                    __FILE__, __LINE__, #func, (int)status); \
            return -1; \
        } \
    } while(0)

// Static member definitions.
GPUSolver::SolverType GPUSolver::currentSolverType = GPUSolver::SolverType::PCG;  // default to PCG for testing
int GPUSolver::pcgMaxIterations = 50;  // fixed iteration count per solve

// ====== PCG Solver Public Interface Implementation ======

// Select the solver backend used by subsequent solves.
void GPUSolver::setSolverType(SolverType type) {
    currentSolverType = type;
    const char* label = (type == SolverType::CHOLESKY) ? "CHOLESKY" : "PCG";
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "Solver type set to " << label);
}

// Report which solver backend is currently selected.
GPUSolver::SolverType GPUSolver::getSolverType() {
    return currentSolverType;
}

// Set the fixed PCG iteration count used by solveWithPCG().
// Non-positive values are rejected and the previous setting is kept:
// with iters <= 0 the fixed-iteration loop would never update x, so the
// "solution" would silently stay at the initial guess of zero.
void GPUSolver::setPCGIterations(int iters) {
    if (iters <= 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
            "Ignoring invalid PCG iteration count " << iters <<
            " (keeping " << pcgMaxIterations << ")");
        return;
    }
    pcgMaxIterations = iters;
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "PCG max iterations set to " << iters);
}

// Report the configured fixed PCG iteration count.
int GPUSolver::getPCGIterations() {
    return pcgMaxIterations;
}

// ====== PCG Solver Helper Methods Implementation ======

// 初始化PCG工作空间
// Allocate the PCG device workspace: residual r, search direction p,
// A*p product, preconditioned residual z, the Jacobi inverse diagonal,
// and a small scalar scratch buffer.  Idempotent: returns 0 immediately
// when already initialized.  Returns 0 on success, -1 on failure.
//
// Fix: the old code used CHECK_CUDA_PCG per allocation, so a failure on
// any malloc after the first returned -1 with pcgInitialized still false
// — and freePCG() bails out on !pcgInitialized — leaking every buffer
// allocated so far.  Now a failed allocation rolls back its predecessors.
int GPUSolver::initializePCG() {
    if (pcgInitialized) {
        return 0;  // already set up
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "Initializing PCG workspace - size=" << size);

    // Allocate in sequence, short-circuiting on the first failure.
    cudaError_t e = cudaMalloc(&d_r, size * sizeof(double));
    if (e == cudaSuccess) e = cudaMalloc(&d_p, size * sizeof(double));
    if (e == cudaSuccess) e = cudaMalloc(&d_Ap, size * sizeof(double));
    if (e == cudaSuccess) e = cudaMalloc(&d_z, size * sizeof(double));
    if (e == cudaSuccess) e = cudaMalloc(&d_M_inv, size * sizeof(double));
    if (e == cudaSuccess) e = cudaMalloc(&d_scalars, 10 * sizeof(double));  // alpha, beta, rho, ...

    if (e != cudaSuccess) {
        fprintf(stderr, "[PCG CUDA ERROR] %s:%d cudaMalloc failed: %s\n",
                __FILE__, __LINE__, cudaGetErrorString(e));
        // Roll back partial allocations so no device memory leaks.
        if (d_r)       cudaFree(d_r);
        if (d_p)       cudaFree(d_p);
        if (d_Ap)      cudaFree(d_Ap);
        if (d_z)       cudaFree(d_z);
        if (d_M_inv)   cudaFree(d_M_inv);
        if (d_scalars) cudaFree(d_scalars);
        d_r = d_p = d_Ap = d_z = d_M_inv = d_scalars = nullptr;
        return -1;
    }

    pcgInitialized = true;

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "PCG workspace initialized successfully");

    return 0;
}

// 释放PCG工作空间
// Release the PCG device workspace.
// Fix: frees every non-null buffer unconditionally instead of early-
// returning on !pcgInitialized, so a partially-completed initialization
// can also be cleaned up without leaking device memory.  Safe to call
// repeatedly — pointers are nulled after freeing.
void GPUSolver::freePCG() {
    if (d_r)       cudaFree(d_r);
    if (d_p)       cudaFree(d_p);
    if (d_Ap)      cudaFree(d_Ap);
    if (d_z)       cudaFree(d_z);
    if (d_M_inv)   cudaFree(d_M_inv);
    if (d_scalars) cudaFree(d_scalars);

    d_r = d_p = d_Ap = d_z = d_M_inv = d_scalars = nullptr;
    pcgInitialized = false;

    GPU_DEBUG_PRINT(DebugModule::MEMORY_MANAGEMENT, DebugLevel::INFO,
        "PCG workspace freed");
}

// 构建Jacobi预条件矩阵（对角逆）
// Build the Jacobi preconditioner M^{-1} = 1/diag(A) from the host-side
// CSR arrays (rowPtr/colInd/val) and upload it to d_M_inv.
// A missing, zero, or tiny (|d| < 1e-14) diagonal entry falls back to an
// inverse of 1.0 to avoid division blow-up — same behavior as before.
// Returns 0 on success, -1 on CUDA failure.
//
// Improvement: computes the inverse in a single pass over the rows with
// one n-sized host vector, instead of extracting diag into one vector
// and then inverting into a second (two passes, two allocations).
int GPUSolver::buildJacobiPreconditioner() {
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Building Jacobi preconditioner");

    // Default of 1.0 covers both a missing diagonal and a near-zero one
    // (1/1.0 == 1.0, matching the old fallback).
    std::vector<double> diag_inv(size, 1.0);
    for (int i = 0; i < size; i++) {
        for (int j = rowPtr[i]; j < rowPtr[i + 1]; j++) {
            if (colInd[j] == i) {  // diagonal entry of row i
                if (fabs(val[j]) >= 1e-14) {
                    diag_inv[i] = 1.0 / val[j];
                }
                break;
            }
        }
    }

    // Upload to the device.
    CHECK_CUDA_PCG(cudaMemcpy(d_M_inv, diag_inv.data(), size * sizeof(double),
                              cudaMemcpyHostToDevice));

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Jacobi preconditioner built successfully");

    return 0;
}

// PCG求解主函数
// 固定迭代次数的预条件共轭梯度法
int GPUSolver::solveWithPCG() {
    using namespace std::chrono;
    auto t_start = high_resolution_clock::now();

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "PCG solve started - size=" << size << ", max_iters=" << pcgMaxIterations);

    // 初始化PCG工作空间（首次调用）
    if (!pcgInitialized) {
        int ret = initializePCG();
        if (ret != 0) return ret;
    }

    // 构建Jacobi预条件矩阵
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Building Jacobi preconditioner");
    int ret = buildJacobiPreconditioner();
    if (ret != 0) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
            "Failed to build Jacobi preconditioner");
        return ret;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Transferring data to GPU");

    // 传输数据到GPU
    CHECK_CUDA_PCG(cudaMemcpy(d_val, val.data(), nnz * sizeof(double), cudaMemcpyHostToDevice));
    CHECK_CUDA_PCG(cudaMemcpy(d_B, &B(0), size * sizeof(double), cudaMemcpyHostToDevice));

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Initializing solution vector");

    // 初始猜测 x0 = 0
    CHECK_CUDA_PCG(cudaMemset(d_X, 0, size * sizeof(double)));

    // 计算初始残差 r0 = b - A*x0 = b（因为x0=0）
    CHECK_CUDA_PCG(cudaMemcpy(d_r, d_B, size * sizeof(double), cudaMemcpyDeviceToDevice));

    // 验证B和r的值
    double B_check[3] = {0, 0, 0};
    cudaMemcpy(B_check, d_B, 3 * sizeof(double), cudaMemcpyDeviceToHost);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "B[0]=" << B_check[0] << ", B[1]=" << B_check[1] << ", B[2]=" << B_check[2]);

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Creating cusparse descriptors");

    // 创建cusparse SpMV描述符（如果还没有）
    cusparseSpMatDescr_t matA;
    cusparseDnVecDescr_t vecP, vecAp;

    CHECK_CUSPARSE_PCG(cusparseCreateCsr(&matA, size, size, nnz,
                                         d_rowPtr, d_colInd, d_val,
                                         CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                                         CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F));
    CHECK_CUSPARSE_PCG(cusparseCreateDnVec(&vecP, size, d_p, CUDA_R_64F));
    CHECK_CUSPARSE_PCG(cusparseCreateDnVec(&vecAp, size, d_Ap, CUDA_R_64F));

    // 初始化Ap向量为0
    CHECK_CUDA_PCG(cudaMemset(d_Ap, 0, size * sizeof(double)));
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Initialized d_Ap to zero");

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Allocating SpMV buffer");

    // SpMV缓冲区
    size_t bufferSizeSpMV = 0;
    void* dBufferSpMV = nullptr;
    double alpha_spmv = 1.0, beta_spmv = 0.0;

    cusparseStatus_t buf_status = cusparseSpMV_bufferSize(
        handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
        &alpha_spmv, matA, vecP, &beta_spmv, vecAp,
        CUDA_R_64F, CUSPARSE_SPMV_ALG_DEFAULT, &bufferSizeSpMV);

    if (buf_status != CUSPARSE_STATUS_SUCCESS) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
            "cusparseSpMV_bufferSize failed with code " << (int)buf_status);
        return -1;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "SpMV buffer size: " << bufferSizeSpMV << " bytes");

    if (bufferSizeSpMV > 0) {
        cudaError_t malloc_err = cudaMalloc(&dBufferSpMV, bufferSizeSpMV);
        if (malloc_err != cudaSuccess) {
            GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
                "cudaMalloc for SpMV buffer failed: " << cudaGetErrorString(malloc_err));
            return -1;
        }
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "SpMV buffer allocated successfully");

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "Starting PCG iterations (max=" << pcgMaxIterations << ")");

    // PCG迭代
    double rho = 0.0, rho_old = 1.0;
    int actual_iterations = 0;

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "Entering PCG loop");

    for (int iter = 0; iter < pcgMaxIterations; iter++) {
        actual_iterations = iter + 1;
        if (iter == 0) {
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                "PCG iteration 0 started");
        }
        if (iter % 10 == 0 && iter > 0) {
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
                "PCG iteration " << iter);
        }

        // z = M^{-1} * r（Jacobi预条件）
        jacobiPrecondition(size, d_M_inv, d_r, d_z);

        if (iter == 0) {
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                "Jacobi precondition applied");
        }

        // rho = r^T * z
        gpuVectorDot(size, d_r, d_z, &rho);

        if (iter == 0) {
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                "Initial rho = " << rho);
            if (rho < 1e-30) {
                GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::WARNING,
                    "Initial residual too small (rho=" << rho << "), system may be trivial");
            }
        }

        if (iter == 0) {
            // p = z
            gpuVectorCopy(size, d_z, d_p);
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
                "Initial p copied from z");

            // 验证p的前几个元素
            double p_check[3] = {0, 0, 0};
            cudaMemcpy(p_check, d_p, 3 * sizeof(double), cudaMemcpyDeviceToHost);
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
                "p[0]=" << p_check[0] << ", p[1]=" << p_check[1] << ", p[2]=" << p_check[2]);
        } else {
            // beta = rho / rho_old
            double beta = rho / rho_old;

            // p = z + beta * p
            gpuVectorScal(size, beta, d_p);
            gpuVectorAxpy(size, 1.0, d_z, d_p);
        }

        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
            "Iteration " << iter << ": About to call cusparseSpMV");

        // Ap = A * p
        cusparseStatus_t spmv_status = cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                        &alpha_spmv, matA, vecP, &beta_spmv, vecAp,
                                        CUDA_R_64F, CUSPARSE_SPMV_ALG_DEFAULT, dBufferSpMV);

        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
            "Iteration " << iter << ": cusparseSpMV returned with code " << (int)spmv_status);

        // 第一次迭代验证SpMV输出
        if (iter == 0) {
            double Ap_check = 0.0;
            cudaMemcpy(&Ap_check, d_Ap, sizeof(double), cudaMemcpyDeviceToHost);
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
                "First Ap[0] = " << Ap_check << " (should be non-zero if SpMV works)");
        }

        if (spmv_status != CUSPARSE_STATUS_SUCCESS) {
            GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
                "cusparseSpMV failed at iteration " << iter << " with code " << (int)spmv_status);
            return -1;
        }

        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
            "Iteration " << iter << ": SpMV completed, computing pAp");

        // alpha = rho / (p^T * Ap)
        double pAp = 0.0;
        gpuVectorDot(size, d_p, d_Ap, &pAp);

        if (iter < 3 || iter % 10 == 0) {
            GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
                "Iter " << iter << ": rho=" << rho << ", pAp=" << pAp);
        }

        if (fabs(pAp) < 1e-30) {
            GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
                "pAp too small (=" << pAp << ") at iteration " << iter << ", stopping");
            break;
        }

        double alpha = rho / pAp;

        // x = x + alpha * p
        gpuVectorAxpy(size, alpha, d_p, d_X);

        // r = r - alpha * Ap
        gpuVectorAxpy(size, -alpha, d_Ap, d_r);

        rho_old = rho;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "PCG loop completed - actual iterations: " << actual_iterations);

    // 确保所有GPU操作完成
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Calling cudaDeviceSynchronize()...");
    cudaDeviceSynchronize();
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "cudaDeviceSynchronize() completed");

    // 清理cusparse描述符
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Destroying cusparse descriptors...");
    cusparseDestroySpMat(matA);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "matA destroyed");
    cusparseDestroyDnVec(vecP);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "vecP destroyed");
    cusparseDestroyDnVec(vecAp);
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "vecAp destroyed");
    if (dBufferSpMV) {
        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
            "Freeing SpMV buffer...");
        cudaFree(dBufferSpMV);
        GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
            "SpMV buffer freed");
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Transferring result back to CPU - size=" << size << ", X.Size()=" << X.Size());

    // 检查X向量大小
    if (X.Size() != size) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
            "X vector size mismatch: X.Size()=" << X.Size() << ", expected=" << size);
        return -1;
    }

    // 传输结果回CPU
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Starting cudaMemcpy D2H: &X(0)=" << (void*)&X(0) << ", d_X=" << (void*)d_X << ", bytes=" << (size * sizeof(double)));

    cudaError_t memcpy_err = cudaMemcpy(&X(0), d_X, size * sizeof(double), cudaMemcpyDeviceToHost);

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "cudaMemcpy D2H returned: " << cudaGetErrorString(memcpy_err));

    if (memcpy_err != cudaSuccess) {
        GPU_DEBUG_PRINT(DebugModule::ERROR_HANDLING, DebugLevel::ERROR,
            "Failed to copy result back to CPU: " << cudaGetErrorString(memcpy_err));
        return -1;
    }

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "Result transferred back to CPU successfully");

    // 验证结果向量的前几个元素
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Verifying result: X[0]=" << X(0) << ", X[1]=" << X(1) <<
        ", X[" << (size-1) << "]=" << X(size-1));

    auto t_end = high_resolution_clock::now();
    double t_total = duration<double>(t_end - t_start).count();

    // 更新统计
    pcgTotalSolves++;
    pcgTotalIterations += actual_iterations;
    pcgTotalTime += t_total;

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "PCG solve completed - iterations=" << actual_iterations <<
        ", time=" << t_total << "s, avg=" << (pcgTotalTime / pcgTotalSolves) << "s");

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "solveWithPCG() returning 0");

    // 最后再次确保GPU完全同步
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Final cudaDeviceSynchronize before return...");
    cudaError_t final_sync = cudaDeviceSynchronize();
    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::DEBUG,
        "Final sync result: " << cudaGetErrorString(final_sync));

    GPU_DEBUG_PRINT(DebugModule::GPU_COMPUTE, DebugLevel::INFO,
        "About to return 0 from solveWithPCG()");

    return 0;
}
