#ifndef FastVectorOps_h
#define FastVectorOps_h

#include <cuda_runtime.h>

#include <cstddef>   // size_t (memory-usage accounting members)
#include <iostream>
#include <string>    // std::string in error/debug method declarations

// Forward declaration
class VectorOpManager;

// GPU Vector Operations Debug Module (extends GPUSolver debug system)
// Debug subsystems that can be toggled independently.
// NOTE: values 0..4 index VectorOpManager::vectorOpDebugEnabled[5];
// keep them contiguous and update the array size if a module is added.
enum class VectorOpDebugModule {
    KERNEL_LAUNCH = 0,    // GPU kernel launches
    MEMORY_TRANSFER = 1,  // host<->device memory transfers
    PERFORMANCE = 2,      // performance statistics
    ERROR_HANDLING = 3,   // error handling
    OPTIMIZATION = 4      // optimization debugging
};

// Vector Operation Timer Types (integrates with GPUSolver TimerType system)
// Timed operation categories.
// NOTE: values 0..5 index the 6-slot statistics arrays in VectorOpManager
// (cublasTime[6], customTime[6], operationCounts[6], vectorOpTimers[6][2]).
enum class VectorOpTimerType {
    NORM2_OPERATION = 0,  // vector 2-norm
    DOT_OPERATION = 1,    // vector dot product
    AXPY_OPERATION = 2,   // alpha*x + y (axpy)
    COPY_OPERATION = 3,   // vector copy
    SCAL_OPERATION = 4,   // vector scaling
    TOTAL_VECTOR_OPS = 5  // aggregate time over all vector operations
};

/**
 * VectorOpManager - high-performance custom GPU vector-operation manager.
 *
 * Features:
 * - Replaces CUBLAS vector operations, targeting an 8-18x speedup
 * - Integrates with the GPUSolver debugging and performance-monitoring system
 * - Supports the core operations: vector 2-norm, dot product, axpy, copy, scale
 * - Follows the GPUSolver error-handling and statistics patterns
 */
class VectorOpManager {
public:
    // ====== Constructor and Destructor ======
    VectorOpManager();
    ~VectorOpManager();

    // This class owns raw device buffers, CUDA streams, and CUDA events; a
    // shallow copy would double-free them on destruction, so copy
    // construction/assignment are disabled.
    VectorOpManager(const VectorOpManager&) = delete;
    VectorOpManager& operator=(const VectorOpManager&) = delete;

    // ====== Vector Operations Public Interface ======
    // Core vector operations that replace their CUBLAS counterparts.
    // All take a vector length `n` and device pointers (`d_` prefix).
    // NOTE(review): return convention (presumably 0 = success, -1 = error,
    // matching VECTOR_OP_CUDA_CHECK below) -- confirm against the .cu file.
    int fastNorm2(int n, const double* d_x, double* result);        // replaces cublasDnrm2
    int fastDot(int n, const double* d_x, const double* d_y, double* result); // replaces cublasDdot
    int fastAxpy(int n, double alpha, const double* d_x, double* d_y);        // replaces cublasDaxpy
    int fastCopy(int n, const double* d_x, double* d_y);            // replaces cublasDcopy
    int fastScal(int n, double alpha, double* d_x);                 // replaces cublasDscal

    // Batched operation support (fuse several ops into fewer launches).
    int batchVectorOperations(int numOps, int* operations, void** params);
    int optimizedCGOperations(int n, double* d_r, double* d_p, double* d_Ap, 
                              double* d_x, double* scalars);

    // ====== Initialization and Configuration ======
    int initialize(int maxVectorSize);
    int cleanup();
    int setOptimizationLevel(int level);  // 0=basic, 1=optimized, 2=aggressive
    bool isInitialized() const;

    // ====== Error Handling and Statistics - Static Methods ======
    // Follows the GPUSolver error-statistics pattern; static so the
    // error-checking macros below can call it without an instance
    // (forwards to GPUSolver).
    static void recordVectorOpError(int errorType, const std::string& errorMessage);

    // ====== Debug Control ======
    // Debug switches (integrate with the GPUSolver debug system). These are
    // public so that the VECTOR_OP_DEBUG_PRINT macro can read them at any
    // expansion site; as private members that macro would not compile
    // outside this class.
    static bool vectorOpDebugEnabled[5];     // one flag per VectorOpDebugModule
    static int vectorOpDebugLevel;           // global verbosity threshold

    // ====== Memory Management ======
    int allocateWorkBuffers(int maxVectorSize);
    void freeWorkBuffers();
    size_t getWorkBufferMemoryUsage();

    // GPU memory preallocation strategy (avoids per-call allocation churn).
    int preallocateResultBuffers();
    int getResultBuffer(VectorOpTimerType opType, double** d_result);
    int releaseResultBuffer(VectorOpTimerType opType);

    // ====== Configuration and Optimization ======
    // Kernel launch parameter configuration.
    int setBlockSize(int blockSize);
    int setGridSize(int gridSize);
    int enableSharedMemoryOptimization(bool enabled = true);
    int enableAtomicOperationOptimization(bool enabled = true);

    // CUDA stream management.
    int createComputeStreams(int numStreams = 2);
    int destroyComputeStreams();
    cudaStream_t getComputeStream(int streamIndex = 0);

    // ====== Diagnostics and Validation ======
    int validateResults(int operation, const double* expected, const double* actual, double tolerance = 1e-12);
    int runBenchmarks(int vectorSize, int iterations = 100);
    int runNumericalAccuracyTests();
    void printSystemConfiguration();

private:
    // ====== Core Implementation Members ======
    bool initialized;
    int maxVectorSize;
    int optimizationLevel;

    // Kernel launch configuration.
    int blockSize;
    int gridSize;
    bool sharedMemoryEnabled;
    bool atomicOptimizationEnabled;

    // ====== Work Buffers and Memory Management ======
    // Device-side work buffers.
    double* d_workBuffer1;       // general-purpose work buffer 1
    double* d_workBuffer2;       // general-purpose work buffer 2
    double* d_resultBuffer;      // result buffer
    double* d_tempReduction;     // scratch buffer for reduction computations

    // Preallocated result buffers (avoid frequent allocations).
    double* d_norm2Results;      // norm2 result buffer
    double* d_dotResults;        // dot result buffer

    // Host-side staging buffer for device<->host result transfers.
    double* h_tempResults;

    // Memory usage statistics.
    size_t totalAllocatedMemory;
    size_t peakMemoryUsage;

    // ====== CUDA Streams and Events ======
    cudaStream_t* computeStreams;
    int numComputeStreams;
    cudaEvent_t startEvent;
    cudaEvent_t stopEvent;

    // ====== Performance Statistics ======
    // Per-operation call counters.
    static int norm2Count;
    static int dotCount;
    static int axpyCount;
    static int copyCount;
    static int scalCount;

    // Performance comparison data, indexed by VectorOpTimerType (6 slots).
    bool performanceComparisonEnabled;
    double cublasTime[6];        // CUBLAS operation times
    double customTime[6];        // custom-kernel operation times
    int operationCounts[6];      // per-operation invocation counts

    // ====== Error Statistics ======
    // Error counters (follows the GPUSolver pattern).
    static int totalVectorOpErrors;
    static int kernelLaunchErrors;
    static int memoryTransferErrors;
    static int numericalErrors;
    static int configurationErrors;

    // ====== Core Implementation Methods ======
    // Memory management implementation.
    int allocateGPUBuffers();
    void freeGPUBuffers();
    int reallocateBuffers(int newSize);

    // Parameter validation.
    bool validateInputParameters(int n, const double* d_x, const double* d_y = nullptr);
    bool isValidVectorSize(int n);
    bool isValidPointer(const double* ptr);

    // Error-handling implementation.
    void recordErrorInternal(int errorType, const std::string& errorMessage);
    int handleCudaError(cudaError_t error, const std::string& operation);

    // Performance-statistics implementation.
    void recordOperation(VectorOpTimerType opType, double executionTime);
    void updatePerformanceStatistics(VectorOpTimerType opType, bool success);

    // Kernel launch parameter computation.
    void calculateOptimalKernelParams(int vectorSize, int& blockSize, int& gridSize);
    int getOptimalSharedMemorySize(int blockSize);

    // Debug-output implementation.
    void debugPrint(VectorOpDebugModule module, int level, const std::string& message);
    std::string formatVectorOpMessage(const std::string& operation, int vectorSize, double time);

    // ====== Static Performance Timing Data ======
    // Static timing data integrating with the GPUSolver timing system,
    // indexed by VectorOpTimerType (6 slots).
    static double vectorOpTimers[6][2];      // [operation][start/total]
    static int vectorOpTimerCounts[6];       // timer invocation counts
    static bool vectorOpTimingEnabled;
};

// ====== External Kernel Function Declarations ======
// C++-callable entry points for the GPU kernels (defined in the .cu
// implementation file). extern "C" suppresses name mangling; the default
// arguments are a C++-side convenience only (stream 0 is the default
// stream, 256 the default block size).
// NOTE(review): return convention presumably matches the manager's int
// error codes -- confirm against the kernel implementation file.
extern "C" {
    // Basic vector-operation kernel launchers.
    int launchNorm2Kernel(int n, const double* d_x, double* d_result, 
                          cudaStream_t stream = 0, int blockSize = 256);
    int launchDotKernel(int n, const double* d_x, const double* d_y, double* d_result,
                        cudaStream_t stream = 0, int blockSize = 256);
    int launchAxpyKernel(int n, double alpha, const double* d_x, double* d_y,
                         cudaStream_t stream = 0, int blockSize = 256);
    int launchCopyKernel(int n, const double* d_x, double* d_y,
                         cudaStream_t stream = 0, int blockSize = 256);
    int launchScalKernel(int n, double alpha, double* d_x,
                         cudaStream_t stream = 0, int blockSize = 256);
    
    // Fused kernel launcher for the conjugate-gradient update sequence.
    int launchCGOptimizedKernel(int n, double* d_r, double* d_p, double* d_Ap, 
                                double* d_x, double* scalars, cudaStream_t stream = 0);
}

// ====== Error Checking Macros ======
// CUDA error-check macro (inherits the GPUSolver error-handling pattern).
// On failure it logs to stderr, records the error via
// VectorOpManager::recordVectorOpError, and executes `return -1;` -- so it
// may only be used inside functions whose return type accepts -1 (the
// int-returning methods above).
// `call` is parenthesized so argument expressions expand with the intended
// precedence.
#define VECTOR_OP_CUDA_CHECK(call) \
    do { \
        cudaError_t error = (call); \
        if (error != cudaSuccess) { \
            std::cerr << "[VectorOpManager][ERROR] CUDA error in " << __FILE__ \
                      << ":" << __LINE__ << " - " << cudaGetErrorString(error) << std::endl; \
            VectorOpManager::recordVectorOpError(1, cudaGetErrorString(error)); \
            return -1; \
        } \
    } while(0)

// Conditional debug print (integrates with the GPUSolver debug system).
// Prints only when the module's debug flag is set and `level` does not
// exceed the global verbosity threshold. `level` and `message` are
// parenthesized so compound argument expressions (e.g. ternaries) expand
// with the intended precedence; `module` is stringized for the tag.
// NOTE(review): requires VectorOpManager::vectorOpDebugEnabled and
// VectorOpManager::vectorOpDebugLevel to be accessible at the expansion
// site -- as declared above they are private, so this macro is only usable
// from members/friends of VectorOpManager; verify intended usage.
#define VECTOR_OP_DEBUG_PRINT(module, level, message) \
    do { \
        if (VectorOpManager::vectorOpDebugEnabled[static_cast<int>(module)] && \
            (level) <= VectorOpManager::vectorOpDebugLevel) { \
            std::cout << "[VectorOpManager][" << #module << "][" << (level) << "] " \
                      << (message) << std::endl; \
        } \
    } while(0)

#endif // FastVectorOps_h