#include "FastVectorOps.h"

#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cooperative_groups.h>

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <string>

namespace cg = cooperative_groups;

// ====== GPU Kernel Parameters and Constants ======
#define DEFAULT_BLOCK_SIZE 256
#define WARP_SIZE 32
#define MAX_BLOCKS_PER_GRID 65535
#define SHARED_MEMORY_BANK_SIZE 32
#define MIN_BLOCKS_PER_SM 2

// ====== CUDA 12.0 Compatible AtomicAdd for Double ======
/**
 * Atomic add for double, portable across compute capabilities.
 *
 * On SM60+ the hardware instruction is used directly (faster and
 * contention-friendly). Older architectures fall back to the classic
 * compare-and-swap loop over the 64-bit integer representation.
 *
 * @param address device pointer to the accumulator
 * @param val     value to add
 * @return the value stored at *address before the addition
 */
__device__ __forceinline__ double atomicAddDouble(double* address, double val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
    // Native double-precision atomicAdd (requires SM60+).
    return atomicAdd(address, val);
#else
    unsigned long long* address_as_ull = (unsigned long long*)address;
    unsigned long long old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Reinterpret, add, and attempt to publish; retry if another
        // thread updated the location in between.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
#endif
}

// ====== Shared Memory Reduction Utilities ======
/**
 * Warp-level sum reduction via shuffle intrinsics (no shared memory,
 * no explicit synchronization).
 *
 * After log2(32) = 5 shuffle-down steps, lane 0 holds the sum of all
 * 32 lanes' inputs; other lanes hold partial sums. Uses the full
 * 0xffffffff mask, i.e. assumes all 32 lanes of the warp participate.
 */
__device__ __forceinline__ double warpReduceSum(double val) {
    #pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
        val += __shfl_down_sync(0xffffffff, val, offset);
    }
    return val;
}

/**
 * Block-level sum reduction.
 *
 * Each warp reduces internally with shuffles, warp leaders stash their
 * partials in shared memory, and the first warp reduces those partials.
 * The block sum is returned in thread 0 (other threads get partials).
 *
 * BUG FIX: the per-warp partial count was computed as
 * blockDim.x / WARP_SIZE, which truncates — for block sizes that are
 * not a multiple of 32 the trailing partial warp's contribution was
 * silently dropped. Rounded up instead.
 *
 * Supports blockDim.x <= 1024 (32 warp slots). Must be called by all
 * threads of the block (contains __syncthreads()).
 */
__device__ __forceinline__ double blockReduceSum(double val) {
    static __shared__ double shared[32]; // one slot per warp (max 32 warps)
    int lane = threadIdx.x % WARP_SIZE;
    int wid = threadIdx.x / WARP_SIZE;

    // Number of (possibly partial) warps in this block, rounded up.
    int numWarps = (blockDim.x + WARP_SIZE - 1) / WARP_SIZE;

    // Intra-warp reduction.
    val = warpReduceSum(val);

    // Warp leaders publish their partial sums.
    if (lane == 0) shared[wid] = val;

    __syncthreads();

    // First warp gathers every warp partial (zero-fill unused lanes)
    // and reduces them to the block total.
    val = (threadIdx.x < numWarps) ? shared[lane] : 0.0;

    if (wid == 0) val = warpReduceSum(val);

    return val;
}

// ====== Vector Norm2 GPU Kernel ======
/**
 * Partial kernel for the vector 2-norm: accumulates sum(x_i^2) into
 * *d_result (one atomic add per block).
 *
 * *d_result must be zeroed before launch; finalize_norm2_kernel applies
 * the square root afterwards. Grid-stride loop, so any launch
 * configuration is valid.
 */
__global__ void fast_norm2_kernel(int n, const double* __restrict__ d_x, double* __restrict__ d_result) {
    const int stride = blockDim.x * gridDim.x;
    double partial = 0.0;

    // Each thread accumulates squares over its strided slice of x.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        const double xi = d_x[i];
        partial += xi * xi;
    }

    // Collapse all per-thread partials within the block.
    partial = blockReduceSum(partial);

    // Thread 0 folds this block's contribution into the global result.
    if (threadIdx.x == 0) {
        atomicAddDouble(d_result, partial);
    }
}

/**
 * Finalization step for the 2-norm: replaces the accumulated sum of
 * squares at *d_result with its square root. Intended for a <<<1,1>>>
 * launch; guarded so only one thread writes in any configuration.
 */
__global__ void finalize_norm2_kernel(double* d_result) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        *d_result = sqrt(*d_result);
    }
}

// ====== Vector Dot Product GPU Kernel ======
/**
 * Dot product partial kernel: accumulates sum(x_i * y_i) into *d_result
 * (one atomic add per block). *d_result must be zeroed before launch.
 *
 * BUG FIX: the previous "unrolled" loop read elements i..i+3 while
 * advancing by the grid stride, so neighbouring threads (whose starting
 * indices differ by 1) processed the same elements repeatedly — the dot
 * product was overcounted for any grid with more than one thread. The
 * four accesses are now spaced by `stride`, keeping every thread's
 * element set disjoint while the loads stay coalesced.
 */
__global__ void fast_dot_kernel(int n, const double* __restrict__ d_x, 
                               const double* __restrict__ d_y, double* __restrict__ d_result) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    double sum = 0.0;

    // 4-way unrolled grid-stride loop; largest index touched is i + 3*stride.
    int i = idx;
    for (; i < n - 3 * stride; i += 4 * stride) {
        sum += d_x[i]              * d_y[i]
             + d_x[i + stride]     * d_y[i + stride]
             + d_x[i + 2 * stride] * d_y[i + 2 * stride]
             + d_x[i + 3 * stride] * d_y[i + 3 * stride];
    }

    // Tail elements.
    for (; i < n; i += stride) {
        sum += d_x[i] * d_y[i];
    }

    // Block-level reduction, then one atomic per block.
    sum = blockReduceSum(sum);

    if (threadIdx.x == 0) {
        atomicAddDouble(d_result, sum);
    }
}

// ====== Vector AXPY GPU Kernel ======
/**
 * AXPY kernel: y = alpha * x + y.
 *
 * BUG FIX: the previous "unrolled" loop updated elements i..i+3 while
 * advancing by the grid stride, so neighbouring threads raced on the
 * same y elements and alpha*x could be applied more than once. The four
 * accesses are now spaced by `stride`, which keeps each thread's
 * element set disjoint (and the accesses coalesced).
 */
__global__ void fast_axpy_kernel(int n, double alpha, const double* __restrict__ d_x, double* __restrict__ d_y) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // 4-way unrolled grid-stride loop; disjoint indices per thread.
    int i = idx;
    for (; i < n - 3 * stride; i += 4 * stride) {
        d_y[i]              += alpha * d_x[i];
        d_y[i + stride]     += alpha * d_x[i + stride];
        d_y[i + 2 * stride] += alpha * d_x[i + 2 * stride];
        d_y[i + 3 * stride] += alpha * d_x[i + 3 * stride];
    }

    // Tail elements.
    for (; i < n; i += stride) {
        d_y[i] += alpha * d_x[i];
    }
}

// ====== Vector Copy GPU Kernel ======
/**
 * Vector copy kernel: y = x.
 *
 * When both pointers are 16-byte aligned, pairs of doubles are moved as
 * double2 (one 16-byte transaction per pair) for better bandwidth; a
 * plain grid-stride loop handles the unaligned case.
 *
 * FIX: the previous unaligned path used an overlapping "unrolled" loop
 * in which neighbouring threads redundantly rewrote the same elements
 * (harmless for a copy since writes were identical, but wasted
 * bandwidth); replaced with a simple coalesced grid-stride copy.
 */
__global__ void fast_copy_kernel(int n, const double* __restrict__ d_x, double* __restrict__ d_y) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    bool aligned = reinterpret_cast<uintptr_t>(d_x) % sizeof(double2) == 0 &&
                   reinterpret_cast<uintptr_t>(d_y) % sizeof(double2) == 0;

    if (n >= 2 && aligned) {
        // Vectorized path: copy two doubles per transaction.
        const double2* d_x2 = reinterpret_cast<const double2*>(d_x);
        double2* d_y2 = reinterpret_cast<double2*>(d_y);
        int n2 = n / 2;

        for (int i = idx; i < n2; i += stride) {
            d_y2[i] = d_x2[i];
        }

        // Odd-length vectors: one thread copies the final element.
        if (idx == 0 && n % 2 == 1) {
            d_y[n - 1] = d_x[n - 1];
        }
    } else {
        // Fallback: coalesced grid-stride scalar copy.
        for (int i = idx; i < n; i += stride) {
            d_y[i] = d_x[i];
        }
    }
}

// ====== Vector Scale GPU Kernel ======
/**
 * Vector scaling kernel: x = alpha * x.
 *
 * BUG FIX: the previous "unrolled" loop touched elements i..i+3 while
 * advancing by the grid stride, so neighbouring threads scaled the same
 * elements repeatedly — elements could be multiplied by alpha more than
 * once, producing wrong results. The four accesses are now spaced by
 * `stride`, keeping every thread's element set disjoint.
 */
__global__ void fast_scal_kernel(int n, double alpha, double* __restrict__ d_x) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // 4-way unrolled grid-stride loop; disjoint indices per thread.
    int i = idx;
    for (; i < n - 3 * stride; i += 4 * stride) {
        d_x[i]              *= alpha;
        d_x[i + stride]     *= alpha;
        d_x[i + 2 * stride] *= alpha;
        d_x[i + 3 * stride] *= alpha;
    }

    // Tail elements.
    for (; i < n; i += stride) {
        d_x[i] *= alpha;
    }
}

// ====== Optimized CG Operations Combined Kernel ======
/**
 * Fused CG-iteration kernel: merges several vector updates of one
 * conjugate-gradient step into a single launch to cut kernel-launch
 * overhead.
 *
 * Intended sequence:
 *   1. r_new = r_old - alpha * Ap
 *   2. x_new = x_old + alpha * p_old
 *   3. beta  = ||r_new||^2 / ||r_old||^2
 *   4. p_new = r_new + beta * p_old
 *
 * WARNING (review): __syncthreads() below is a BLOCK-level barrier; it
 * does NOT synchronize across blocks. With gridDim.x > 1, a block can
 * read scalars[2]/scalars[3] before other blocks have accumulated their
 * norm partials and before block 0 has published beta, yielding
 * nondeterministic results. As written this kernel is only correct for
 * a single-block launch; a grid-wide barrier requires a cooperative
 * launch (cg::this_grid().sync()) or splitting this into two kernels.
 */
__global__ void fast_cg_optimized_kernel(int n, double* __restrict__ d_r, double* __restrict__ d_p, 
                                        const double* __restrict__ d_Ap, double* __restrict__ d_x,
                                        double* __restrict__ scalars) {
    // scalars[0] = alpha, scalars[1] = r_old_norm2, scalars[2] = r_new_norm2, scalars[3] = beta
    
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    
    double alpha = scalars[0];
    double r_new_norm2_sum = 0.0;
    
    // Phase 1: apply the vector updates and accumulate the new residual norm.
    for (int i = idx; i < n; i += stride) {
        // r_new = r_old - alpha * Ap
        double r_new = d_r[i] - alpha * d_Ap[i];
        d_r[i] = r_new;
        
        // x_new = x_old + alpha * p_old
        d_x[i] += alpha * d_p[i];
        
        // Accumulate the squared new residual.
        r_new_norm2_sum += r_new * r_new;
    }
    
    // Block-level reduction of the residual-norm partial.
    r_new_norm2_sum = blockReduceSum(r_new_norm2_sum);
    
    if (threadIdx.x == 0) {
        atomicAddDouble(&scalars[2], r_new_norm2_sum);
    }
    
    // NOTE(review): block-local barrier only — does NOT wait for other
    // blocks to finish their atomic contributions to scalars[2].
    __syncthreads();
    
    // Compute beta (written by thread 0 of block 0 only).
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        double r_old_norm2 = scalars[1];
        double r_new_norm2 = scalars[2];
        double beta = (r_old_norm2 > 0) ? (r_new_norm2 / r_old_norm2) : 0.0;
        scalars[3] = beta;
    }
    
    // NOTE(review): likewise block-local; other blocks may read a stale beta.
    __syncthreads();
    
    // Phase 2: update the search direction p.
    double beta = scalars[3];
    for (int i = idx; i < n; i += stride) {
        // p_new = r_new + beta * p_old
        d_p[i] = d_r[i] + beta * d_p[i];
    }
}

// ====== Advanced Memory Optimization Kernels ======
/**
 * Shared-memory dot product partial kernel for small/medium vectors.
 * Accumulates sum(x_i * y_i) into *d_result (one atomic add per block).
 *
 * Requires blockDim.x * sizeof(double) bytes of dynamic shared memory;
 * *d_result must be zeroed before launch.
 *
 * BUG FIX: the original halving loop (s = blockDim.x/2; s >>= 1)
 * silently dropped elements whenever blockDim.x was not a power of two.
 * The reduction below pairs elements with ceil(active/2) and is correct
 * for any block size.
 */
__global__ void fast_dot_shared_kernel(int n, const double* __restrict__ d_x, 
                                      const double* __restrict__ d_y, double* __restrict__ d_result) {
    extern __shared__ double sdata[];

    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // Each thread accumulates products over its strided slice.
    double sum = 0.0;
    for (int i = idx; i < n; i += stride) {
        sum += d_x[i] * d_y[i];
    }

    sdata[tid] = sum;
    __syncthreads();

    // Tree reduction valid for any blockDim.x: fold the upper
    // ceil(active/2) entries onto the lower ones until one remains.
    for (int active = blockDim.x; active > 1; ) {
        int half = (active + 1) / 2;
        if (tid < active - half) {
            sdata[tid] += sdata[tid + half];
        }
        __syncthreads();
        active = half;
    }

    if (tid == 0) {
        atomicAddDouble(d_result, sdata[0]);
    }
}

/**
 * Optimized squared-norm partial kernel: accumulates sum(x_i^2) into
 * *d_result using plain global loads (no texture memory). *d_result
 * must be zeroed before launch.
 *
 * BUG FIX: the previous unroll read x[i..i+3] while stepping by the
 * grid stride, so overlapping threads counted the same elements several
 * times and inflated the norm. Accesses are now spaced by `stride`, so
 * every element is read exactly once across the grid.
 */
__global__ void fast_norm2_optimized_kernel(int n, const double* __restrict__ d_x, double* __restrict__ d_result) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    double sum = 0.0;

    // 4-way unrolled grid-stride loop; disjoint indices per thread.
    int i = idx;
    for (; i < n - 3 * stride; i += 4 * stride) {
        double v0 = d_x[i];
        double v1 = d_x[i + stride];
        double v2 = d_x[i + 2 * stride];
        double v3 = d_x[i + 3 * stride];
        sum += v0 * v0 + v1 * v1 + v2 * v2 + v3 * v3;
    }

    // Tail elements.
    for (; i < n; i += stride) {
        double v = d_x[i];
        sum += v * v;
    }

    sum = blockReduceSum(sum);

    if (threadIdx.x == 0) {
        atomicAddDouble(d_result, sum);
    }
}

// ====== C++ Wrapper Functions Implementation ======
/**
 * GPU核函数的C++封装调用函数
 * 提供类型安全和错误检查的接口
 */

extern "C" {

/**
 * Launches the 2-norm computation on `stream`: zeroes *d_result,
 * accumulates the squared sum, then applies sqrt in a finalize kernel.
 *
 * @param blockSize threads per block; values <= 0 fall back to
 *                  DEFAULT_BLOCK_SIZE (previously caused a division by
 *                  zero when computing the grid size)
 * @return 0 on success, -1 on invalid arguments, -2 on any CUDA error.
 */
int launchNorm2Kernel(int n, const double* d_x, double* d_result, 
                      cudaStream_t stream, int blockSize) {
    if (n <= 0 || !d_x || !d_result) return -1;
    if (blockSize <= 0) blockSize = DEFAULT_BLOCK_SIZE;

    // Grid-stride kernels tolerate any grid size; cap at the 1D limit.
    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    // The accumulator must start at zero (blocks atomically add into it).
    if (cudaMemsetAsync(d_result, 0, sizeof(double), stream) != cudaSuccess) {
        return -2;
    }

    fast_norm2_kernel<<<numBlocks, blockSize, 0, stream>>>(n, d_x, d_result);
    if (cudaGetLastError() != cudaSuccess) {
        return -2;
    }

    // Apply the square root (previously this launch went unchecked).
    finalize_norm2_kernel<<<1, 1, 0, stream>>>(d_result);
    if (cudaGetLastError() != cudaSuccess) {
        return -2;
    }

    return 0;
}

/**
 * Launches the dot-product computation on `stream`, selecting between
 * the shared-memory tree-reduction kernel (small vectors) and the
 * shuffle-based kernel.
 *
 * @param blockSize threads per block; values <= 0 fall back to
 *                  DEFAULT_BLOCK_SIZE (previously caused a division by
 *                  zero when computing the grid size)
 * @return 0 on success, -1 on invalid arguments, -2 on any CUDA error.
 */
int launchDotKernel(int n, const double* d_x, const double* d_y, double* d_result,
                    cudaStream_t stream, int blockSize) {
    if (n <= 0 || !d_x || !d_y || !d_result) return -1;
    if (blockSize <= 0) blockSize = DEFAULT_BLOCK_SIZE;

    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    // The accumulator must start at zero (blocks atomically add into it).
    if (cudaMemsetAsync(d_result, 0, sizeof(double), stream) != cudaSuccess) {
        return -2;
    }

    // The shared-memory halving reduction assumes a power-of-two block
    // size; restrict its use accordingly and fall back otherwise.
    bool powerOfTwoBlock = (blockSize & (blockSize - 1)) == 0;
    if (n <= 8192 && blockSize >= 256 && powerOfTwoBlock) {
        size_t sharedMemSize = blockSize * sizeof(double);
        fast_dot_shared_kernel<<<numBlocks, blockSize, sharedMemSize, stream>>>(n, d_x, d_y, d_result);
    } else {
        fast_dot_kernel<<<numBlocks, blockSize, 0, stream>>>(n, d_x, d_y, d_result);
    }

    cudaError_t launchError = cudaGetLastError();
    return (launchError == cudaSuccess) ? 0 : -2;
}

/**
 * Launches the AXPY kernel (y = alpha * x + y) on `stream`.
 *
 * @param blockSize threads per block; values <= 0 fall back to
 *                  DEFAULT_BLOCK_SIZE (previously caused a division by
 *                  zero when computing the grid size)
 * @return 0 on success, -1 on invalid arguments, -2 on launch failure.
 */
int launchAxpyKernel(int n, double alpha, const double* d_x, double* d_y,
                     cudaStream_t stream, int blockSize) {
    if (n <= 0 || !d_x || !d_y) return -1;
    if (blockSize <= 0) blockSize = DEFAULT_BLOCK_SIZE;

    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    fast_axpy_kernel<<<numBlocks, blockSize, 0, stream>>>(n, alpha, d_x, d_y);

    cudaError_t launchError = cudaGetLastError();
    return (launchError == cudaSuccess) ? 0 : -2;
}

/**
 * Launches the vector copy kernel (y = x) on `stream`.
 *
 * @param blockSize threads per block; values <= 0 fall back to
 *                  DEFAULT_BLOCK_SIZE (previously caused a division by
 *                  zero when computing the grid size)
 * @return 0 on success, -1 on invalid arguments, -2 on launch failure.
 */
int launchCopyKernel(int n, const double* d_x, double* d_y,
                     cudaStream_t stream, int blockSize) {
    if (n <= 0 || !d_x || !d_y) return -1;
    if (blockSize <= 0) blockSize = DEFAULT_BLOCK_SIZE;

    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    fast_copy_kernel<<<numBlocks, blockSize, 0, stream>>>(n, d_x, d_y);

    cudaError_t launchError = cudaGetLastError();
    return (launchError == cudaSuccess) ? 0 : -2;
}

/**
 * Launches the vector scaling kernel (x = alpha * x) on `stream`.
 *
 * @param blockSize threads per block; values <= 0 fall back to
 *                  DEFAULT_BLOCK_SIZE (previously caused a division by
 *                  zero when computing the grid size)
 * @return 0 on success, -1 on invalid arguments, -2 on launch failure.
 */
int launchScalKernel(int n, double alpha, double* d_x,
                     cudaStream_t stream, int blockSize) {
    if (n <= 0 || !d_x) return -1;
    if (blockSize <= 0) blockSize = DEFAULT_BLOCK_SIZE;

    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    fast_scal_kernel<<<numBlocks, blockSize, 0, stream>>>(n, alpha, d_x);

    cudaError_t launchError = cudaGetLastError();
    return (launchError == cudaSuccess) ? 0 : -2;
}

/**
 * Launches the fused CG-update kernel on `stream`.
 *
 * scalars layout: [0]=alpha, [1]=r_old_norm2, [2]=r_new_norm2 (zeroed
 * here before the launch), [3]=beta.
 *
 * @return 0 on success, -1 on invalid arguments, -2 on any CUDA error.
 */
int launchCGOptimizedKernel(int n, double* d_r, double* d_p, double* d_Ap, 
                            double* d_x, double* scalars, cudaStream_t stream) {
    if (n <= 0 || !d_r || !d_p || !d_Ap || !d_x || !scalars) return -1;

    int blockSize = DEFAULT_BLOCK_SIZE;
    int numBlocks = std::min((n + blockSize - 1) / blockSize, MAX_BLOCKS_PER_GRID);

    // Reset scalars[2] (r_new_norm2); the kernel accumulates into it
    // atomically. Previously this memset went unchecked.
    if (cudaMemsetAsync(&scalars[2], 0, sizeof(double), stream) != cudaSuccess) {
        return -2;
    }

    // WARNING (review): fast_cg_optimized_kernel relies on cross-block
    // ordering that __syncthreads() cannot provide; it is only safe for
    // numBlocks == 1. See that kernel's header comment.
    fast_cg_optimized_kernel<<<numBlocks, blockSize, 0, stream>>>(n, d_r, d_p, d_Ap, d_x, scalars);

    cudaError_t launchError = cudaGetLastError();
    return (launchError == cudaSuccess) ? 0 : -2;
}

} // extern "C"

// ====== VectorOpManager Static Variables Definition ======

// Per-operation invocation counters (incremented by the fastXxx methods).
int VectorOpManager::norm2Count = 0;
int VectorOpManager::dotCount = 0;
int VectorOpManager::axpyCount = 0;
int VectorOpManager::copyCount = 0;
int VectorOpManager::scalCount = 0;

// Error statistics (bumped alongside recordVectorOpError calls).
int VectorOpManager::totalVectorOpErrors = 0;
int VectorOpManager::kernelLaunchErrors = 0;
int VectorOpManager::memoryTransferErrors = 0;
int VectorOpManager::numericalErrors = 0;
int VectorOpManager::configurationErrors = 0;

// Debug control: one flag per operation type, plus a global verbosity level.
bool VectorOpManager::vectorOpDebugEnabled[5] = {false, false, false, false, false};
int VectorOpManager::vectorOpDebugLevel = 2; // INFO level

// Performance timing accumulators: indexed [operation][slot]; the fastXxx
// methods add elapsed milliseconds into slot 1 and count calls separately.
double VectorOpManager::vectorOpTimers[6][2] = {{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, 
                                                 {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}};
int VectorOpManager::vectorOpTimerCounts[6] = {0, 0, 0, 0, 0, 0};
bool VectorOpManager::vectorOpTimingEnabled = true;

// ====== VectorOpManager Class Implementation ======

/**
 * VectorOpManager constructor - base framework version.
 *
 * Sets every configuration/buffer/statistics member to an inactive
 * default; device work buffers are only allocated later by initialize().
 *
 * NOTE(review): the cudaEventCreate() return values are ignored — if
 * event creation fails, the cudaEventRecord calls in the fastXxx
 * methods will subsequently error. Confirm whether this should be
 * surfaced to callers.
 */
VectorOpManager::VectorOpManager() 
    : initialized(false), maxVectorSize(0), optimizationLevel(1),
      blockSize(256), gridSize(0), sharedMemoryEnabled(true), atomicOptimizationEnabled(true),
      d_workBuffer1(nullptr), d_workBuffer2(nullptr), d_resultBuffer(nullptr), 
      d_tempReduction(nullptr), d_norm2Results(nullptr), d_dotResults(nullptr),
      h_tempResults(nullptr), totalAllocatedMemory(0), peakMemoryUsage(0),
      computeStreams(nullptr), numComputeStreams(0), 
      performanceComparisonEnabled(false) {
    
    // CUDA events used to time individual kernel launches.
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);
    
    // Zero the cuBLAS-vs-custom performance comparison accumulators.
    for (int i = 0; i < 6; i++) {
        cublasTime[i] = 0.0;
        customTime[i] = 0.0;
        operationCounts[i] = 0;
    }
}

/**
 * Destructor: releases device work buffers (via cleanup()) when the
 * manager is still initialized, then destroys the timing events
 * created by the constructor.
 */
VectorOpManager::~VectorOpManager() {
    if (initialized) {
        cleanup();
    }

    // Tear down the CUDA timing events.
    if (startEvent != nullptr) {
        cudaEventDestroy(startEvent);
    }
    if (stopEvent != nullptr) {
        cudaEventDestroy(stopEvent);
    }
}

/**
 * Computes the 2-norm of device vector d_x; the launched kernels write
 * sqrt(sum(x_i^2)) to device memory at `result` (a DEVICE pointer).
 *
 * Maintains the static operation/error/timing counters.
 *
 * @return 0 on success, -1 on invalid arguments or when uninitialized,
 *         -2 on kernel launch failure.
 */
int VectorOpManager::fastNorm2(int n, const double* d_x, double* result) {
    if (!initialized || !d_x || !result || n <= 0) {
        recordVectorOpError(1, "Invalid parameters for fastNorm2");
        return -1;
    }

    norm2Count++;

    // Bracket the launch with CUDA events for timing.
    cudaEventRecord(startEvent);
    int kernelResult = launchNorm2Kernel(n, d_x, result, 0, blockSize);
    cudaEventRecord(stopEvent);
    cudaEventSynchronize(stopEvent);

    if (kernelResult != 0) {
        kernelLaunchErrors++;
        totalVectorOpErrors++;
        recordVectorOpError(2, "GPU kernel launch failed for fastNorm2");
        return -2;
    }

    // Only query the elapsed time when timing is enabled — previously it
    // was computed unconditionally, inconsistent with every other
    // fastXxx method in this file.
    if (vectorOpTimingEnabled) {
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, startEvent, stopEvent);
        vectorOpTimers[0][1] += milliseconds; // accumulated total time
        vectorOpTimerCounts[0]++;             // call count
    }

    return 0;
}

/**
 * Computes the dot product of device vectors d_x and d_y; the launched
 * kernel writes the scalar to device memory at `result`.
 *
 * @return 0 on success, -1 on invalid arguments or when uninitialized,
 *         -2 on kernel launch failure.
 */
int VectorOpManager::fastDot(int n, const double* d_x, const double* d_y, double* result) {
    const bool argsOk = initialized && d_x && d_y && result && n > 0;
    if (!argsOk) {
        recordVectorOpError(1, "Invalid parameters for fastDot");
        return -1;
    }

    dotCount++;

    // Bracket the launch with CUDA events for timing.
    cudaEventRecord(startEvent);
    const int status = launchDotKernel(n, d_x, d_y, result, 0, blockSize);
    cudaEventRecord(stopEvent);
    cudaEventSynchronize(stopEvent);

    if (status != 0) {
        kernelLaunchErrors++;
        totalVectorOpErrors++;
        recordVectorOpError(2, "GPU kernel launch failed for fastDot");
        return -2;
    }

    // Fold the elapsed time into the per-operation statistics.
    if (vectorOpTimingEnabled) {
        float elapsedMs = 0;
        cudaEventElapsedTime(&elapsedMs, startEvent, stopEvent);
        vectorOpTimers[1][1] += elapsedMs;
        vectorOpTimerCounts[1]++;
    }

    return 0;
}

/**
 * Performs y = alpha * x + y on device vectors.
 *
 * @return 0 on success, -1 on invalid arguments or when uninitialized,
 *         -2 on kernel launch failure.
 */
int VectorOpManager::fastAxpy(int n, double alpha, const double* d_x, double* d_y) {
    const bool argsOk = initialized && d_x && d_y && n > 0;
    if (!argsOk) {
        recordVectorOpError(1, "Invalid parameters for fastAxpy");
        return -1;
    }

    axpyCount++;

    // Time the launch with CUDA events.
    cudaEventRecord(startEvent);
    const int status = launchAxpyKernel(n, alpha, d_x, d_y, 0, blockSize);
    cudaEventRecord(stopEvent);
    cudaEventSynchronize(stopEvent);

    if (status != 0) {
        kernelLaunchErrors++;
        totalVectorOpErrors++;
        recordVectorOpError(2, "GPU kernel launch failed for fastAxpy");
        return -2;
    }

    if (vectorOpTimingEnabled) {
        float elapsedMs = 0;
        cudaEventElapsedTime(&elapsedMs, startEvent, stopEvent);
        vectorOpTimers[2][1] += elapsedMs;
        vectorOpTimerCounts[2]++;
    }

    return 0;
}

/**
 * Copies device vector d_x into d_y.
 *
 * @return 0 on success, -1 on invalid arguments or when uninitialized,
 *         -2 on kernel launch failure.
 */
int VectorOpManager::fastCopy(int n, const double* d_x, double* d_y) {
    const bool argsOk = initialized && d_x && d_y && n > 0;
    if (!argsOk) {
        recordVectorOpError(1, "Invalid parameters for fastCopy");
        return -1;
    }

    copyCount++;

    // Time the launch with CUDA events.
    cudaEventRecord(startEvent);
    const int status = launchCopyKernel(n, d_x, d_y, 0, blockSize);
    cudaEventRecord(stopEvent);
    cudaEventSynchronize(stopEvent);

    if (status != 0) {
        kernelLaunchErrors++;
        totalVectorOpErrors++;
        recordVectorOpError(2, "GPU kernel launch failed for fastCopy");
        return -2;
    }

    if (vectorOpTimingEnabled) {
        float elapsedMs = 0;
        cudaEventElapsedTime(&elapsedMs, startEvent, stopEvent);
        vectorOpTimers[3][1] += elapsedMs;
        vectorOpTimerCounts[3]++;
    }

    return 0;
}

/**
 * Scales device vector d_x in place: x = alpha * x.
 *
 * @return 0 on success, -1 on invalid arguments or when uninitialized,
 *         -2 on kernel launch failure.
 */
int VectorOpManager::fastScal(int n, double alpha, double* d_x) {
    const bool argsOk = initialized && d_x && n > 0;
    if (!argsOk) {
        recordVectorOpError(1, "Invalid parameters for fastScal");
        return -1;
    }

    scalCount++;

    // Time the launch with CUDA events.
    cudaEventRecord(startEvent);
    const int status = launchScalKernel(n, alpha, d_x, 0, blockSize);
    cudaEventRecord(stopEvent);
    cudaEventSynchronize(stopEvent);

    if (status != 0) {
        kernelLaunchErrors++;
        totalVectorOpErrors++;
        recordVectorOpError(2, "GPU kernel launch failed for fastScal");
        return -2;
    }

    if (vectorOpTimingEnabled) {
        float elapsedMs = 0;
        cudaEventElapsedTime(&elapsedMs, startEvent, stopEvent);
        vectorOpTimers[4][1] += elapsedMs;
        vectorOpTimerCounts[4]++;
    }

    return 0;
}

/**
 * Allocates the GPU work buffers and marks the manager ready.
 * Idempotent: returns 0 immediately when already initialized.
 *
 * @param maxVectorSize largest vector length the buffers must hold
 * @return 0 on success, -1 for a non-positive size, otherwise the
 *         allocateWorkBuffers error code; on failure any buffers that
 *         were allocated are released (previously they leaked).
 */
int VectorOpManager::initialize(int maxVectorSize) {
    if (initialized) return 0;
    if (maxVectorSize <= 0) return -1;

    this->maxVectorSize = maxVectorSize;

    int result = allocateWorkBuffers(maxVectorSize);
    if (result != 0) {
        // Don't leak buffers allocated before the failing cudaMalloc.
        freeWorkBuffers();
        return result;
    }

    initialized = true;
    return 0;
}

/**
 * Releases all work buffers and returns the manager to the
 * uninitialized state. Safe to call when never initialized.
 */
int VectorOpManager::cleanup() {
    if (!initialized) {
        return 0;
    }

    freeWorkBuffers();
    initialized = false;
    return 0;
}

/**
 * Allocates the GPU work buffers used by the vector operations.
 *
 * @return 0 on success, or a negative code identifying the failing
 *         allocation (-1: workBuffer1, -2: workBuffer2,
 *         -3: resultBuffer). On failure, buffers allocated earlier in
 *         this call are freed (previously they leaked) and the
 *         corresponding pointers are nulled.
 */
int VectorOpManager::allocateWorkBuffers(int maxVectorSize) {
    // size_t arithmetic avoids int overflow for very large vectors.
    const size_t bufferSize = static_cast<size_t>(maxVectorSize) * sizeof(double);

    if (cudaMalloc(&d_workBuffer1, bufferSize) != cudaSuccess) {
        d_workBuffer1 = nullptr;
        return -1;
    }

    if (cudaMalloc(&d_workBuffer2, bufferSize) != cudaSuccess) {
        cudaFree(d_workBuffer1); d_workBuffer1 = nullptr;
        d_workBuffer2 = nullptr;
        return -2;
    }

    if (cudaMalloc(&d_resultBuffer, sizeof(double)) != cudaSuccess) {
        cudaFree(d_workBuffer1); d_workBuffer1 = nullptr;
        cudaFree(d_workBuffer2); d_workBuffer2 = nullptr;
        d_resultBuffer = nullptr;
        return -3;
    }

    totalAllocatedMemory = bufferSize * 2 + sizeof(double);
    return 0;
}

/**
 * Releases every device buffer plus the pinned host buffer, nulling
 * each pointer so repeated cleanup calls are harmless.
 */
void VectorOpManager::freeWorkBuffers() {
    if (d_workBuffer1 != nullptr) {
        cudaFree(d_workBuffer1);
        d_workBuffer1 = nullptr;
    }
    if (d_workBuffer2 != nullptr) {
        cudaFree(d_workBuffer2);
        d_workBuffer2 = nullptr;
    }
    if (d_resultBuffer != nullptr) {
        cudaFree(d_resultBuffer);
        d_resultBuffer = nullptr;
    }
    if (d_tempReduction != nullptr) {
        cudaFree(d_tempReduction);
        d_tempReduction = nullptr;
    }
    if (d_norm2Results != nullptr) {
        cudaFree(d_norm2Results);
        d_norm2Results = nullptr;
    }
    if (d_dotResults != nullptr) {
        cudaFree(d_dotResults);
        d_dotResults = nullptr;
    }

    // Pinned host memory must go through cudaFreeHost, not cudaFree.
    if (h_tempResults != nullptr) {
        cudaFreeHost(h_tempResults);
        h_tempResults = nullptr;
    }

    totalAllocatedMemory = 0;
}

/**
 * Selects the optimization level and derives the launch configuration
 * from it.
 *
 * @param level 0 = baseline (128-thread blocks, no shared-memory or
 *              atomic optimizations), 1 = optimized (256), 2 =
 *              aggressive (512, same feature flags as level 1)
 * @return 0 on success, -1 when level is out of range.
 */
int VectorOpManager::setOptimizationLevel(int level) {
    if (level < 0 || level > 2) return -1;

    optimizationLevel = level;

    if (level == 0) {
        // Conservative baseline configuration.
        blockSize = 128;
        sharedMemoryEnabled = false;
        atomicOptimizationEnabled = false;
    } else {
        // Levels 1 and 2 enable the same feature flags and differ only
        // in block size.
        blockSize = (level == 1) ? 256 : 512;
        sharedMemoryEnabled = true;
        atomicOptimizationEnabled = true;
    }

    return 0;
}

// Returns true once initialize() has completed successfully and
// cleanup() has not yet been called.
bool VectorOpManager::isInitialized() const {
    return initialized;
}

// Returns the number of device bytes currently held by the work
// buffers (0 after freeWorkBuffers()).
size_t VectorOpManager::getWorkBufferMemoryUsage() {
    return totalAllocatedMemory;
}

/**
 * Batch execution of vector operations - framework stub.
 *
 * NOTE(review): currently a no-op that always reports success; the
 * `operations` and `params` arrays are not consumed yet. Callers must
 * not rely on any work being performed until this is implemented.
 */
int VectorOpManager::batchVectorOperations(int numOps, int* operations, void** params) {
    // Basic skeleton: operations would be dispatched one by one here.
    for (int i = 0; i < numOps; i++) {
        // Dispatch on the operation type.
        // Framework code only — the concrete implementation is deferred
        // to a later development stage.
    }
    return 0;
}

/**
 * Runs the fused conjugate-gradient update on the default stream.
 * Pointer and size validation is delegated to launchCGOptimizedKernel.
 *
 * @return -1 when the manager is uninitialized, otherwise the launch
 *         wrapper's return code.
 */
int VectorOpManager::optimizedCGOperations(int n, double* d_r, double* d_p, double* d_Ap, 
                                          double* d_x, double* scalars) {
    if (!initialized) {
        return -1;
    }
    return launchCGOptimizedKernel(n, d_r, d_p, d_Ap, d_x, scalars, 0);
}

/**
 * Static error sink for vector-operation failures.
 *
 * @param errorType    numeric category as used by callers in this file
 *                     (1 = invalid arguments, 2 = kernel launch failure)
 * @param errorMessage human-readable description, printed to stderr
 *
 * NOTE(review): intended to forward into GPUSolver's error-handling
 * system; the forwarding is deferred to a later stage, so for now the
 * message is only logged.
 */
void VectorOpManager::recordVectorOpError(int errorType, const std::string& errorMessage) {
    std::cerr << "[VectorOpManager][ERROR] " << errorMessage << std::endl;
}

// ====== Kernel Parameter Optimization Functions ======
/**
 * Derives a launch configuration from the device properties and the
 * vector size.
 *
 * @param vectorSize number of elements to process
 * @param blockSize  out: threads per block (by compute capability)
 * @param gridSize   out: number of blocks, capped by SM count and the
 *                   1D grid limit, and at least 1
 *
 * FIXES: null output pointers and a failing cudaGetDeviceProperties are
 * now handled (previously `prop` was read uninitialized on failure),
 * and gridSize can no longer be 0 for tiny/non-positive vector sizes.
 */
__host__ void calculateOptimalKernelParameters(int vectorSize, int* blockSize, int* gridSize) {
    if (!blockSize || !gridSize) return;

    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
        // Device query failed: fall back to safe defaults.
        *blockSize = DEFAULT_BLOCK_SIZE;
        *gridSize = std::min((vectorSize + *blockSize - 1) / *blockSize, MAX_BLOCKS_PER_GRID);
        if (*gridSize < 1) *gridSize = 1;
        return;
    }

    // Pick the block size by compute capability.
    if (prop.major >= 7) {
        // Volta/Turing/Ampere and newer.
        *blockSize = (vectorSize < 1024) ? 128 : 256;
    } else if (prop.major >= 6) {
        // Pascal.
        *blockSize = 256;
    } else {
        // Older architectures.
        *blockSize = 128;
    }

    // Grid size: cover the data, but cap by SM occupancy and grid limit.
    int maxBlocks = prop.multiProcessorCount * MIN_BLOCKS_PER_SM;
    int requiredBlocks = (vectorSize + *blockSize - 1) / *blockSize;
    *gridSize = std::min(requiredBlocks, maxBlocks);
    *gridSize = std::min(*gridSize, MAX_BLOCKS_PER_GRID);
    if (*gridSize < 1) *gridSize = 1;
}

/**
 * Benchmark hook: intended to compare kernel performance for a given
 * (vectorSize, blockSize, operation) configuration.
 *
 * NOTE(review): stub — always returns 0.0 ms and the parameters are
 * currently unused. Implement with cudaEvent timing when this stage is
 * developed.
 */
__host__ double benchmarkKernelParameters(int vectorSize, int blockSize, int operation) {
    // Benchmark logic to be implemented; returns elapsed milliseconds.
    return 0.0;
}