#include "dcu_kernel.h"
#include <iostream>
#include <stdexcept>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include "cpu_kernel.h"

// Kernel: naive matrix multiplication C = A * B.
// A is MxK, B is KxN, C is MxN (all row-major); one thread computes one C element.
__global__ void matrixMultiplyKernel(const float* A, const float* B, float* C, 
                                    int M, int N, int K) {
    // Global coordinates of the C element owned by this thread.
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard: the grid may be over-provisioned beyond the matrix edges.
    if (r >= M || c >= N) {
        return;
    }

    // Dot product of row r of A with column c of B.
    float acc = 0.0f;
    for (int k = 0; k < K; ++k) {
        acc += A[r * K + k] * B[k * N + c];
    }
    C[r * N + c] = acc;
}

// Kernel: tiled matrix multiplication using shared memory.
// C(MxN) = A(MxK) * B(KxN), all row-major, one thread per C element.
// Assumes a square 32x32 thread block: the shared tiles are hard-coded to
// 32x32 and the K-tiling below mixes blockDim.x and blockDim.y, which only
// line up when blockDim.x == blockDim.y (every launch in this file uses dim3(32, 32)).
// __launch_bounds__(1024) tells the compiler the maximum block size (32*32).
__global__ void __launch_bounds__(1024) matrixMultiplySharedKernel(const float* A, const float* B, float* C,
                                         int M, int N, int K) {
    // Per-block tiles of A and B cached in shared memory.
    __shared__ float subA[32][32];
    __shared__ float subB[32][32];
    
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    
    // Global row/column of the C element owned by this thread.
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;
    
    float sum = 0.0f;
    
    // Walk the K dimension one tile at a time: ceil(K / blockDim.x) tiles.
    for (int m = 0; m < (K + blockDim.x - 1) / blockDim.x; ++m) {
        // Stage this thread's element of the A tile (zero-pad when out of range,
        // so the inner dot product needs no bounds checks).
        if (row < M && m * blockDim.x + tx < K) {
            subA[ty][tx] = A[row * K + m * blockDim.x + tx];
        } else {
            subA[ty][tx] = 0.0f;
        }
        
        // Stage this thread's element of the B tile (zero-pad when out of range).
        if (col < N && m * blockDim.y + ty < K) {
            subB[ty][tx] = B[(m * blockDim.y + ty) * N + col];
        } else {
            subB[ty][tx] = 0.0f;
        }
        
        // Barrier: both tiles must be fully written before any thread reads them.
        // Safe here: the loop trip count is uniform across the whole block.
        __syncthreads();
        
        // Partial dot product over this tile.
        for (int k = 0; k < blockDim.x; ++k) {
            sum += subA[ty][k] * subB[k][tx];
        }
        
        // Barrier before the tiles are overwritten by the next iteration.
        __syncthreads();
    }
    
    // Write the result; out-of-range threads still ran the loop (for the
    // barriers) but must not write.
    if (row < M && col < N) {
        C[row * N + col] = sum;
    }
}

// Buffer实现
void Buffer::alloc(int size) {
    hipHostMalloc(&res_warp, size);
    hipMalloc(&d_res_warp, size);
    hipStreamCreate(&stream);
}

// Releases the resources acquired in alloc().
// Frees are deliberately unchecked: cleanup paths must not throw. Members are
// reset afterwards so a second dealloc() becomes a harmless no-op (HIP treats
// freeing/destroying null handles as success) instead of a double free.
void Buffer::dealloc() {
    hipHostFree(res_warp);
    hipFree(d_res_warp);
    hipStreamDestroy(stream);
    res_warp = nullptr;
    d_res_warp = nullptr;
    stream = nullptr;
}

// Definitions of the DCUMemoryPool static members.
// freeBuffers caches released device allocations keyed by byte size;
// mtx guards both freeBuffers and the initialized flag.
std::map<size_t, std::vector<float*>> DCUMemoryPool::freeBuffers;
std::mutex DCUMemoryPool::mtx;
bool DCUMemoryPool::initialized = false;

// DCU内存池实现
void DCUMemoryPool::init() {
    if (!initialized) {
        initialized = true;
    }
}

// Frees every cached device buffer and deactivates the pool.
void DCUMemoryPool::cleanup() {
    std::lock_guard<std::mutex> lock(mtx);
    for (auto& entry : freeBuffers) {
        auto& cached = entry.second;
        for (auto* devicePtr : cached) {
            hipFree(devicePtr);
        }
    }
    freeBuffers.clear();
    initialized = false;
}

// Returns a device buffer of exactly `size` bytes, reusing a cached one when
// available and falling back to hipMalloc otherwise.
// Throws std::runtime_error when the device allocation fails.
float* DCUMemoryPool::allocate(size_t size) {
    std::lock_guard<std::mutex> lock(mtx);

    // Lazy initialization under the lock. The previous unlocked
    // check-then-call-init() sequence raced with concurrent allocate() calls.
    if (!initialized) {
        initialized = true;
    }

    // Reuse a cached buffer of the same size when one is available.
    auto it = freeBuffers.find(size);
    if (it != freeBuffers.end() && !it->second.empty()) {
        float* ptr = it->second.back();
        it->second.pop_back();
        return ptr;
    }

    // Cache miss: allocate fresh device memory.
    float* ptr = nullptr;
    hipError_t error = hipMalloc(&ptr, size);
    if (error != hipSuccess) {
        throw std::runtime_error("Failed to allocate device memory");
    }
    return ptr;
}

// Returns a device buffer to the pool. Buffers beyond the per-size cache cap
// — and any buffer handed back after cleanup() — are freed immediately.
void DCUMemoryPool::release(float* ptr, size_t size) {
    if (ptr == nullptr) {
        return;
    }

    std::lock_guard<std::mutex> lock(mtx);

    // If the pool has been shut down, free the memory directly.
    // (Previously the pointer was silently dropped here, leaking device memory
    // when a DCUMatrix outlived DCUMemoryPool::cleanup().)
    if (!initialized) {
        hipFree(ptr);
        return;
    }

    // Cap cached buffers per size class to bound the pool's footprint.
    static constexpr size_t kMaxCachedPerSize = 5;
    if (freeBuffers[size].size() < kMaxCachedPerSize) {
        freeBuffers[size].push_back(ptr);
    } else {
        hipFree(ptr);
    }
}

// DCUMatrix implementation (integrates DCUMemoryPool).
// Allocates a zero-initialized rows x cols device matrix from the pool.
// Throws std::invalid_argument for non-positive dimensions.
DCUMatrix::DCUMatrix(int rows, int cols)
    : rows(rows), cols(cols), ownsData(true), fromPool(true) {
    if (rows <= 0 || cols <= 0) {
        throw std::invalid_argument("DCUMatrix dimensions must be positive");
    }
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    deviceData = DCUMemoryPool::allocate(size);
    // If zero-initialization throws, the destructor will not run (the object
    // is only partially constructed), so hand the buffer back to the pool
    // explicitly instead of leaking it.
    try {
        HIP_CHECK(hipMemset(deviceData, 0, size));
    } catch (...) {
        DCUMemoryPool::release(deviceData, size);
        throw;
    }
}

// Wraps an existing device allocation without copying it. When takeOwnership
// is true the destructor will hipFree the pointer (it is never pool-managed).
DCUMatrix::DCUMatrix(int rows, int cols, float* externalData, bool takeOwnership)
    : rows(rows), cols(cols), deviceData(externalData), ownsData(takeOwnership), fromPool(false) {
    const bool dimensionsValid = (rows > 0 && cols > 0);
    if (!dimensionsValid) {
        throw std::invalid_argument("DCUMatrix dimensions must be positive");
    }
    if (externalData == nullptr) {
        throw std::invalid_argument("External data pointer cannot be null");
    }
}

// Builds a device copy of a host matrix using pool-backed device memory.
DCUMatrix::DCUMatrix(const Matrix& hostMatrix) 
    : rows(hostMatrix.getRows()), cols(hostMatrix.getCols()), ownsData(true), fromPool(true) {
    if (rows <= 0 || cols <= 0) {
        throw std::invalid_argument("DCUMatrix dimensions must be positive");
    }
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    deviceData = DCUMemoryPool::allocate(size);
    // A failed upload would otherwise leak deviceData: the destructor does not
    // run for a partially constructed object, so release explicitly and rethrow.
    try {
        copyFromHost(hostMatrix);
    } catch (...) {
        DCUMemoryPool::release(deviceData, size);
        throw;
    }
}

// Releases the device buffer: pool-managed memory goes back to the pool,
// owned external memory is freed directly, borrowed memory is left alone.
DCUMatrix::~DCUMatrix() {
    if (!ownsData || deviceData == nullptr) {
        return;
    }
    if (fromPool) {
        const size_t bytes = static_cast<size_t>(rows) * cols * sizeof(float);
        DCUMemoryPool::release(deviceData, bytes);
    } else {
        hipFree(deviceData); // unchecked on purpose: never throw from a destructor
    }
    deviceData = nullptr; // avoid a dangling pointer on the dead object
}

// Asynchronously uploads hostMatrix into this device matrix on `stream`.
// For the copy to be truly asynchronous the host data must be page-locked;
// the caller must keep hostMatrix alive until the stream is synchronized.
void DCUMatrix::copyFromHostAsync(const Matrix& hostMatrix, hipStream_t stream) {
    if (rows != hostMatrix.getRows() || cols != hostMatrix.getCols()) {
        throw std::invalid_argument("Matrix dimensions do not match for copy");
    }
    
    // Widen before multiplying: `rows * cols` in int overflows for very large
    // matrices (the constructors already cast this way; this now matches them).
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    HIP_CHECK(hipMemcpyAsync(deviceData, hostMatrix.getData(), size, 
                            hipMemcpyHostToDevice, stream));
}

// Asynchronously downloads this device matrix into hostMatrix on `stream`.
// NOTE(review): hostMatrix is taken by const reference but its storage is
// written through a const_cast; the signature is kept to match the header,
// but callers must pass a mutable matrix that outlives the stream work.
void DCUMatrix::copyToHostAsync(const Matrix& hostMatrix, hipStream_t stream) const {
    if (rows != hostMatrix.getRows() || cols != hostMatrix.getCols()) {
        throw std::invalid_argument("Matrix dimensions do not match for copy");
    }
    
    // Widen before multiplying: `rows * cols` in int overflows for very large matrices.
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    HIP_CHECK(hipMemcpyAsync(const_cast<float*>(hostMatrix.getData()), deviceData, size, 
                           hipMemcpyDeviceToHost, stream));
}

// Synchronously uploads hostMatrix into this device matrix.
void DCUMatrix::copyFromHost(const Matrix& hostMatrix) {
    if (rows != hostMatrix.getRows() || cols != hostMatrix.getCols()) {
        throw std::invalid_argument("Matrix dimensions do not match for copy");
    }
    
    // Widen before multiplying: `rows * cols` in int overflows for very large matrices.
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    HIP_CHECK(hipMemcpy(deviceData, hostMatrix.getData(), size, hipMemcpyHostToDevice));
}

// Synchronously downloads this device matrix into hostMatrix.
void DCUMatrix::copyToHost(Matrix& hostMatrix) const {
    if (rows != hostMatrix.getRows() || cols != hostMatrix.getCols()) {
        throw std::invalid_argument("Matrix dimensions do not match for copy");
    }
    
    // Widen before multiplying: `rows * cols` in int overflows for very large matrices.
    size_t size = static_cast<size_t>(rows) * cols * sizeof(float);
    HIP_CHECK(hipMemcpy(hostMatrix.getData(), deviceData, size, hipMemcpyDeviceToHost));
}

// DCU矩阵乘法实现
namespace DCUMatrixMultiply {
    // Shared hipBLAS handle, created lazily by initDCU() and reused by every
    // BLAS-based multiply path (handle creation is expensive).
    hipblasHandle_t handle = nullptr;
    
    // ID of the device selected by initDCU() (always 0 in this implementation).
    int currentDevice = 0;
    
    // Stream reserved for asynchronous operations.
    // NOTE(review): nothing in this file assigns or uses it — the multiply
    // paths create local streams instead; candidate for removal.
    hipStream_t stream = nullptr;
    
    // Reports a failed HIP call: logs the location and reason, then throws.
    void checkError(hipError_t error, const char* file, int line) {
        if (error == hipSuccess) {
            return;
        }
        std::cerr << "HIP error at " << file << ":" << line << " - " 
                 << hipGetErrorString(error) << std::endl;
        throw std::runtime_error("HIP API error");
    }
    
    // Reports a failed hipBLAS call: logs the location and the symbolic
    // status name, then throws std::runtime_error.
    void checkBlasError(hipblasStatus_t error, const char* file, int line) {
        if (error == HIPBLAS_STATUS_SUCCESS) {
            return;
        }

        // Map the status code to its symbolic name for the log message.
        const char* statusName = nullptr;
        switch (error) {
            case HIPBLAS_STATUS_NOT_INITIALIZED:  statusName = "HIPBLAS_STATUS_NOT_INITIALIZED";  break;
            case HIPBLAS_STATUS_ALLOC_FAILED:     statusName = "HIPBLAS_STATUS_ALLOC_FAILED";     break;
            case HIPBLAS_STATUS_INVALID_VALUE:    statusName = "HIPBLAS_STATUS_INVALID_VALUE";    break;
            case HIPBLAS_STATUS_MAPPING_ERROR:    statusName = "HIPBLAS_STATUS_MAPPING_ERROR";    break;
            case HIPBLAS_STATUS_EXECUTION_FAILED: statusName = "HIPBLAS_STATUS_EXECUTION_FAILED"; break;
            case HIPBLAS_STATUS_INTERNAL_ERROR:   statusName = "HIPBLAS_STATUS_INTERNAL_ERROR";   break;
            case HIPBLAS_STATUS_NOT_SUPPORTED:    statusName = "HIPBLAS_STATUS_NOT_SUPPORTED";    break;
            default:                              statusName = "Unknown hipBLAS error";           break;
        }

        std::cerr << "hipBLAS error at " << file << ":" << line << " - "
                  << statusName << std::endl;
        throw std::runtime_error("hipBLAS API error");
    }
    
    // Initializes the DCU environment: selects device 0, creates the shared
    // hipBLAS handle (once), and activates the memory pool.
    // Returns false when no HIP-capable device is present; throws on API errors.
    bool initDCU() {
        int deviceCount = 0;
        HIP_CHECK(hipGetDeviceCount(&deviceCount));
        
        if (deviceCount == 0) {
            std::cerr << "No HIP-capable devices found" << std::endl;
            return false;
        }
        
        // Always use device 0.
        HIP_CHECK(hipSetDevice(0));
        currentDevice = 0;
        
        // Create the hipBLAS handle once and reuse it for all later calls.
        if (handle == nullptr) {
            // Previously unchecked: a failed create left a null handle that
            // crashed the first Sgemm call instead of failing here.
            HIPBLAS_CHECK(hipblasCreate(&handle));
            
            // alpha/beta scalars are passed from host memory throughout this file.
            HIPBLAS_CHECK(hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST));
        }
        
        // Activate the device memory pool.
        DCUMemoryPool::init();
        
        return true;
    }
    
    // Tears down the DCU environment: destroys the hipBLAS handle and drains
    // the memory pool. Safe to call even if initDCU() never ran.
    void releaseDCU() {
        if (handle) {
            hipblasDestroy(handle);
            handle = nullptr;
        }
        DCUMemoryPool::cleanup();
    }
    
    // Returns the number of HIP-capable devices visible to the runtime.
    int getDeviceCount() {
        int count = 0;
        HIP_CHECK(hipGetDeviceCount(&count));
        return count;
    }
    
    // 打印DCU设备信息
    void printDeviceInfo(int deviceId) {
        hipDeviceProp_t props;
        HIP_CHECK(hipGetDeviceProperties(&props, deviceId));
        
        std::cout << "Device " << deviceId << ": " << props.name << std::endl;
        std::cout << "  Total memory: " << (props.totalGlobalMem / (1024.0 * 1024.0)) << " MB" << std::endl;
        std::cout << "  Compute capability: " << props.major << "." << props.minor << std::endl;
        std::cout << "  Multi-processor count: " << props.multiProcessorCount << std::endl;
        std::cout << "  Max threads per block: " << props.maxThreadsPerBlock << std::endl;
        std::cout << "  Warp size: " << props.warpSize << std::endl;
    }
    
    // 使用hipBLAS执行矩阵乘法 C = A * B（优化版本）
    void multiplyHipBLAS(const Matrix& A, const Matrix& B, Matrix& C) {
        int M = A.getRows();
        int K = A.getCols();
        int N = B.getCols();
        
        if (B.getRows() != K || C.getRows() != M || C.getCols() != N) {
            throw std::invalid_argument("Matrix dimensions do not match for multiplication");
        }
        
        // 确保DCU环境已初始化
        if (handle == nullptr) {
            if (!initDCU()) {
                throw std::runtime_error("Failed to initialize DCU environment");
            }
        }
        
        // Use the default HIP stream (0 or nullptr)
        hipStream_t stream = 0; 
        
        // 创建页锁定内存以加速数据传输
        float *h_A = nullptr, *h_B = nullptr, *h_C = nullptr; // Add h_C for async copy back
        bool useHostPinned = true; // Keep using pinned memory
        
        if (useHostPinned) {
            // 使用页锁定内存
            HIP_CHECK(hipHostMalloc(&h_A, M * K * sizeof(float), hipHostMallocDefault));
            HIP_CHECK(hipHostMalloc(&h_B, K * N * sizeof(float), hipHostMallocDefault));
            HIP_CHECK(hipHostMalloc(&h_C, M * N * sizeof(float), hipHostMallocDefault)); // Allocate pinned memory for C
            
            // 复制数据到页锁定内存 (Synchronous for simplicity before async ops)
            memcpy(h_A, A.getData(), M * K * sizeof(float));
            memcpy(h_B, B.getData(), K * N * sizeof(float));
        }
        
        // 创建DCU矩阵 (Allocate device memory only)
        DCUMatrix d_A(M, K);
        DCUMatrix d_B(K, N); 
        DCUMatrix d_C(M, N);
        
        // 如果使用页锁定内存，则异步拷贝数据到设备
        if (useHostPinned) {
            HIP_CHECK(hipMemcpyAsync(d_A.getDeviceData(), h_A, M * K * sizeof(float), 
                                   hipMemcpyHostToDevice, stream));
            HIP_CHECK(hipMemcpyAsync(d_B.getDeviceData(), h_B, K * N * sizeof(float), 
                                   hipMemcpyHostToDevice, stream));
        } else {
            // Otherwise, copy directly from host matrix (synchronous)
            d_A.copyFromHost(A);
            d_B.copyFromHost(B);
        }
        
        // 将hipBLAS操作与流关联
        HIPBLAS_CHECK(hipblasSetStream(handle, stream));
        
        // 设置乘法参数
        const float alpha = 1.0f; const float beta = 0.0f;
        
        // --- Core BLAS Call (Row-Major to Column-Major Adaptation) ---
        // Compute C(MxN) = A(MxK) * B(KxN) using C_blas(NxM) = B_blas(NxK) * A_blas(KxM)
        HIPBLAS_CHECK(hipblasSgemm( handle,
                                    HIPBLAS_OP_N, HIPBLAS_OP_N, // Keep OP_N
                                    N, M, K,        // m=N, n=M, k=K
            &alpha,                      
                                    d_B.getDeviceData(), N, // B_blas (NxK col-major) uses B data (KxN row-major), ldb=N
                                    d_A.getDeviceData(), K, // A_blas (KxM col-major) uses A data (MxK row-major), lda=K
            &beta,                       
                                    d_C.getDeviceData(), N // C_blas (NxM col-major) uses C data (MxN row-major), ldc=N
        ));
        // --- End of BLAS Call ---
        
        // 异步复制结果回主机 (Copy to pinned host memory if used)
        if (useHostPinned) {
            HIP_CHECK(hipMemcpyAsync(h_C, d_C.getDeviceData(), M * N * sizeof(float), 
                               hipMemcpyDeviceToHost, stream));
        } else {
             d_C.copyToHost(C); // Synchronous copy if not using pinned memory
        }
        
        // 同步流，确保所有操作完成
        HIP_CHECK(hipStreamSynchronize(stream));

        // If using pinned memory, copy result from pinned host memory to final C matrix
        if (useHostPinned) {
             memcpy(C.getData(), h_C, M * N * sizeof(float));
        }
        
        // 添加设备同步，确保所有DCU操作完成 (Good practice after stream sync)
        HIP_CHECK(hipDeviceSynchronize());
        
        // 释放页锁定内存
        if (useHostPinned) {
            HIP_CHECK(hipHostFree(h_A));
            HIP_CHECK(hipHostFree(h_B));
            HIP_CHECK(hipHostFree(h_C)); // Free pinned C memory
        }
    }
    
    // Large-matrix multiply: tiles C into BLOCK_SIZE x BLOCK_SIZE blocks and
    // accumulates each tile with hipblasSgemm on device memory, round-robining
    // the tile computations over several streams.
    void multiplyLargeMatrix(const Matrix& A, const Matrix& B, Matrix& C) {
        int M = A.getRows();
        int K = A.getCols();
        int N = B.getCols();
        
        if (B.getRows() != K || C.getRows() != M || C.getCols() != N) {
            throw std::invalid_argument("Matrix dimensions do not match for multiplication");
        }
        // Make sure the DCU environment (device + hipBLAS handle) is ready.
        if (!initDCU()) {
            throw std::runtime_error("Failed to initialize DCU environment");
        }

        // 1. Full device copies of A and B, plus a device buffer for C.
        //    The DCUMatrix(Matrix) constructor allocates and uploads.
        DCUMatrix d_A(A);
        DCUMatrix d_B(B);
        DCUMatrix d_C(M, N);

        // 2. d_C starts at zero so every Sgemm can accumulate with beta = 1.
        HIP_CHECK(hipMemset(d_C.getDeviceData(), 0, static_cast<size_t>(M) * N * sizeof(float)));

        // 3. Blocking setup: several streams so independent C tiles overlap.
        const int BLOCK_SIZE = 512;
        const int NUM_STREAMS = 4;
        hipStream_t streams[NUM_STREAMS];
        int createdStreams = 0;

        const float alpha = 1.0f;
        const float beta = 1.0f; // accumulate into the pre-zeroed d_C

        const float* dA_ptr = d_A.getDeviceData();
        const float* dB_ptr = d_B.getDeviceData();
        float* dC_ptr = d_C.getDeviceData();

        try {
            for (int i = 0; i < NUM_STREAMS; ++i) {
                HIP_CHECK(hipStreamCreate(&streams[i]));
                ++createdStreams;
            }

            // 4. For each C tile, accumulate over the K dimension. All K-steps
            // of one tile are issued to the SAME stream, so their accumulation
            // order is preserved; different tiles may overlap across streams.
            int streamIdx = 0;
            std::cout << "Refactored multiplyLargeMatrix using BLOCK_SIZE=" << BLOCK_SIZE << std::endl;
            for (int i = 0; i < M; i += BLOCK_SIZE) {
                int blockM = std::min(BLOCK_SIZE, M - i);
                for (int j = 0; j < N; j += BLOCK_SIZE) {
                    int blockN = std::min(BLOCK_SIZE, N - j);

                    hipStream_t currentStream = streams[streamIdx];
                    streamIdx = (streamIdx + 1) % NUM_STREAMS;
                    HIPBLAS_CHECK(hipblasSetStream(handle, currentStream));

                    // Top-left of this C tile; leading dimension stays the full N.
                    float* dC_block_ptr = dC_ptr + static_cast<size_t>(i) * N + j;

                    for (int k = 0; k < K; k += BLOCK_SIZE) {
                        int blockK = std::min(BLOCK_SIZE, K - k);

                        // Sub-block origins inside the full device matrices;
                        // leading dimensions stay K and N respectively.
                        const float* dA_block_ptr = dA_ptr + static_cast<size_t>(i) * K + k;
                        const float* dB_block_ptr = dB_ptr + static_cast<size_t>(k) * N + j;

                        // Row-major adaptation (see multiplyHipBLAS): swap the
                        // operands and dimensions, keep the original leading dims.
                        HIPBLAS_CHECK(hipblasSgemm(handle,
                                                   HIPBLAS_OP_N, HIPBLAS_OP_N,
                                                   blockN, blockM, blockK,
                                                   &alpha,
                                                   dB_block_ptr, N,
                                                   dA_block_ptr, K,
                                                   &beta, // accumulate (beta = 1.0)
                                                   dC_block_ptr, N
                                                   ));
                    }
                }
            }

            // 5. Wait for every tile, detach the handle from the streams before
            // destroying them (the original left the handle bound to a
            // destroyed stream), then tear the streams down.
            for (int i = 0; i < NUM_STREAMS; ++i) {
                HIP_CHECK(hipStreamSynchronize(streams[i]));
            }
            HIPBLAS_CHECK(hipblasSetStream(handle, 0));
            for (int i = 0; i < NUM_STREAMS; ++i) {
                HIP_CHECK(hipStreamDestroy(streams[i]));
            }
        } catch (...) {
            // Best-effort cleanup on failure (the original leaked the streams).
            hipblasSetStream(handle, 0);
            for (int i = 0; i < createdStreams; ++i) {
                hipStreamDestroy(streams[i]);
            }
            throw;
        }

        // Copy the fully accumulated result back to the host.
        d_C.copyToHost(C);

        HIP_CHECK(hipDeviceSynchronize());
        // d_A/d_B/d_C release their device memory back to the pool on scope exit.
    }
    
    // 使用DCU执行矩阵乘法
    void multiply(const Matrix& A, const Matrix& B, Matrix& C) {
        int M = A.getRows();
        int K = A.getCols();
        int N = B.getCols();
        
        size_t matrixSize = static_cast<size_t>(M) * N * K;
        bool isLargeMatrix = M >= 4096 && N >= 4096 && K >= 4096;
        bool isPotentiallyMedium = (M >= 1024 || N >= 1024 || K >= 1024);
        // Redefine small matrix check based on dimensions, not just total size
        bool useSharedCandidate = (M <= 512 && N <= 512 && K <= 512);
        // Keep size threshold for general small cases if not meeting dimension criteria
        bool isGenerallySmall = matrixSize < 1000000; 

        // 根据矩阵大小选择最优算法 (Reordered Logic)
        if (isLargeMatrix) {
            // 1. Very Large matrices: Custom blocking
            std::cout << "使用分块多流算法处理大矩阵 " << M << "x" << N << "..." << std::endl;
            multiplyLargeMatrix(A, B, C);
        } 
        else if (useSharedCandidate) {
            // 2. Explicitly small square-ish matrices: Shared memory kernel
             std::cout << "使用共享内存优化算法处理小矩阵 (维度判定) " << M << "x" << N << "..." << std::endl;
             multiplyShared(A, B, C);
        }
        else if (isPotentiallyMedium) {
            // 3. Medium matrices (at least one large dimension, not tiny total size): Pipelined
            // Note: Pipelined is expected to handle cases where !isGenerallySmall implicitly
             std::cout << "使用流水线优化算法处理中等矩阵 " << M << "x" << N << "..." << std::endl;
             multiplyPipelined(A, B, C);
        }
        else if (isGenerallySmall) {
             // 4. Generally small matrices (by total size, didn't meet shared candidate): Shared memory kernel might still be okay?
             // Let's try Shared first before falling back to BLAS.
             std::cout << "使用共享内存优化算法处理小矩阵 (大小判定) " << M << "x" << N << "..." << std::endl;
             multiplyShared(A, B, C);
        }
        else {
             // 5. Fallback: hipBLAS for everything else (e.g., not large, not potentially medium, not small)
            std::cout << "使用hipBLAS库处理矩阵 (回退情况) " << M << "x" << N << "..." << std::endl;
            multiplyHipBLAS(A, B, C);
        }
    }
    
    // 使用共享内存的核函数实现矩阵乘法
    void multiplyShared(const Matrix& A, const Matrix& B, Matrix& C) {
        int M = A.getRows();
        int K = A.getCols();
        int N = B.getCols();
        
        if (B.getRows() != K || C.getRows() != M || C.getCols() != N) {
            throw std::invalid_argument("Matrix dimensions do not match for multiplication");
        }
        
        // 确保DCU环境已初始化
        if (!initDCU()) {
            throw std::runtime_error("Failed to initialize DCU environment");
        }
        
        // Use the default HIP stream (0 or nullptr)
        hipStream_t stream = 0;
        
        // 为DCU上的矩阵分配内存
        DCUMatrix d_A(A);
        DCUMatrix d_B(B);
        DCUMatrix d_C(M, N);
        
        // 计算核函数启动参数，使用32x32的线程块
        dim3 blockSize(32, 32);
        dim3 gridSize((N + blockSize.x - 1) / blockSize.x, 
                     (M + blockSize.y - 1) / blockSize.y);
        
        // 启动核函数
        matrixMultiplySharedKernel<<<gridSize, blockSize, 0, stream>>>(
            d_A.getDeviceData(), 
            d_B.getDeviceData(), 
            d_C.getDeviceData(), 
            M, N, K
        );
        
        // 检查核函数启动错误
        HIP_CHECK(hipGetLastError());
        
        // 将结果复制回主机
        d_C.copyToHost(C);
        
        // 同步流确保计算完成
        HIP_CHECK(hipStreamSynchronize(stream));
        
        // 添加设备同步，确保所有DCU操作完成
        HIP_CHECK(hipDeviceSynchronize());
    }
    
    // Pipelined (double-buffered) matrix multiply.
    // Splits the rows of A into numIntervals slices. Each slice's product with
    // B is computed into one of two alternating Buffers (each with its own
    // pinned host buffer, device buffer, and stream); while slice i computes
    // and copies asynchronously, the finished result of slice i-1 is drained
    // from pinned host memory into C.
    void multiplyPipelined(const Matrix& A, const Matrix& B, Matrix& C) {
        int M = A.getRows();
        int K = A.getCols();
        int N = B.getCols();
        
        if (B.getRows() != K || C.getRows() != M || C.getCols() != N) {
            throw std::invalid_argument("Matrix dimensions do not match for multiplication");
        }
        
        // Make sure the DCU environment is initialized.
            if (!initDCU()) {
                throw std::runtime_error("Failed to initialize DCU environment");
            }
        
        // Split the row range into intervals; two buffers alternate (double buffering).
        const int numIntervals = 4;
        double rangeM = M / static_cast<double>(numIntervals);
        
        // The two buffers used alternately.
        Buffer buffers[2];
        // Size each buffer for the largest possible interval (uneven splits).
        int maxRowsPerInterval = (M + numIntervals - 1) / numIntervals;
        size_t bufferSizeInBytes = static_cast<size_t>(maxRowsPerInterval) * N * sizeof(float);
        buffers[0].alloc(bufferSizeInBytes);
        buffers[1].alloc(bufferSizeInBytes);
        
        // Clear the output matrix before interval results are written into it.
        C.fill(0.0f);
        
        // Optimization: upload A and B to the device once, up front.
        DCUMatrix d_A(A);
        DCUMatrix d_B(B);

        // Process each interval, overlapping compute/copy with result draining.
        int currBuf = 0;
        for (int i = 0; i < numIntervals; i++) {
            int startRow = static_cast<int>(i * rangeM);
            int endRow = static_cast<int>((i + 1) * rangeM);
            if (i == numIntervals - 1) endRow = M; // last interval takes all remaining rows
            
            int currentIntervalRows = endRow - startRow;
            if (currentIntervalRows <= 0) continue; // Skip empty intervals

            // (Optimization history: the host-side sub-matrix copy below was
            // replaced by indexing directly into the device copy of A.)
            // Matrix subA(currentIntervalRows, K);
            // for (int r = 0; r < currentIntervalRows; r++) {
            //     for (int c = 0; c < K; c++) {
            //         subA.set(r, c, A.get(startRow + r, c));
            //     }
            // }
            // DCUMatrix d_subA(subA);

            // Pointer to this interval's first row inside the device copy of A.
            const float* d_A_sub_ptr = d_A.getDeviceData() + static_cast<size_t>(startRow) * K;

            // Buffer (and stream) assigned to this interval.
            Buffer& currentBuffer = buffers[currBuf];

            // NOTE(review): these launch parameters are dead code — both paths
            // below declare or compute their own; kept here untouched.
            dim3 blockSize(32, 32);
            dim3 gridSize((N + blockSize.x - 1) / blockSize.x,
                         (currentIntervalRows + blockSize.y - 1) / blockSize.y);

            // ---- Start of Core Computation for Interval ----
            // Small intervals use the shared-memory kernel; larger ones use hipBLAS.
            const int sharedKernelThreshold = 1024; // Use shared kernel for M <= 1024
            if (currentIntervalRows <= sharedKernelThreshold) {
                // Shared-memory kernel path (these dims shadow the dead ones above).
                dim3 blockSize(32, 32);
                dim3 gridSize((N + blockSize.x - 1) / blockSize.x,
                             (currentIntervalRows + blockSize.y - 1) / blockSize.y);

                matrixMultiplySharedKernel<<<gridSize, blockSize, 0, currentBuffer.stream>>>(
                    d_A_sub_ptr,             // Use sub-block pointer
                    d_B.getDeviceData(),
                    currentBuffer.d_res_warp, // Result buffer for this interval
                    currentIntervalRows, N, K
                );
                 // Ensure kernel launch is successful
                HIP_CHECK(hipGetLastError());
            } else {
                // hipBLAS path for larger intervals.
                const float alpha = 1.0f;
                const float beta = 0.0f; // Target buffer d_res_warp should be overwritten

                // Associate BLAS call with the current stream
                HIPBLAS_CHECK(hipblasSetStream(handle, currentBuffer.stream));

                // Row-major adaptation (see multiplyHipBLAS):
                // C_interval(rows x N) = A_interval(rows x K) * B(K x N) computed
                // as C_blas(N x rows) = B_blas(N x K) * A_blas(K x rows).
                HIPBLAS_CHECK(hipblasSgemm( handle,
                                            HIPBLAS_OP_N, HIPBLAS_OP_N, // Keep OP_N
                                            N, currentIntervalRows, K, // m=N, n=M_interval, k=K
                                            &alpha,
                                            d_B.getDeviceData(), N, // Full B matrix data, leading dimension = N
                                            d_A_sub_ptr, K,         // A interval data, leading dimension = K
                                            &beta,                  // Overwrite (beta=0.0)
                                            currentBuffer.d_res_warp, N // C interval buffer, leading dimension = N
                                            ));
            }
            // ---- End of Core Computation for Interval ----

            // Queue the async device-to-host copy of this interval's result
            // (same stream, so it is ordered after the compute above).
            size_t currentCopySizeInBytes = static_cast<size_t>(currentIntervalRows) * N * sizeof(float);
            HIP_CHECK(hipMemcpyAsync(
                currentBuffer.res_warp,
                currentBuffer.d_res_warp,
                currentCopySizeInBytes,
                hipMemcpyDeviceToHost,
                currentBuffer.stream
            ));

            // While this interval runs, drain the previous interval's result.
            if (i > 0) {
                Buffer& prevBuffer = buffers[1 - currBuf];
                int prevStartRow = static_cast<int>((i - 1) * rangeM);
                int prevEndRow = static_cast<int>(i * rangeM);
                int prevIntervalRows = prevEndRow - prevStartRow;

                if (prevIntervalRows > 0) {
                    // Wait for the previous buffer's compute + copy to finish.
                    HIP_CHECK(hipStreamSynchronize(prevBuffer.stream));

                    // Scatter the pinned-host rows into the output matrix.
                    for (int r = 0; r < prevIntervalRows; r++) {
                        for (int c = 0; c < N; c++) {
                            C.set(prevStartRow + r, c, prevBuffer.res_warp[static_cast<size_t>(r) * N + c]); // size_t index avoids int overflow
                        }
                    }
                }
            }

            // Swap buffers for the next interval.
            currBuf = 1 - currBuf;
        }
        
        // Drain the final interval (its buffer was just swapped away from currBuf).
        Buffer& lastBuffer = buffers[1 - currBuf];
        int lastStartRow = static_cast<int>((numIntervals - 1) * rangeM);
        int lastIntervalRows = M - lastStartRow;

        if (lastIntervalRows > 0) {
            HIP_CHECK(hipStreamSynchronize(lastBuffer.stream));

            // Copy the last interval's rows into the output matrix.
            for (int r = 0; r < lastIntervalRows; r++) {
                for (int c = 0; c < N; c++) {
                     C.set(lastStartRow + r, c, lastBuffer.res_warp[static_cast<size_t>(r) * N + c]); // size_t index avoids int overflow
                }
            }
        }

        // Release the pinned/device buffers and their streams.
        buffers[0].dealloc();
        buffers[1].dealloc();

        // d_A and d_B are destroyed on scope exit, returning memory to the pool.

        // Final device-wide sync to make sure all DCU work has completed.
        HIP_CHECK(hipDeviceSynchronize());
    }
}

// DCU matrix-multiply entry point.
// Validates dimensions, delegates to the size-based dispatcher in
// DCUMatrixMultiply, and falls back to the optimized CPU implementation when
// the DCU path fails for any reason.
// Throws std::invalid_argument when the matrix dimensions are incompatible.
void dcuMatrixMultiply(const Matrix& A, const Matrix& B, Matrix& C) {
    int M = A.getRows();
    int N = B.getCols();
    int K = A.getCols();
    
    // Check the matrix dimensions before touching the device.
    if (B.getRows() != K || C.getRows() != M || C.getCols() != N) {
        throw std::invalid_argument("矩阵维度不匹配，无法执行乘法运算");
    }
    
    // Query the active device (also verifies the HIP runtime is usable).
    int deviceId = 0;
    HIP_CHECK(hipGetDevice(&deviceId));
    
    try {
        // Make sure the DCU environment is initialized.
        if (DCUMatrixMultiply::handle == nullptr) {
            if (!DCUMatrixMultiply::initDCU()) {
                throw std::runtime_error("DCU环境初始化失败");
            }
        }
        
        // The algorithm-selection logic that used to be duplicated here is
        // kept in one place: DCUMatrixMultiply::multiply applies the identical
        // dispatch (and prints the same progress messages).
        DCUMatrixMultiply::multiply(A, B, C);
    }
    catch (const std::exception& e) {
        std::cerr << "DCU矩阵乘法失败: " << e.what() << std::endl;
        // On failure, fall back to the optimized CPU implementation.
        std::cerr << "回退到优化的CPU实现" << std::endl;
        matrixMultiplyOptimized(A, B, C);
        // Deliberately not rethrowing: the fallback produced a valid result.
    }
}