#ifndef MATRIXOPERATIONSGPU_H
#define MATRIXOPERATIONSGPU_H

#include <chrono>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include "matrix_operations.h"

#define TILE_WIDTH 16

// Tiled matrix multiplication on the GPU: C = A * B.
//
// A is rowsA x colsA, B is colsA x colsB, C is rowsA x colsB, all flat
// row-major float buffers in global memory.
//
// Launch preconditions:
//   - blockDim must be exactly (TILE_WIDTH, TILE_WIDTH); the shared-memory
//     staging tiles below are sized to that and the indexing relies on it.
//   - grid must cover C: gridDim.x >= ceil(colsB/TILE_WIDTH),
//     gridDim.y >= ceil(rowsA/TILE_WIDTH).
// Out-of-range threads still participate in tile loads (writing zeros) so
// every __syncthreads() is reached by the whole block.
__global__ void matrixMultiplyOptimizedKernel(float *A, float *B, float *C, int rowsA, int colsA, int colsB) {
    // Per-block staging tiles; one tile of A and one of B live in shared
    // memory for each step along the shared dimension.
    __shared__ float s_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float s_B[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x, ty = threadIdx.y;
    // Output element owned by this thread (TILE_WIDTH == blockDim by contract).
    int row = blockIdx.y * TILE_WIDTH + ty;
    int col = blockIdx.x * TILE_WIDTH + tx;
    float acc = 0.0f;

    // March tile-by-tile along the shared dimension. Ceil-div is exact and
    // yields zero iterations when colsA == 0 (the old (colsA-1)/W+1 form ran
    // one spurious all-zero tile in that case).
    int numTiles = (colsA + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int m = 0; m < numTiles; ++m) {
        // Stage the tiles, zero-padding out-of-range elements so the inner
        // product below needs no bounds checks.
        int aCol = m * TILE_WIDTH + tx;
        int bRow = m * TILE_WIDTH + ty;
        s_A[ty][tx] = (row < rowsA && aCol < colsA) ? A[row * colsA + aCol] : 0.0f;
        s_B[ty][tx] = (bRow < colsA && col < colsB) ? B[bRow * colsB + col] : 0.0f;

        __syncthreads();  // tiles fully written before any thread reads them
        #pragma unroll
        for (int k = 0; k < TILE_WIDTH; ++k)
            acc += s_A[ty][k] * s_B[k][tx];
        __syncthreads();  // all reads done before the next iteration overwrites
    }

    if (row < rowsA && col < colsB)
        C[row * colsB + col] = acc;
}


// GPU-backed matrix operations. Matrices are row-major
// vector<vector<float>>; only multiply() actually runs on the device,
// the element-wise helpers run on the host.
class MatrixOperationsGPU : public MatrixOperations {
public:

    // Element-wise (Hadamard) multiplication of two equally-sized matrices.
    // Throws std::invalid_argument when the shapes differ.
    std::vector<std::vector<float>> elementwiseMultiply(const std::vector<std::vector<float>>& matrix1, const std::vector<std::vector<float>>& matrix2) {
        // Validate shapes up front (mirrors the check subtract() performs);
        // the original read out of bounds on mismatched inputs.
        if (matrix1.size() != matrix2.size() ||
            (!matrix1.empty() && matrix1[0].size() != matrix2[0].size())) {
            throw std::invalid_argument("Matrices dimensions do not match for elementwise multiplication.");
        }
        if (matrix1.empty()) return {};
        std::vector<std::vector<float>> result(matrix1.size(), std::vector<float>(matrix1[0].size()));
        for (size_t i = 0; i < matrix1.size(); ++i) {
            for (size_t j = 0; j < matrix1[i].size(); ++j) {
                result[i][j] = matrix1[i][j] * matrix2[i][j];
            }
        }
        return result;
    }

    // Matrix multiplication C = A * B using the tiled CUDA kernel.
    // matA is rowsA x colsA; matB must be colsA x colsB.
    // Throws std::invalid_argument on a shape mismatch or empty input, and
    // std::runtime_error when a CUDA runtime call fails (the original
    // ignored every CUDA error and could silently return garbage).
    std::vector<std::vector<float>> multiply(const std::vector<std::vector<float>>& matA, const std::vector<std::vector<float>>& matB) {
        if (matA.empty() || matB.empty())
            throw std::invalid_argument("Matrix dimensions do not match for multiplication");
        size_t rowsA = matA.size(), colsA = matA[0].size();
        size_t rowsB = matB.size(), colsB = matB[0].size();
        if (colsA != rowsB) throw std::invalid_argument("Matrix dimensions do not match for multiplication");

        // Flatten to contiguous row-major buffers for the device.
        std::vector<float> flatA, flatB;
        flatA.reserve(rowsA * colsA);
        flatB.reserve(rowsB * colsB);
        for (const auto& row : matA) flatA.insert(flatA.end(), row.begin(), row.end());
        for (const auto& row : matB) flatB.insert(flatB.end(), row.begin(), row.end());

        std::vector<float> flatResult(rowsA * colsB);
        float *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
        try {
            checkCuda(cudaMalloc(&d_a, rowsA * colsA * sizeof(float)), "cudaMalloc A");
            checkCuda(cudaMalloc(&d_b, rowsB * colsB * sizeof(float)), "cudaMalloc B");
            checkCuda(cudaMalloc(&d_c, rowsA * colsB * sizeof(float)), "cudaMalloc C");
            checkCuda(cudaMemcpy(d_a, flatA.data(), rowsA * colsA * sizeof(float), cudaMemcpyHostToDevice), "copy A to device");
            checkCuda(cudaMemcpy(d_b, flatB.data(), rowsB * colsB * sizeof(float), cudaMemcpyHostToDevice), "copy B to device");

            // One thread per output element; TILE_WIDTH x TILE_WIDTH blocks
            // are what the kernel's shared-memory tiles require.
            dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
            dim3 blocksPerGrid((colsB + TILE_WIDTH - 1) / TILE_WIDTH,
                               (rowsA + TILE_WIDTH - 1) / TILE_WIDTH);
            matrixMultiplyOptimizedKernel<<<blocksPerGrid, threadsPerBlock>>>(
                d_a, d_b, d_c,
                static_cast<int>(rowsA), static_cast<int>(colsA), static_cast<int>(colsB));
            checkCuda(cudaGetLastError(), "kernel launch");

            // Blocking copy: also synchronizes with the kernel and surfaces
            // any asynchronous execution error, so no explicit
            // cudaDeviceSynchronize() is needed.
            checkCuda(cudaMemcpy(flatResult.data(), d_c, rowsA * colsB * sizeof(float), cudaMemcpyDeviceToHost), "copy C to host");
        } catch (...) {
            // Never leak device memory, even on failure.
            cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
            throw;
        }
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);

        // Reshape the flat result back into a 2D vector.
        std::vector<std::vector<float>> result(rowsA, std::vector<float>(colsB, 0.0f));
        for (size_t i = 0; i < rowsA; ++i)
            for (size_t j = 0; j < colsB; ++j)
                result[i][j] = flatResult[i * colsB + j];
        return result;
    }

    // Matrix transpose (host-side).
    std::vector<std::vector<float>> transpose(const std::vector<std::vector<float>>& mat) override {
        if (mat.empty()) return {};  // avoid mat[0] on an empty matrix
        size_t rows = mat.size();
        size_t cols = mat[0].size();
        std::vector<std::vector<float>> result(cols, std::vector<float>(rows));
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                result[j][i] = mat[i][j];
            }
        }
        return result;
    }

    // Element-wise subtraction: result = matrixA - matrixB.
    // Throws std::invalid_argument when the shapes differ.
    std::vector<std::vector<float>> subtract(const std::vector<std::vector<float>>& matrixA, const std::vector<std::vector<float>>& matrixB) {
        // The empty() guard avoids the original's matrixA[0] read when both
        // matrices have zero rows.
        if (matrixA.size() != matrixB.size() ||
            (!matrixA.empty() && matrixA[0].size() != matrixB[0].size())) {
            throw std::invalid_argument("Matrices dimensions do not match for subtraction.");
        }
        if (matrixA.empty()) return {};
        size_t numRows = matrixA.size();
        size_t numCols = matrixA[0].size();
        std::vector<std::vector<float>> result(numRows, std::vector<float>(numCols, 0.0f));
        for (size_t i = 0; i < numRows; ++i) {
            for (size_t j = 0; j < numCols; ++j) {
                result[i][j] = matrixA[i][j] - matrixB[i][j];
            }
        }
        return result;
    }

private:
    // Map a failed CUDA runtime call to a C++ exception carrying the
    // runtime's own error description.
    static void checkCuda(cudaError_t status, const char* what) {
        if (status != cudaSuccess) {
            throw std::runtime_error(std::string(what) + ": " + cudaGetErrorString(status));
        }
    }
};
#endif // MATRIXOPERATIONSGPU_H
