#ifndef MATRIXOPERATIONSWMMA_CUH
#define MATRIXOPERATIONSWMMA_CUH

#include <stdexcept>
#include "matrix_operations.h"
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <mma.h>
#include <algorithm>
#include <c++/11/bits/algorithmfwd.h>
#include <iostream>
#include <iomanip>
#include <chrono>
using namespace nvcuda;

#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16
#define WMMA_ALIGN 16
// Only supports matrices tiled in 16x16 WMMA fragments; every dimension must
// already be padded to a multiple of 16 before launch.
extern "C"

    // Tensor-core GEMM: D = alpha * A * B + beta * C.  (Requires SM70+.)
    //   A: m_ld x k_ld, half, row-major  (lda = k_ld)
    //   B: k_ld x n_ld, half, col-major  (ldb = k_ld)
    //   C, D: m_ld x n_ld, float, row-major (ldc = n_ld)
    // Launch expectation: blockDim.x is a multiple of warpSize; each warp owns
    // one 16x16 output tile addressed by (warpM, warpN).
    __global__ void simple_wmma_gemmv1(half *a, half *b, float *c, float *d, int m_ld,
                                    int n_ld, int k_ld, float alpha, float beta) {
        // Leading dimensions. Packed with no transpositions.
        int lda = k_ld;
        int ldb = k_ld;
        int ldc = n_ld;

        // Tile using a 2D grid: warpM indexes 16-row tiles of the output,
        // warpN indexes 16-column tiles.
        int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
        int warpN = (blockIdx.y * blockDim.y + threadIdx.y);

        // Declare the fragments
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> a_frag;
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;

        // Accumulator is zeroed once; the k-loop below accumulates into it.
        wmma::fill_fragment(acc_frag, 0.0f);

        // Loop over k
        for (int i = 0; i < k_ld; i += WMMA_K) {
            int aCol = i;
            int aRow = warpM * WMMA_M;
            int bCol = warpN * WMMA_N;
            int bRow = i;
            // aCol/bRow (A's column, B's row) advance together by WMMA_K;
            // aRow (A's row) steps by WMMA_M; bCol (B's column) steps by WMMA_N.
            // Bounds checking
            if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
            // Load the inputs
            wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
            wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);

            // Perform the matrix multiplication
            wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
            }
        }

        // Load in the current value of c, scale it by beta, and add this our result
        // scaled by alpha
        int cCol = warpN * WMMA_N;
        int cRow = warpM * WMMA_M;

        if (cRow < m_ld && cCol < n_ld) {
            wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc,
                                wmma::mem_row_major);

            for (int i = 0; i < c_frag.num_elements; i++) {
            c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
            }

            // Store the output
            wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc,
                                    wmma::mem_row_major);
        }
    }


    // Convert a rows x cols row-major float matrix to half precision, one
    // thread per element. When colMajor is true the output is written in
    // column-major order (as required for the GEMM's matrix_b operand);
    // otherwise the row-major layout is preserved.
    __global__ void convertFloatToHalfKernel(const float *input, half *output, int rows, int cols, bool colMajor) {
        const int linear = blockIdx.x * blockDim.x + threadIdx.x;
        if (linear >= rows * cols) return;  // grid tail guard

        const int row = linear / cols;
        const int col = linear - row * cols;
        // Row-major destination index equals the linear source index.
        const int dst = colMajor ? (col * rows + row) : linear;
        output[dst] = __float2half(input[linear]);
    }
  

    // Tensor-core GEMM: D = A * B (no alpha/beta scaling).  (Requires SM70+.)
    //   A: m_ld x k_ld, half, row-major  (lda = k_ld)
    //   B: k_ld x n_ld, half, col-major  (ldb = k_ld)
    //   D: m_ld x n_ld, float, row-major (ldc = n_ld)
    // All dimensions must be multiples of 16. Each warp computes one 16x16
    // output tile addressed by (warpM, warpN); blockDim.x must be a multiple
    // of warpSize.
    __global__ void simple_wmma_gemm(half *a, half *b, float *d, int m_ld, int n_ld, int k_ld) {
        using namespace nvcuda;

        // Leading dimensions (packed, no transposition).
        int lda = k_ld;
        int ldb = k_ld;
        int ldc = n_ld;

        // One warp per 16x16 output tile.
        int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
        int warpN = (blockIdx.y * blockDim.y + threadIdx.y);

        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> a_frag;
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
        wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;

        // BUG FIX: the accumulator must be zeroed exactly once, BEFORE the
        // k-loop. The previous version called fill_fragment inside the loop,
        // discarding every partial product except the final k-tile's, which
        // produced wrong results for any k_ld > WMMA_K.
        wmma::fill_fragment(acc_frag, 0.0f);

        int aRow = warpM * WMMA_M;
        int bCol = warpN * WMMA_N;

        // Warps outside the padded matrix have no tile to compute. Safe to
        // return early: all sync below is warp-scoped, not block-scoped.
        if (aRow >= m_ld || bCol >= n_ld) return;

        // Walk the shared k dimension one 16-wide tile at a time.
        for (int i = 0; i < k_ld; i += WMMA_K) {
            // A tile at (aRow, i); B tile at (i, bCol) in col-major storage.
            wmma::load_matrix_sync(a_frag, a + aRow * lda + i, lda);
            wmma::load_matrix_sync(b_frag, b + i + bCol * ldb, ldb);
            wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
        }

        // Store the accumulated tile directly. The old code first loaded the
        // (uninitialized) contents of d into a second fragment only to
        // overwrite every element — a wasted global read.
        wmma::store_matrix_sync(d + aRow * ldc + bCol, acc_frag, ldc, wmma::mem_row_major);
    }


    // Wraps a CUDA runtime call; reports the call site on failure.
    #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

    // Print a diagnostic to stderr when a CUDA call fails and, unless
    // abort == false, terminate the process with the error code.
    inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
        if (code == cudaSuccess) return;  // fast path: nothing to report
        std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;
        if (abort) exit(code);
    }

class MatrixOperationsWMMA : public MatrixOperations {
public:

    // Pad a 2D matrix with zeros to targetRow x targetCol and flatten it into
    // a row-major 1D array. targetRow/targetCol must each be >= the input's
    // corresponding dimension.
    std::vector<float> padAndFlatten(const std::vector<std::vector<float>>& matrix, int targetRow, int targetCol) {
        std::vector<float> flatMatrix(targetRow * targetCol, 0.0f);
        for (size_t i = 0; i < matrix.size(); ++i) {
            for (size_t j = 0; j < matrix[i].size(); ++j) {
                flatMatrix[i * targetCol + j] = matrix[i][j];
            }
        }
        return flatMatrix;
    }

    // V1: float -> half precision conversion is performed on the GPU.
    // Computes matA * matB with the WMMA tensor-core kernel. Inputs are zero
    // padded up to multiples of 16; the result is cropped back to m x n.
    // Throws std::runtime_error on empty or dimension-mismatched inputs.
    std::vector<std::vector<float>> multiply(const std::vector<std::vector<float>>& matA,
                                            const std::vector<std::vector<float>>& matB) {
        validateOperands(matA, matB);
        int m = static_cast<int>(matA.size());
        int k = static_cast<int>(matA[0].size());
        int n = static_cast<int>(matB[0].size());

        // Round every dimension up to a multiple of 16 (the WMMA tile size).
        int paddedM = (m + 15) / 16 * 16;
        int paddedK = (k + 15) / 16 * 16;
        int paddedN = (n + 15) / 16 * 16;

        std::vector<float> aFlat = padAndFlatten(matA, paddedM, paddedK);
        std::vector<float> bFlat = padAndFlatten(matB, paddedK, paddedN);

        // Element counts as size_t so byte computations cannot overflow int.
        size_t aCount = static_cast<size_t>(paddedM) * paddedK;
        size_t bCount = static_cast<size_t>(paddedK) * paddedN;
        size_t dCount = static_cast<size_t>(paddedM) * paddedN;

        half *a, *b;
        float *d;
        gpuErrchk(cudaMalloc(&a, aCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&b, bCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&d, dCount * sizeof(float)));
        // Zero D so the result buffer never exposes stale device memory.
        gpuErrchk(cudaMemset(d, 0, dCount * sizeof(float)));

        // Temporary float staging buffers on the GPU.
        float *temp_a, *temp_b;
        gpuErrchk(cudaMalloc(&temp_a, aCount * sizeof(float)));
        gpuErrchk(cudaMalloc(&temp_b, bCount * sizeof(float)));
        gpuErrchk(cudaMemcpy(temp_a, aFlat.data(), aCount * sizeof(float), cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(temp_b, bFlat.data(), bCount * sizeof(float), cudaMemcpyHostToDevice));

        // Convert to half on the GPU. A stays row-major; B is written
        // column-major because the GEMM kernel loads matrix_b as col_major.
        int threadsPerBlock = 256;
        int blocksPerGridA = static_cast<int>((aCount + threadsPerBlock - 1) / threadsPerBlock);
        convertFloatToHalfKernel<<<blocksPerGridA, threadsPerBlock>>>(temp_a, a, paddedM, paddedK, false);
        int blocksPerGridB = static_cast<int>((bCount + threadsPerBlock - 1) / threadsPerBlock);
        convertFloatToHalfKernel<<<blocksPerGridB, threadsPerBlock>>>(temp_b, b, paddedK, paddedN, true);
        gpuErrchk(cudaGetLastError());
        gpuErrchk(cudaDeviceSynchronize());

        // Staging buffers are only needed until the conversions finish.
        gpuErrchk(cudaFree(temp_a));
        gpuErrchk(cudaFree(temp_b));

        launchGemm(a, b, d, paddedM, paddedN, paddedK, 0);
        gpuErrchk(cudaDeviceSynchronize());

        std::vector<float> hostD(dCount);
        gpuErrchk(cudaMemcpy(hostD.data(), d, dCount * sizeof(float), cudaMemcpyDeviceToHost));

        gpuErrchk(cudaFree(a));
        gpuErrchk(cudaFree(b));
        gpuErrchk(cudaFree(d));

        return cropResult(hostD, m, n, paddedN);
    }

    // V2: uses two CUDA streams — host->device staging copies on one, the
    // conversion kernels and the GEMM on the other, ordered with an event.
    // Same contract as multiply().
    std::vector<std::vector<float>> multiplyV2(const std::vector<std::vector<float>>& matA,
                                            const std::vector<std::vector<float>>& matB) {
        validateOperands(matA, matB);
        int m = static_cast<int>(matA.size());
        int k = static_cast<int>(matA[0].size());
        int n = static_cast<int>(matB[0].size());

        int paddedM = (m + 15) / 16 * 16;
        int paddedK = (k + 15) / 16 * 16;
        int paddedN = (n + 15) / 16 * 16;

        std::vector<float> aFlat = padAndFlatten(matA, paddedM, paddedK);
        std::vector<float> bFlat = padAndFlatten(matB, paddedK, paddedN);

        size_t aCount = static_cast<size_t>(paddedM) * paddedK;
        size_t bCount = static_cast<size_t>(paddedK) * paddedN;
        size_t dCount = static_cast<size_t>(paddedM) * paddedN;

        half *a, *b;
        float *d;
        gpuErrchk(cudaMalloc(&a, aCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&b, bCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&d, dCount * sizeof(float)));

        cudaStream_t streamForCopy, streamForCompute;
        gpuErrchk(cudaStreamCreate(&streamForCopy));
        gpuErrchk(cudaStreamCreate(&streamForCompute));

        float *temp_a, *temp_b;
        gpuErrchk(cudaMalloc(&temp_a, aCount * sizeof(float)));
        gpuErrchk(cudaMalloc(&temp_b, bCount * sizeof(float)));
        gpuErrchk(cudaMemcpyAsync(temp_a, aFlat.data(), aCount * sizeof(float), cudaMemcpyHostToDevice, streamForCopy));
        gpuErrchk(cudaMemcpyAsync(temp_b, bFlat.data(), bCount * sizeof(float), cudaMemcpyHostToDevice, streamForCopy));
        gpuErrchk(cudaMemsetAsync(d, 0, dCount * sizeof(float), streamForCompute));

        // BUG FIX: the conversion kernels consume temp_a/temp_b, which are
        // filled on a DIFFERENT stream. Previously the kernels could race the
        // copies; an event now forces the compute stream to wait for them.
        cudaEvent_t copiesDone;
        gpuErrchk(cudaEventCreate(&copiesDone));
        gpuErrchk(cudaEventRecord(copiesDone, streamForCopy));
        gpuErrchk(cudaStreamWaitEvent(streamForCompute, copiesDone, 0));

        int threadsPerBlock = 256;
        int blocksPerGridA = static_cast<int>((aCount + threadsPerBlock - 1) / threadsPerBlock);
        int blocksPerGridB = static_cast<int>((bCount + threadsPerBlock - 1) / threadsPerBlock);
        convertFloatToHalfKernel<<<blocksPerGridA, threadsPerBlock, 0, streamForCompute>>>(temp_a, a, paddedM, paddedK, false);
        convertFloatToHalfKernel<<<blocksPerGridB, threadsPerBlock, 0, streamForCompute>>>(temp_b, b, paddedK, paddedN, true);
        gpuErrchk(cudaGetLastError());

        launchGemm(a, b, d, paddedM, paddedN, paddedK, streamForCompute);
        gpuErrchk(cudaStreamSynchronize(streamForCompute));

        std::vector<float> hostD(dCount);
        gpuErrchk(cudaMemcpy(hostD.data(), d, dCount * sizeof(float), cudaMemcpyDeviceToHost));

        // Clean up.
        gpuErrchk(cudaEventDestroy(copiesDone));
        gpuErrchk(cudaFree(a));
        gpuErrchk(cudaFree(b));
        gpuErrchk(cudaFree(d));
        gpuErrchk(cudaFree(temp_a));
        gpuErrchk(cudaFree(temp_b));
        gpuErrchk(cudaStreamDestroy(streamForCopy));
        gpuErrchk(cudaStreamDestroy(streamForCompute));

        return cropResult(hostD, m, n, paddedN);
    }

    // V0: float -> half precision conversion done on the CPU before upload.
    // Same contract as multiply().
    std::vector<std::vector<float>> multiplyV0(const std::vector<std::vector<float>>& matA,
                                            const std::vector<std::vector<float>>& matB) {
        validateOperands(matA, matB);
        int m = static_cast<int>(matA.size());
        int k = static_cast<int>(matA[0].size());
        int n = static_cast<int>(matB[0].size());

        int paddedM = (m + 15) / 16 * 16;
        int paddedK = (k + 15) / 16 * 16;
        int paddedN = (n + 15) / 16 * 16;

        // Pad, then convert on the host. B is laid out column-major because
        // the GEMM kernel loads matrix_b as col_major.
        auto aPadded = padMatrix(matA, paddedM, paddedK);
        auto bPadded = padMatrix(matB, paddedK, paddedN);
        std::vector<half> aHalf = convertToHalf(aPadded);
        std::vector<half> bHalf = convertToHalf(bPadded, true);

        size_t aCount = static_cast<size_t>(paddedM) * paddedK;
        size_t bCount = static_cast<size_t>(paddedK) * paddedN;
        size_t dCount = static_cast<size_t>(paddedM) * paddedN;

        half *a, *b;
        float *d;
        gpuErrchk(cudaMalloc(&a, aCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&b, bCount * sizeof(half)));
        gpuErrchk(cudaMalloc(&d, dCount * sizeof(float)));
        gpuErrchk(cudaMemset(d, 0, dCount * sizeof(float)));

        gpuErrchk(cudaMemcpy(a, aHalf.data(), aCount * sizeof(half), cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(b, bHalf.data(), bCount * sizeof(half), cudaMemcpyHostToDevice));

        launchGemm(a, b, d, paddedM, paddedN, paddedK, 0);
        gpuErrchk(cudaDeviceSynchronize());

        std::vector<float> hostD(dCount);
        gpuErrchk(cudaMemcpy(hostD.data(), d, dCount * sizeof(float), cudaMemcpyDeviceToHost));

        gpuErrchk(cudaFree(a));
        gpuErrchk(cudaFree(b));
        gpuErrchk(cudaFree(d));

        return cropResult(hostD, m, n, paddedN);
    }

    // Flatten a 2D float matrix to a vector of half on the host. colMajor
    // selects column-major output layout (used for the GEMM's B operand);
    // the default keeps row-major order.
    std::vector<half> convertToHalf(const std::vector<std::vector<float>>& matrix, bool colMajor = false) {
        int rows = matrix.size();
        int cols = matrix[0].size();
        std::vector<half> flatMatrix(rows * cols);
        if (colMajor) {
            for (int col = 0; col < cols; ++col) {
                for (int row = 0; row < rows; ++row) {
                    flatMatrix[col * rows + row] = __float2half(matrix[row][col]);
                }
            }
        } else {
            for (int row = 0; row < rows; ++row) {
                for (int col = 0; col < cols; ++col) {
                    flatMatrix[row * cols + col] = __float2half(matrix[row][col]);
                }
            }
        }
        return flatMatrix;
    }

    // Pad a matrix with zeros to targetRow x targetCol (2D output variant of
    // padAndFlatten).
    std::vector<std::vector<float>> padMatrix(const std::vector<std::vector<float>>& matrix, int targetRow, int targetCol) {
        std::vector<std::vector<float>> paddedMatrix(targetRow, std::vector<float>(targetCol, 0.0f));
        for (size_t i = 0; i < matrix.size(); ++i) {
            for (size_t j = 0; j < matrix[i].size(); ++j) {
                paddedMatrix[i][j] = matrix[i][j];
            }
        }
        return paddedMatrix;
    }

    // Pretty-print a matrix to stdout, 10 characters per column.
    void printMatrix(const std::vector<std::vector<float>>& matrix) {
        for (const auto& row : matrix) {
            for (float elem : row) {
                std::cout << std::setw(10) << elem << " ";
            }
            std::cout << std::endl;
        }
    }

    // Element-wise (Hadamard) product. Both matrices must share dimensions.
    std::vector<std::vector<float>> elementwiseMultiply(const std::vector<std::vector<float>>& matrix1, const std::vector<std::vector<float>>& matrix2) {
        if (matrix1.size() != matrix2.size() ||
            (!matrix1.empty() && matrix1[0].size() != matrix2[0].size())) {
            throw std::invalid_argument("Matrices dimensions do not match for elementwise multiplication.");
        }
        std::vector<std::vector<float>> result(matrix1.size(), std::vector<float>(matrix1.empty() ? 0 : matrix1[0].size()));
        for (size_t i = 0; i < matrix1.size(); ++i) {
            for (size_t j = 0; j < matrix1[i].size(); ++j) {
                result[i][j] = matrix1[i][j] * matrix2[i][j];
            }
        }
        return result;
    }

    // Matrix transpose: result[j][i] = mat[i][j].
    std::vector<std::vector<float>> transpose(const std::vector<std::vector<float>>& mat) override {
        size_t rows = mat.size();
        size_t cols = mat[0].size();
        std::vector<std::vector<float>> result(cols, std::vector<float>(rows));
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                result[j][i] = mat[i][j];
            }
        }
        return result;
    }

    // Element-wise subtraction: result = matrixA - matrixB.
    // Throws std::invalid_argument on dimension mismatch.
    std::vector<std::vector<float>> subtract(const std::vector<std::vector<float>>& matrixA, const std::vector<std::vector<float>>& matrixB) {
        if (matrixA.size() != matrixB.size() || matrixA[0].size() != matrixB[0].size()) {
            throw std::invalid_argument("Matrices dimensions do not match for subtraction.");
        }
        size_t numRows = matrixA.size();
        size_t numCols = matrixA[0].size();
        std::vector<std::vector<float>> result(numRows, std::vector<float>(numCols, 0.0));
        for (size_t i = 0; i < numRows; ++i) {
            for (size_t j = 0; j < numCols; ++j) {
                result[i][j] = matrixA[i][j] - matrixB[i][j];
            }
        }
        return result;
    }

private:
    // Shared precondition checks for the multiply variants.
    static void validateOperands(const std::vector<std::vector<float>>& matA,
                                 const std::vector<std::vector<float>>& matB) {
        if (matA.empty() || matB.empty() || matA[0].empty() || matB[0].empty()) {
            throw std::runtime_error("Input matrices must be non-empty.");
        }
        // Signed/unsigned-safe inner-dimension comparison.
        if (matA[0].size() != matB.size()) {
            throw std::runtime_error("Inner matrix dimensions must agree.");
        }
    }

    // Launch simple_wmma_gemm with a grid sized to the padded problem.
    // blockDim (128, 4) holds 16 warps arranged 4 x 4, so each block covers a
    // 64 x 64 output tile (4 * WMMA_M rows by 4 * WMMA_N columns).
    // BUG FIX: the old grid of (paddedN, paddedM) blocks launched roughly one
    // block per output ELEMENT (and with M/N swapped) — the kernel's bounds
    // check made the results correct, but thousands of blocks did no work.
    void launchGemm(half* a, half* b, float* d, int paddedM, int paddedN, int paddedK, cudaStream_t stream) {
        dim3 threadsPerBlockDim(128, 4);
        dim3 numBlocks((paddedM + 63) / 64, (paddedN + 63) / 64);
        simple_wmma_gemm<<<numBlocks, threadsPerBlockDim, 0, stream>>>(a, b, d, paddedM, paddedN, paddedK);
        gpuErrchk(cudaGetLastError());
    }

    // Copy the top-left m x n corner of a padded row-major flat result back
    // into a 2D vector.
    std::vector<std::vector<float>> cropResult(const std::vector<float>& flat, int m, int n, int paddedN) {
        std::vector<std::vector<float>> result(m, std::vector<float>(n));
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < n; j++) {
                result[i][j] = flat[i * paddedN + j];
            }
        }
        return result;
    }
};

#endif // MATRIXOPERATIONSWMMA_CUH
