#ifndef MATRIXOPERATIONSWMMA_CUH
#define MATRIXOPERATIONSWMMA_CUH

#include <algorithm>
#include <chrono>     // std::chrono timing used in MatrixOperationsWMMA::multiply
#include <cstdlib>    // exit() used by gpuAssert
#include <iostream>
#include <stdexcept>
#include <vector>

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <mma.h>

#include "matrix_operations.h"

// NOTE(review): internal libstdc++ header, non-portable (likely an IDE
// auto-include); <algorithm> above already provides the public interface.
#include <c++/11/bits/algorithmfwd.h>

using namespace nvcuda;

#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16

// Minimal tensor-core GEMM for a single tile: C = A * B.
// Only supports M = N = K = 16. A (MxK) and B (KxN) are row-major half
// matrices; C (MxN) is row-major float. Launch with at least one full warp
// (e.g. <<<1, 32>>>). Requires SM70+ (WMMA).
extern "C"
__global__ void wmmaMatrixMulKernelBase(half *a, half *b, float *c, int M, int N, int K) {
    // WMMA load/mma/store are warp-wide collectives: every lane of the
    // participating warp must reach them. The guards below are warp-uniform
    // (lanes of one warp have consecutive linear thread ids, so the whole
    // warp takes the same branch). The original per-thread
    // `if (row >= M || col >= N) return;` could retire individual lanes
    // before the collectives, which is undefined behavior.
    int linearTid = threadIdx.y * blockDim.x + threadIdx.x;
    if (blockIdx.x != 0 || blockIdx.y != 0 || linearTid >= warpSize) return;
    if (M != WMMA_M || N != WMMA_N || K != WMMA_K) return; // only 16x16x16 supported

    // Declare the WMMA fragments for one 16x16x16 tile.
    wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> a_frag;
    wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> b_frag;
    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;

    // Zero the accumulator.
    wmma::fill_fragment(c_frag, 0.0f);

    // Load both input tiles (leading dimension = row length for row-major
    // storage: K for A, N for B) and run a single tensor-core MMA.
    wmma::load_matrix_sync(a_frag, a, K);
    wmma::load_matrix_sync(b_frag, b, N);
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);

    // Write the accumulated tile back to global memory.
    wmma::store_matrix_sync(c, c_frag, N, wmma::mem_row_major);
}


// Tiled WMMA GEMM: D = alpha * A * B + beta * C.
// Layout contract (matches the NVIDIA cudaTensorCoreGemm sample):
//   A: m_ld x k_ld, half, ROW-major (lda = k_ld)
//   B: k_ld x n_ld, half, COLUMN-major (ldb = k_ld) — note b_frag below
//   C, D: m_ld x n_ld, float, row-major (ldc = n_ld)
// Each warp computes one WMMA_M x WMMA_N output tile: warpM (tile row) comes
// from the x dimension, one warp per 32 threads; warpN (tile column) comes
// directly from the y dimension. Out-of-range tiles are skipped by the
// bounds checks, so the grid may be larger than strictly needed.
// Requires SM70+ (WMMA).
__global__ void simple_wmma_gemm(half *a, half *b, float *c, float *d, int m_ld,
                                 int n_ld, int k_ld, float alpha, float beta) {
  // Leading dimensions. Packed with no transpositions.
  int lda = k_ld;
  int ldb = k_ld;
  int ldc = n_ld;

  // Map this thread onto a (warpM, warpN) output tile in a 2D grid of warps.
  int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
  int warpN = (blockIdx.y * blockDim.y + threadIdx.y);

  // Declare the fragments. b_frag is col_major: B must be stored
  // column-major (or equivalently, a row-major n x k matrix).
  wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major>
      a_frag;
  wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major>
      b_frag;
  wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
  wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;

  wmma::fill_fragment(acc_frag, 0.0f);

  // Loop over k
  for (int i = 0; i < k_ld; i += WMMA_K) {
    int aCol = i;
    int aRow = warpM * WMMA_M;
    int bCol = warpN * WMMA_N;
    int bRow = i;
    // aCol and bRow (the K dimension) advance by WMMA_K each iteration;
    // aRow is fixed per warp at a multiple of WMMA_M, bCol at a multiple
    // of WMMA_N.
    // Bounds checking
    if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
      // Load the inputs (row-major A indexing: row * lda + col;
      // col-major B indexing: row + col * ldb).
      wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
      wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);

      // Perform the matrix multiplication
      wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
    }
  }

  // Load in the current value of c, scale it by beta, and add this our result
  // scaled by alpha
  int cCol = warpN * WMMA_N;
  int cRow = warpM * WMMA_M;

  if (cRow < m_ld && cCol < n_ld) {
    wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc,
                           wmma::mem_row_major);

    for (int i = 0; i < c_frag.num_elements; i++) {
      c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
    }

    // Store the output
    wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc,
                            wmma::mem_row_major);
  }
}



// Error-check wrapper for CUDA runtime calls, e.g.:
//   gpuErrchk(cudaMalloc(&p, bytes));
// The do { } while (0) form makes the macro expand to a single statement,
// so `if (cond) gpuErrchk(x); else ...;` parses correctly — the original
// bare-brace form would attach the `else` to the wrong construct.
#define gpuErrchk(ans) do { gpuAssert((ans), __FILE__, __LINE__); } while (0)

// Prints a readable diagnostic for a failed CUDA call and, unless
// abort == false, terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code != cudaSuccess) {
        std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;
        if (abort) exit(code);
    }
}

// Tensor-core-backed implementation of MatrixOperations. Matrix multiply is
// offloaded to the simple_wmma_gemm kernel (requires SM70+); the remaining
// operations run on the host.
class MatrixOperationsWMMA : public MatrixOperations {
public:

    // Flatten a 2D float matrix into a ROW-major half-precision buffer.
    // Returns an empty vector for an empty matrix (original indexed
    // matrix[0] unconditionally — UB on empty input).
    std::vector<half> convertToHalf(const std::vector<std::vector<float>>& matrix) {
        if (matrix.empty()) return {};
        const size_t rows = matrix.size();
        const size_t cols = matrix[0].size();
        std::vector<half> flatMatrix(rows * cols);
        for (size_t i = 0; i < rows; i++) {
            for (size_t j = 0; j < cols; j++) {
                flatMatrix[i * cols + j] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }

    // Copy `matrix` into the top-left corner of a zero-filled
    // targetRow x targetCol matrix (used to round dimensions up to
    // multiples of 16 as WMMA requires).
    std::vector<std::vector<float>> padMatrix(const std::vector<std::vector<float>>& matrix, int targetRow, int targetCol) {
        std::vector<std::vector<float>> paddedMatrix(targetRow, std::vector<float>(targetCol, 0.0f));
        for (size_t i = 0; i < matrix.size(); ++i) {
            for (size_t j = 0; j < matrix[i].size(); ++j) {
                paddedMatrix[i][j] = matrix[i][j];
            }
        }
        return paddedMatrix;
    }

    // Matrix product C = A(m x k) * B(k x n) on tensor cores.
    // Throws std::runtime_error when inner dimensions disagree.
    //
    // Fixes vs. the original:
    //  * alpha was -1.0f, negating every result; a plain multiply needs 1.0f.
    //  * B was uploaded row-major, but simple_wmma_gemm declares b_frag as
    //    col_major with ldb = k — B is now flattened column-major to match.
    //  * The grid was one block per padded ELEMENT; each 128x4 block already
    //    covers a 64x64 output region (4 warps in x * WMMA_M rows, 4 threads
    //    in y * WMMA_N cols), so we launch ceil-div blocks instead.
    //  * All CUDA calls are checked with gpuErrchk; launch errors are caught
    //    via cudaGetLastError.
    std::vector<std::vector<float>> multiply(const std::vector<std::vector<float>>& matA,
                                            const std::vector<std::vector<float>>& matB) {
        const int m = static_cast<int>(matA.size());
        if (m == 0) return {};
        const int k = static_cast<int>(matA[0].size());
        const int n = matB.empty() ? 0 : static_cast<int>(matB[0].size());

        if (k != static_cast<int>(matB.size())) {
            throw std::runtime_error("Inner matrix dimensions must agree.");
        }
        if (k == 0 || n == 0) {
            // Degenerate product: result is an m x n zero matrix.
            return std::vector<std::vector<float>>(m, std::vector<float>(n, 0.0f));
        }

        // Round every dimension up to a multiple of 16 (WMMA tile size).
        const int paddedM = (m + 15) / 16 * 16;
        const int paddedK = (k + 15) / 16 * 16;
        const int paddedN = (n + 15) / 16 * 16;

        // Pad, then flatten. A is row-major; B must be COLUMN-major because
        // the kernel loads b_frag as wmma::col_major with ldb = paddedK.
        std::vector<half> h_a = convertToHalf(padMatrix(matA, paddedM, paddedK));
        std::vector<half> h_b = convertToHalfColMajor(padMatrix(matB, paddedK, paddedN));
        std::vector<float> h_c(paddedM * paddedN, 0.0f);  // C addend (beta = 0)
        std::vector<float> h_d(paddedM * paddedN, 0.0f);  // result D

        half *a = nullptr, *b = nullptr;
        float *c = nullptr, *d = nullptr;
        gpuErrchk(cudaMalloc(&a, paddedM * paddedK * sizeof(half)));
        gpuErrchk(cudaMalloc(&b, paddedK * paddedN * sizeof(half)));
        gpuErrchk(cudaMalloc(&c, paddedM * paddedN * sizeof(float)));
        gpuErrchk(cudaMalloc(&d, paddedM * paddedN * sizeof(float)));

        gpuErrchk(cudaMemcpy(a, h_a.data(), paddedM * paddedK * sizeof(half), cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(b, h_b.data(), paddedK * paddedN * sizeof(half), cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(c, h_c.data(), paddedM * paddedN * sizeof(float), cudaMemcpyHostToDevice));
        gpuErrchk(cudaDeviceSynchronize());

        const auto startWMMA = std::chrono::high_resolution_clock::now();
        const float alpha = 1.0f;  // was -1.0f: negated the whole product
        const float beta = 0.0f;

        // 128x4 threads = 4 warps along x (tile rows) and 4 threads along y
        // (tile columns) -> each block computes a 64x64 output region.
        dim3 threadsPerBlock(128, 4);
        dim3 numBlocks((paddedM + 63) / 64, (paddedN + 63) / 64);

        simple_wmma_gemm<<<numBlocks, threadsPerBlock>>>(a, b, c, d, paddedM, paddedN, paddedK, alpha, beta);
        gpuErrchk(cudaGetLastError());       // launch-configuration errors
        gpuErrchk(cudaDeviceSynchronize());  // execution errors

        const auto endWMMA = std::chrono::high_resolution_clock::now();
        const std::chrono::duration<double, std::milli> wmmaTime = endWMMA - startWMMA;
        std::cout << "WMMA Time: " << wmmaTime.count() << " ms" << std::endl;

        gpuErrchk(cudaMemcpy(h_d.data(), d, paddedM * paddedN * sizeof(float), cudaMemcpyDeviceToHost));

        gpuErrchk(cudaFree(a));
        gpuErrchk(cudaFree(b));
        gpuErrchk(cudaFree(c));
        gpuErrchk(cudaFree(d));

        // Strip the padding: copy the top-left m x n region back into 2D form.
        std::vector<std::vector<float>> result(m, std::vector<float>(n));
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < n; j++) {
                result[i][j] = h_d[i * paddedN + j];
            }
        }

        return result;
    }

    // Element-wise (Hadamard) product; dimensions assumed to match.
    std::vector<std::vector<float>> elementwiseMultiply(const std::vector<std::vector<float>>& matrix1, const std::vector<std::vector<float>>& matrix2) {
        std::vector<std::vector<float>> result(matrix1.size(), std::vector<float>(matrix1[0].size()));
        for (size_t i = 0; i < matrix1.size(); ++i) {
            for (size_t j = 0; j < matrix1[i].size(); ++j) {
                result[i][j] = matrix1[i][j] * matrix2[i][j];
            }
        }
        return result;
    }

    // Matrix transpose on the host.
    std::vector<std::vector<float>> transpose(const std::vector<std::vector<float>>& mat) override {
        size_t rows = mat.size();
        size_t cols = mat[0].size();
        std::vector<std::vector<float>> result(cols, std::vector<float>(rows));
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                result[j][i] = mat[i][j];
            }
        }
        return result;
    }

    // Element-wise subtraction; throws std::invalid_argument on shape mismatch.
    std::vector<std::vector<float>> subtract(const std::vector<std::vector<float>>& matrixA, const std::vector<std::vector<float>>& matrixB) {
        if (matrixA.size() != matrixB.size() || matrixA[0].size() != matrixB[0].size()) {
            throw std::invalid_argument("Matrices dimensions do not match for subtraction.");
        }
        size_t numRows = matrixA.size();
        size_t numCols = matrixA[0].size();
        std::vector<std::vector<float>> result(numRows, std::vector<float>(numCols, 0.0f));
        for (size_t i = 0; i < numRows; ++i) {
            for (size_t j = 0; j < numCols; ++j) {
                result[i][j] = matrixA[i][j] - matrixB[i][j];
            }
        }
        return result;
    }

private:
    // Flatten a 2D float matrix into a COLUMN-major half buffer:
    // element (i, j) lands at index i + j * rows. This is the layout
    // simple_wmma_gemm expects for its B operand (col_major, ldb = rows).
    std::vector<half> convertToHalfColMajor(const std::vector<std::vector<float>>& matrix) {
        if (matrix.empty()) return {};
        const size_t rows = matrix.size();
        const size_t cols = matrix[0].size();
        std::vector<half> flatMatrix(rows * cols);
        for (size_t i = 0; i < rows; i++) {
            for (size_t j = 0; j < cols; j++) {
                flatMatrix[i + j * rows] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }
};

#endif // MATRIXOPERATIONSWMMA_CUH
