#ifndef MATRIXOPERATIONSWMMA1_CUH
#define MATRIXOPERATIONSWMMA1_CUH
#define WMMA_M 16
#define WMMA_N 16
#define WMMA_K 16

#define WARP_SIZE 32
// Ceiling division. Each argument use is fully parenthesized so compound
// expressions expand correctly: without the parentheses, div_ceil(n, a * b)
// would expand to (n + a * b - 1) / a * b, which divides by `a` and then
// MULTIPLIES by `b`.
#define div_ceil(dividend, divisor) (((dividend) + (divisor) - 1) / (divisor))
#include <stdexcept>
#include "matrix_operations.h"
#include <vector>
#include <cuda_runtime.h>
#include <mma.h>
#include <algorithm>
#include <iostream>
#include <cuda_fp16.h>
#include <iomanip> 
#include <chrono>
using namespace nvcuda;

/**
 * Naive WMMA (tensor core) GEMM kernel: C = A * B, accumulated in float.
 *
 * Layouts: A is row-major M x K (leading dim K), B is column-major K x N
 * (leading dim K), C is written row-major M x N (leading dim N).
 * Launch: one warp per 16x16 output tile of C; grid = (ceil(N/16), ceil(M/16)),
 * block = one warp (WARP_SIZE threads). M, N, K are expected to be multiples
 * of the 16x16x16 WMMA tile (the host wrapper pads inputs accordingly).
 * Requires SM70+ (wmma).
 */
__global__ void wmmaNaiveKernel(const half *__restrict__ A, const half *__restrict__ B, float *__restrict__ C, size_t M,
                                size_t N, size_t K) {
    const size_t K_tiles = div_ceil(K, WMMA_K);

    // Top-left corner of this warp's 16x16 output tile.
    const size_t warp_row = blockIdx.y * WMMA_M;
    const size_t warp_col = blockIdx.x * WMMA_N;

    // Reject any tile that falls outside the matrix. NOTE: the original used
    // `&&`, which only skipped tiles out of range in BOTH dimensions; a tile
    // out of range in just one dimension would read and write out of bounds.
    if (warp_row >= M || warp_col >= N) {
        return;
    }

    wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> C_frag;

    wmma::fill_fragment(C_frag, 0.0f);

    // March along the K dimension one 16-wide tile at a time, accumulating
    // into C_frag with a tensor-core MMA per tile pair.
#pragma unroll
    for (size_t i = 0; i < K_tiles; ++i) {
        wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> A_frag;
        wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> B_frag;

        // A tile at (warp_row, i*16), row-major with stride K.
        wmma::load_matrix_sync(A_frag, A + warp_row * K + i * WMMA_K, K);
        // B tile at (i*16, warp_col), column-major with stride K.
        wmma::load_matrix_sync(B_frag, B + i * WMMA_K + warp_col * K, K);

        wmma::mma_sync(C_frag, A_frag, B_frag, C_frag);
    }

    wmma::store_matrix_sync(C + warp_row * N + warp_col, C_frag, N, wmma::mem_row_major);
}












/**
 * Matrix operations backed by the naive WMMA tensor-core GEMM kernel.
 *
 * multiply() pads both operands up to multiples of the 16x16x16 WMMA tile,
 * uploads A in row-major and B in column-major layout (matching the kernel's
 * fragment layouts), runs the kernel, and trims the padded result back to the
 * caller's dimensions. The remaining methods are host-side helpers operating
 * on std::vector<std::vector<float>> matrices.
 */
class MatrixOperationsWMMA1 : public MatrixOperations {
public:

    // Launch the naive WMMA kernel: one warp (32 threads) per 16x16 tile of C.
    // A, B, C are DEVICE pointers; M, N, K must be multiples of 16.
    void wmmaNaive(half *A, half *B, float *C, size_t M, size_t N, size_t K) {
        dim3 block(WARP_SIZE);
        dim3 grid(div_ceil(N, WMMA_N), div_ceil(M, WMMA_M));

        wmmaNaiveKernel<<<grid, block>>>(A, B, C, M, N, K);
    }

    // Flatten a 2D float matrix into a row-major half-precision array.
    std::vector<half> convertToHalf(const std::vector<std::vector<float>>& matrix) {
        const int rows = static_cast<int>(matrix.size());
        const int cols = static_cast<int>(matrix[0].size());
        std::vector<half> flatMatrix(rows * cols);
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                flatMatrix[i * cols + j] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }

    // Flatten a 2D float matrix into a COLUMN-major half-precision array.
    std::vector<half> convertToHalfV2(const std::vector<std::vector<float>>& matrix) {
        const int rows = static_cast<int>(matrix.size());
        const int cols = static_cast<int>(matrix[0].size());
        std::vector<half> flatMatrix(rows * cols);
        for (int j = 0; j < cols; j++) {     // iterate columns first
            for (int i = 0; i < rows; i++) { // then rows
                flatMatrix[j * rows + i] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }

    // Pretty-print a matrix followed by its row and column counts (debug aid).
    void printMatrix(const std::vector<std::vector<float>>& matrix) {
        for (const auto& row : matrix) {
            for (float elem : row) {
                std::cout << std::setw(10) << elem << " ";
            }
            std::cout << std::endl;
        }
        std::cout << matrix.size() << std::endl;
        std::cout << matrix[0].size() << std::endl;
    }

    // Zero-pad `matrix` to targetRow x targetCol and flatten it ROW-major
    // as half precision. Requires targetRow/targetCol >= matrix dimensions.
    std::vector<half> padAndFlatten(const std::vector<std::vector<float>>& matrix, int targetRow, int targetCol) {
        std::vector<half> flatMatrix(targetRow * targetCol, __float2half(0.0f));
        for (size_t i = 0; i < matrix.size(); ++i) {
            for (size_t j = 0; j < matrix[0].size(); ++j) {
                flatMatrix[i * targetCol + j] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }

    // Zero-pad `matrix` to targetRow x targetCol and flatten it COLUMN-major
    // as half precision (leading dimension targetRow).
    std::vector<half> padAndFlattenV2(const std::vector<std::vector<float>>& matrix, int targetRow, int targetCol) {
        std::vector<half> flatMatrix(targetRow * targetCol, __float2half(0.0f));
        for (size_t j = 0; j < matrix[0].size(); ++j) {
            for (size_t i = 0; i < matrix.size(); ++i) {
                flatMatrix[j * targetRow + i] = __float2half(matrix[i][j]);
            }
        }
        return flatMatrix;
    }

    /**
     * Matrix product matA * matB via the WMMA kernel.
     *
     * Operands are padded with zeros up to multiples of 16 in every dimension
     * (padding does not change the product), multiplied in half precision with
     * float accumulation on the device, then trimmed back to REALM x REALN.
     *
     * Throws std::runtime_error on empty inputs, dimension mismatch, or any
     * CUDA API failure.
     */
    std::vector<std::vector<float>> multiply(const std::vector<std::vector<float>>& matA,
                                             const std::vector<std::vector<float>>& matB)
    {
        if (matA.empty() || matA[0].empty() || matB.empty() || matB[0].empty()) {
            throw std::runtime_error("Input matrices must be non-empty.");
        }
        const int REALM = static_cast<int>(matA.size());
        const int REALK = static_cast<int>(matB.size());
        const int REALN = static_cast<int>(matB[0].size());
        if (REALK != static_cast<int>(matA[0].size())) {
            throw std::runtime_error("Inner matrix dimensions must agree.");
        }

        // Pad each dimension up to the next multiple of the WMMA tile edge.
        // NOTE: the original additionally forced M == N (padding the smaller
        // to the larger); the kernel handles rectangular tile grids, so that
        // extra memory and compute is unnecessary and has been removed.
        const int M = (REALM + 15) / 16 * 16;
        const int K = (REALK + 15) / 16 * 16;
        const int N = (REALN + 15) / 16 * 16;

        // Host-side padded operands: A row-major, B column-major, matching
        // the fragment layouts the kernel loads.
        std::vector<half> h_a = padAndFlatten(matA, M, K);
        std::vector<half> h_b = padAndFlattenV2(matB, K, N);

        const size_t bytesA = static_cast<size_t>(M) * K * sizeof(half);
        const size_t bytesB = static_cast<size_t>(K) * N * sizeof(half);
        const size_t bytesC = static_cast<size_t>(M) * N * sizeof(float);

        half *d_A = nullptr;
        half *d_B = nullptr;
        float *d_C = nullptr;
        checkCuda(cudaMalloc((void**)&d_A, bytesA));
        checkCuda(cudaMalloc((void**)&d_B, bytesB));
        checkCuda(cudaMalloc((void**)&d_C, bytesC));
        checkCuda(cudaMemcpy(d_A, h_a.data(), bytesA, cudaMemcpyHostToDevice));
        checkCuda(cudaMemcpy(d_B, h_b.data(), bytesB, cudaMemcpyHostToDevice));

        wmmaNaive(d_A, d_B, d_C, M, N, K);
        checkCuda(cudaGetLastError());      // launch-configuration errors
        checkCuda(cudaDeviceSynchronize()); // execution errors

        // std::vector instead of the original `new float[]`, which was leaked.
        std::vector<float> h_C(static_cast<size_t>(M) * N);
        checkCuda(cudaMemcpy(h_C.data(), d_C, bytesC, cudaMemcpyDeviceToHost));
        cudaFree(d_A);
        cudaFree(d_B);
        cudaFree(d_C);

        // Trim the padded M x N result back to the caller's REALM x REALN.
        // The leading dimension of the device result is the padded N (the
        // original indexed with M, which was only correct because it forced
        // M == N above).
        std::vector<std::vector<float>> result(REALM, std::vector<float>(REALN));
        for (int i = 0; i < REALM; i++) {
            for (int j = 0; j < REALN; j++) {
                result[i][j] = h_C[static_cast<size_t>(i) * N + j];
            }
        }
        return result;
    }


    // Element-wise (Hadamard) product. Dimension check added for consistency
    // with subtract(); mismatched inputs were previously undefined behavior.
    std::vector<std::vector<float>> elementwiseMultiply(const std::vector<std::vector<float>>& matrix1, const std::vector<std::vector<float>>& matrix2) {
        if (matrix1.size() != matrix2.size() || (!matrix1.empty() && matrix1[0].size() != matrix2[0].size())) {
            throw std::invalid_argument("Matrices dimensions do not match for elementwise multiplication.");
        }
        std::vector<std::vector<float>> result(matrix1.size(), std::vector<float>(matrix1.empty() ? 0 : matrix1[0].size()));
        for (size_t i = 0; i < matrix1.size(); ++i) {
            for (size_t j = 0; j < matrix1[i].size(); ++j) {
                result[i][j] = matrix1[i][j] * matrix2[i][j];
            }
        }
        return result;
    }

    // Matrix transpose.
    std::vector<std::vector<float>> transpose(const std::vector<std::vector<float>>& mat) override {
        const size_t rows = mat.size();
        const size_t cols = mat[0].size();
        std::vector<std::vector<float>> result(cols, std::vector<float>(rows));
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                result[j][i] = mat[i][j];
            }
        }
        return result;
    }

    // Element-wise subtraction; throws if the shapes differ.
    std::vector<std::vector<float>> subtract(const std::vector<std::vector<float>>& matrixA, const std::vector<std::vector<float>>& matrixB) {
        if (matrixA.size() != matrixB.size() || matrixA[0].size() != matrixB[0].size()) {
            throw std::invalid_argument("Matrices dimensions do not match for subtraction.");
        }
        const size_t numRows = matrixA.size();
        const size_t numCols = matrixA[0].size();
        std::vector<std::vector<float>> result(numRows, std::vector<float>(numCols, 0.0f));
        for (size_t i = 0; i < numRows; ++i) {
            for (size_t j = 0; j < numCols; ++j) {
                result[i][j] = matrixA[i][j] - matrixB[i][j];
            }
        }
        return result;
    }

private:
    // Turn any CUDA API failure into an exception instead of silently
    // continuing with a sticky error (the original checked no return codes).
    static void checkCuda(cudaError_t err) {
        if (err != cudaSuccess) {
            throw std::runtime_error(cudaGetErrorString(err));
        }
    }
};

#endif // MATRIXOPERATIONSWMMA1_CUH