// Project headers
#include "benchmark_config.h"
#include "gpu_timer.h"
#include "utils.h"

// CUDA / BLAS
#include <cblas.h>
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_fp16.h>

// Standard library
#include <bits/stdint-intn.h> // non-portable glibc detail; <cstdint> covers it
#include <cmath>              // std::sqrt
#include <cstdint>            // int8_t, int32_t
#include <cstdlib>            // atoi, malloc, free
#include <stdio.h>
#include <sys/time.h>
#include <type_traits> // is_same

// Allocates unified (managed) memory for one GEMM problem:
// A (m x k, type T), B (k x n, type T), C (m x n, type S).
// Byte counts are computed in size_t so large shapes do not overflow the
// intermediate int product. Allocation failures are reported to stderr;
// on failure the corresponding pointer is left as cudaMallocManaged set it.
template <typename T, typename S>
void allocate_memory(int m, int n, int k, T **A, T **B, S **C) {
  cudaError_t err;
  err = cudaMallocManaged(A, (size_t)m * (size_t)k * sizeof(T));
  if (err != cudaSuccess)
    fprintf(stderr, "cudaMallocManaged(A) failed: %s\n",
            cudaGetErrorString(err));
  err = cudaMallocManaged(B, (size_t)k * (size_t)n * sizeof(T));
  if (err != cudaSuccess)
    fprintf(stderr, "cudaMallocManaged(B) failed: %s\n",
            cudaGetErrorString(err));
  err = cudaMallocManaged(C, (size_t)m * (size_t)n * sizeof(S));
  if (err != cudaSuccess)
    fprintf(stderr, "cudaMallocManaged(C) failed: %s\n",
            cudaGetErrorString(err));
}

// Releases the three managed buffers of one GEMM problem (reverse of
// allocate_memory). cudaFree(nullptr) is a harmless no-op.
template <typename T, typename S> void free_memory(T *A, T *B, S *C) {
  cudaFree(C);
  cudaFree(B);
  cudaFree(A);
}

// Executes one GEMM via cublasGemmEx, mapping the input element type T to
// the matching cuBLAS data/compute types:
//   double -> FP64 in/out/compute     float -> FP32 in/out/compute
//   __half -> FP16 in/out/compute     int8_t -> INT8 inputs, INT32 out/acc
// `algo` is carried as an int so callers can sweep cublasGemmAlgo_t values.
// Returns true iff cuBLAS reports CUBLAS_STATUS_SUCCESS; returns false for
// unsupported T without touching the device.
// Note: cuBLAS is column-major — row-major callers must swap operands and
// dimensions (see test_gemm).
template <typename T, typename S>
bool cublas_gemm_ex(cublasHandle_t handle, cublasOperation_t transA,
                    cublasOperation_t transB, const int m, const int n,
                    const int k, T *A, T *B, S *C, int lda, int ldb, int ldc,
                    const S *alpha, const S *beta, int algo) {
  cudaDataType_t AType, BType, CType, ComputeType;
  if (std::is_same<T, double>::value) {
    AType = BType = CType = ComputeType = CUDA_R_64F;
  } else if (std::is_same<T, float>::value) {
    AType = BType = CType = ComputeType = CUDA_R_32F;
  } else if (std::is_same<T, __half>::value) {
    AType = BType = CType = ComputeType = CUDA_R_16F;
  } else if (std::is_same<T, int8_t>::value) {
    AType = BType = CUDA_R_8I;        // 8-bit integer inputs
    CType = ComputeType = CUDA_R_32I; // 32-bit accumulation and output
  } else {
    printf("Not supported data type.\n"); // fixed: message was missing '\n'
    return false;
  }

  cublasStatus_t status = cublasGemmEx(
      handle, transA, transB, m, n, k, alpha, A, AType, lda, B, BType, ldb,
      beta, C, CType, ldc, ComputeType, static_cast<cublasGemmAlgo_t>(algo));

  return (status == CUBLAS_STATUS_SUCCESS);
}

// Benchmarks cublas_gemm_ex for row-major C = A * B (A: m x k, B: k x n,
// C: m x n). Because cuBLAS is column-major, the launches below compute
// C^T = B^T * A^T by swapping the operands and the m/n dimensions; the
// leading dimensions refer to that swapped, column-major view.
// Runs WARMUPNUM untimed warm-up launches, then `iteration` timed launches;
// the first timed launch is also discarded and the remaining iteration-1
// samples are averaged. Prints the average time and hands it to
// performance() for the TFlops/Tops figure. Prints nothing if iteration <= 1
// or the first launch fails.
template <typename T, typename S>
void test_gemm(cublasHandle_t handle, const int m, const int n, const int k,
               T *A, T *B, S *C, const S *alpha, const S *beta, int algo,
               const int iteration) {

  // warm up: untimed launches so clocks, caches and (for managed memory)
  // page migration settle before measurement
  for(int i = 0; i < WARMUPNUM; i++){
    cublas_gemm_ex(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                       n, // cuBLAS "m": rows of op(A)=B^T and of C^T
                       m, // cuBLAS "n": columns of op(B)=A^T and of C^T
                       k, // cuBLAS "k": shared inner dimension
                       B, A, C,
                       n, // lda: leading dimension of B in the swapped view
                       k, // ldb: leading dimension of A in the swapped view
                       n, // ldc: leading dimension of C in the swapped view
                       alpha, beta, static_cast<cublasGemmAlgo_t>(algo));
  }
  cudaDeviceSynchronize(); // ensure all warm-up work finished before timing
  double total_time = 0;
  GpuTimer timer;
  for (int i = 0; i < iteration; ++i) {
    timer.start();
    bool success =
        cublas_gemm_ex(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                       n, // cuBLAS "m" (see warm-up call above)
                       m, // cuBLAS "n"
                       k, // cuBLAS "k"
                       B, A, C,
                       n, // lda
                       k, // ldb
                       n, // ldc
                       alpha, beta, static_cast<cublasGemmAlgo_t>(algo));
    timer.stop_and_wait();

    if (!success)
      break;
    else if (i > 0) { // skip iteration 0 as an extra warm-up sample
      total_time += timer.duration(1);
    }
  }
  // total_time stays 0 when iteration <= 1 or the first launch failed,
  // so nothing is printed in those cases.
  if (total_time > 0.0) {
    double avg_time = total_time / (iteration - 1); // iteration-1 samples summed
    printf("algo %d: %.3f ms\n", algo, avg_time);
    if (std::is_same<T, double>::value)
      printf("Average gemm_fp64 kernel execution time: %.3f ms\nAverage "
             "gemm_fp64 kernel TFlops: ",
             avg_time);
    if (std::is_same<T, float>::value)
      printf("Average gemm_fp32 kernel execution time: %.3f ms\nAverage "
             "gemm_fp32 kernel TFlops: ",
             avg_time);
    if (std::is_same<T, __half>::value)
      printf("Average gemm_fp16 kernel execution time: %.3f ms\nAverage "
             "gemm_fp16 kernel TFlops: ",
             avg_time);
    if (std::is_same<T, int8_t>::value)
      printf("Average gemm_int8 kernel execution time: %.3f ms\nAverage "
             "gemm_int8 kernel Tops: ",
             avg_time);
    // avg_time is in ms; performance() takes seconds (hence * 1e-3)
    performance(m, n, k, std::is_same<T, int8_t>::value, avg_time * 1e-3);
  }
}

// Root-mean-square error between an m*n GPU result and its CPU reference.
// Accumulates in double regardless of the element types so that summing
// m*n squared errors does not lose precision (the fp32 path previously
// accumulated in float).
template <typename OutT, typename GoldT>
static double compute_rmse(const OutT *out, const GoldT *golden, int m, int n) {
  double sumSquaredError = 0.0;
  for (int i = 0; i < m * n; ++i) {
    double error = double(out[i]) - double(golden[i]);
    sumSquaredError += error * error;
  }
  return std::sqrt(sumSquaredError / (double(m) * double(n)));
}

// Entry point. Usage: prog <M> <N> <K> <iterations> <type>
// Benchmarks cublasGemmEx for C = A x B (A: M*K, B: K*N, C: M*N) in one
// precision selected by <type> (0: fp64, 1: fp32, 2: fp16, 3: int8) and
// verifies the GPU result against a CBLAS reference computed on the host.
// For fp16/int8 the reference inputs are the quantized values converted
// back to float, so the CPU multiplies exactly what the GPU multiplies.
int main(int argc, char *argv[]) {
  if (argc != 6) {
    printf("Usage: %s <M> <N> <K> <iterations> <type>\n", argv[0]);
    printf("C = A X B (A: M * K, B: K * N, C: M * N)\n");
    return 1;
  }
  const int m = atoi(argv[1]);
  const int n = atoi(argv[2]);
  const int k = atoi(argv[3]);
  const int iteration = atoi(argv[4]);
  const int type = atoi(argv[5]);
  if (type < 0 || type > 3) {
    // Previously an out-of-range type silently benchmarked nothing.
    printf("type must be 0 (fp64), 1 (fp32), 2 (fp16) or 3 (int8)\n");
    return 1;
  }

  printf("shape: (%d, %d) x (%d, %d)\n", m, k, k, n);
  // Only the default algorithm is swept; widen this range to compare others.
  int start_algo = CUBLAS_GEMM_DEFAULT;
  int end_algo = CUBLAS_GEMM_DEFAULT;

  // alpha = 1, beta = 0 in each precision: plain C = A * B.
  const double d_alpha = 1.0, d_beta = 0.0;
  const float f_alpha = 1.f, f_beta = 0.f;
  const __half h_alpha = __float2half_rn(1.f), h_beta = __float2half_rn(0.f);
  const int32_t i_alpha = 1, i_beta = 0;

  // Managed (GPU-visible) buffers, one set per precision.
  double *dA, *dB, *dC;
  float *fA, *fB, *fC;
  __half *hA, *hB, *hC;
  int8_t *iA, *iB;
  int32_t *iC;

  // Host-side "golden" buffers for the CBLAS reference.
  double *goldendC = (double *)malloc(m * n * sizeof(double));
  float *goldenfC = (float *)malloc(m * n * sizeof(float));
  float *goldenhC = (float *)malloc(m * n * sizeof(float));
  float *goldeniC = (float *)malloc(m * n * sizeof(float));
  float *goldenhA = (float *)malloc(m * k * sizeof(float));
  float *goldenhB = (float *)malloc(k * n * sizeof(float));
  float *goldeniA = (float *)malloc(m * k * sizeof(float));
  float *goldeniB = (float *)malloc(k * n * sizeof(float));

  allocate_memory(m, n, k, &dA, &dB, &dC);
  allocate_memory(m, n, k, &fA, &fB, &fC);
  allocate_memory(m, n, k, &hA, &hB, &hC);
  allocate_memory(m, n, k, &iA, &iB, &iC);

  // Deterministic inputs in [-1, 1]; fp16/int8 inputs are quantized and the
  // golden copies hold the de-quantized values the GPU actually sees.
  for (int i = 0; i < m * k; ++i) {
    dA[i] = double(i % 255 - 127) / 127;
    fA[i] = float(i % 255 - 127) / 127;
    hA[i] = __float2half_rn(fA[i]);
    iA[i] = float2int8(fA[i], 127);
    goldenhA[i] = fp16_to_fp32(hA[i]);
    goldeniA[i] = float(iA[i]);
  }
  for (int i = 0; i < k * n; ++i) {
    dB[i] = double(i % 255 - 127) / 127;
    fB[i] = float(i % 255 - 127) / 127;
    hB[i] = __float2half_rn(fB[i]);
    iB[i] = float2int8(fB[i], 127);
    goldenhB[i] = fp16_to_fp32(hB[i]);
    goldeniB[i] = float(iB[i]);
  }
  cublasHandle_t handle;
  cublasCreate(&handle);

  if (type == 0) {
    printf(">>>>>>>>>>>>>>>>> test fp64 >>>>>>>>>>>>>>>>>\n");
    for (int algo = start_algo; algo <= end_algo; ++algo)
      test_gemm(handle, m, n, k, dA, dB, dC, &d_alpha, &d_beta, algo,
                iteration);
    cudaDeviceSynchronize(); // ensure dC is visible on the host
    printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
    printf("fp64: ");
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, dA, k,
                dB, n, 0.0, goldendC, n);
    double rmse = compute_rmse(dC, goldendC, m, n);
    printf("fp64 RMSE:%.8lf\n", rmse);
    printf(rmse > 1e-6 ? "FAIL\n" : "PASS\n");
  }

  if (type == 1) {
    printf(">>>>>>>>>>>>>>>>> test fp32 >>>>>>>>>>>>>>>>>\n");
    for (int algo = start_algo; algo <= end_algo; ++algo)
      test_gemm(handle, m, n, k, fA, fB, fC, &f_alpha, &f_beta, algo,
                iteration);
    cudaDeviceSynchronize(); // ensure fC is visible on the host
    printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
    printf("fp32: ");
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, fA, k,
                fB, n, 0.0, goldenfC, n);
    double rmse = compute_rmse(fC, goldenfC, m, n);
    printf("fp32 RMSE:%.8lf\n", rmse);
    printf(rmse > 1e-4 ? "FAIL\n" : "PASS\n");
  }

  if (type == 2) {
    printf(">>>>>>>>>>>>>>>>> test fp16 >>>>>>>>>>>>>>>>>\n");
    for (int algo = start_algo; algo <= end_algo; ++algo)
      test_gemm(handle, m, n, k, hA, hB, hC, &h_alpha, &h_beta, algo,
                iteration);
    cudaDeviceSynchronize(); // ensure hC is visible on the host
    printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
    printf("fp16: ");
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0,
                goldenhA, k, goldenhB, n, 0.0, goldenhC, n);
    // BUGFIX: compare against goldenhC (the reference just computed above);
    // the old code read goldenfC, which is uninitialized on this path.
    double sumSquaredError = 0.0;
    for (int i = 0; i < m * n; ++i) {
      double error = double(fp16_to_fp32(hC[i])) - double(goldenhC[i]);
      sumSquaredError += error * error;
    }
    double rmse = std::sqrt(sumSquaredError / (double(m) * double(n)));
    printf("fp16 RMSE:%.8lf\n", rmse);
    printf(rmse > 1e-4 ? "FAIL\n" : "PASS\n");
  }

  if (type == 3) {
    printf(">>>>>>>>>>>>>>>>> test int8 >>>>>>>>>>>>>>>>>\n");
    for (int algo = start_algo; algo <= end_algo; ++algo)
      test_gemm(handle, m, n, k, iA, iB, iC, &i_alpha, &i_beta, algo,
                iteration);
    cudaDeviceSynchronize(); // ensure iC is visible on the host
    printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
    printf("int8: ");
    // int8 x int8 -> int32 is exact on both sides, so the reference (run in
    // float) should match bit-for-bit within the 1e-6 tolerance.
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0,
                goldeniA, k, goldeniB, n, 0.0, goldeniC, n);
    double rmse = compute_rmse(iC, goldeniC, m, n);
    printf("int8 RMSE:%.8lf\n", rmse);
    printf(rmse > 1e-6 ? "FAIL\n" : "PASS\n");
  }

  cublasDestroy(handle); // was previously leaked
  free_memory(dA, dB, dC);
  free_memory(fA, fB, fC);
  free_memory(hA, hB, hC);
  free_memory(iA, iB, iC);
  free(goldendC);
  free(goldenfC);
  free(goldenhC);
  free(goldenhA);
  free(goldenhB);
  free(goldeniA);
  free(goldeniB);
  free(goldeniC);
  return 0;
}
