#include "benchmark_config.h"
#include "gpu_timer.h"
#include <cblas.h>
#include <cstddef>
#include <cstring>
#include <cublas_v2.h>
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

// Classic compile-time integer-to-type dispatch tag: each distinct v is a
// distinct type, with the original integer recoverable via ::value.
// NOTE(review): appears unused anywhere in this file — candidate for removal.
template <int v> struct Int2Type {
  enum { value = v };
};

// Overload set that widens every supported element type to float, so the
// host-side reference check (cblas_sgemm on the Check* buffers) can operate
// in a single common precision.
float convert(__half val) { return __half2float(val); }

float convert(__nv_bfloat16 val) { return __bfloat162float(val); }

float convert(float val) { return val; }

float convert(int8_t val) { return val; }

float convert(int val) { return val; }

// Global selector for how test matrices are initialized (set from argv[1] in
// main): 0 = all zeros, 1 = raw rand(), 2 = sin/cos of rand() (scaled by 10
// for int8 inputs). Consumed by the switch statements in the blasTest ctor.
int init_type;

// Generic cuBLAS GEMM benchmark fixture.
//
// Template parameters:
//   TA/TB/TC - element types of A, B and C (e.g. __half, int8_t, float)
//   TScalar  - type of alpha/beta as passed to the cuBLAS call
//
// The constructor allocates and fills host and device buffers (column-major
// layout, as cuBLAS expects). runTest() warms up, times runSingleTest() and
// prints throughput; verify() produces a single-precision CPU reference via
// CBLAS which the typed subclasses compare against.
template <typename TA, typename TB, typename TC, typename TScalar>
class blasTest {
  // Defaults only; each typed subclass shadows these with its own set.
  cudaDataType AType = CUDA_R_16F;
  cudaDataType BType = CUDA_R_16F;
  cudaDataType CType = CUDA_R_16F;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;

public:
  const char *type = "FP16"; // human-readable datatype tag used in reports

protected:
  bool isEx;  // true: cublasGemmEx family, false: typed API (Hgemm/Sgemm)
  bool isSB;  // true: strided-batched GEMM
  bool transA;
  bool transB;
  bool isInitialized = false;
  size_t m;
  size_t n;
  size_t k;
  size_t lda;
  size_t ldb;
  size_t ldc;
  size_t strideA; // element stride between consecutive batch matrices
  size_t strideB;
  size_t strideC;
  size_t batch;
  size_t szA; // total element counts, including all batches
  size_t szB;
  size_t szC;
  TScalar alpha, beta;
  TA *dA, *hA;
  TB *dB, *hB;
  TC *dC, *hC;
  float *CheckA, *CheckB, *CheckC; // float copies for the CPU reference GEMM
  cublasHandle_t handle;

public:
  blasTest(bool isEx, bool isSB, bool transA, bool transB, size_t m, size_t n,
           size_t k, size_t lda, size_t ldb, size_t ldc, size_t batch,
           TScalar alpha, TScalar beta) {
    this->isEx = isEx;
    this->isSB = isSB;
    this->transA = transA;
    this->transB = transB;
    this->m = m;
    this->n = n;
    this->k = k;
    // BUGFIX: the original only assigned this->lda/ldb/ldc when the supplied
    // value was too small, leaving the members *uninitialized* whenever the
    // caller passed an already-valid (larger) leading dimension. Clamp each
    // leading dimension to its column-major minimum instead.
    const size_t minLda = transA ? k : m;
    const size_t minLdb = transB ? n : k;
    this->lda = (lda > minLda) ? lda : minLda;
    this->ldb = (ldb > minLdb) ? ldb : minLdb;
    this->ldc = (ldc > m) ? ldc : m;
    szA = this->lda * (transA ? m : k);
    szB = this->ldb * (transB ? k : n);
    szC = this->ldc * n;
    // Default to a single batch with zero strides so these members are always
    // defined (the original left them uninitialized in non-batched mode).
    this->batch = 1;
    this->strideA = this->strideB = this->strideC = 0;
    if (isSB) {
      this->batch = batch;
      this->strideA = szA; // one matrix per batch entry, densely packed
      this->strideB = szB;
      this->strideC = szC;
      szA *= batch;
      szB *= batch;
      szC *= batch;
    }
    this->alpha = alpha;
    this->beta = beta;
    hA = static_cast<TA *>(malloc(szA * sizeof(TA)));
    hB = static_cast<TB *>(malloc(szB * sizeof(TB)));
    hC = static_cast<TC *>(malloc(szC * sizeof(TC)));
    this->CheckA = static_cast<float *>(malloc(szA * sizeof(float)));
    this->CheckB = static_cast<float *>(malloc(szB * sizeof(float)));
    this->CheckC = static_cast<float *>(malloc(szC * sizeof(float)));
    if (!hA || !hB || !hC || !CheckA || !CheckB || !CheckC) {
      printf("Error Allocating host Memory\n");
      exit(1);
    }
    for (size_t j = 0; j < szA; j++) {
      switch (init_type) {
      case 0:
        hA[j] = static_cast<TA>(0.0);
        break;
      case 1:
        hA[j] = static_cast<TA>(float(rand()));
        break;
      case 2:
        // int8 inputs are scaled by 10 so values exercise more of the range.
        if constexpr (std::is_same_v<int8_t, TA>)
          hA[j] = static_cast<TA>(sinf(rand()) * 10.f);
        else
          hA[j] = static_cast<TA>(sinf(rand()));
        break;
      default:
        break;
      }
      CheckA[j] = convert(hA[j]);
    }

    for (size_t j = 0; j < szB; j++) {
      switch (init_type) {
      case 0:
        hB[j] = static_cast<TB>(0.0);
        break;
      case 1:
        hB[j] = static_cast<TB>(float(rand()));
        break;
      case 2:
        if constexpr (std::is_same_v<int8_t, TB>)
          hB[j] = static_cast<TB>(cosf(rand()) * 10.f);
        else
          hB[j] = static_cast<TB>(cosf(rand()));
        break;
      default:
        break;
      }
      CheckB[j] = convert(hB[j]);
    }

    for (size_t j = 0; j < szC; j++) {
      switch (init_type) {
      case 0:
        hC[j] = static_cast<TC>(0.0);
        break;
      case 1:
        hC[j] = static_cast<TC>(float(rand()));
        break;
      case 2:
        hC[j] = static_cast<TC>(sinf(rand()));
        break;
      default:
        break;
      }
      // BUGFIX: the original never filled CheckC, so the CPU reference GEMM
      // read uninitialized memory whenever beta != 0.
      CheckC[j] = convert(hC[j]);
    }
    // Allocate device buffers and copy the host data over.
    auto statusA = cudaMalloc(reinterpret_cast<void **>(&dA), szA * sizeof(TA));
    auto statusB = cudaMalloc(reinterpret_cast<void **>(&dB), szB * sizeof(TB));
    auto statusC = cudaMalloc(reinterpret_cast<void **>(&dC), szC * sizeof(TC));
    if (statusA != cudaSuccess) {
      printf("Error Allocating Memory for dA:%d\n", statusA);
      exit(1);
    }
    if (statusB != cudaSuccess) {
      printf("Error Allocating Memory for dB:%d\n", statusB);
      exit(1);
    }
    if (statusC != cudaSuccess) {
      printf("Error Allocating Memory for dC:%d\n", statusC);
      exit(1);
    }
    statusA = cudaMemcpy(dA, hA, szA * sizeof(TA), cudaMemcpyHostToDevice);
    statusB = cudaMemcpy(dB, hB, szB * sizeof(TB), cudaMemcpyHostToDevice);
    statusC = cudaMemcpy(dC, hC, szC * sizeof(TC), cudaMemcpyHostToDevice);
    if (statusA != cudaSuccess) {
      printf("Error copying data from hA to dA:%d\n", statusA);
      exit(2);
    }
    if (statusB != cudaSuccess) {
      printf("Error copying data from hB to dB:%d\n", statusB);
      exit(2);
    }
    if (statusC != cudaSuccess) {
      printf("Error copying data from hC to dC:%d\n", statusC);
      exit(2);
    }
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) {
      printf("Error creating cuBLAS handle\n");
      exit(1);
    }
    isInitialized = true;
  }

  // Issues exactly one GEMM (or strided-batched GEMM). Implemented per type.
  virtual cublasStatus_t runSingleTest() = 0;

  // Computes the FP32 CPU reference into CheckC and pulls the GPU result back
  // into hC. Note: for strided-batched runs only the first batch matrix is
  // computed/compared by the subclasses.
  virtual void verify() {
    cblas_sgemm(CblasColMajor, this->transA ? CblasTrans : CblasNoTrans,
                this->transB ? CblasTrans : CblasNoTrans, this->m, this->n,
                this->k, this->alpha, this->CheckA, this->lda, this->CheckB,
                this->ldb, this->beta, this->CheckC, this->ldc);
    cudaMemcpy(hC, dC, szC * sizeof(TC), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
  }

  // Times `iters` kernel invocations after `warmup` untimed ones, verifies
  // the result, then prints average latency and throughput.
  void runTest(int warmup = 100, int iters = 200) {
    for (int ii = 0; ii < warmup; ii++) {
      this->runSingleTest();
    }
    cudaDeviceSynchronize();
    double msecMean = 0.0;
    GpuTimer timer;
    for (int ii = 0; ii < iters; ii++) {
      timer.start();
      cublasStatus_t status = runSingleTest();
      timer.stop_and_wait();
      if (status != CUBLAS_STATUS_SUCCESS) {
        printf("BLAS FAILED WITH ERROR CODE:%d\n", status);
        exit(3);
      }
      msecMean += timer.duration(1) * 1000;
    }

    this->verify();

    if (iters > 0)
      msecMean /= iters;
    // 2*m*n*k (fused multiply-add) operations per GEMM, in units of 1e9.
    double gops = 2. * (double)m * (double)n * (double)k / 1000. / 1000. / 1000.;
    if (isSB)
      gops *= (double)batch;
    double ips1 = gops / msecMean * 1000.;
    // Integer GEMMs are reported in Tops, floating-point ones in Tflops.
    const char *unit = (std::strcmp(this->type, "I8") == 0) ? "Tops" : "Tflops";
    if (isSB) {
      printf("[DataType:%s]isEx:%d,trans:%s%s,m:%zu,n:%zu,k:%zu,lda:%zu,ldb:%"
             "zu,ldc:%zu,batch:%"
             "zu\n",
             this->type, this->isEx, transA ? "T" : "N", transB ? "T" : "N", m,
             n, k, lda, ldb, ldc, batch);
    } else {
      printf("[DataType:%s]isEx:%d,trans:%s%s,m:%zu,n:%zu,k:%zu,lda:%zu,ldb:%"
             "zu,ldc:%zu\n",
             this->type, this->isEx, transA ? "T" : "N", transB ? "T" : "N", m,
             n, k, lda, ldb, ldc);
    }
    printf("Time(us):%f,%s:%lf\n", msecMean, unit, ips1);
    printf("Average Blas %s kernel %s: %lf\n", this->type, unit, ips1);
  }

  // Virtual so deleting via a base pointer runs the derived destructor too.
  virtual ~blasTest() {
    if (!isInitialized)
      return;
    cublasDestroy(handle);
    free(CheckA);
    free(CheckB);
    free(CheckC);
    free(hA);
    free(hB);
    free(hC);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    this->isInitialized = false;
  }
};

// FP16 GEMM benchmark: __half inputs/outputs, FP32 accumulation
// (alpha/beta are float). Supports both Ex and typed (Hgemm) APIs.
class blasTestFP16 : public blasTest<__half, __half, __half, float> {
  // Shadow the base-class defaults with the FP16 descriptor set.
  cudaDataType AType = CUDA_R_16F;
  cudaDataType BType = CUDA_R_16F;
  cudaDataType CType = CUDA_R_16F;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;

public:
  blasTestFP16(bool isEx, bool isSB, bool transA, bool transB, size_t m,
               size_t n, size_t k, size_t lda, size_t ldb, size_t ldc,
               size_t batch, float alpha, float beta)
      : blasTest(isEx, isSB, transA, transB, m, n, k, lda, ldb, ldc, batch,
                 alpha, beta) {
    this->type = "FP16";
  }

  // Issues one GEMM via cublasGemmEx / cublasGemmStridedBatchedEx (isEx) or
  // the typed cublasHgemm / cublasHgemmStridedBatched entry points.
  cublasStatus_t runSingleTest() override {
    cublasStatus_t status;

    if (this->isEx) {
      if (this->isSB) {
        status = cublasGemmStridedBatchedEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            this->strideA, (void *)this->dB, this->BType, this->ldb,
            this->strideB, (void *)&(this->beta), (void *)this->dC, this->CType,
            this->ldc, this->strideC, this->batch, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      } else { // Ex API, single GEMM
        status = cublasGemmEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            (void *)this->dB, this->BType, this->ldb, (void *)&(this->beta),
            (void *)this->dC, this->CType, this->ldc, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      }
    } else { // typed API takes __half scalars, so convert alpha/beta
      __half halfal = this->alpha;
      __half halfbe = this->beta;
      if (this->isSB) {
        status = cublasHgemmStridedBatched(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            &(halfal), this->dA, this->lda, this->strideA, this->dB, this->ldb,
            this->strideB, &(halfbe), this->dC, this->ldc, this->strideC,
            this->batch);
      } else {
        status =
            cublasHgemm(this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
                        this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m,
                        this->n, this->k, &(halfal), this->dA, this->lda,
                        this->dB, this->ldb, &(halfbe), this->dC, this->ldc);
      }
    }
    return status;
  }

  // Compares the GPU result against the CPU reference via RMSE.
  void verify() override {
    blasTest::verify();
    double sumSquaredError = 0;
    // BUGFIX: C is column-major, so element (i, j) lives at j * ldc + i.
    // The original indexed with i * n + j, which compares the wrong region
    // whenever ldc != m.
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        double error =
            convert(hC[j * ldc + i]) - (double)(this->CheckC[j * ldc + i]);
        sumSquaredError += error * error;
      }
    }
    auto rmse = std::sqrt(sumSquaredError / (m * n));
    printf("RMSE: %.8f\n", rmse);
    if (rmse > 1e-2) // loose tolerance: half-precision storage
      printf("FAIL\n");
    else
      printf("PASS\n");
  }
};

// BF16 GEMM benchmark: __nv_bfloat16 inputs/outputs, FP32 accumulation.
// Only the Ex API is supported (there is no typed bf16 GEMM entry point here).
class blasTestBF16
    : public blasTest<__nv_bfloat16, __nv_bfloat16, __nv_bfloat16, float> {
  // Shadow the base-class defaults with the BF16 descriptor set.
  cudaDataType AType = CUDA_R_16BF;
  cudaDataType BType = CUDA_R_16BF;
  cudaDataType CType = CUDA_R_16BF;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;

public:
  blasTestBF16(bool isEx, bool isSB, bool transA, bool transB, size_t m,
               size_t n, size_t k, size_t lda, size_t ldb, size_t ldc,
               size_t batch, float alpha, float beta)
      : blasTest(isEx, isSB, transA, transB, m, n, k, lda, ldb, ldc, batch,
                 alpha, beta) {
    this->type = "BF16";
  }

  // Issues one GEMM via cublasGemmEx / cublasGemmStridedBatchedEx.
  // The non-Ex path is rejected at runtime.
  cublasStatus_t runSingleTest() override {
    cublasStatus_t status = CUBLAS_STATUS_NOT_SUPPORTED;

    if (this->isEx) {
      if (this->isSB) {
        status = cublasGemmStridedBatchedEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            this->strideA, (void *)this->dB, this->BType, this->ldb,
            this->strideB, (void *)&(this->beta), (void *)this->dC, this->CType,
            this->ldc, this->strideC, this->batch, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      } else { // Ex API, single GEMM
        status = cublasGemmEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            (void *)this->dB, this->BType, this->ldb, (void *)&(this->beta),
            (void *)this->dC, this->CType, this->ldc, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      }
    } else { // no typed bf16 GEMM — hard error
      printf("[ERROR]:TYPE %s not support non-Ex mode!\n", this->type);
      exit(3);
    }
    return status;
  }

  // Compares the GPU result against the CPU reference via RMSE.
  void verify() override {
    blasTest::verify();
    double sumSquaredError = 0;
    // BUGFIX: C is column-major, so element (i, j) lives at j * ldc + i.
    // The original indexed with i * n + j, which compares the wrong region
    // whenever ldc != m.
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        double error =
            convert(hC[j * ldc + i]) - (double)(this->CheckC[j * ldc + i]);
        sumSquaredError += error * error;
      }
    }
    auto rmse = std::sqrt(sumSquaredError / (m * n));
    printf("RMSE: %.8f\n", rmse);
    if (rmse > 1e-1) // loosest tolerance: bf16 has only 8 mantissa bits
      printf("FAIL\n");
    else
      printf("PASS\n");
  }
};

// INT8 GEMM benchmark: int8 inputs, int32 outputs and int32 accumulation
// (alpha/beta are int). Only the Ex API is supported.
class blasTestI8 : public blasTest<int8_t, int8_t, int, int> {
  // Shadow the base-class defaults with the INT8 descriptor set.
  cudaDataType AType = CUDA_R_8I;
  cudaDataType BType = CUDA_R_8I;
  cudaDataType CType = CUDA_R_32I;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32I;

public:
  blasTestI8(bool isEx, bool isSB, bool transA, bool transB, size_t m, size_t n,
             size_t k, size_t lda, size_t ldb, size_t ldc, size_t batch,
             float alpha, float beta)
      : blasTest(isEx, isSB, transA, transB, m, n, k, lda, ldb, ldc, batch,
                 alpha, beta) {
    this->type = "I8";
  }

  // Issues one GEMM via cublasGemmEx / cublasGemmStridedBatchedEx.
  // The non-Ex path is rejected at runtime.
  cublasStatus_t runSingleTest() override {
    cublasStatus_t status = CUBLAS_STATUS_NOT_SUPPORTED;

    if (this->isEx) {
      if (this->isSB) {
        status = cublasGemmStridedBatchedEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            this->strideA, (void *)this->dB, this->BType, this->ldb,
            this->strideB, (void *)&(this->beta), (void *)this->dC, this->CType,
            this->ldc, this->strideC, this->batch, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      } else { // Ex API, single GEMM
        status = cublasGemmEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            (void *)this->dB, this->BType, this->ldb, (void *)&(this->beta),
            (void *)this->dC, this->CType, this->ldc, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      }
    } else { // no typed int8 GEMM — hard error
      printf("[ERROR]:TYPE %s not support non-Ex mode!\n", this->type);
      exit(3);
    }
    return status;
  }

  // Compares the GPU result against the CPU reference via RMSE.
  void verify() override {
    blasTest::verify();
    double sumSquaredError = 0;
    // BUGFIX: C is column-major, so element (i, j) lives at j * ldc + i.
    // The original indexed with i * n + j, which compares the wrong region
    // whenever ldc != m.
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        double error =
            convert(hC[j * ldc + i]) - (double)(this->CheckC[j * ldc + i]);
        sumSquaredError += error * error;
      }
    }
    auto rmse = std::sqrt(sumSquaredError / (m * n));
    printf("RMSE: %.8f\n", rmse);
    if (rmse > 1e-6) // integer math should be near-exact vs the float ref
      printf("FAIL\n");
    else
      printf("PASS\n");
  }
};

// TF32 GEMM benchmark: FP32 storage, tensor-core TF32 compute
// (CUBLAS_COMPUTE_32F_FAST_TF32). Only the Ex API is supported.
class blasTestTF32 : public blasTest<float, float, float, float> {
  // Shadow the base-class defaults with the TF32 descriptor set.
  cudaDataType AType = CUDA_R_32F;
  cudaDataType BType = CUDA_R_32F;
  cudaDataType CType = CUDA_R_32F;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32F_FAST_TF32;

public:
  blasTestTF32(bool isEx, bool isSB, bool transA, bool transB, size_t m,
               size_t n, size_t k, size_t lda, size_t ldb, size_t ldc,
               size_t batch, float alpha, float beta)
      : blasTest(isEx, isSB, transA, transB, m, n, k, lda, ldb, ldc, batch,
                 alpha, beta) {
    this->type = "TF32";
  }

  // Issues one GEMM via cublasGemmEx / cublasGemmStridedBatchedEx.
  // The non-Ex path is rejected at runtime (TF32 requires the Ex API to
  // select the FAST_TF32 compute type).
  cublasStatus_t runSingleTest() override {
    cublasStatus_t status = CUBLAS_STATUS_NOT_SUPPORTED;

    if (this->isEx) {
      if (this->isSB) {
        status = cublasGemmStridedBatchedEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            this->strideA, (void *)this->dB, this->BType, this->ldb,
            this->strideB, (void *)&(this->beta), (void *)this->dC, this->CType,
            this->ldc, this->strideC, this->batch, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      } else { // Ex API, single GEMM
        status = cublasGemmEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            (void *)this->dB, this->BType, this->ldb, (void *)&(this->beta),
            (void *)this->dC, this->CType, this->ldc, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      }
    } else {
      printf("[ERROR]:TYPE %s not support non-Ex mode!\n", this->type);
      exit(3);
    }
    return status;
  }

  // Compares the GPU result against the CPU reference via RMSE.
  void verify() override {
    blasTest::verify();
    double sumSquaredError = 0;
    // BUGFIX: C is column-major, so element (i, j) lives at j * ldc + i.
    // The original indexed with i * n + j, which compares the wrong region
    // whenever ldc != m.
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        double error =
            convert(hC[j * ldc + i]) - (double)(this->CheckC[j * ldc + i]);
        sumSquaredError += error * error;
      }
    }
    auto rmse = std::sqrt(sumSquaredError / (m * n));
    printf("RMSE: %.8f\n", rmse);
    if (rmse > 1e-2) // TF32 mantissa is truncated to 10 bits
      printf("FAIL\n");
    else
      printf("PASS\n");
  }
};

// FP32 GEMM benchmark: float everywhere, FP32 accumulation.
// Supports both Ex and typed (Sgemm) APIs.
class blasTestFP32 : public blasTest<float, float, float, float> {
  // Shadow the base-class defaults with the FP32 descriptor set.
  cudaDataType AType = CUDA_R_32F;
  cudaDataType BType = CUDA_R_32F;
  cudaDataType CType = CUDA_R_32F;
  cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;

public:
  blasTestFP32(bool isEx, bool isSB, bool transA, bool transB, size_t m,
               size_t n, size_t k, size_t lda, size_t ldb, size_t ldc,
               size_t batch, float alpha, float beta)
      : blasTest(isEx, isSB, transA, transB, m, n, k, lda, ldb, ldc, batch,
                 alpha, beta) {
    this->type = "FP32";
  }

  // Issues one GEMM via cublasGemmEx / cublasGemmStridedBatchedEx (isEx) or
  // the typed cublasSgemm / cublasSgemmStridedBatched entry points.
  cublasStatus_t runSingleTest() override {
    cublasStatus_t status;

    if (this->isEx) {
      if (this->isSB) {
        status = cublasGemmStridedBatchedEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            this->strideA, (void *)this->dB, this->BType, this->ldb,
            this->strideB, (void *)&(this->beta), (void *)this->dC, this->CType,
            this->ldc, this->strideC, this->batch, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      } else { // Ex API, single GEMM
        status = cublasGemmEx(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            (void *)&(this->alpha), (void *)this->dA, this->AType, this->lda,
            (void *)this->dB, this->BType, this->ldb, (void *)&(this->beta),
            (void *)this->dC, this->CType, this->ldc, this->computeType,
            CUBLAS_GEMM_DEFAULT);
      }
    } else { // typed single-precision API
      if (this->isSB) {
        status = cublasSgemmStridedBatched(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            &(this->alpha), this->dA, this->lda, this->strideA, this->dB,
            this->ldb, this->strideB, &(this->beta), this->dC, this->ldc,
            this->strideC, this->batch);
      } else {
        status = cublasSgemm(
            this->handle, this->transA ? CUBLAS_OP_T : CUBLAS_OP_N,
            this->transB ? CUBLAS_OP_T : CUBLAS_OP_N, this->m, this->n, this->k,
            &(this->alpha), this->dA, this->lda, this->dB, this->ldb,
            &(this->beta), this->dC, this->ldc);
      }
    }
    return status;
  }

  // Compares the GPU result against the CPU reference via RMSE.
  void verify() override {
    blasTest::verify();
    double sumSquaredError = 0;
    // BUGFIX: C is column-major, so element (i, j) lives at j * ldc + i.
    // The original indexed with i * n + j, which compares the wrong region
    // whenever ldc != m.
    for (size_t j = 0; j < n; ++j) {
      for (size_t i = 0; i < m; ++i) {
        double error =
            convert(hC[j * ldc + i]) - (double)(this->CheckC[j * ldc + i]);
        sumSquaredError += error * error;
      }
    }
    auto rmse = std::sqrt(sumSquaredError / (m * n));
    printf("RMSE: %.8f\n", rmse);
    if (rmse > 1e-4) // both sides are FP32; only summation order differs
      printf("FAIL\n");
    else
      printf("PASS\n");
  }
};

// Reads the benchmark configuration file: one test case per line, twelve
// whitespace-separated integers
// (type isEx isSB transA transB m n k lda ldb ldc batch).
// Lines that do not yield 12 parseable integers (blank, short, or malformed)
// are skipped; the original pushed records padded with stale/zero values for
// such lines because it ignored stream-extraction failures.
std::vector<std::vector<long long int>> readlist(const std::string &filename) {
  std::ifstream file(filename);
  std::vector<std::vector<long long int>> testlist;
  std::string line;
  while (std::getline(file, line)) {
    std::stringstream ss(line);
    std::vector<long long int> record;
    record.reserve(12);
    long long int value;
    while (record.size() < 12 && (ss >> value))
      record.push_back(value);
    if (record.size() == 12)
      testlist.push_back(record);
  }
  return testlist;
}

// Entry point.
//   argv[1] - init_type: 0 zeros / 1 rand / 2 sin-cos (default 1)
//   argv[2] - timed iterations per test case (default 200)
//   argv[3] - test_type: which datatype rows of config.txt to run
//             (0:fp16 1:bf16 2:I8 3:tf32 4:fp32, default 0)
// Each row of config.txt is
//   type isEx isSB transA transB m n k lda ldb ldc batch
// and is executed only when its datatype tag matches test_type.
int main(int argc, char *argv[]) {
  init_type = (argc > 1) ? std::stoi(argv[1]) : 1;
  int test_iter = (argc > 2) ? std::stoi(argv[2]) : 200;
  int warmup_iter = WARMUPNUM; // project-wide constant from benchmark_config.h
  int test_type = (argc > 3) ? std::stoi(argv[3]) : 0;
  printf("init_type: %d, test_iter: %d, warmup_iter: %d\n", init_type,
         test_iter, warmup_iter);

  auto testlist = readlist("config.txt");
  // Iterate by const reference: each record is a vector and need not be copied.
  for (const auto &elem : testlist) {
    // Guard against short records so the elem[1]..elem[11] accesses are safe.
    if (elem.size() < 12)
      continue;
    // 0:fp16 1:bf16 2:I8 3:tf32 4:fp32
    if (elem[0] == 0 && test_type == 0) {
      blasTestFP16 current(bool(elem[1]), bool(elem[2]), elem[3], elem[4],
                           elem[5], elem[6], elem[7], elem[8], elem[9],
                           elem[10], elem[11], 1.0, 0.0);
      current.runTest(warmup_iter, test_iter);
    } else if (elem[0] == 1 && test_type == 1) {
      blasTestBF16 current(bool(elem[1]), bool(elem[2]), elem[3], elem[4],
                           elem[5], elem[6], elem[7], elem[8], elem[9],
                           elem[10], elem[11], 1.0, 0.0);
      current.runTest(warmup_iter, test_iter);
    } else if (elem[0] == 2 && test_type == 2) {
      blasTestI8 current(bool(elem[1]), bool(elem[2]), elem[3], elem[4],
                         elem[5], elem[6], elem[7], elem[8], elem[9], elem[10],
                         elem[11], 1.0, 0.0);
      current.runTest(warmup_iter, test_iter);
    } else if (elem[0] == 3 && test_type == 3) {
      blasTestTF32 current(bool(elem[1]), bool(elem[2]), elem[3], elem[4],
                           elem[5], elem[6], elem[7], elem[8], elem[9],
                           elem[10], elem[11], 1.0, 0.0);
      current.runTest(warmup_iter, test_iter);
    } else if (elem[0] == 4 && test_type == 4) {
      blasTestFP32 current(bool(elem[1]), bool(elem[2]), elem[3], elem[4],
                           elem[5], elem[6], elem[7], elem[8], elem[9],
                           elem[10], elem[11], 1.0, 0.0);
      current.runTest(warmup_iter, test_iter);
    }
  }
}
