/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.  All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>    // std::sort (used by LtgemmCustomFind)
#include <cmath>        // ceil
#include <cstdio>
#include <cstdlib>
#include <cstring>      // strcpy
#include <ctime>
#include <type_traits>  // std::is_same
#include <vector>

#include <cuda_fp16.h>
#include <cuda_profiler_api.h>

#include <sys/time.h>

#include "common.h"
using namespace std;
using namespace fastertransformer;

/*
 * Dump one benchmarked cublasLt algorithm configuration.
 *
 * Decodes the opaque algo descriptor inside `perf` and prints all its knobs
 * together with the measured timing to stdout.  When hasPrint == 0, the same
 * configuration is additionally appended to the gemm config file `fout`
 * (this is how the winning configuration gets persisted).
 *
 * Returns 1 once a line has been written to `fout`; otherwise returns the
 * incoming hasPrint value unchanged.
 */
int printPerfStructure(int m, int n, int k, const customMatmulPerf_t &perf, FILE* fout, int is_fp16, int hasPrint) {
    const cublasLtMatmulAlgo_t *matmulAlgo = &perf.algo;

    // Decode the individual configuration attributes out of the algo blob.
    int algoId = 0, tile = 0, swizzle = 0, customOption = 0, numSplitsK = 0, reductionScheme = 0, stages = 0;
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_ID,               &algoId,          sizeof(algoId),          NULL);
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_TILE_ID,          &tile,            sizeof(tile),            NULL);
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM,       &numSplitsK,      sizeof(numSplitsK),      NULL);
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &reductionScheme, sizeof(reductionScheme), NULL);
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING,    &swizzle,         sizeof(swizzle),         NULL);
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION,    &customOption,    sizeof(customOption),    NULL);
#ifdef CUDA11_MODE
    // The stages attribute only exists in the CUDA 11 cublasLt API;
    // otherwise `stages` keeps its 0 initializer.
    cublasLtMatmulAlgoConfigGetAttribute(matmulAlgo, CUBLASLT_ALGO_CONFIG_STAGES_ID,        &stages,          sizeof(stages),          NULL);
#endif

    printf("algo={ Id=%d, tileIdx=%d (%s) splitK=%d reduc=%d swizzle=%d custom=%d stages=%d} status %d "
           "time %fms workspace=%d mathMode=%d waves=%f\n",
           algoId, tile, matmulTileName[tile],
           numSplitsK, reductionScheme,
           swizzle, customOption, stages,
           perf.status,
           perf.time,
           (int)perf.workspaceSize,
           (int)perf.mathMode,
           perf.wavesCount);

    if (hasPrint != 0) {
        // A config line was already written for this GEMM; don't write another.
        return hasPrint;
    }
    fprintf(fout, "%d %d %d %d %d %d %d %d %d %d %d %d %d %f\n", is_fp16 ? HALF_DATATYPE:FLOAT_DATATYPE, 1, m, n, k,
              algoId, customOption, tile, numSplitsK, swizzle, reductionScheme, (int)perf.workspaceSize, stages, perf.time);
    return 1;
}

/*
 * Comparator used with std::sort to order benchmark results by run time.
 *
 * Successful measurements come first, ordered by ascending time; failed ones
 * sink to the back.  The original predicate
 *     (a.status == SUCCESS) && (a.time < b.time)
 * is not a strict weak ordering (a failed entry compares "equivalent" to
 * everything, breaking transitivity of equivalence), and std::sort with a
 * non-SWO comparator is undefined behavior.  It also read the possibly
 * indeterminate `time` field of failed entries.  This formulation fixes both.
 */
static inline bool
time_compare(const customMatmulPerf_t &perf_a, const customMatmulPerf_t &perf_b) {
    const bool a_ok = (perf_a.status == CUBLAS_STATUS_SUCCESS);
    const bool b_ok = (perf_b.status == CUBLAS_STATUS_SUCCESS);
    if (a_ok != b_ok) {
        return a_ok;  // successful measurements sort before failed ones
    }
    // Both succeeded (compare times) or both failed (equivalent).
    return a_ok && (perf_a.time < perf_b.time);
}


/*
 * Time a single cublasLt algorithm configuration on the given problem.
 *
 * First validates the config with cublasLtMatmulAlgoCheck and its workspace
 * requirement against `workSpaceSizeInBytes`; if valid, runs the matmul
 * `kernelRepeats` times back-to-back between two CUDA events on `stream` and
 * stores the averaged time plus heuristic info into `perfResults`.
 *
 * `perfResults` is only written on full success; the returned status is the
 * check status, the first failing matmul status, or
 * CUBLAS_STATUS_INTERNAL_ERROR if any CUDA event call failed.
 */
static cublasStatus_t 
customMatmulRun(cublasLtHandle_t ltHandle,  // to get the capabilities (required a GPU)
                 cublasLtMatmulDesc_t operationDesc,
                 const void *alpha, /* host or device pointer */
                 const void *A,
                 cublasLtMatrixLayout_t Adesc,
                 const void *B,
                 cublasLtMatrixLayout_t Bdesc,
                 const void *beta, /* host or device pointer */
                 const void *C,
                 cublasLtMatrixLayout_t Cdesc,
                 void *D,
                 cublasLtMatrixLayout_t Ddesc,
                 const cublasLtMatmulAlgo_t &algo,
                 int kernelRepeats,  
                 void *workSpace,
                 size_t workSpaceSizeInBytes,                 
                 customMatmulPerf_t &perfResults,                 
                 cudaStream_t stream,
                 cudaEvent_t &startEvent,
                 cudaEvent_t &stopEvent)
{
    // Validate that this algo/config combination applies to the problem.
    cublasLtMatmulHeuristicResult_t heurResult;
    cublasStatus_t algoStatus = cublasLtMatmulAlgoCheck(ltHandle,
                                                        operationDesc,
                                                        Adesc,
                                                        Bdesc,
                                                        Cdesc,
                                                        Ddesc,
                                                        &algo,
                                                        &heurResult);
    if (algoStatus != CUBLAS_STATUS_SUCCESS) {
        return algoStatus;
    }
    if (heurResult.workspaceSize > workSpaceSizeInBytes) {
        // Config needs more workspace than the caller provided.
        return CUBLAS_STATUS_NOT_SUPPORTED;
    }

    // Time `kernelRepeats` back-to-back launches between two events.
    cudaError_t errStart = cudaEventRecord(startEvent, stream);
    for (int rep = 0; rep < kernelRepeats; rep++) {
        cublasStatus_t runStatus = cublasLtMatmul(ltHandle,
                                                  operationDesc,
                                                  alpha,
                                                  A, Adesc,
                                                  B, Bdesc,
                                                  beta,
                                                  C, Cdesc,
                                                  D, Ddesc,
                                                  &algo,
                                                  workSpace,
                                                  workSpaceSizeInBytes,
                                                  stream);
        if (runStatus != CUBLAS_STATUS_SUCCESS) {
            algoStatus = runStatus;
            break;
        }
    }
    cudaError_t errStop    = cudaEventRecord(stopEvent, stream);
    cudaError_t errSync    = cudaEventSynchronize(stopEvent);
    float elapsedMs = 0.0f;
    cudaError_t errElapsed = cudaEventElapsedTime(&elapsedMs, startEvent, stopEvent);
    if ((errStart   != cudaSuccess) ||
        (errStop    != cudaSuccess) ||
        (errSync    != cudaSuccess) ||
        (errElapsed != cudaSuccess)) {
        algoStatus = CUBLAS_STATUS_INTERNAL_ERROR;
    }

    // Only record fully successful measurements.
    if (algoStatus == CUBLAS_STATUS_SUCCESS) {
        perfResults.algo          = algo;
        perfResults.time          = elapsedMs / kernelRepeats;  // average per-run time
        perfResults.workspaceSize = heurResult.workspaceSize;
        perfResults.wavesCount    = heurResult.wavesCount;
    }

    return algoStatus;
}

/*
 * Exhaustively benchmark cublasLt algorithms for C = alpha*A*B + beta*C
 * (column-major, CUBLAS_OP_N for both operands) and log the timing results.
 *
 * The search enumerates up to AlgoCombinations configurations per algo id
 * (tile x stages x custom option x CTA swizzling x splitK x reduction
 * scheme), filtered by cublasLtMatmulAlgoCheck and the provided workspace
 * size.  If fewer than maxNumTraversal candidates survive, all of them are
 * benchmarked; otherwise only the cublasLt heuristic picks plus the
 * zero-workspace candidates are run, to bound the search time.
 *
 * `perfResults` must hold at least AlgoCombinations entries; on return its
 * first AlgoCount entries are sorted by ascending run time.  Timings are
 * printed to stdout only (the caller decides whether to persist the winner
 * to `fout` via printPerfStructure).
 *
 * Returns 0 if the last cublasLt/CUDA call succeeded, 1 otherwise.
 * NOTE(review): `status` reflects the *last* operation, so a late
 * non-fatal failure can flip the return value; callers currently ignore it.
 */
template<typename T>
int LtgemmCustomFind(cublasLtHandle_t ltHandle,
                  int m,
                  int n,
                  int k,
                  const T *alpha, /* host pointer */
                  const T *A,
                  const T *B,
                  const T *beta, /* host pointer */
                  T *C,
                  void *workSpace,
                  size_t workSpaceSize,
                  FILE* fout,
                  customMatmulPerf_t perfResults[],
                  int AlgoCombinations)
{
    cublasStatus_t status = CUBLAS_STATUS_SUCCESS;
    // BUGFIX: initialize the events to NULL.  They were previously left
    // uninitialized, so an early `goto CLEANUP` (e.g. a failing descriptor
    // creation) made the `if (startEvent)` guards at CLEANUP read
    // indeterminate handles and pass garbage to cudaEventDestroy.
    cudaEvent_t startEvent = NULL;
    cudaEvent_t stopEvent = NULL;
    int is_fp16 = (sizeof(T) == sizeof(half) ? 1 : 0);

    cublasLtMatmulDesc_t operationDesc = NULL;
    cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL;

    cudaStream_t stream = 0;
    // SplitK values that we are going to try when SplitK is supported for a given algo
    const int splitKSequenceA[] = {2, 3, 4, 5, 6, 8, 12, 16, 32};
    // Let's try a fixed number of combinations
    int AlgoCount = 0;          // candidates with 0 <= workspace <= workSpaceSize
    int AlgoCountRestrict = 0;  // candidates with workspace == 0
    const int maxNumTraversal = 50;  // max number of candidates benchmarked exhaustively
    // Heap-allocated instead of stack VLAs: the caller passes
    // AlgoCombinations == 5000, and two VLAs of cublasLtMatmulAlgo_t that
    // size (~320KB each) risk stack overflow.
    vector<cublasLtMatmulAlgo_t> algos(AlgoCombinations);          // 0 <= workspace <= 32MB
    vector<cublasLtMatmulAlgo_t> algosRestrict(AlgoCombinations);  // workspace == 0
    int kernelRepeats = 100;  // number of times each algo is run back to back for timing
    int nbAlgoIds = 0;  // number of algorithm ids actually returned by cublasLtMatmulAlgoGetIds
    #define ALGO_IDS 200  // number of algorithm ids requested
    int algoIdA[ALGO_IDS];  // algorithm ids returned by cublasLtMatmulAlgoGetIds
    cudaDataType_t Atype, Btype, Ctype, scaleType;
#ifdef CUDA11_MODE
    cublasComputeType_t computeType;
#else
    cudaDataType_t computeType;
#endif

    // Pick the data/compute types from the template parameter.
    if(sizeof(T) == sizeof(float)){
      scaleType = CUDA_R_32F, Atype = CUDA_R_32F, Btype = CUDA_R_32F, Ctype = CUDA_R_32F;
#ifdef CUDA11_MODE
      computeType = CUBLAS_COMPUTE_32F;
#else
      computeType = CUDA_R_32F;
#endif
    }else{
      scaleType = CUDA_R_16F, Atype = CUDA_R_16F, Btype = CUDA_R_16F, Ctype = CUDA_R_16F;
#ifdef CUDA11_MODE
      computeType = CUBLAS_COMPUTE_16F;
#else
      computeType = CUDA_R_16F;
#endif
    }

    // Create operation descriptor; see cublasLtMatmulDescAttributes_t for details
    // about defaults (no transpose attributes are set, so A and B are CUBLAS_OP_N).
#ifdef CUDA11_MODE
    status = cublasLtMatmulDescCreate(&operationDesc, computeType, scaleType); //  creates a matrix multiply descriptor 
#else
    status = cublasLtMatmulDescCreate(&operationDesc, computeType);
#endif
    if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;

    // Matrix descriptors: column-major A(m x k), B(k x n), C(m x n), densely
    // packed (leading dimension == row count).
    status = cublasLtMatrixLayoutCreate(
        &Adesc, Atype, m, k, m);
    if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;
    status = cublasLtMatrixLayoutCreate(
        &Bdesc, Btype, k, n, k);
    if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;

    status = cublasLtMatrixLayoutCreate(&Cdesc, Ctype, m, n, m);
    if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP;

    // Create CUDA events to time the execution of each algo.
    // BUGFIX: set an error status before bailing out; previously status was
    // still CUBLAS_STATUS_SUCCESS here, so the function returned 0 (success)
    // even though event creation failed.
    if (cudaEventCreate(&startEvent, cudaEventBlockingSync) != cudaSuccess) {
        status = CUBLAS_STATUS_INTERNAL_ERROR;
        goto CLEANUP;
    }
    if (cudaEventCreate(&stopEvent, cudaEventBlockingSync) != cudaSuccess) {
        status = CUBLAS_STATUS_INTERNAL_ERROR;
        goto CLEANUP;
    }

    // Request the first ALGO_IDS AlgoIds available
    status = cublasLtMatmulAlgoGetIds( ltHandle, computeType, scaleType, Atype, Btype, Ctype, Ctype, ALGO_IDS, algoIdA, &nbAlgoIds);
    if (status != CUBLAS_STATUS_SUCCESS) goto CLEANUP; 

    // Loop over the Algo IDs, enumerating each one's configuration space.
    for (int idx = 0; (idx < nbAlgoIds) && (AlgoCount < AlgoCombinations); idx++) {   
        cublasLtMatmulAlgo_t algo;
        size_t sizeWritten = 0;
        /* Initialize algo structure with given Algo ID */
        status = cublasLtMatmulAlgoInit(ltHandle, computeType, scaleType, Atype, Btype, Ctype, Ctype, algoIdA[idx], &algo);
        if (status != CUBLAS_STATUS_SUCCESS) {
            continue;
        }
        // Query the tile enums supported by that algo.
        cublasLtMatmulAlgoCapGetAttribute( &algo, CUBLASLT_ALGO_CAP_TILE_IDS, NULL, 0, &sizeWritten);
        int nbTiles = int(sizeWritten/sizeof(int));
        // vector instead of new[]/delete[]: exception-safe, no manual free.
        vector<int> tileA(nbTiles == 0 ? 1 : nbTiles);
        if(nbTiles == 0){
            tileA[0] = CUBLASLT_MATMUL_TILE_UNDEFINED;
            nbTiles = 1;
        }
#ifdef CUDA11_MODE
        // Query the pipeline-stages counts supported by that algo (CUDA 11 only).
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_STAGES_IDS, NULL, 0, &sizeWritten);
        int nbStages = int(sizeWritten/sizeof(int));
        vector<int> stagesA(nbStages == 0 ? 1 : nbStages);
        if (nbStages == 0) {
            stagesA[0] = CUBLASLT_MATMUL_STAGES_UNDEFINED;
            nbStages = 1;
        } else {
            cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_STAGES_IDS, stagesA.data(), sizeof(int)*nbStages, &sizeWritten);
        }
#endif
        int splitkSupport, redMask, swizzlingMax, customOptionMax;
        // Retrieve Algo Capabilities attributes to be able to set up the loops
        // over the different combinations.
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_TILE_IDS, tileA.data(), sizeof(int)*nbTiles, &sizeWritten);
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_SPLITK_SUPPORT, &splitkSupport, sizeof(splitkSupport), &sizeWritten);
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_REDUCTION_SCHEME_MASK, &redMask, sizeof(redMask), &sizeWritten);
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_CTA_SWIZZLING_SUPPORT, &swizzlingMax, sizeof(swizzlingMax), &sizeWritten);        
        cublasLtMatmulAlgoCapGetAttribute(&algo, CUBLASLT_ALGO_CAP_CUSTOM_OPTION_MAX, &customOptionMax, sizeof(customOptionMax), &sizeWritten);
        
        /* Loop over the different tiles */        
        for (int tileIdx = 0; tileIdx < nbTiles; tileIdx++) {
#ifdef CUDA11_MODE
            /* Loop over different stages count */
          for (int stagesIdx = 0; stagesIdx < nbStages; stagesIdx++) {
            cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_STAGES_ID, &stagesA[stagesIdx], sizeof(stagesA[stagesIdx]));
#endif
            /* Loop over the different custom option if any */
            for (int customOption = 0; customOption <= customOptionMax; customOption++) {
               cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_CUSTOM_OPTION, &customOption, sizeof(customOption));
               /* Loop over the CTA swizzling support.
                  Renamed from `k`, which shadowed the GEMM dimension parameter `k`. */
               for (int swz = 0; swz <= swizzlingMax; swz++) {
                    int splitK_trial = 0;
                    if (splitkSupport) {
                        splitK_trial += sizeof(splitKSequenceA) / sizeof(splitKSequenceA[0]);
                    }
                    // Loop over the splitK values of splitKSequenceA, in addition to the case where splitK is not enabled
                    for (int l = 0; (l < (1 + splitK_trial)) && (AlgoCount < AlgoCombinations); l++) {
                        /* Setup attributes of the algo to run */                                                
                       cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_TILE_ID, &tileA[tileIdx], sizeof(tileA[tileIdx]));
                       int splitK_val = 0;
                       int redScheme = CUBLASLT_REDUCTION_SCHEME_NONE;
                       cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitK_val, sizeof(splitK_val)); 
                       cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_CTA_SWIZZLING, &swz, sizeof(swz)); 
                       cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &redScheme, sizeof(int));  
                                                                        
                        if (l > 0) { // Split-K case
                            splitK_val = splitKSequenceA[l - 1];
                            cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_SPLITK_NUM, &splitKSequenceA[l - 1], sizeof(splitKSequenceA[l - 1]));
                            /* Going over all the reduction schemes */
                            for (redScheme = 1 ; redScheme < (int)CUBLASLT_REDUCTION_SCHEME_MASK && (AlgoCount < AlgoCombinations); redScheme = redScheme << 1) {
                                if (redScheme & redMask) {
                                    cublasLtMatmulAlgoConfigSetAttribute(&algo, CUBLASLT_ALGO_CONFIG_REDUCTION_SCHEME, &redScheme, sizeof(redScheme));
                                    
                                    cublasLtMatmulHeuristicResult_t heurResult;
                                    cublasStatus_t algoStatus = cublasLtMatmulAlgoCheck( ltHandle,
                                                                                        operationDesc,
                                                                                        Adesc,
                                                                                        Bdesc,
                                                                                        Cdesc,
                                                                                        Cdesc,
                                                                                        &algo, 
                                                                                        &heurResult);
                                    if (heurResult.workspaceSize > workSpaceSize) {
                                      algoStatus = CUBLAS_STATUS_NOT_SUPPORTED; // not enough workspace
                                    }else if(heurResult.workspaceSize == 0){
                                      // Also remember zero-workspace candidates for the restricted run.
                                      if(algoStatus == CUBLAS_STATUS_SUCCESS){
                                        algosRestrict[AlgoCountRestrict++] = algo;
                                      }
                                    }
                                    if(algoStatus == CUBLAS_STATUS_SUCCESS){
                                      algos[AlgoCount++] = algo;
                                    }                      
                                } // end if
                            } // end for
                        } else { // Non-splitK case
                            /* if user preference is ok with workspace */
                            if (AlgoCount < AlgoCombinations) {       
                                cublasLtMatmulHeuristicResult_t heurResult;
                                cublasStatus_t algoStatus = cublasLtMatmulAlgoCheck( ltHandle,
                                                                                    operationDesc,
                                                                                    Adesc,
                                                                                    Bdesc,
                                                                                    Cdesc,
                                                                                    Cdesc,
                                                                                    &algo, 
                                                                                    &heurResult);
                                if (heurResult.workspaceSize > workSpaceSize) {
                                  algoStatus = CUBLAS_STATUS_NOT_SUPPORTED; // not enough workspace
                                }else if(heurResult.workspaceSize == 0){
                                  // Also remember zero-workspace candidates for the restricted run.
                                  if(algoStatus == CUBLAS_STATUS_SUCCESS){
                                    algosRestrict[AlgoCountRestrict++] = algo;
                                  }
                                }
                                if(algoStatus == CUBLAS_STATUS_SUCCESS){
                                  algos[AlgoCount++] = algo;
                                }
                            }
                        }
                    }  // end l
                }  // end swz
            } //end customOption
#ifdef CUDA11_MODE
          } // end stagesIdx       
#endif                  
        } // end tileIdx
    } // end idx

    printf("AlgoCount: %d\n", AlgoCount);
    if(AlgoCount < maxNumTraversal){
      // Few enough candidates: benchmark them all (0 <= workspace <= 32MB).
      for(int i=0;i<AlgoCount;i++){
        status = customMatmulRun( ltHandle,
                                  operationDesc,
                                  alpha, /* host or device pointer */
                                  A, Adesc,
                                  B, Bdesc,
                                  beta, /* host or device pointer */
                                  C, Cdesc,
                                  C, Cdesc,
                                  algos[i],
                                  kernelRepeats,  
                                  workSpace,
                                  workSpaceSize,                 
                                  perfResults[i],
                                  stream,
                                  startEvent, stopEvent);
        perfResults[i].status = status;
      }
    }else{
      // Too many candidates: benchmark the heuristic picks plus the
      // zero-workspace candidates only.
      AlgoCount = 0;
      nbAlgoIds = 0;
      cublasLtMatmulPreference_t pref;
      cublasLtMatmulPreferenceCreate(&pref);
      uint64_t maxWorkSpaceSize = workSpaceSize; //(32MB)
      cublasLtMatmulPreferenceSetAttribute(
        pref, 
        CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
        &maxWorkSpaceSize,
        sizeof(maxWorkSpaceSize));
      cublasLtMatmulHeuristicResult_t heuristicResultsArray[maxNumTraversal];

      cublasLtMatmulAlgoGetHeuristic(
            ltHandle,
            operationDesc,
            Adesc,
            Bdesc,
            Cdesc,
            Cdesc,
            pref,
            maxNumTraversal,
            heuristicResultsArray,
            &nbAlgoIds);
      cublasLtMatmulPreferenceDestroy(pref);
      printf("return %d and run heuristic algo\n", nbAlgoIds);
      for(int i = 0; i < nbAlgoIds; i++){
        if(heuristicResultsArray[i].state == CUBLAS_STATUS_SUCCESS){
          status = customMatmulRun( ltHandle,
                                  operationDesc,
                                  alpha, /* host or device pointer */
                                  A, Adesc,
                                  B, Bdesc,
                                  beta, /* host or device pointer */
                                  C, Cdesc,
                                  C, Cdesc,
                                  heuristicResultsArray[i].algo,
                                  kernelRepeats,  
                                  workSpace,
                                  workSpaceSize,                 
                                  perfResults[AlgoCount],
                                  stream,
                                  startEvent, stopEvent);
          perfResults[AlgoCount].status = status;
          if (status == CUBLAS_STATUS_SUCCESS) AlgoCount++;
        }
      }

      // Run the workspace==0 candidates (no workspace buffer passed).
      printf("workspacesize==0, run %d algos\n", AlgoCountRestrict);
      for(int i=0;i<AlgoCountRestrict && i<(maxNumTraversal - nbAlgoIds);i++){
        status = customMatmulRun( ltHandle,
                                  operationDesc,
                                  alpha, /* host or device pointer */
                                  A, Adesc,
                                  B, Bdesc,
                                  beta, /* host or device pointer */
                                  C, Cdesc,
                                  C, Cdesc,
                                  algosRestrict[i],
                                  kernelRepeats,  
                                  NULL,
                                  0,                 
                                  perfResults[AlgoCount],
                                  stream,
                                  startEvent, stopEvent);
        perfResults[AlgoCount].status = status;
        if (status == CUBLAS_STATUS_SUCCESS) AlgoCount++;
      }
    }

    // Sort the results per run duration.
    std::sort(perfResults, perfResults + AlgoCount, time_compare);
    // Print timing and perf details.  hasPrint starts at 1, so this loop only
    // logs to stdout; the caller decides whether to persist perfResults[0].
    for (int i = 0, hasPrint = 1; i < AlgoCount; i++) {                
        printf( "result %03d : ", i);
        hasPrint = printPerfStructure(m, n, k, perfResults[i], fout, is_fp16, hasPrint);                          
    }


CLEANUP:
    // Descriptors are no longer needed as all GPU work was already enqueued.
    if (Cdesc) cublasLtMatrixLayoutDestroy(Cdesc);
    if (Bdesc) cublasLtMatrixLayoutDestroy(Bdesc);
    if (Adesc) cublasLtMatrixLayoutDestroy(Adesc);
    if (operationDesc) cublasLtMatmulDescDestroy(operationDesc);
    if (startEvent) cudaEventDestroy(startEvent);
    if (stopEvent) cudaEventDestroy(stopEvent);
    return status == CUBLAS_STATUS_SUCCESS ? 0 : 1;
}

/*
 * Benchmark every GEMM shape used by the decoding model and write the fastest
 * algorithm per GEMM to "decoding_gemm_config.in".
 *
 * For each of the 7 GEMMs the classic cublasGemmEx (or, for gemm index 5,
 * cublasGemmStridedBatchedEx with batch 3) algo range is swept; for fp16
 * (except the strided-batched case) cublasLt is additionally searched via
 * LtgemmCustomFind, and the overall winner is recorded in the config file.
 *
 * Fixes over the original: the config file handle and both cuBLAS handles
 * are now released at the end (they were leaked), the workspace allocation
 * is error-checked like the other allocations, and a typo in a log message
 * ("Beign") is corrected.
 */
template<typename T>
void generate_decoding_gemm_config(int batch_size,
                                  int beam_width,
                                  int head_number,
                                  int size_per_head,
                                  int vocab_size,
                                  int seq_len,
                                  int memory_hidden_units)
{
  FILE* fd = fopen("decoding_gemm_config.in", "w");
  if(fd == NULL)
  {
    printf("[ERROR] Cannot write to file decoding_gemm_config.in\n");
    return;
  }

  const int hidden_units = head_number * size_per_head;
  const int gemm_num = 7;
  int M[gemm_num];
  int N[gemm_num];
  int K[gemm_num];
  char mess[gemm_num][256];
  
  //gemm1: logits projection.  For fp16 the vocab size is padded up to a
  //multiple of 8 (tensor-core friendly).
  M[0] = batch_size * beam_width;
  K[0] = hidden_units;
  N[0] = std::is_same<T, float>::value ? vocab_size : (int)(ceil(vocab_size / 8.) * 8);
  strcpy(mess[0], "decoder_output * embedding_kernel -> embedding_output");

  //gemm2
  M[1] = batch_size * beam_width;
  K[1] = hidden_units;
  N[1] = hidden_units;
  strcpy(mess[1], "from_tensor * weightQ/K/V in masked attention");

  //gemm3
  M[2] = M[0] * seq_len;
  K[2] = memory_hidden_units;
  N[2] = hidden_units;
  strcpy(mess[2], "from_tensor * weightK/V in cross attention");

  //gemm4
  M[3] = batch_size * beam_width;
  K[3] = hidden_units;
  N[3] = hidden_units * 4;
  strcpy(mess[3], "ffn gemm1 ");

  //gemm5
  M[4] = batch_size * beam_width;
  K[4] = hidden_units * 4;
  N[4] = hidden_units; 
  strcpy(mess[4], "ffn gemm2");

  //gemm6: fused QKV as a strided-batched gemm (batch 3)
  M[5] = batch_size * beam_width;
  K[5] = hidden_units;
  N[5] = hidden_units;
  strcpy(mess[5], "from_tensor * QKV (batchstridedgemm) in masked attention");

  //gemm7: fused QKV as a single wide gemm
  M[6] = batch_size * beam_width;
  K[6] = hidden_units;
  N[6] = hidden_units * 3;
  strcpy(mess[6], "from_tensor * weight_QKV in one normal gemm");

  cublasHandle_t cublas_handle;
  check_cuda_error(cublasCreate(&cublas_handle));
  cublasLtHandle_t ltHandle;
  check_cuda_error(cublasLtCreate(&ltHandle));

  cudaDataType_t AType;
  cudaDataType_t BType;
  cudaDataType_t CType;
  cudaDataType_t computeType;
  int startAlgo, endAlgo;       // inclusive range of cublasGemmAlgo_t values to sweep
  const int ites = 100;         // timing iterations per algo
  struct timeval start, end;
  
  if(sizeof(T) == sizeof(float)){
    AType = CUDA_R_32F;
    BType = CUDA_R_32F;
    CType = CUDA_R_32F;
    computeType = CUDA_R_32F;
    startAlgo = (int)CUBLAS_GEMM_DEFAULT;
    endAlgo = (int)CUBLAS_GEMM_ALGO23;
  }
  else{
    AType = CUDA_R_16F;
    BType = CUDA_R_16F;
    CType = CUDA_R_16F;
    computeType = CUDA_R_16F;
    startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
    endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
  }
  T alpha = (T)1.0f;
  T beta = (T)0.0f;
  fprintf(fd, "dataType, batchCount, n, m, k, algoId, customOption, tile, numSplitsK, swizzle, reductionScheme, workspaceSize, stages, exec_time\n");

  printf("***Decoding Gemm Testing***\n");
  for(int i = 0; i < gemm_num; ++i)
  {
    int m = M[i], n = N[i], k = K[i];
    printf("\n-----------------------------\n");
    printf("GEMM test %d: [M: %d, K: %d, N: %d] %s\n", i, m, k, n, mess[i]);
    T* d_A;
    T* d_B;
    T* d_C;

    // gemm index 5 is strided-batched with batch 3, so B and C are 3x larger.
    if(i == 5)
    {
      check_cuda_error(cudaMalloc((void**)&d_A, sizeof(T) * m * k));
      check_cuda_error(cudaMalloc((void**)&d_B, sizeof(T) * k * n * 3));
      check_cuda_error(cudaMalloc((void**)&d_C, sizeof(T) * m * n * 3));
    }
    else
    {
      check_cuda_error(cudaMalloc((void**)&d_A, sizeof(T) * m * k));
      check_cuda_error(cudaMalloc((void**)&d_B, sizeof(T) * k * n));
      check_cuda_error(cudaMalloc((void**)&d_C, sizeof(T) * m * n));
    }

    float exec_time = 99999.0f;
    int fast_algo = 0;
    // Sweep the classic cublasGemm algo ids and keep the fastest one.
    for(int algo = startAlgo; algo <= endAlgo; algo++)
    {
      cublasStatus_t status;
      cudaDeviceSynchronize();
      gettimeofday(&start, NULL);
      for(int ite = 0; ite < ites; ++ite)
      {
        if(i == 5)
        {
          // cuBLAS is column-major, so compute C^T = B^T * A^T by swapping operands.
          status = cublasGemmStridedBatchedEx(cublas_handle,
                CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,
                &alpha,
                d_B, BType, n, k*n,
                d_A, AType, k, 0,
                &beta,
                d_C, CType, n, m*n,
                3,
                computeType,
                static_cast<cublasGemmAlgo_t>(algo));
        }
        else
        {
          status = cublasGemmEx(cublas_handle, 
                                CUBLAS_OP_N, CUBLAS_OP_N,
                                n, m, k, 
                                &alpha, 
                                d_B, BType, n, 
                                d_A, AType, k, 
                                &beta, 
                                d_C, CType, n, 
                                computeType, 
                                static_cast<cublasGemmAlgo_t>(algo));
        }
      }
      cudaDeviceSynchronize();
      gettimeofday(&end, NULL);
      if(status == CUBLAS_STATUS_SUCCESS)
      {
        printf("algo_%d costs %.3fms \n", algo, diffTime(start, end) / ites);
        if(diffTime(start, end) / ites < exec_time)
        {
          exec_time = diffTime(start, end) / ites;
          fast_algo = algo;
        }
      }
    }
    printf("fast_algo %d costs %.3f ms\n", fast_algo, exec_time);
    int is_fp16 = 0;
    if (sizeof(T) == sizeof(half))
        is_fp16 = 1;

    // We additionally compare against cublasLt for fp16 (except the
    // strided-batched gemm, which LtgemmCustomFind does not cover).
    if(i != 5 && is_fp16 == 1){
      void *workSpace = NULL;
      int workSpaceSize = CUBLAS_WORKSPACE_SIZE;
      check_cuda_error(cudaMalloc((void **)&workSpace, workSpaceSize));
      printf("***cublasLt Gemm Testing Begin***\n");
      // Let's try a fixed number of combinations
      int ALGO_COMBINATIONS = 5000;
      customMatmulPerf_t perfResults[ALGO_COMBINATIONS];
      
      LtgemmCustomFind<T>(ltHandle, n, m, k, &alpha, d_B, d_A, 
                      &beta, d_C, workSpace, workSpaceSize, fd, perfResults, ALGO_COMBINATIONS);
      // NOTE(review): if LtgemmCustomFind found no successful algo,
      // perfResults[0].time is indeterminate here — TODO: guard on AlgoCount.
      if(perfResults[0].time < exec_time){
        printPerfStructure(n, m, k, perfResults[0], fd, is_fp16, 0);
      }else{
        fprintf(fd, "%d %d %d %d %d %d %d %d %d %d %d %d %d %f\n",  is_fp16 ? HALF_DATATYPE:FLOAT_DATATYPE, i == 5 ? 3:1, 
                          n, m, k, fast_algo, -1, -1, -1, -1, -1, -1, -1, exec_time);
      }
      printf("***cublasLt Gemm Testing End***\n");
      cudaFree(workSpace);
    }else{
      fprintf(fd, "%d %d %d %d %d %d %d %d %d %d %d %d %d %f\n",  is_fp16 ? HALF_DATATYPE:FLOAT_DATATYPE, i == 5 ? 3:1,
                           n, m, k, fast_algo, -1, -1, -1, -1, -1, -1, -1, exec_time);
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
  }

  // Release resources.  The original leaked the file handle and both cuBLAS
  // handles on every invocation.
  cublasLtDestroy(ltHandle);
  cublasDestroy(cublas_handle);
  fclose(fd);
}

template<typename T>
void generate_gpt_gemm_config(int local_batch_size,
                              int context_local_batch_size,
                              int head_number,
                              int size_per_head,
                              int vocab_size,
                              int start_len,
                              int tensor_para_size)
{
  // Benchmarks the 14 GEMM shapes used by GPT decoding (per-step decode plus
  // the context/prompt phase) over the cublasGemm*Ex algorithm range and, for
  // fp16 non-batched shapes, a cublasLt heuristic search. The fastest
  // configuration for each shape is written to "decoding_gemm_config.in".
  //
  // Row format (matches the header line written below):
  //   dataType batchCount n m k algoId customOption tile numSplitsK swizzle
  //   reductionScheme workspaceSize stages exec_time
  // Shapes won by plain cublas fill the cublasLt-specific fields with -1.
  FILE* fd = fopen("decoding_gemm_config.in", "w");
  if(fd == NULL)
  {
    printf("[ERROR] Cannot write to file decoding_gemm_config.in\n");
    return;
  }

  if(head_number % tensor_para_size != 0)
  {
    printf("[ERROR] head_num mod tensor_para_size should be 0. Here, head_number is %d, and tensor_para_size is %d. \n", head_number, tensor_para_size);
    fclose(fd);  // do not leak the open file on the error path
    exit(-1);
  }

  const int hidden_units = head_number * size_per_head;
  const int local_hidden_units = hidden_units / tensor_para_size;
  const int local_head_number = head_number / tensor_para_size;
  const int gemm_num = 14;
  int M[gemm_num];
  int N[gemm_num];
  int K[gemm_num];
  char mess[gemm_num][256];
  int batch_count[gemm_num];

  //gemm1: logits projection. For fp16 the vocab size is padded up to a
  //multiple of 8 * tensor_para_size so the weight splits evenly across GPUs.
  M[0] = local_batch_size;
  K[0] = hidden_units;
  N[0] = std::is_same<T, float>::value ? vocab_size : (int)(ceil(vocab_size / (8. * tensor_para_size))) * 8;
  batch_count[0] = 1;
  strcpy(mess[0], "decoder_output * embedding_kernel -> embedding_output");

  //gemm2
  M[1] = local_batch_size;
  K[1] = hidden_units;
  N[1] = 3 * local_hidden_units;
  batch_count[1] = 1;
  strcpy(mess[1], "from_tensor * weightQKV in masked attention (fused QKV weights)");

  //gemm3 (batched: one GEMM each for Q, K and V)
  M[2] = local_batch_size;
  K[2] = hidden_units;
  N[2] = local_hidden_units;
  batch_count[2] = 3;
  strcpy(mess[2], "from_tensor * weightK/V in masked attention (GemmBatched)");

  //gemm4
  M[3] = local_batch_size;
  K[3] = hidden_units;
  N[3] = local_hidden_units;
  batch_count[3] = 1;
  strcpy(mess[3], "from_tensor * weightK/V in masked attention (unfused)");

  //gemm5
  M[4] = local_batch_size;
  K[4] = local_hidden_units;
  N[4] = hidden_units;
  batch_count[4] = 1;
  strcpy(mess[4], "masked_attention_output * output_gemm (unfused)");

  //gemm6
  M[5] = local_batch_size;
  K[5] = hidden_units;
  N[5] = 4 * local_hidden_units;
  batch_count[5] = 1;
  strcpy(mess[5], "ffn gemm1");

  //gemm7
  M[6] = local_batch_size;
  K[6] = 4 * local_hidden_units;
  N[6] = hidden_units;
  batch_count[6] = 1;
  strcpy(mess[6], "ffn gemm2");

  //gemm8: context phase processes start_len tokens per sequence at once
  M[7] = context_local_batch_size * start_len;
  K[7] = hidden_units;
  N[7] = 3 * local_hidden_units;
  batch_count[7] = 1;
  strcpy(mess[7], "from_tensor * weightQKV in masked attention (fused QKV weights in forward_context)");

  //gemm9
  M[8] = context_local_batch_size * start_len;
  K[8] = hidden_units;
  N[8] = local_hidden_units;
  batch_count[8] = 1;
  strcpy(mess[8], "from_tensor * weight_Q/K/V in masked attention (unfused QKV weights in forward_context)");

  //gemm10: one batch entry per (sequence, head) pair
  M[9] = start_len;
  K[9] = size_per_head;
  N[9] = start_len;
  batch_count[9] = context_local_batch_size * local_head_number;
  strcpy(mess[9], "Q * K in masked attention (GemmStridedBatchedEx TN forward_context)");

  //gemm11
  M[10] = start_len;
  K[10] = start_len;
  N[10] = size_per_head;
  batch_count[10] = context_local_batch_size * local_head_number;
  strcpy(mess[10], "QK * V in masked attention (GemmStridedBatchedEx NN forward_context)");

  //gemm12
  M[11] = local_batch_size * start_len;
  K[11] = local_hidden_units;
  N[11] = hidden_units;
  batch_count[11] = 1;
  strcpy(mess[11], "masked_attention_output * output_gemm (in forward_context)");

  //gemm13
  M[12] = context_local_batch_size * start_len;
  K[12] = hidden_units;
  N[12] = 4 * local_hidden_units;
  batch_count[12] = 1;
  strcpy(mess[12], "ffn1 (in forward_context)");

  //gemm14
  M[13] = context_local_batch_size * start_len;
  K[13] = 4 * local_hidden_units;
  N[13] = hidden_units;
  batch_count[13] = 1;
  strcpy(mess[13], "ffn2 (in forward_context)");

  cublasHandle_t cublas_handle;
  check_cuda_error(cublasCreate(&cublas_handle));
  cublasLtHandle_t ltHandle;
  check_cuda_error(cublasLtCreate(&ltHandle));

  cudaDataType_t AType;
  cudaDataType_t BType;
  cudaDataType_t CType;
  cudaDataType_t computeType;
  int startAlgo, endAlgo;
  const int ites = 100;  // timed iterations per algorithm
  struct timeval start, end;

  // Pick data types and the cublas algorithm enum range to sweep.
  if(sizeof(T) == sizeof(float)){
    AType = CUDA_R_32F;
    BType = CUDA_R_32F;
    CType = CUDA_R_32F;
    computeType = CUDA_R_32F;
    startAlgo = (int)CUBLAS_GEMM_DEFAULT;
    endAlgo = (int)CUBLAS_GEMM_ALGO23;
  }
  else{
    AType = CUDA_R_16F;
    BType = CUDA_R_16F;
    CType = CUDA_R_16F;
    computeType = CUDA_R_16F;
    startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
    endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
  }
  T alpha = (T)1.0f;
  T beta = (T)0.0f;
  fprintf(fd, "dataType, batchCount, n, m, k, algoId, customOption, tile, numSplitsK, swizzle, reductionScheme, workspaceSize, stages, exec_time\n");

  printf("***Decoding Gemm Testing***\n");
  for(int i = 0; i < gemm_num; ++i)
  {
    int m = M[i], n = N[i], k = K[i];
    const int b = batch_count[i];
    printf("\n-----------------------------\n");
    printf("GEMM test %d: [B: %d, M: %d, K: %d, N: %d] %s\n", i, b, m, k, n, mess[i]);
    T* d_A;
    T* d_B;
    T* d_C;

    check_cuda_error(cudaMalloc((void**)&d_A, sizeof(T) * m * k * b));
    check_cuda_error(cudaMalloc((void**)&d_B, sizeof(T) * k * n * b));
    check_cuda_error(cudaMalloc((void**)&d_C, sizeof(T) * m * n * b));

    // Device-side pointer array used only by the GemmBatched case (i == 2);
    // allocated unconditionally and freed once at the bottom of the loop.
    T* harray[9];
    T** darray = 0;
    check_cuda_error(cudaMalloc((void**)&darray, sizeof(T*) * 9));

    if(i == 2)
    {
      // Three A/B/C sub-matrices packed back to back in the single buffers.
      harray[0] = (T*)d_A;
      harray[1] = (T*)(d_A + m * k);
      harray[2] = (T*)(d_A + 2 * m * k);
      harray[3] = (T*)d_B;
      harray[4] = (T*)d_B + k * n;
      harray[5] = (T*)d_B + 2 * k * n;
      harray[6] = (T*)d_C;
      harray[7] = (T*)d_C + m * n;
      harray[8] = (T*)d_C + 2 * m * n;
      check_cuda_error(cudaMemcpy((void*)darray, (void*)harray, sizeof(T*) * 9, cudaMemcpyHostToDevice));
    }

    T** dAarray = darray;
    T** dBarray = darray + 3;
    T** dCarray = darray + 6;

    float exec_time = 99999.0f;
    int fast_algo = 0;
    for(int algo = startAlgo; algo <= endAlgo; algo++)
    {
      // Initialized so the post-loop check is well-defined even if the timed
      // loop body were ever skipped.
      cublasStatus_t status = CUBLAS_STATUS_NOT_INITIALIZED;
      cudaDeviceSynchronize();
      gettimeofday(&start, NULL);
      for(int ite = 0; ite < ites; ++ite)
      {
        // cuBLAS is column-major, so the C = A * B (row-major) product is
        // issued as B^T * A^T by swapping operands and passing (n, m, k).
        if(i == 2)
        {
          status = cublasGemmBatchedEx(cublas_handle,
                                       CUBLAS_OP_N, CUBLAS_OP_N,
                                       n, m, k,
                                       &alpha,
                                       (const void* const*) dBarray, BType, n,
                                       (const void* const*) dAarray, AType, k,
                                       &beta,
                                       (void* const*)dCarray, CType, n,
                                       b,
                                       computeType,
                                       static_cast<cublasGemmAlgo_t>(algo));
        }
        else if(i == 9)
        {
          status = cublasGemmStridedBatchedEx(cublas_handle,
                                              CUBLAS_OP_T, CUBLAS_OP_N,
                                              n, m, k,
                                              &alpha,
                                              d_B, BType, k, k * n,
                                              d_A, AType, k, m * k,
                                              &beta,
                                              d_C, CType, n, m * n,
                                              b,
                                              computeType,
                                              static_cast<cublasGemmAlgo_t>(algo));
        }
        else if(i == 10)
        {
          status = cublasGemmStridedBatchedEx(cublas_handle,
                                              CUBLAS_OP_N, CUBLAS_OP_N,
                                              n, m, k,
                                              &alpha,
                                              d_B, BType, n, k * n,
                                              d_A, AType, k, m * k,
                                              &beta,
                                              d_C, CType, n, m * n,
                                              b,
                                              computeType,
                                              static_cast<cublasGemmAlgo_t>(algo));
        }
        else
        {
          status = cublasGemmEx(cublas_handle,
                                CUBLAS_OP_N, CUBLAS_OP_N,
                                n, m, k,
                                &alpha,
                                d_B, BType, n,
                                d_A, AType, k,
                                &beta,
                                d_C, CType, n,
                                computeType,
                                static_cast<cublasGemmAlgo_t>(algo));
        }
      }
      cudaDeviceSynchronize();
      gettimeofday(&end, NULL);
      // Unsupported algorithms return an error status and are skipped.
      if(status == CUBLAS_STATUS_SUCCESS)
      {
        printf("algo_%d costs %.3fms \n", algo, diffTime(start, end) / ites);
        if(diffTime(start, end) / ites < exec_time)
        {
          exec_time = diffTime(start, end) / ites;
          fast_algo = algo;
        }
      }
    }

    printf("fast_algo %d costs %.3f ms\n", fast_algo, exec_time);
    int is_fp16 = 0;
    if (sizeof(T) == sizeof(half))
        is_fp16 = 1;

    //we compare cublasLt for fp16 (batched shapes are not supported by the
    //cublasLt search, so they always take the plain-cublas branch below)
    if((i != 2 && i != 9 && i != 10) && is_fp16 == 1){
      void *workSpace = NULL;
      int workSpaceSize = CUBLAS_WORKSPACE_SIZE;
      check_cuda_error(cudaMalloc((void **)&workSpace, workSpaceSize));
      printf("***cublasLt Gemm Testing Begin***\n");
       // Let try a fixed number of combinations
      const int ALGO_COMBINATIONS = 5000;
      // Heap-allocated: a stack array of 5000 perf structs (and a VLA, since
      // the bound used to be non-const) risks stack overflow and is not
      // standard C++.
      std::vector<customMatmulPerf_t> perfResults(ALGO_COMBINATIONS);

      LtgemmCustomFind<T>(ltHandle, n, m, k, &alpha, d_B, d_A,
                      &beta, d_C, workSpace, workSpaceSize, fd, perfResults.data(), ALGO_COMBINATIONS);
      // perfResults is sorted fastest-first; keep whichever of cublasLt and
      // plain cublas won this shape.
      if(perfResults[0].time < exec_time){
        printPerfStructure(n, m, k, perfResults[0], fd, is_fp16, 0);
      }else{
        fprintf(fd, "%d %d %d %d %d %d %d %d %d %d %d %d %d %f\n",  is_fp16 ? HALF_DATATYPE:FLOAT_DATATYPE, b,
                          n, m, k, fast_algo, -1, -1, -1, -1, -1, -1, -1, exec_time);
      }
      printf("***cublasLt Gemm Testing End***\n");
      cudaFree(workSpace);
    }else{
      fprintf(fd, "%d %d %d %d %d %d %d %d %d %d %d %d %d %f\n",  is_fp16 ? HALF_DATATYPE:FLOAT_DATATYPE, b,
                           n, m, k, fast_algo, -1, -1, -1, -1, -1, -1, -1, exec_time);
    }

    check_cuda_error(cudaFree(darray));
    check_cuda_error(cudaFree(d_A));
    check_cuda_error(cudaFree(d_B));
    check_cuda_error(cudaFree(d_C));
  }

  // Release library handles and close the config file (previously leaked).
  check_cuda_error(cublasDestroy(cublas_handle));
  check_cuda_error(cublasLtDestroy(ltHandle));
  fclose(fd);
}

