/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file ffn_quant.cpp
 * \brief
 */
#ifndef FFN_QUANT_CPP
#define FFN_QUANT_CPP

#include "ffn_quant.h"


namespace FFN {

/**
 * Kernel-side initialization.
 * Binds all global-memory (GM) inputs/outputs, loads tiling parameters,
 * prepares dequant-scale buffers and UB queues, and stages the per-expert
 * token counts into UB. bias1/bias2/expertTokens may be nullptr when absent.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::Init(__gm__ uint8_t* x, __gm__ uint8_t* weight1,
                                                           __gm__ uint8_t* weight2, __gm__ uint8_t* expertTokens,
                                                           __gm__ uint8_t* bias1, __gm__ uint8_t* bias2,
                                                           __gm__ uint8_t* scale, __gm__ uint8_t* offset, __gm__ uint8_t* deqScale1,
                                                           __gm__ uint8_t* deqScale2, __gm__ uint8_t* y, __gm__ uint8_t* workSpace,
                                                           const FFNTilingData* __restrict tiling, TPipe* tPipe) {
  curBlockIdx = GetBlockIdx();
  subBlockIdx = GetSubBlockIdx();
  coreIdx = curBlockIdx / GetTaskRation();  // block index mapped down to its cube-core index
  tilingData = tiling;
  pipe = tPipe;
  InitTilingData();

  // init global buffer
  xGm.SetGlobalBuffer((__gm__ T*)x);
  weight1Gm.SetGlobalBuffer((__gm__ T*)weight1);
  if (bias1 != nullptr) {
    hasBias1 = true;
    bias1Gm.SetGlobalBuffer((__gm__ biasT*)bias1);
  }
  weight2Gm.SetGlobalBuffer((__gm__ T*)weight2);
  if (bias2 != nullptr) {
    hasBias2 = true;
    bias2Gm.SetGlobalBuffer((__gm__ biasT*)bias2);
  }
  yGm.SetGlobalBuffer((__gm__ c2T*)y);
  // The three views below alias the same workspace base with different element
  // types; offsets computed later (workspace1Size/workspace2Size) keep the
  // regions they actually touch disjoint.
  mm1WorkspaceGm.SetGlobalBuffer((__gm__ c1T*)workSpace);
  mm2WorkspaceGm.SetGlobalBuffer((__gm__ T*)workSpace);
  mm2OutWorkspaceGm.SetGlobalBuffer((__gm__ c1T*)workSpace);

  if constexpr (isSmooth) {
    ScaleGm.SetGlobalBuffer((__gm__ float*)scale);  // per-channel smooth-quant scales in GM
  } else {
    quantScale = (__gm__ float*)scale;  // per-tensor scale, read as a scalar per expert
  }
  quantOffset = (__gm__ float*)offset;

  InitDequantScale(workSpace, deqScale1, deqScale2);

  InitQueue();
  TBuf<QuePosition::VECCALC> eTokens64Buf;
  pipe->InitBuffer(eTokens64Buf, AlignUp<UB_BLOCK_UNIT_SIZE>(expertNum * sizeof(int64_t)));  // 32Byte alignment
  ubTokens = eTokens64Buf.Get<int64_t>();
  if (likely(expertTokens != nullptr)) {
    // copy tokens array from GM
    expertTokensGm.SetGlobalBuffer((__gm__ int64_t*)expertTokens);
    DataCopy(ubTokens, expertTokensGm, AlignUp<EXPERT_NUM_ALIGN>(expertNum));  // 32Byte alignment
    // scalar reads of ubTokens below must wait for the MTE2 copy to land in UB
    set_flag(PIPE_MTE2, PIPE_S, EVENT_ID0);
    wait_flag(PIPE_MTE2, PIPE_S, EVENT_ID0);
    if (tilingData->ffnBaseParams.tokensIndexFlag) {
      // entries were given as cumulative indices; convert to per-expert counts
      TokensIndicesToValues(ubTokens, expertNum);
    }
  } else {
    // no expert-token list: treat as a single expert holding maxTokens tokens
    ubTokens.SetValue(0, static_cast<int64_t>(tilingData->ffnBaseParams.maxTokens));
  }
}

/**
 * Bind the dequant-scale GM buffers for mm1 and mm2.
 * When dequantT is float, the float32 scales must later be repacked into a
 * uint64-per-channel layout (see CastDeqScale) before being handed to
 * SetQuantVector; the extra uint32/uint64 views over the shared workspace
 * exist solely for that conversion.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::InitDequantScale(__gm__ uint8_t* workSpace,
                                                                                                                  __gm__ uint8_t* deqScale1,
                                                                                                                  __gm__ uint8_t* deqScale2) {
  if constexpr (IsSameType<dequantT, float>::value) {
    // source views (raw float32 scales, read as uint32 bit patterns)
    deqScale1GmVector.SetGlobalBuffer((__gm__ uint32_t*)deqScale1);
    deqScale2GmVector.SetGlobalBuffer((__gm__ uint32_t*)deqScale2);
    // destination views into the workspace: the same region typed as uint64
    // (consumer side) and as uint32 (producer side of the repack)
    deqScale1UInt64Gm.SetGlobalBuffer((__gm__ uint64_t*)workSpace);
    deqScale2UInt64Gm.SetGlobalBuffer((__gm__ uint64_t*)workSpace);
    deqScale1FloatGm.SetGlobalBuffer((__gm__ uint32_t*)workSpace);
    deqScale2FloatGm.SetGlobalBuffer((__gm__ uint32_t*)workSpace);
  }
  deqScale1Gm.SetGlobalBuffer((__gm__ dequantT*)deqScale1);
  deqScale2Gm.SetGlobalBuffer((__gm__ dequantT*)deqScale2);
}

/**
 * Carve UB into the queues and scratch buffers used by the vector pipeline.
 * Normal path (n1 != 0): in/out queues sized by ubCalSize, an optional
 * per-channel scale queue, and one large scratch TBuf that is sub-sliced into
 * activation output / activation temp (aliased as quant temp) / optional bf16
 * dequant output regions.
 * Degenerate path (n1 == 0): only small baseN2-wide buffers are needed.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::InitQueue() {
  if (n1 != 0) {
    pipe->InitBuffer(vecInQueue, 1, ubCalSize * sizeof(c1T));
    if constexpr (IsSameType<c2T, bfloat16_t>::value) {
      // for mm2 output(int32), init size not less than 8*baseN for float32 deq_scale
      pipe->InitBuffer(vecOutQueue, 1, ubCalSize * sizeof(c2T));
    } else {
      // for quant output(int8), init size not less than 8*baseN for float32 deq_scale
      pipe->InitBuffer(vecOutQueue, 1, ubCalSize * sizeof(T));
    }
    if constexpr (IsSameType<dequantT, uint64_t>::value == false || isSmooth) {
      // per-channel scales (smooth quant, or non-uint64 dequant) need their own queue
      uint32_t scaleSize = AlignUp<UB_BLOCK_UNIT_SIZE>(baseN1 * sizeof(float));
      pipe->InitBuffer(scaleQueue, 1, scaleSize);
      ubRestBytes -= scaleSize;  // shrink the scratch region accordingly
    }
    TBuf<TPosition::VECCALC> tmpTBuf;
    pipe->InitBuffer(tmpTBuf, ubRestBytes);
    tmpBuff = tmpTBuf.Get<uint8_t>();
    // scratch layout: [ actOut (ubCalSize actT) | actTmp == quantTmp | ... ]
    actOut = tmpBuff.ReinterpretCast<actT>();
    actTmp = tmpBuff[ubCalSize * sizeof(actT)];
    quantTmp = actTmp;
    if constexpr (IsSameType<dequantT, bfloat16_t>::value) {
      // bf16 dequant gets its own output slice; actTmp moves past it
      dequantOut = actOut[ubCalSize];
      actTmp = tmpBuff[ubCalSize * sizeof(actT) * 2];
    }
  } else {
    // n1 == 0: mm1 stage is bypassed (see ProcessZeroN1 in Process); only
    // baseN2-wide bias/dequant staging buffers are required.
    pipe->InitBuffer(vecInQueue, 1, AlignUp<UB_BLOCK_UNIT_SIZE>(baseN2 * sizeof(biasT)));
    pipe->InitBuffer(vecOutQueue, 1, AlignUp<UB_BLOCK_UNIT_SIZE>(baseN2 * sizeof(c2T)));
    pipe->InitBuffer(scaleQueue, 1, AlignUp<UB_BLOCK_UNIT_SIZE>(baseN2 * sizeof(dequantT)));
    if constexpr (IsSameType<c2T, half>::value && IsSameType<dequantT, float>::value) {
      pipe->InitBuffer(dequantOutBuf, AlignUp<UB_BLOCK_UNIT_SIZE>(baseN2 * sizeof(dequantT)));
      dequantTmpOut = dequantOutBuf.Get<dequantT>();
    }
  }
}

/**
 * Cache tiling-struct fields into member variables so later code avoids
 * repeated indirection through the tiling pointer.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::InitTilingData() {
  const auto& baseParams = tilingData->ffnBaseParams;
  totalTokens = baseParams.totalTokens;
  maxTokens = baseParams.maxTokens;
  k1 = baseParams.k1;
  n1 = baseParams.n1;
  k2 = n1;  // mm2's reduce dimension is mm1's output dimension
  n2 = baseParams.n2;
  expertNum = baseParams.expertNum;
  cubeCoreNum = baseParams.coreNum;
  coreNum = cubeCoreNum * GetTaskRation();
  activeType = baseParams.activeType;
  workspace1Size = baseParams.workspace1Size;
  workspace2Size = baseParams.workspace2Size;

  // element sizes of input, final output, and matmul intermediate output
  dataTypeSize = sizeof(T);
  outTypeSize = sizeof(c2T);
  mmOutTypeSize = sizeof(c1T);

  const auto& singleCoreParams = tilingData->ffnSingleCoreParams;
  baseM1 = singleCoreParams.baseM1;
  baseN1 = singleCoreParams.baseN1;
  baseN2 = singleCoreParams.baseN2;
  ubCalSize = singleCoreParams.ubCalSize;
  ubRestBytes = singleCoreParams.ubRestBytes;
}

/**
 * Dispatch the configured activation over actInput, writing to activeResUb.
 * actTmp serves as scratch for the Gelu variants; unknown activation codes
 * are a no-op, matching the original if/else chain.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::ActivationFunction(
  LocalTensor<actT> activeResUb, LocalTensor<actT> actInput, uint32_t computeSize) {

  switch (ActiveType(activeType)) {
    case ActiveType::FASTGELU:
      FasterGelu(activeResUb, actInput, actTmp, computeSize);
      break;
    case ActiveType::RELU:
      Relu(activeResUb, actInput, computeSize);
      pipe_barrier(PIPE_V);  // Relu has no internal barrier; order it before later vector ops
      break;
    case ActiveType::SILU:
      Silu(activeResUb, actInput, computeSize);
      break;
    case ActiveType::GELU:
      Gelu(activeResUb, actInput, actTmp, computeSize);
      break;
    default:
      break;
  }
}

/**
 * Build the byte-offset table used by Gather when widening float32 scales to
 * 64-bit slots: index[i] = (i >> 1) * 4, so each pair of destination lanes
 * addresses the same 4-byte source element. Result is left in gatherIndex
 * (aliasing the front of tmpBuff).
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::CreateIndex(uint32_t indexNum) {
  // Two typed views over the same scratch: slot 0 holds the running index,
  // slot 1 (offset by indexNum elements) holds the halved intermediate.
  LocalTensor<int32_t> idxS32 = tmpBuff.ReinterpretCast<int32_t>();
  LocalTensor<int32_t> halvedS32 = idxS32[indexNum];
  LocalTensor<uint32_t> idxU32 = tmpBuff.ReinterpretCast<uint32_t>();
  LocalTensor<uint32_t> halvedU32 = idxU32[indexNum];
  int startValue = 0;
  CreateVecIndex(idxS32, startValue, indexNum);  // 0,1,2,...
  pipe_barrier(PIPE_V);
  uint32_t shiftBits = 1;
  ShiftRight(halvedU32, idxU32, shiftBits, indexNum);  // i >> 1
  pipe_barrier(PIPE_V);
  int32_t byteStride = 4;
  Muls(idxS32, halvedS32, byteStride, indexNum);  // scale to byte offsets
  pipe_barrier(PIPE_V);
  gatherIndex = idxS32.ReinterpretCast<uint32_t>();
}

/**
 * Repack float32 dequant scales into the uint64-per-channel layout consumed
 * via deqScale1UInt64Gm/deqScale2UInt64Gm (SetQuantVector), one baseN-sized
 * slice of the singleN range at a time:
 *   1) zero a 2*baseN uint32 destination (pre-clears the high 32 bits),
 *   2) copy the float32 scales (as uint32 bit patterns) into UB,
 *   3) Gather each source word into the even lane of a 64-bit slot,
 *   4) write the widened slots to the workspace scale region.
 * offset selects the expert's scale row; isMM1 picks mm1 vs mm2 buffers.
 * NOTE(review): parameters nLength and singleM are currently unused — verify
 * whether they were meant to bound the copy.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::CastDeqScale(uint32_t offset, uint32_t nLength, uint32_t singleM, uint32_t singleN, bool isMM1) {
  uint32_t baseNAlign = AlignDown(ubCalSize / 8, UB_BLOCK_UNIT_SIZE);  // 8 = 2(uint64 need double length of float) * sizeof(float), should be limited by the size of vecOutQueue;
  uint32_t baseN = baseNAlign;
  for (uint32_t offsetN = 0; offsetN < singleN; offsetN += baseNAlign) {
    if ((singleN - offsetN) < baseNAlign) {
      // tail slice: shrink the valid width, keep the aligned width for UB ops
      baseN = singleN - offsetN;
      baseNAlign = AlignUp<UB_BLOCK_UNIT_SIZE>(baseN);
    }
    CreateIndex(2 * baseNAlign);

    LocalTensor<uint32_t> dequantInitLocal = vecOutQueue.AllocTensor<uint32_t>();
    uint32_t scalarZeroValue = 0;
    Duplicate(dequantInitLocal, scalarZeroValue, 2 * baseN);  // zero both halves of each 64-bit slot
    pipe_barrier(PIPE_V);

    // stage baseN float32 scales (viewed as uint32) from GM into UB
    LocalTensor<uint32_t> dequantLocal = vecInQueue.AllocTensor<uint32_t>();
    DataCopyPadParams padParams;
    DataCopyParams intriParams1;
    intriParams1.blockLen = baseN * sizeof(uint32_t);
    intriParams1.blockCount = 1;
    intriParams1.srcStride = 0;
    intriParams1.dstStride = 0;
    if (isMM1) {
      DataCopyPad(dequantLocal, deqScale1GmVector[offset + offsetN], intriParams1, padParams);
    } else {
      DataCopyPad(dequantLocal, deqScale2GmVector[offset + offsetN], intriParams1, padParams);
    }
    vecInQueue.EnQue(dequantLocal);

    // mask 0x5555... selects even destination lanes: each 32-bit scale lands
    // in the low word of its 64-bit slot, high word stays zero
    uint64_t mask[2] = {0x5555555555555555, 0};
    LocalTensor<uint32_t> dequantSrcLocal = vecInQueue.DeQue<uint32_t>();
    Gather(dequantInitLocal, dequantSrcLocal, gatherIndex, 0, mask, (uint8_t)(baseNAlign / (8 * sizeof(dequantT))), (uint16_t)(sizeof(uint64_t)));
    pipe_barrier(PIPE_ALL);
    vecInQueue.FreeTensor(dequantSrcLocal);

    // write the widened (2*baseN uint32 == baseN uint64) slice back to GM
    DataCopyParams intriParams2;
    intriParams2.blockLen = 2 * baseN * sizeof(uint32_t);
    intriParams2.blockCount = 1;
    intriParams2.srcStride = 0;
    intriParams2.dstStride = 0;

    vecOutQueue.EnQue(dequantInitLocal);
    LocalTensor<uint32_t> dequantDstLocal = vecOutQueue.DeQue<uint32_t>();
    if (isMM1) {
      // mm1 scales live right after workspace1+workspace2 (uint32 units, 2 words per channel)
      uint64_t deqscale1Offset = (workspace1Size + workspace2Size) / sizeof(uint32_t) + (offset  + offsetN) * 2;
      DataCopyPad(deqScale1FloatGm[deqscale1Offset], dequantDstLocal, intriParams2);
    } else {
      // mm2 scales follow the mm1 scale region (expertNum * n1 uint64 slots)
      uint64_t deqscale2Offset = (workspace1Size + workspace2Size + expertNum * n1 * sizeof(uint64_t)) / sizeof(uint32_t) + (offset + offsetN) * 2;
      DataCopyPad(deqScale2FloatGm[deqscale2Offset], dequantDstLocal, intriParams2);
    }
    vecOutQueue.FreeTensor(dequantDstLocal);
  }
}


/**
 * Decide how many experts to compute in parallel this round.
 * Small experts (tokens <= baseM) are accumulated into the parallel buffer and
 * only flushed when it fills; large experts are computed alone immediately.
 * A sentinel expertI == expertNum flushes whatever is still buffered.
 * On return, expertParallInfo.expertParallelism is 0 (buffered, nothing to
 * run), or the number of experts to run starting at expertParallInfo.start;
 * `tokens` is updated to the head expert's token count in the flush case.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::ComputeExpertParallNum(const uint32_t& expertI, const uint32_t& baseM,
                                                                                                                        ExpertParallInfo& expertParallInfo) {
  if (expertI == expertNum) {
    // Sentinel: flush the remaining buffered experts.
    expertParallInfo.expertParallelism = Min(expertParallInfo.size, expertParallInfo.maxExpertParallelism);
    expertParallInfo.start = 0;
    size_t headIdx = expertParallInfo.expertIdxBuf[0];
    tokens = ubTokens.GetValue(headIdx);
    return;
  }
  const bool isSmallExpert = (tokens <= baseM);
  const bool bufIsFull = expertParallInfo.AddExpert(expertI, tokens, tokensOffset);
  if (!isSmallExpert) {
    // Large expert: compute it solely, right away.
    expertParallInfo.expertParallelism = 1;
    expertParallInfo.start = expertParallInfo.size - 1;
  } else if (bufIsFull) {
    // Buffer of small experts is full: time to compute them in parallel.
    expertParallInfo.expertParallelism = expertParallInfo.maxExpertParallelism;
    expertParallInfo.start = 0;
  } else {
    // Expert stored for later; nothing to compute this round.
    expertParallInfo.expertParallelism = 0;
  }
}

/**
 * Main driver: iterate over experts, batching small ones for parallel
 * execution, and run mm1 -> sync -> mm2 for each batch. The loop continues
 * past expertNum until both parallel buffers have drained.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::Process() {
  if (unlikely(this->ProcessZeroN1())) {
    return;
  }

  tokensOffset = 0;
  uint32_t tokensThisLoop = 0;  // backup tokens value, because `tokens` will be modified soon after.
  ExpertParallInfo mm1ExpertParallInfo(cubeCoreNum, Ceil(n1, tilingData->mm1TilingData.baseN * tilingData->mm1TilingData.stepN));
  ExpertParallInfo mm2ExpertParallInfo(cubeCoreNum, Ceil(n2, tilingData->mm2TilingData.baseN * tilingData->mm2TilingData.stepN));
  if (mm2ExpertParallInfo.maxSize < mm1ExpertParallInfo.maxSize) {
      // MM1 first computes experts, then MM2. If an expert is not processed by mm1, it cannot be processed by mm2.
      // Experts in MM1 buffer are all unprocessed, so the buffer of MM2 should hold these experts too.
      // This requires MM2's maxSize >= MM1's maxSize, no matter what relative value of both of maxExpertParallelism.
      mm2ExpertParallInfo.maxSize = mm1ExpertParallInfo.maxSize;
  }
  if (mm2ExpertParallInfo.maxExpertParallelism > mm1ExpertParallInfo.maxExpertParallelism) {
      // Now MM2's expert parallelism is not supported to be larger than MM1's.
      // If it happens, one should consider adjusting workspace1Size and workspace2Size.
      mm2ExpertParallInfo.maxExpertParallelism = mm1ExpertParallInfo.maxExpertParallelism;
      mm2ExpertParallInfo.maxSize = mm1ExpertParallInfo.maxSize;
  }
  MNConfig mnConfig;
  bool whetherFirstMM1 = true;
  // decides the whetherSyncBeforeMM1 member used by the per-batch sync logic
  WhetherSyncBeforeMM1(mnConfig, mm1ExpertParallInfo.maxExpertParallelism,
                                mm2ExpertParallInfo.maxExpertParallelism);

  // Iterate one index past expertNum while buffers still hold pending experts.
  for (uint32_t expertI(0); expertI < expertNum || mm1ExpertParallInfo.size > 0
        || mm2ExpertParallInfo.size > 0; ++expertI) {
      tokensOffset += tokensThisLoop;  // cannot ignore Step5
      if (likely(expertI < expertNum)) {
          tokensThisLoop = ubTokens.GetValue(expertI);
          if (tokensThisLoop == 0) {continue;}  // empty expert contributes nothing
          tokens = tokensThisLoop;
      }
      // Step0: determine expert parallelism and core number for each expert.
      ComputeExpertParallNum(expertI, tilingData->mm1TilingData.baseM, mm1ExpertParallInfo);
      ComputeExpertParallNum(expertI, tilingData->mm1TilingData.baseM, mm2ExpertParallInfo);

      // Step1: mm1
      if (mm1ExpertParallInfo.expertParallelism > 0) {
          MM1Process(mm1ExpertParallInfo, mnConfig, whetherFirstMM1);
          // Step2: sync — ensure mm1 results have been flushed before all
          // cores rendezvous ahead of mm2
          set_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
          wait_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
          SyncAll<true>();
      }

      // Step3: mm2
      if (mm2ExpertParallInfo.expertParallelism > 0) {
          MM2Process(mm2ExpertParallInfo, whetherSyncBeforeMM1, mnConfig, whetherWaitMM2);
      }

      // Step4: drop the experts just computed from their parallel buffers
      if (mm1ExpertParallInfo.expertParallelism > 0) {
          mm1ExpertParallInfo.Clear(mm1ExpertParallInfo.start);
      }
      if (mm2ExpertParallInfo.expertParallelism > 0) {
          mm2ExpertParallInfo.Clear(mm2ExpertParallInfo.start);
      }
  }
}


/**
 * Run mm1 for the current batch of experts: split the cube cores evenly over
 * the batched experts, tile each expert's (tokens x n1) matmul, and launch
 * this core's share. whetherFirstMM1 is cleared after the first launch so
 * later batches perform the pre-mm1 sync.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::MM1Process(
    ExpertParallInfo& mm1ExpertParallInfo, MNConfig& mnConfig, bool& whetherFirstMM1) {
    uint32_t coreNumEachExpert = cubeCoreNum / mm1ExpertParallInfo.expertParallelism;
    mnConfig.SetConstriant(tokens, n1, tilingData->mm1TilingData.baseM,
                            tilingData->mm1TilingData.baseN * tilingData->mm1TilingData.stepN, coreNumEachExpert);
    KernelTiling(mnConfig);
    coreNumEachExpert = mnConfig.blockDimM * mnConfig.blockDimN;  // cores actually used per expert after tiling
    size_t expertOrderInBuf = Min<uint32_t>(mm1ExpertParallInfo.start + coreIdx / coreNumEachExpert,
                                            mm1ExpertParallInfo.maxSize - 1);
    // make sure which expert each core/cube needs to compute
    uint32_t expertIMM = mm1ExpertParallInfo.expertIdxBuf[expertOrderInBuf];
    tokens = ubTokens.GetValue(expertIMM);
    if constexpr (IsSameType<c1T, half>::value) { SyncBeforeMM1(whetherFirstMM1); }

    if (coreIdx < mm1ExpertParallInfo.expertParallelism * coreNumEachExpert && subBlockIdx == 0) {
        // assert mm1ExpertParallInfo.size == mm1ExpertParallInfo.start + mm1ParallelExpertsNum
        // For this expertOrderInBuf/expert, determine the offset of output
        // local m-offset of the expert
        mnConfig.m = tokens;
        uint32_t baseBlockIdx = coreIdx % coreNumEachExpert;  // this core's tile index within its expert
        uint32_t outRowOffset = mm1ExpertParallInfo.LocalOffset[expertOrderInBuf]
                                - mm1ExpertParallInfo.LocalOffset[mm1ExpertParallInfo.start];
        // todo, contain activation currently
        MM1Compute(mnConfig, baseBlockIdx, expertIMM,
                         mm1ExpertParallInfo.GlobalOffset[expertOrderInBuf], outRowOffset);
    }
    whetherFirstMM1 = false;
}


/**
 * Run mm2 over the buffered experts, expertParallelism at a time (the final
 * group may be smaller). Cores are partitioned per expert as in MM1Process;
 * cores beyond the used count and the second sub-block skip the work.
 * whetherWaitMM2 records that an mm2 IterateAll is in flight so the next mm1
 * round can wait on it.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::MM2Process(ExpertParallInfo& mm2ExpertParallInfo, bool whetherSyncBeforeMM1, MNConfig& mnConfig,
                                      bool& whetherWaitMM2) {
  for (uint32_t i = mm2ExpertParallInfo.start; i < mm2ExpertParallInfo.size;
      i += mm2ExpertParallInfo.expertParallelism) {
      if (i + mm2ExpertParallInfo.expertParallelism > mm2ExpertParallInfo.size) {
          // tail group: fewer experts remain than the configured parallelism
          mm2ExpertParallInfo.expertParallelism = mm2ExpertParallInfo.size - i;
      }
      uint32_t coreNumEachExpert = cubeCoreNum / mm2ExpertParallInfo.expertParallelism;

      if (coreIdx >= coreNumEachExpert * mm2ExpertParallInfo.expertParallelism || subBlockIdx != 0) {
          continue;  // this core has no tile in this group
      }
      uint32_t expertOrderInBuf = i + coreIdx / coreNumEachExpert;
      uint32_t expertIMM = mm2ExpertParallInfo.expertIdxBuf[expertOrderInBuf];
      tokens = ubTokens.GetValue(expertIMM);
      mnConfig.SetConstriant(tokens, n2, tilingData->mm2TilingData.baseM,
                              tilingData->mm2TilingData.baseN * tilingData->mm2TilingData.stepN, coreNumEachExpert);
      KernelTiling(mnConfig);
      uint32_t baseBlockIdx = coreIdx % coreNumEachExpert;  // tile index within this expert
      coreNumEachExpert = mnConfig.blockDimM * mnConfig.blockDimN;  // cores actually needed after tiling
      if (baseBlockIdx < coreNumEachExpert) {
          uint32_t tokensRowOffset = mm2ExpertParallInfo.LocalOffset[expertOrderInBuf]
                                      - mm2ExpertParallInfo.LocalOffset[mm2ExpertParallInfo.start];
          uint32_t outRowOffset = mm2ExpertParallInfo.GlobalOffset[expertOrderInBuf];
          if constexpr (IsSameType<c1T, half>::value) { ControlMM2(); }
          MM2Compute(mnConfig, baseBlockIdx, expertIMM, tokensRowOffset, outRowOffset,
                            whetherSyncBeforeMM1);
          whetherWaitMM2 = whetherSyncBeforeMM1;
      }
  }
}


/**
 * Split an (m x n) matmul across mnConfig.coreNum cores.
 * First builds an initial split (prefer splitting N), then — when that split
 * uses more than half the cores — searches coarser N splits, keeping the one
 * with the smallest per-core work (singleM * singleN); ties are broken by
 * fewer total blocks, then by smaller singleM + singleN. Results are written
 * back into mnConfig (singleM/singleN/blockDimM/blockDimN).
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::KernelTiling(MNConfig& mnConfig) {
  uint32_t maxNLoops = Ceil(mnConfig.n, mnConfig.baseN);
  uint32_t maxMLoops = Ceil(mnConfig.m, mnConfig.baseM);
  uint32_t curNLoops = Min(maxNLoops, mnConfig.coreNum);
  if (unlikely(curNLoops == 0)) {
      curNLoops = 1;  // guard against n == 0 producing a zero divisor below
  }
  uint32_t curMLoops = Min(maxMLoops, mnConfig.coreNum / curNLoops);
  uint32_t curSingleN = AlignUp(Ceil(mnConfig.n, curNLoops), mnConfig.baseN);
  uint32_t curSingleM = AlignUp<CUBE_BASE_ALIGN_FACTOR>(Ceil(mnConfig.m, curMLoops));
  curSingleM = Min(Max(curSingleM, mnConfig.baseM), mnConfig.m);  // clamp to [baseM, m]
  mnConfig.singleM = curSingleM;
  mnConfig.singleN = curSingleN;
  mnConfig.blockDimM = Ceil(mnConfig.m, curSingleM);
  mnConfig.blockDimN = Ceil(mnConfig.n, curSingleN);
  if (curNLoops * curMLoops <= (mnConfig.coreNum >> 1)) {
      return;  // already using at most half the cores; initial split is good enough
  }
  uint32_t minSingleCore = mnConfig.singleM * mnConfig.singleN;  // calc loop on the single core
  while (curNLoops > 1) {
      // skip curNLoops in range (maxNLoops/2) + 1 to (maxNLoops - 1)
      curNLoops = Min(curNLoops - 1, Ceil(mnConfig.n, curSingleN + mnConfig.baseN));
      curSingleN = AlignUp(Ceil(mnConfig.n, curNLoops), mnConfig.baseN);
      curNLoops = Ceil(mnConfig.n, curSingleN);  // re-derive the real loop count after alignment
      if (unlikely(curNLoops == 0)) {break;}
      curMLoops = Min(mnConfig.coreNum / curNLoops, maxMLoops);
      if (curNLoops * curMLoops <= (mnConfig.coreNum >> 1)) {
          break;  // stop once the candidate would leave half the cores idle
      }
      curSingleM = AlignUp<CUBE_BASE_ALIGN_FACTOR>(Ceil(mnConfig.m, curMLoops));
      curSingleM = Min(Max(curSingleM, mnConfig.baseM), mnConfig.m);
      curMLoops = Ceil(mnConfig.m, curSingleM);
      uint32_t curSingleCore = curSingleN * curSingleM;
      // select the smaller calc loop on the single core, preferred split N
      if (curSingleCore < minSingleCore ||
          (curSingleCore == minSingleCore && curNLoops * curMLoops < mnConfig.blockDimN * mnConfig.blockDimM) ||
          (curSingleCore == minSingleCore && curNLoops * curMLoops == mnConfig.blockDimN * mnConfig.blockDimM
          && curSingleM + curSingleN < mnConfig.singleM + mnConfig.singleN)
          ) {
          mnConfig.blockDimM = curMLoops;
          mnConfig.blockDimN = curNLoops;
          mnConfig.singleM = curSingleM;
          mnConfig.singleN = curSingleN;
          minSingleCore = curSingleCore;
      }
  }
}

/**
 * Decide once, up front, whether a full-core SyncAll is needed before each
 * mm1 round (member whetherSyncBeforeMM1, assumed true by default; only ever
 * cleared here). The decision compares worst-case core usage of mm1 against
 * the minimum core usage of mm2 using trial KernelTiling runs.
 * NOTE(review): maxMM1UsedCubeCore is computed but never used — the final
 * condition compares minMM2UsedCubeCore against maxMM1ExpertParallelism
 * instead; verify whether maxMM1UsedCubeCore was intended there.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::WhetherSyncBeforeMM1(MNConfig& mnConfig, uint32_t maxMM1ExpertParallelism,
                                                                        uint32_t maxMM2ExpertParallelism) {
  uint32_t maxMM1UsedCubeCore;
  uint32_t minMM2UsedCubeCore;
  uint32_t mm1ExpertParallelUsedCore;
  uint32_t mm1SingleExpertUsedCore;

  // trial tiling 1: mm1 with expert-parallel batching (tokens = one baseM block)
  tokens = tilingData->mm1TilingData.baseM;
  if (unlikely(maxMM1ExpertParallelism == 0)) {
      maxMM1ExpertParallelism = 1;  // avoid division by zero below
  }
  mnConfig.SetConstriant(tokens, n1, tilingData->mm1TilingData.baseM,
                          tilingData->mm1TilingData.baseN * tilingData->mm1TilingData.stepN, cubeCoreNum / maxMM1ExpertParallelism);
  KernelTiling(mnConfig);
  mm1ExpertParallelUsedCore = mnConfig.blockDimM * mnConfig.blockDimN * maxMM1ExpertParallelism;

  if (expertNum > 1 && tilingData->ffnBaseParams.maxTokens <= tilingData->mm1TilingData.baseM) {
      // every expert is small: parallel batching is the only mm1 shape
      maxMM1UsedCubeCore = mm1ExpertParallelUsedCore;
  } else {
      // trial tiling 2: the largest expert computed alone on all cores
      tokens = tilingData->ffnBaseParams.maxTokens;
      mnConfig.SetConstriant(tokens, n1, tilingData->mm1TilingData.baseM,
                              tilingData->mm1TilingData.baseN * tilingData->mm1TilingData.stepN, cubeCoreNum);
      KernelTiling(mnConfig);
      mm1SingleExpertUsedCore = mnConfig.blockDimM * mnConfig.blockDimN;
      maxMM1UsedCubeCore = Max(mm1SingleExpertUsedCore, mm1ExpertParallelUsedCore);
  }

  // trial tiling 3: smallest mm2 shape (one baseM block of tokens)
  tokens = tilingData->mm2TilingData.baseM;
  mnConfig.SetConstriant(tokens, n2, tilingData->mm2TilingData.baseM,
                          tilingData->mm2TilingData.baseN * tilingData->mm2TilingData.stepN, cubeCoreNum);
  KernelTiling(mnConfig);
  minMM2UsedCubeCore = mnConfig.blockDimM * mnConfig.blockDimN;
  if (minMM2UsedCubeCore >= maxMM1ExpertParallelism && n1 > INT8_SYNC_N1_SIZE) {
    whetherSyncBeforeMM1 = false;
  }
}

/**
 * Pre-mm1 synchronization: drain any in-flight mm2 iteration this core
 * launched, then (unless this is the very first mm1 round) rendezvous all
 * cores when whetherSyncBeforeMM1 is set.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::SyncBeforeMM1(bool whetherFirstMM1) {
  if (whetherWaitMM2) {
    // finish the asynchronous mm2 started in the previous round
    mm2.WaitIterateAll();
    mm2.End();
    whetherWaitMM2 = false;
  }
  if (!whetherFirstMM1 && whetherSyncBeforeMM1) {
    SyncAll<true>();
  }
}

/**
 * Before launching a new mm2, wait out the previous asynchronous mm2
 * iteration if one is still pending on this core.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::ControlMM2() {
  if (!whetherWaitMM2) {
    return;  // nothing in flight
  }
  mm2.WaitIterateAll();
  mm2.End();
  whetherWaitMM2 = false;
}

/**
 * Compute this core's tile of mm1 (x @ weight1 + bias1) for one expert,
 * writing the result into the mm1 workspace and then running the elementwise
 * stage (dequant/activation/quant) over it.
 * baseBlockIdx selects the (mIdx, nIdx) tile from mnConfig's grid;
 * tokensOffset is the expert's row offset in x; outRowOffset its row offset
 * in the workspace.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::MM1Compute(MNConfig& mnConfig, uint32_t baseBlockIdx, uint32_t expertIdx,
                                      uint32_t tokensOffset, uint32_t outRowOffset) {
  uint32_t mIdx = baseBlockIdx / mnConfig.blockDimN;
  uint32_t nIdx = baseBlockIdx % mnConfig.blockDimN;
  uint32_t curSingleN = mnConfig.singleN;
  uint32_t tailN = nIdx * mnConfig.singleN;  // column offset of this tile
  if (nIdx == mnConfig.blockDimN - 1) {
      curSingleN = mnConfig.n - tailN;  // last column tile may be narrower
  }
  uint32_t curSingleM = mnConfig.singleM;
  if (mIdx == mnConfig.blockDimM - 1) {
      curSingleM = mnConfig.m - mIdx * curSingleM;  // last row tile may be shorter
  }
  uint64_t outOffset = (outRowOffset + mIdx * mnConfig.singleM) * (uint64_t)n1 + tailN;
  uint64_t xCoreOffset = (tokensOffset + mIdx * mnConfig.singleM) * k1;
  uint64_t w1CoreOffset = expertIdx * (uint64_t)k1 * n1 + tailN;  // this expert's weight slice

  mm1.SetOrgShape(mnConfig.m, n1, k1);
  mm1.SetSingleShape(curSingleM, curSingleN, k1);
  mm1.SetTensorA(xGm[xCoreOffset]);
  mm1.SetTensorB(weight1Gm[w1CoreOffset]);
  if (hasBias1) {
      mm1.SetBias(bias1Gm[expertIdx * n1 + tailN]);
  } else {
      mm1.ClearBias();
  }
  if constexpr (isSmooth) {
    scaleOffset = expertIdx * n1 + tailN;  // consumed later by ScaleDataCopy
  }
  if constexpr (IsSameType<c1T, half>::value) {
    // cube-side dequant: hand the per-channel scales to the matmul
    if constexpr (IsSameType<dequantT, float>::value) {
      // float scales must first be widened to the uint64 layout in workspace
      CastDeqScale(expertIdx * n1 + tailN, n1, curSingleM, curSingleN, true);
      mm1.SetQuantVector(deqScale1UInt64Gm[(workspace1Size + workspace2Size) / sizeof(uint64_t) + expertIdx * n1 + tailN]);
    } else {
      mm1.SetQuantVector(deqScale1Gm[expertIdx * n1 + tailN]);
    }
  } else {
    deqscale1Offset = expertIdx * n1 + tailN;  // vector-side dequant happens in Elewise1
  }
  mm1.template IterateAll<true>(mm1WorkspaceGm[outOffset], false);
  mm1.End();
  activeOffset = workspace1Size / dataTypeSize + outOffset;  // activation output region in workspace
  Elewise1(curSingleM, curSingleN, outOffset, activeOffset, expertIdx);
}


/**
 * Compute this core's tile of mm2 (activation @ weight2 + bias2) for one
 * expert. For c1T == half the cube dequantizes and writes y directly
 * (asynchronously when whetherSyncBeforeMM1 is set); otherwise mm2 writes
 * int32 results to workspace and Elewise2 dequantizes into y.
 * NOTE(review): the scale offsets here use `nIdx * singleN2` while the output
 * offset uses `tailN` (= nIdx * mnConfig.singleN) — verify these two strides
 * are guaranteed to agree.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::MM2Compute(MNConfig& mnConfig, uint32_t baseBlockIdx, uint32_t expertIdx,
                                      uint32_t tokensRowOffset, uint32_t outRowOffset, bool whetherSyncBeforeMM1) {
  uint32_t mIdx = baseBlockIdx / mnConfig.blockDimN;
  uint32_t nIdx = baseBlockIdx % mnConfig.blockDimN;
  uint32_t curSingleN = mnConfig.singleN;
  uint32_t tailN = nIdx * mnConfig.singleN;  // column offset of this tile
  if (nIdx == mnConfig.blockDimN - 1) {
      curSingleN = mnConfig.n - tailN;  // last column tile may be narrower
  }
  uint32_t curSingleM = mnConfig.singleM;
  if (mIdx == mnConfig.blockDimM - 1) {
      curSingleM = mnConfig.m - mIdx * curSingleM;  // last row tile may be shorter
  }
  uint64_t outOffset = (outRowOffset + mIdx * mnConfig.singleM) * (uint64_t)n2 + tailN;
  // A-matrix: mm1's activated output, stored after workspace1 in T units
  uint64_t xCoreOffset = workspace1Size / dataTypeSize + (tokensRowOffset + mIdx * mnConfig.singleM) * k2;
  uint64_t w2CoreOffset = expertIdx * (uint64_t)k2 * n2 + tailN;  // this expert's weight slice

  mm2.SetOrgShape(mnConfig.m, n2, k2);
  mm2.SetSingleShape(curSingleM, curSingleN, k2);
  mm2.SetTensorA(mm2WorkspaceGm[xCoreOffset]);
  mm2.SetTensorB(weight2Gm[w2CoreOffset]);
  if (hasBias2) {
      mm2.SetBias(bias2Gm[expertIdx * n2 + tailN]);
  } else {
      mm2.ClearBias();
  }
  if constexpr (IsSameType<c1T, half>::value) {
    // cube-side dequant straight into y
    if constexpr (IsSameType<dequantT, float>::value) {
      CastDeqScale(expertIdx * n2 + nIdx * singleN2, n2, curSingleM, curSingleN, false);
      mm2.SetQuantVector(deqScale2UInt64Gm[(workspace1Size + workspace2Size + expertNum * n1 * sizeof(uint64_t)) / sizeof(uint64_t) + expertIdx * n2 + nIdx * singleN2]);
    } else {
      mm2.SetQuantVector(deqScale2Gm[expertIdx * n2 + nIdx * singleN2]);
    }
    // async launch when no pre-mm1 sync is needed; ControlMM2 waits later
    mm2.template IterateAll<false>(yGm[outOffset], 0, false, whetherSyncBeforeMM1);
  } else {
    // vector-side dequant: mm2 writes int32 to workspace, Elewise2 produces y
    deqscale2Offset = expertIdx * n2 + nIdx * singleN2;
    mm2OutOffset = (workspace1Size + workspace2Size) / mmOutTypeSize + outOffset;
    mm2.template IterateAll<true>(mm2OutWorkspaceGm[mm2OutOffset], false);
    Elewise2(curSingleM, curSingleN, mm2OutOffset, outOffset, expertIdx);
  }
}

/**
 * Quantize the activation output (actOut) back to the mm2 input type T,
 * selecting one of three modes by template parameters:
 *   - isSmooth + c1T==int32_t: bf16 per-channel — cast actOut and the float
 *     scales to half, then AscendQuant with per-channel scale;
 *   - isSmooth + c1T==half: fp16 per-channel — cast only the scales to half;
 *   - otherwise: per-tensor — scalar scale/offset read from GM by expertIdx.
 * The result is enqueued on vecOutQueue.
 */
template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::QuantCompute(uint32_t computeSize, uint32_t expertIdx, uint32_t computeBaseN1) {
  // quant compute
  uint32_t tmpsize = AlignUp<UB_BLOCK_UNIT_SIZE>(computeSize);
  if constexpr (isSmooth && IsSameType<c1T, int32_t>::value) {
    // bf16 per-channel mode
    LocalTensor<float> scaleSrcUb = scaleQueue.DeQue<float>();
    LocalTensor<T> quantOutUb = vecOutQueue.AllocTensor<T>();
    // scratch layout inside quantTmp: [fp16 src | fp16 scales | quant temp]
    LocalTensor<half> srcUbFp16 = quantTmp.template ReinterpretCast<half>();
    LocalTensor<half> scalecUbFp16 = srcUbFp16[ubCalSize];
    LocalTensor<uint8_t> quantTmp1 = scalecUbFp16[computeBaseN1].template ReinterpretCast<uint8_t>();
    Cast(srcUbFp16, actOut, RoundMode::CAST_NONE, tmpsize);
    pipe_barrier(PIPE_V);
    Cast(scalecUbFp16, scaleSrcUb, RoundMode::CAST_NONE, computeBaseN1);
    pipe_barrier(PIPE_V);
    AscendQuant(quantOutUb, srcUbFp16, quantTmp1, scalecUbFp16, static_cast<half>(quantOffset[expertIdx]), computeBaseN1, tmpsize);
    scaleQueue.FreeTensor(scaleSrcUb);
    vecOutQueue.EnQue<T>(quantOutUb);
  } else if constexpr (isSmooth && IsSameType<c1T, half>::value) {
    // fp16 per-channel mode
    LocalTensor<float> scaleSrcUb = scaleQueue.DeQue<float>();
    // fp16 scales live right after the first ubCalSize halves of tmpBuff
    LocalTensor<half> scaleSrcUbFp16 = tmpBuff.ReinterpretCast<half>();
    scaleSrcUbFp16 = scaleSrcUbFp16[ubCalSize];
    quantTmp = tmpBuff[ubCalSize * sizeof(half) + computeBaseN1 * sizeof(half)];
    Cast(scaleSrcUbFp16, scaleSrcUb, RoundMode::CAST_NONE, computeBaseN1);
    pipe_barrier(PIPE_V);
    LocalTensor<T> quantOutUb = vecOutQueue.AllocTensor<T>();
    AscendQuant(quantOutUb, actOut, quantTmp, scaleSrcUbFp16,
                static_cast<half>(quantOffset[expertIdx]), computeBaseN1, tmpsize);
    scaleQueue.FreeTensor(scaleSrcUb);
    vecOutQueue.EnQue<T>(quantOutUb);
  } else {
    // bf16 and fp16 per-tensor mode
    LocalTensor<T> quantOutUb = vecOutQueue.AllocTensor<T>();
    AscendQuant(quantOutUb, actOut, quantTmp, quantScale[expertIdx], quantOffset[expertIdx], tmpsize);
    vecOutQueue.EnQue<T>(quantOutUb);
  }
}

template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::DequantDataCopy(uint32_t curBaseN1, DataCopyPadParams padParams) {
  // Stage one row of matmul1 dequant parameters (curBaseN1 values) from GM into UB
  // via scaleQueue; the consumer dequeues it in CastCompute.
  LocalTensor<dequantT> deqParamUb = scaleQueue.AllocTensor<dequantT>();
  DataCopyParams copyParams;
  copyParams.blockCount = 1;                              // a single burst
  copyParams.blockLen = curBaseN1 * outTypeSize;          // valid bytes for this tile
  copyParams.srcStride = (n1 - curBaseN1) * outTypeSize;  // remainder of the n1 row in GM
  copyParams.dstStride = 0;                               // pack contiguously in UB
  DataCopyPad(deqParamUb, deqScale1Gm[deqscale1Offset], copyParams, padParams);
  scaleQueue.EnQue(deqParamUb);
}

template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::ScaleDataCopy(uint32_t curBaseN1, uint32_t offsetN, DataCopyPadParams padParams) {
  // Stage the smooth-quant scales (float) for the current N tile from GM into UB
  // via scaleQueue; the consumer dequeues it in QuantCompute.
  LocalTensor<float> smoothScaleUb = scaleQueue.AllocTensor<float>();
  DataCopyParams copyParams;
  copyParams.blockCount = 1;                                // a single burst
  copyParams.blockLen = curBaseN1 * sizeof(float);          // valid bytes for this tile
  copyParams.srcStride = (n1 - curBaseN1) * sizeof(float);  // remainder of the n1 row in GM
  copyParams.dstStride = 0;                                 // pack contiguously in UB
  DataCopyPad(smoothScaleUb, ScaleGm[scaleOffset + offsetN], copyParams, padParams);
  scaleQueue.EnQue(smoothScaleUb);
}

template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
// Elementwise stage between matmul1 and matmul2. Walks the (curSingleM x curSingleN1)
// mm1 result in baseM1 x baseN1 tiles and, per tile:
//   1) copies the mm1 output tile from workspace into UB (padded to an aligned width);
//   2) if c1T is int32_t, loads dequant params (DequantDataCopy) and dequantizes to
//      float (CastCompute(true));
//   3) if isSmooth, loads the per-channel smooth-quant scales (ScaleDataCopy);
//   4) applies the activation, re-quantizes to T (QuantCompute) and writes the tile
//      to the mm2 input workspace.
// mm1OutOffset / activeOffset : element offsets into the mm1-out / mm2-in workspaces.
// expertIdx                   : selects the per-expert quant parameters.
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::Elewise1(uint32_t curSingleM, uint32_t curSingleN1,
                                                               uint64_t mm1OutOffset, uint64_t activeOffset, uint32_t expertIdx) {
  uint32_t curBaseM = baseM1;
  DataCopyPadParams padParams;
  uint32_t computeBaseN1;
  uint32_t computeSize;
  for (uint32_t offsetM = 0; offsetM < curSingleM; offsetM += baseM1) {
    // Shrink the tile height for the tail iteration in M.
    if (offsetM + baseM1 >= curSingleM) { curBaseM = curSingleM - offsetM; }
    uint32_t curBaseN1 = baseN1;
    for (uint32_t offsetN = 0; offsetN < curSingleN1; offsetN += baseN1) {
      // Shrink the tile width for the tail iteration in N.
      if (offsetN + baseN1 >= curSingleN1) { curBaseN1 = curSingleN1 - offsetN; }
      computeBaseN1 = AlignUp<UB_BLOCK_UNIT_SIZE>(curBaseN1);  // UB rows are block-aligned
      computeSize = curBaseM * computeBaseN1;
      // copy mm1 output from workspace
      LocalTensor<c1T> inLocal = vecInQueue.AllocTensor<c1T>();

      DataCopyParams intriParams1;
      intriParams1.blockLen = curBaseN1 * mmOutTypeSize;          // valid bytes per row
      intriParams1.blockCount = curBaseM;                         // rows in this tile
      intriParams1.srcStride = (n1 - curBaseN1) * mmOutTypeSize;  // skip rest of the n1 row in GM
      // destination padding between rows, expressed in UB blocks
      intriParams1.dstStride = (computeBaseN1 - curBaseN1) * mmOutTypeSize / UB_BLOCK_UNIT_SIZE;
      DataCopyPad(inLocal, mm1WorkspaceGm[mm1OutOffset + offsetM * n1 + offsetN], intriParams1, padParams);
      vecInQueue.EnQue(inLocal);

      LocalTensor<actT> actInput;
      if constexpr (IsSameType<c1T, int32_t>::value) {
        // int32 mm1 output: dequantize to float before the activation.
        DequantDataCopy(curBaseN1, padParams);

        dequantCountNum = curBaseM * computeBaseN1;
        dequantParamsNum = computeBaseN1;
        CastCompute(true); // cast int32 to float
        actInput = dequantOut;
      } else {
        // fp16 mm1 output: activation consumes the tile directly.
        actInput = vecInQueue.DeQue<actT>();
      }
      if constexpr (isSmooth) {
        ScaleDataCopy(curBaseN1, offsetN, padParams);
      }
      ActivationFunction(actOut, actInput, computeSize);
      // In the int32 path CastCompute already freed the input tensor.
      if constexpr (IsSameType<c1T, int32_t>::value == false) {
        vecInQueue.FreeTensor(actInput);
      }
      QuantCompute(computeSize, expertIdx, computeBaseN1);

      LocalTensor<T> activeResUb = vecOutQueue.DeQue<T>();

      // Scatter the packed UB tile back into the full n1-wide row layout in GM.
      DataCopyParams intriParams2;
      intriParams2.blockLen = curBaseN1 * dataTypeSize;
      intriParams2.blockCount = curBaseM;
      intriParams2.srcStride = 0;
      intriParams2.dstStride = (n1 - curBaseN1) * dataTypeSize;
      DataCopyPad(mm2WorkspaceGm[activeOffset + offsetM * n1 + offsetN], activeResUb, intriParams2);
      vecOutQueue.FreeTensor(activeResUb);
    }
  }
}

template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
// Elementwise stage after matmul2. Walks the (curSingleM x curSingleN2) mm2 result
// in baseM1 x baseN1 tiles and, per tile:
//   1) copies the mm2 int32 output tile from workspace into UB (padded to an aligned width);
//   2) stages the mm2 dequant parameters for the tile;
//   3) dequantizes int32 -> c2T (CastCompute(false)) and writes the tile to yGm.
// mm2OutOffset / activeOffset : element offsets into the mm2-out workspace / output.
// expertIdx                   : kept for interface symmetry with Elewise1; the scale
//                               source is addressed through deqscale2Offset here.
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::Elewise2(uint32_t curSingleM, uint32_t curSingleN2,
                                                               uint64_t mm2OutOffset, uint64_t activeOffset,
                                                               uint32_t expertIdx) {
  uint32_t curBaseM = baseM1;
  DataCopyPadParams padParams;
  uint32_t computeBaseN2;
  for (uint32_t offsetM = 0; offsetM < curSingleM; offsetM += baseM1) {
    // Shrink the tile height for the tail iteration in M.
    if (offsetM + baseM1 >= curSingleM) {
      curBaseM = curSingleM - offsetM;
    }
    uint32_t curBaseN2 = baseN1;
    for (uint32_t offsetN = 0; offsetN < curSingleN2; offsetN += baseN1) {
      // Shrink the tile width for the tail iteration in N.
      if (offsetN + baseN1 >= curSingleN2) {
        curBaseN2 = curSingleN2 - offsetN;
      }
      computeBaseN2 = AlignUp<GetNumInUbBlock<c2T>()>(curBaseN2);  // pad
      // copy mm2 output from workspace
      LocalTensor<c1T> inLocal = vecInQueue.AllocTensor<c1T>();

      DataCopyParams intriParams1;
      intriParams1.blockLen = curBaseN2 * mmOutTypeSize;          // valid bytes per row
      intriParams1.blockCount = curBaseM;                         // rows in this tile
      intriParams1.srcStride = (n2 - curBaseN2) * mmOutTypeSize;  // skip rest of the n2 row in GM
      // destination padding between rows, expressed in UB blocks
      intriParams1.dstStride = (computeBaseN2 - curBaseN2) * mmOutTypeSize / UB_BLOCK_UNIT_SIZE;
      DataCopyPad(inLocal, mm2OutWorkspaceGm[mm2OutOffset + offsetM * n2 + offsetN], intriParams1, padParams);
      vecInQueue.EnQue(inLocal);

      // Stage one row of mm2 dequant parameters for this tile.
      // NOTE(review): unlike ScaleDataCopy, the source index does not add offsetN;
      // confirm deqscale2Offset addresses the correct channels when curSingleN2 > baseN1.
      LocalTensor<dequantT> dequantLocal = scaleQueue.AllocTensor<dequantT>();
      DataCopyParams intriParams3;
      intriParams3.blockLen = curBaseN2 * outTypeSize;
      intriParams3.blockCount = 1;
      intriParams3.srcStride = (n2 - curBaseN2) * outTypeSize;
      intriParams3.dstStride = 0;
      DataCopyPad(dequantLocal, deqScale2Gm[deqscale2Offset], intriParams3, padParams);
      scaleQueue.EnQue(dequantLocal);

      dequantCountNum = curBaseM * computeBaseN2;
      dequantParamsNum = computeBaseN2;
      CastCompute(false);  // cast int32 to bf16

      // output from buffer to gm
      LocalTensor<c2T> quantSrcUb = vecOutQueue.DeQue<c2T>();

      // Scatter the packed UB tile back into the full n2-wide row layout in GM.
      DataCopyParams intriParams2;
      intriParams2.blockLen = curBaseN2 * outTypeSize;
      intriParams2.blockCount = curBaseM;
      intriParams2.srcStride = 0;
      intriParams2.dstStride = (n2 - curBaseN2) * outTypeSize;
      DataCopyPad(yGm[activeOffset + offsetM * n2 + offsetN], quantSrcUb, intriParams2);
      vecOutQueue.FreeTensor(quantSrcUb);
    }
  }
}

template <typename T, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT, typename actT, typename dequantT, bool isSmooth>
__aicore__ inline void FFNQuant<T, mm1Type, mm2Type, c1T, c2T, biasT, actT, dequantT, isSmooth>::CastCompute(bool isMM1) {
  // Dequantize an int32 matmul tile using the staged dequant parameters.
  // isMM1 == true : result lands in the float buffer dequantOut (feeds the activation).
  // isMM1 == false: result is produced as c2T and enqueued on vecOutQueue (final output).
  LocalTensor<c1T> mmTileUb = vecInQueue.DeQue<c1T>();
  LocalTensor<dequantT> deqParamUb = scaleQueue.DeQue<dequantT>();
  if (!isMM1) {
    // mm2 path: dequant straight into the output dtype and hand the tile downstream.
    LocalTensor<c2T> outTileUb = vecOutQueue.AllocTensor<c2T>();
    AscendDequant(outTileUb, mmTileUb, deqParamUb, actTmp, {1, dequantCountNum, dequantParamsNum});
    vecInQueue.FreeTensor(mmTileUb);
    scaleQueue.FreeTensor(deqParamUb);
    vecOutQueue.EnQue<c2T>(outTileUb);
  } else {
    // mm1 path: keep the dequantized values in dequantOut for ActivationFunction.
    AscendDequant(dequantOut, mmTileUb, deqParamUb, actTmp, {1, dequantCountNum, dequantParamsNum});
    vecInQueue.FreeTensor(mmTileUb);
    scaleQueue.FreeTensor(deqParamUb);
  }
}
}  // namespace FFN

#endif  // FFN_QUANT_CPP
