/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file ffn_antiquant.cpp
 * \brief
 */
#ifndef FFN_ANTI_QUANT_CPP
#define FFN_ANTI_QUANT_CPP

#include "ffn_antiquant.h"

namespace FFN {

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::Init(
    __gm__ uint8_t* x, __gm__ uint8_t* weight1, __gm__ uint8_t* weight2, __gm__ uint8_t* expertTokens,
    __gm__ uint8_t* bias1, __gm__ uint8_t* bias2, __gm__ uint8_t* antiQuantScale1, __gm__ uint8_t* antiQuantScale2,
    __gm__ uint8_t* antiQuantOffset1, __gm__ uint8_t* antiQuantOffset2, __gm__ uint8_t* y, __gm__ uint8_t* workSpace,
    const FFNTilingData* __restrict tiling, TPipe* tPipe) {
  // Entry-point initialization: bind tiling/pipe, unpack scalar tiling fields,
  // then attach every GM tensor to its region of the raw kernel arguments.
  tilingData = tiling;
  pipe = tPipe;
  InitTilingData();

  // init global buffer
  xGm.SetGlobalBuffer((__gm__ T*)x);
  weight1Gm.SetGlobalBuffer((__gm__ int8_t*)weight1);
  if (bias1 != nullptr) {
    hasBias1 = true;
    bias1Gm.SetGlobalBuffer((__gm__ biasT*)bias1);
  }
  weight2Gm.SetGlobalBuffer((__gm__ int8_t*)weight2);
  if (bias2 != nullptr) {
    hasBias2 = true;
    bias2Gm.SetGlobalBuffer((__gm__ biasT*)bias2);
  }

  scale1WorkspaceGm.SetGlobalBuffer((__gm__ T*)antiQuantScale1);
  scale2WorkspaceGm.SetGlobalBuffer((__gm__ T*)antiQuantScale2);
  offset1WorkspaceGm.SetGlobalBuffer((__gm__ T*)antiQuantOffset1);
  offset2WorkspaceGm.SetGlobalBuffer((__gm__ T*)antiQuantOffset2);

  yGm.SetGlobalBuffer((__gm__ c2T*)y);
  mm1WorkspaceGm.SetGlobalBuffer((__gm__ c1T*)workSpace);
  mm2WorkspaceGm.SetGlobalBuffer((__gm__ T*)workSpace);
  // init w1 and w2 workspace (casted weights live behind the two matmul workspaces)
  uint64_t offAddr = uint64_t(workspace1Size) + uint64_t(workspace2Size);
  w1WorkspaceGm.SetGlobalBuffer((__gm__ T*)(workSpace + offAddr));
  // Promote to 64-bit BEFORE multiplying: k1 * n1 * dataTypeSize * 2 evaluated in
  // 32-bit arithmetic can overflow for large weight shapes and corrupt the w2 base.
  offAddr += uint64_t(k1) * uint64_t(n1) * uint64_t(dataTypeSize) * 2;
  w2WorkspaceGm.SetGlobalBuffer((__gm__ T*)(workSpace + offAddr));

  InitLocalBuff(expertTokens, tPipe);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::InitLocalBuff(
    __gm__ uint8_t* expertTokens, TPipe* tPipe) {
  // Allocates the unified-buffer (UB) queues used by the vector pipeline and
  // loads the per-expert token counts from GM into UB (ubTokens).
  pipe = tPipe;

  // scale buffer must cover singleN; 32-byte alignment is required
  pipe->InitBuffer(scaleInQueue, 2, bestCopySize * sizeof(T));   // double-buffered antiquant scales
  pipe->InitBuffer(offsetInQueue, 2, bestCopySize * sizeof(T));  // double-buffered antiquant offsets
  pipe->InitBuffer(vecInQueue, 2, ubCalSize * sizeof(c1T));      // matmul-1 output / raw weight tiles in
  pipe->InitBuffer(vecOutQueue, 2, ubCalSize * sizeof(T));       // activated / casted tiles out
  pipe->InitBuffer(tmpBuff, ubRestBytes);                        // scratch for activation / antiquant APIs

  tmpUb = tmpBuff.Get<T>();
  TBuf<QuePosition::VECCALC> eTokens64Buf;
  pipe->InitBuffer(eTokens64Buf, AlignUp<UB_BLOCK_UNIT_SIZE>(expertNum * sizeof(int64_t)));  // 32Byte alignment
  ubTokens = eTokens64Buf.Get<int64_t>();
  if (likely(expertTokens != nullptr)) {
    // copy tokens array from GM
    expertTokensGm.SetGlobalBuffer((__gm__ int64_t*)expertTokens);
    DataCopy(ubTokens, expertTokensGm, AlignUp<EXPERT_NUM_ALIGN>(expertNum));  // 32Byte alignment
    // Block the scalar pipe until the MTE2 copy lands, so GetValue on ubTokens is safe.
    set_flag(PIPE_MTE2, PIPE_S, EVENT_ID0);
    wait_flag(PIPE_MTE2, PIPE_S, EVENT_ID0);
    if (tilingData->ffnBaseParams.tokensIndexFlag) {
      // Tokens arrived as cumulative indices; convert them to per-expert counts.
      TokensIndicesToValues(ubTokens, expertNum);
    }
  } else {
    // No expert-tokens tensor: single implicit expert owning maxTokens tokens.
    ubTokens.SetValue(0, static_cast<int64_t>(tilingData->ffnBaseParams.maxTokens));
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::InitTilingData() {
  // Caches all tiling-data fields into member scalars so the hot paths avoid
  // repeated indirection through tilingData.
  curBlockIdx = GetBlockIdx();
  subBlockIdx = GetSubBlockIdx();
  coreIdx = curBlockIdx / GetTaskRation();  // physical core index shared by its sub-blocks

  totalTokens = tilingData->ffnBaseParams.totalTokens;
  maxTokens = tilingData->ffnBaseParams.maxTokens;
  k1 = tilingData->ffnBaseParams.k1;
  n1 = tilingData->ffnBaseParams.n1;
  k2 = n1;  // mm2 reduction dim equals mm1 output dim
  n2 = tilingData->ffnBaseParams.n2;
  expertNum = tilingData->ffnBaseParams.expertNum;
  coreNum = tilingData->ffnBaseParams.coreNum;
  activeType = tilingData->ffnBaseParams.activeType;
  dataTypeSize = 2;  // byte size of T; assumes T is 16-bit (fp16/bf16) — TODO confirm
  mm1DataTypeSize = sizeof(c1T);

  baseM1 = tilingData->ffnSingleCoreParams.baseM1;
  baseN1 = tilingData->ffnSingleCoreParams.baseN1;
  baseN2 = tilingData->ffnSingleCoreParams.baseN2;
  ubCalSize = tilingData->ffnSingleCoreParams.ubCalSize;
  ubRestBytes = tilingData->ffnSingleCoreParams.ubRestBytes;
  workspace1Size = tilingData->ffnBaseParams.workspace1Size;
  workspace2Size = tilingData->ffnBaseParams.workspace2Size;
  scale1GroupNum = tilingData->ffnBaseParams.scale1GroupNum;
  scale2GroupNum = tilingData->ffnBaseParams.scale2GroupNum;
  scale1GroupSize = k1 / scale1GroupNum;  // k elements covered by one antiquant scale group
  scale2GroupSize = k2 / scale2GroupNum;

  if constexpr (IsSameType<c1T, float>::value) {
    // fp32 mm1 output path; otherwise bestCopySize keeps its default from the header.
    bestCopySize = BF16_INT8_BEST_DATACOPY_BASE_SIZE;
  }
  if constexpr (IsSameType<wT, int4b_t>::value) {
    reciprocalOfOneByteMultiple = 2;  // 2: the reciprocal of half Byte.
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::Process() {
  // Main loop: handle every expert in order. Experts with zero tokens do no
  // compute but still advance the running token-offset bookkeeping.
  tokensOffset = 0;
  tokens = 0;
  for (; currentExpert < expertNum; currentExpert += 1) {
    tokens = static_cast<uint32_t>(ubTokens.GetValue(currentExpert));
    if (tokens > 0) {
      ComputeExpertSplitMN();
    }
    tokensOffset += tokens;
  }
}

// Chooses how to split an (m x n) problem across tilingCoreNum cores.
// NOTE the in/out convention: on entry nLoops/mLoops carry baseN/baseM (the
// per-core base block sizes); on exit they hold the chosen split counts along
// n and m. The search walks candidate n-split counts downward, derives the
// matching m-split, and keeps the candidate with the least per-core work
// (singleN * singleM), preferring a split along n on ties.
__aicore__ inline void FindCoreSplit(uint32_t m, uint32_t n, uint32_t tilingCoreNum,
                                     uint32_t& nLoops, uint32_t& mLoops) {
  uint32_t baseN = nLoops;  // recover the base block sizes from the in/out params
  uint32_t baseM = mLoops;
  uint32_t maxNLoops = Ceil(n, baseN);
  uint32_t maxMLoops = Ceil(m, baseM);
  nLoops = maxNLoops;  // fallback answer: maximal split on both axes
  mLoops = maxMLoops;
  uint32_t minSingleCore = m * n;  // calc loop on the single core
  uint32_t curNLoops = Min(maxNLoops, tilingCoreNum);
  while (curNLoops > 0) {
    // Round singleN up to a multiple of baseN, then recompute the实 loop count.
    uint32_t curSingleN = Ceil(n, curNLoops);
    curSingleN = Ceil(curSingleN, baseN) * baseN;
    curNLoops = Ceil(n, curSingleN);
    if (curNLoops == 0) { break; }
    // Remaining cores go to the m axis; singleM is cube-aligned and clamped to [baseM, m].
    uint32_t curMLoops = Min(tilingCoreNum / curNLoops, maxMLoops);
    uint32_t curSingleM = Ceil(m, curMLoops);
    curSingleM = Max(AlignUp<CUBE_BASE_ALIGN_FACTOR>(curSingleM), baseM);
    curSingleM = Min(curSingleM, m);
    curMLoops = Ceil(m, curSingleM);
    uint32_t curSingleCore = curSingleN * curSingleM;
    // select the smaller calc loop on the single core, preferred split N
    if (curSingleCore < minSingleCore ||
        (curSingleCore == minSingleCore && curNLoops * curMLoops < nLoops * mLoops)) {
      nLoops = curNLoops;
      mLoops = curMLoops;
      minSingleCore = curSingleCore;
    }
    // skip curNLoops in range (maxNLoops/2) + 1 to (maxNLoops - 1)
    curNLoops = Min(curNLoops - 1, Ceil(n, curSingleN + baseN));
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Per-expert kernel tiling. Splits the (tokens x Nlength) matmul output across
// coreNum cores (via FindCoreSplit), then derives per-core single shapes, tail
// shapes, and a separate core grid for the weight-cast phase. Results are stored
// into the mm1-prefixed members when isMatMul1, otherwise into the mm2 members.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::KernelTiling(uint32_t baseM,
    uint32_t baseN, uint32_t Nlength, bool isMatMul1) {
  // FindCoreSplit takes baseN/baseM in these refs and returns loop counts.
  uint32_t nLoops = baseN;  // core num used on n axis
  uint32_t mLoops = baseM;  // core num used on m axis
  FindCoreSplit(tokens, Nlength, coreNum, nLoops, mLoops);

  uint32_t singleN = Ceil(Nlength, nLoops);
  singleN = AlignUp<CUBE_BASE_ALIGN_FACTOR>(singleN);
  nLoops = Ceil(Nlength, singleN);  // re-derive loop count after alignment
  uint32_t singleM = Ceil(tokens, mLoops);  // mLoops >= 1
  singleM = AlignUp<CUBE_BASE_ALIGN_FACTOR>(singleM);
  singleM = singleM > baseM ? singleM : baseM;    // at least one base block tall
  singleM = singleM > tokens ? tokens : singleM;  // but never taller than the problem
  mLoops = Ceil(tokens, singleM);
  if (isMatMul1) {
    n1Loops = nLoops;
    m1Loops = mLoops;
    singleM1 = singleM;  // compute C matrix block length along m direction for each cube
    singleN1 = singleN;  // compute C matrix block length along n direction for each cube
    singleM1Tail = tokens - (m1Loops - 1) * singleM1;  // recompute last block length along m direction
    singleN1Tail = n1 - (n1Loops - 1) * singleN1;        // recompute last block length along n direction
    // Weight-cast grid: whole n1 per core along n; remaining cores split k1.
    castWeightSingleN1 = n1;
    castWeightN1Loops = Ceil(n1, castWeightSingleN1);
    castWeightSingleN1Tail = n1 - (castWeightN1Loops - 1) * castWeightSingleN1;
    castWeightK1Loops = coreNum / castWeightN1Loops;
    castWeightK1Loops = k1 / castWeightK1Loops > 0 ? castWeightK1Loops : k1;  // keep singleK >= 1
    castWeightSingleK1 = Ceil(k1, castWeightK1Loops);
    castWeightK1Loops = Ceil(k1, castWeightSingleK1);
    castWeightSingleK1Tail = k1 - (castWeightK1Loops - 1) * castWeightSingleK1;
  } else {
    n2Loops = nLoops;
    m2Loops = mLoops;
    singleM2 = singleM;
    singleN2 = singleN;
    singleM2Tail = tokens - (m2Loops - 1) * singleM2;
    singleN2Tail = n2 - (n2Loops - 1) * singleN2;
    // Same derivation for the w2 cast grid over (k2, n2).
    castWeightSingleN2 = n2;
    castWeightN2Loops = Ceil(n2, castWeightSingleN2);
    castWeightSingleN2Tail = n2 - (castWeightN2Loops - 1) * castWeightSingleN2;
    castWeightK2Loops = coreNum / castWeightN2Loops;
    castWeightK2Loops = k2 / castWeightK2Loops > 0 ? castWeightK2Loops : k2;  // keep singleK >= 1
    castWeightSingleK2 = Ceil(k2, castWeightK2Loops);
    castWeightK2Loops = Ceil(k2, castWeightSingleK2);
    castWeightSingleK2Tail = k2 - (castWeightK2Loops - 1) * castWeightSingleK2;
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::SelectCastWeight(
    uint32_t n, uint32_t curSingleK, uint32_t curSingleN, uint32_t kInOffset, uint64_t wInOffset,
    uint64_t wOutOffset, uint64_t scaleOffset, uint32_t groupNum, uint32_t groupSize, GlobalTensor<int8_t> weightGm,
    GlobalTensor<T> wWorkspaceGm, GlobalTensor<T> scaleWorkspaceGm, GlobalTensor<T> offsetWorkspaceGm) {
    // Compile-time dispatch between the two weight antiquantization paths:
    // per-group scales need the group-aware cast, everything else takes the
    // plain per-channel path. Both receive the identical argument list.
    if constexpr (!isPerGroup) {
      CastWeightNormal(n, curSingleK, curSingleN, kInOffset, wInOffset, wOutOffset, scaleOffset, groupNum, groupSize,
                       weightGm, wWorkspaceGm, scaleWorkspaceGm, offsetWorkspaceGm);
    } else {
      CastWeightPerGroup(n, curSingleK, curSingleN, kInOffset, wInOffset, wOutOffset, scaleOffset, groupNum, groupSize,
                         weightGm, wWorkspaceGm, scaleWorkspaceGm, offsetWorkspaceGm);
    }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Computes this core's (kIdx, nIdx) tile of the weight-cast grid: GM input
// offset into the quantized weight, GM output offset into the casted-weight
// workspace, and the scale/offset base — then runs the cast via SelectCastWeight.
// isMatMul1 selects the w1 (k1 x n1) vs w2 (k2 x n2) parameter set.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::CalcOffsetAndCastWeight(
      bool isMatMul1) {
  uint64_t wInOffset;
  uint64_t wOutOffset;
  uint64_t scaleOffset;
  uint32_t kInOffset;
  uint32_t kIdx;
  uint32_t nIdx;
  uint64_t wCoreOffsetKMulN;
  uint32_t curSingleN = isMatMul1 ? castWeightSingleN1 : castWeightSingleN2;
  uint32_t curSingleK = isMatMul1 ? castWeightSingleK1 : castWeightSingleK2;
  if (isMatMul1) {
    // Row-major core grid over (k, n): k index is the row, n index the column.
    kIdx = coreIdx / castWeightN1Loops;
    nIdx = coreIdx % castWeightN1Loops;
    if (nIdx == castWeightN1Loops - 1) {
      curSingleN = castWeightSingleN1Tail;  // last column gets the n remainder
    }
    if (kIdx == castWeightK1Loops - 1) {
      curSingleK = castWeightSingleK1Tail;  // last row gets the k remainder
    }
    // Input n-offset aligned for int4 packing (two int4 values per byte).
    uint32_t alignCastWeightSingleN1 = AlignUp(castWeightSingleN1, reciprocalOfOneByteMultiple);
    kInOffset = kIdx * castWeightSingleK1;
    wCoreOffsetKMulN = k1 * n1;  // one expert's w1 footprint in elements
    wInOffset = currentExpert * wCoreOffsetKMulN + nIdx * alignCastWeightSingleN1 + kInOffset * n1;
    wOutOffset = kInOffset * n1 + nIdx * castWeightSingleN1;  // workspace holds one expert at a time
    scaleOffset = currentExpert * n1 * scale1GroupNum + nIdx * castWeightSingleN1;
    SelectCastWeight(n1, curSingleK, curSingleN, kInOffset, wInOffset, wOutOffset, scaleOffset, scale1GroupNum,
              scale1GroupSize, weight1Gm, w1WorkspaceGm, scale1WorkspaceGm, offset1WorkspaceGm);
  } else {
    // Same derivation over the w2 grid (k2 x n2).
    kIdx = coreIdx / castWeightN2Loops;
    nIdx = coreIdx % castWeightN2Loops;
    if (nIdx == castWeightN2Loops - 1) {
      curSingleN = castWeightSingleN2Tail;
    }
    if (kIdx == castWeightK2Loops - 1) {
      curSingleK = castWeightSingleK2Tail;
    }
    uint32_t alignCastWeightSingleN2 = AlignUp(castWeightSingleN2, reciprocalOfOneByteMultiple);
    kInOffset = kIdx * castWeightSingleK2;
    wCoreOffsetKMulN = k2 * n2;
    wInOffset = currentExpert * wCoreOffsetKMulN + nIdx * alignCastWeightSingleN2 + kInOffset * n2;
    wOutOffset = kInOffset * n2 + nIdx * castWeightSingleN2;
    scaleOffset = currentExpert * n2 * scale2GroupNum+ nIdx * castWeightSingleN2;
    SelectCastWeight(n2, curSingleK, curSingleN, kInOffset, wInOffset, wOutOffset, scaleOffset, scale2GroupNum,
              scale2GroupSize, weight2Gm, w2WorkspaceGm, scale2WorkspaceGm, offset2WorkspaceGm);
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Runs one expert end-to-end: tile mm1 across cores, execute mm1 + activation
// (SplitMM1), then synchronize every core before mm2 consumes the activated
// output from workspace (SplitMM2).
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::ComputeExpertSplitMN() {
  KernelTiling(tilingData->mm1TilingData.baseM, tilingData->mm1TilingData.baseN, n1, true);
  SplitMM1();

  // Scalar pipe waits for the last MTE3 write of this core, then the full
  // cross-core sync guarantees all mm1/activation results are in GM before mm2.
  set_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
  wait_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
  SyncAll<true>();

  SplitMM2();
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// First matmul stage of the current expert: x[tokens, k1] * w1[k1, n1] followed
// by the activation. Also overlaps the w1/w2 antiquant weight casts with the
// matmul where possible.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::SplitMM1() {
  uint32_t tokensOffsetInner = tokensOffset;
  uint32_t curSingleM = singleM1;
  // calc mm1 tiling: row-major (m, n) core grid
  uint32_t m1Idx = coreIdx / n1Loops;
  uint32_t n1Idx = coreIdx % n1Loops;
  uint32_t curSingleN1 = singleN1;
  uint32_t expertIdx = currentExpert;
  uint32_t tilingCoreNum = n1Loops * m1Loops;
  // Only sub-block 0 of each cube core drives the matmul; cores beyond the
  // m x n grid sit out of the compute but may still cast weights below.
  bool isValidCore = (coreIdx < tilingCoreNum && subBlockIdx == 0);
  uint64_t OffsetTail = n1Idx * singleN1;
  if (isValidCore) {
    // Tail tiles along n / m take the remainder sizes.
    if (n1Idx == n1Loops - 1) {
      curSingleN1 = singleN1Tail;
    }
    if (m1Idx == m1Loops - 1) {
      curSingleM = singleM1Tail;
    }
    tokensOffsetInner = tokensOffset + m1Idx * singleM1;
    outOffset = m1Idx * singleM1 * n1 + OffsetTail;  // this core's tile in the mm1 workspace

    mm1.SetOrgShape(tokens, n1, k1);
    mm1.SetSingleShape(curSingleM, curSingleN1, k1);
    xCoreOffset = tokensOffsetInner * k1;
    w1CoreOffset = OffsetTail;
  }
  // Cores inside the cast grid antiquantize this expert's w1 into workspace.
  if (coreIdx < castWeightN1Loops * castWeightK1Loops) {
    CalcOffsetAndCastWeight(true);
  }
  SyncBeforeMM1();   // drain any previous expert's mm2 still in flight
  SyncAll<true>();   // every core must see the casted w1 before mm1 reads it
  if (isValidCore) {MM1Compute(expertIdx, OffsetTail);}
  // Overlap with mm1: prepare mm2 tiling and cast w2 while the cube works.
  KernelTiling(tilingData->mm2TilingData.baseM, tilingData->mm2TilingData.baseN, n2, false);
  if (coreIdx < castWeightN2Loops * castWeightK2Loops) {
    CalcOffsetAndCastWeight(false);
  }
  if (isValidCore) {
    mm1.WaitIterateAll();
    mm1.End();
    // Activation reads the mm1 tile and writes T-typed output into the mm2
    // input workspace (mm2WorkspaceGm is T-typed, hence the byte->element scale).
    activeOffset = workspace1Size / dataTypeSize + outOffset;
    Elewise1(curSingleM, curSingleN1, outOffset, activeOffset);
  }
}


template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Second matmul stage: activated mm1 output [tokens, k2] * w2[k2, n2] -> yGm.
// Assumes SplitMM1 already ran KernelTiling(..., false) and cast w2.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::SplitMM2() {
  uint32_t curSingleN2 = singleN2;
  uint32_t expertIdx = currentExpert;
  tokens = static_cast<uint32_t>(ubTokens.GetValue(expertIdx));  // refresh token count for this expert
  uint32_t tilingCoreNum = n2Loops * m2Loops;
  uint32_t m2Idx = coreIdx / n2Loops;
  uint32_t n2Idx = coreIdx % n2Loops;
  uint32_t curSingleM = singleM2;
  uint64_t OffsetTail = n2Idx * singleN2;

  if (coreIdx < tilingCoreNum) {
    // Tail tiles along m / n take the remainder sizes.
    if (m2Idx == m2Loops - 1) {
      curSingleM = singleM2Tail;
    }
    if (n2Idx == n2Loops - 1) {
      curSingleN2 = singleN2Tail;
    }

    w2CoreOffset = OffsetTail;
  }
  if (coreIdx < tilingCoreNum && subBlockIdx == 0) {
    // mm2 compute: A comes from the mm2 input workspace (T elements), C goes to y.
    mm2.SetOrgShape(tokens, n2, k2);
    mm2.SetSingleShape(curSingleM, curSingleN2, k2);
    mm2CoreOffset = workspace1Size / dataTypeSize + m2Idx * singleM2 * k2;
    outOffset = (tokensOffset + m2Idx * singleM2) * n2 + OffsetTail;
    MM2Compute(expertIdx, OffsetTail);
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Launches mm1 for this core's tile: A = slice of x, B = casted w1 in workspace,
// optional per-expert bias. Output goes to the mm1 workspace; the iteration is
// waited on later in SplitMM1 (WaitIterateAll/End).
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::MM1Compute(
    uint32_t expertIdx, uint64_t OffsetTail) {
  mm1.SetTensorA(xGm[xCoreOffset]);
  mm1.SetTensorB(w1WorkspaceGm[w1CoreOffset]);
  if (hasBias1) {
    bias1CoreOffset = expertIdx * n1 + OffsetTail;  // bias is per expert, per n-column
    mm1.SetBias(bias1Gm[bias1CoreOffset]);
  }
  mm1.template IterateAll<false>(mm1WorkspaceGm[outOffset], 0, false, true);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Launches mm2 for this core's tile: A = activated mm1 output in workspace,
// B = casted w2, optional per-expert bias; writes directly into yGm. The wait
// is deferred (mm2WaitStatue) so the next expert's SyncBeforeMM1 drains it.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::MM2Compute(
    uint32_t expertIdx, uint64_t OffsetTail) {
  mm2.SetTensorA(mm2WorkspaceGm[mm2CoreOffset]);
  mm2.SetTensorB(w2WorkspaceGm[w2CoreOffset]);

  if (hasBias2) {
    bias2CoreOffset = expertIdx * n2 + OffsetTail;  // bias is per expert, per n-column
    mm2.SetBias(bias2Gm[bias2CoreOffset]);
  }
  mm2.template IterateAll<false>(yGm[outOffset], 0, false, true);
  mm2WaitStatue = true;  // remember an iteration is in flight
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Streams this core's mm1 result tile-by-tile through UB, applies the activation
// (Elewise1Compute), and writes the activated T-typed rows into the mm2 input
// workspace at activeOffset.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::Elewise1(uint32_t curSingleM,
    uint32_t curSingleN1, uint64_t mm1OutOffset, uint64_t activeOffset) {
  // in bf16, vector uses fp32 data.compared with fp16, baseM is reduced by half.
  uint32_t realBaseM1 = ubCalSize / baseN1;  // rows of the tile that fit in one UB pass
  uint32_t curBaseM = realBaseM1;
  DataCopyPadParams padParams;
  uint32_t computeBaseN1;
  uint32_t computeSize;
  for (uint32_t offsetM = 0; offsetM < curSingleM; offsetM += realBaseM1) {
    if (offsetM + realBaseM1 >= curSingleM) {
      curBaseM = curSingleM - offsetM;  // tail rows
    }
    uint32_t curBaseN1 = baseN1;
    for (uint32_t offsetN = 0; offsetN < curSingleN1; offsetN += baseN1) {
      if (offsetN + baseN1 >= curSingleN1) {
        curBaseN1 = curSingleN1 - offsetN;  // tail columns
      }
      // mm1 output rows in UB: fp16 tiles are 32-byte aligned, fp32 tiles 64-byte aligned.
      computeBaseN1 = AlignUp<GetNumInUbBlock<T>()>(curBaseN1);
      computeSize = curBaseM * computeBaseN1;
      // copy mm1 output from workspace
      LocalTensor<c1T> inLocal = vecInQueue.AllocTensor<c1T>();

      // GM -> UB: blockLen/srcStride in bytes, dstStride in UB blocks (hence the
      // division by UB_BLOCK_UNIT_SIZE) — n1 is the full row pitch in GM.
      DataCopyParams intriParams1;
      intriParams1.blockLen = curBaseN1 * mm1DataTypeSize;
      intriParams1.blockCount = curBaseM;
      intriParams1.srcStride = (n1 - curBaseN1) * mm1DataTypeSize;
      intriParams1.dstStride = (computeBaseN1 - curBaseN1) * mm1DataTypeSize / UB_BLOCK_UNIT_SIZE;
      DataCopyPad(inLocal, mm1WorkspaceGm[mm1OutOffset + offsetM * n1 + offsetN], intriParams1, padParams);
      vecInQueue.EnQue(inLocal);

      Elewise1Compute(computeSize);

      // ResultCopy2GM: activated rows back to the mm2 input workspace
      LocalTensor<T> activeResUb = vecOutQueue.DeQue<T>();

      DataCopyParams intriParams2;
      intriParams2.blockLen = curBaseN1 * dataTypeSize;
      intriParams2.blockCount = curBaseM;
      intriParams2.srcStride = 0;
      intriParams2.dstStride = (n1 - curBaseN1) * dataTypeSize;
      DataCopyPad(mm2WorkspaceGm[activeOffset + offsetM * n1 + offsetN], activeResUb, intriParams2);
      vecOutQueue.FreeTensor(activeResUb);
    }
  }
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::InitActivationFunction(
    LocalTensor<c1T> activeResUb, uint32_t computeSize, uint32_t activeUbOffset) {
  // Pops the mm1 tile from the input queue, applies the configured activation
  // into activeResUb (tmpUb[activeUbOffset] acts as scratch for the APIs that
  // need one), then releases the input tile.
  LocalTensor<c1T> mm1ResUb = vecInQueue.DeQue<c1T>();
  LocalTensor<uint8_t> tmpLocal = tmpUb[activeUbOffset].template ReinterpretCast<uint8_t>();

  switch (ActiveType(activeType)) {
    case ActiveType::FASTGELU:
      FasterGelu(activeResUb, mm1ResUb, tmpLocal, computeSize);
      break;
    case ActiveType::RELU:
      Relu(activeResUb, mm1ResUb, computeSize);
      pipe_barrier(PIPE_V);  // vector-pipe barrier after Relu (kept from original ordering)
      break;
    case ActiveType::SILU:
      Silu(activeResUb, mm1ResUb, computeSize);
      break;
    case ActiveType::GELU:
      Gelu(activeResUb, mm1ResUb, tmpLocal, computeSize);
      break;
    default:
      break;  // unrecognized activation: leave activeResUb untouched, same as original chain
  }

  vecInQueue.FreeTensor(mm1ResUb);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Applies the activation to one tile of computeSize elements. On the fp32 mm1
// output path the activation runs in fp32 scratch carved from tmpUb and the
// result is cast to T with round-to-nearest; otherwise it writes T directly.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::Elewise1Compute(
      uint32_t computeSize) {
  LocalTensor<T> activeResUb = vecOutQueue.AllocTensor<T>();
  uint32_t activeUbOffset = 0;

  if constexpr (IsSameType<c1T, float>::value) {
    // fp32 result buffer starts computeSize floats into tmpUb; the activation's
    // scratch region starts right after it (activeUbOffset, in bytes).
    LocalTensor<float> activeResUbFp32 = tmpUb.template ReinterpretCast<float>()[computeSize];
    activeUbOffset = computeSize * sizeof(float);
    InitActivationFunction(activeResUbFp32, computeSize, activeUbOffset);
    Cast(activeResUb, activeResUbFp32, RoundMode::CAST_ROUND, computeSize);
  } else {
    InitActivationFunction(activeResUb, computeSize, activeUbOffset);
  }

  vecOutQueue.EnQue<T>(activeResUb);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::SyncBeforeMM1() {
  // Nothing to do unless a previous expert's mm2 iteration is still in flight.
  if (!mm2WaitStatue) {
    return;
  }
  // Drain the outstanding mm2 iteration, release its resources, clear the flag.
  mm2.WaitIterateAll();
  mm2.End();
  mm2WaitStatue = false;
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Loads curBaseN antiquant scale and offset values for the current n-tile
// (and, in per-group mode, the k-group selected by kInOffset) from GM into UB.
// Leaves the dequeued tensors in the scaleInUb/offsetInUb members; the caller
// is responsible for freeing them back to their queues.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::DataCopyScaleAndOffset(
    uint32_t n, uint32_t curBaseN, uint32_t alignBaseN1, uint32_t scaleOffset, uint32_t offsetN, uint32_t kInOffset,
    uint32_t groupNum, uint32_t groupSize, GlobalTensor<T> scaleWorkspaceGm, GlobalTensor<T> offsetWorkspaceGm) {
  uint32_t realScaleOffset = scaleOffset + offsetN;
  if constexpr (isPerGroup == true) {
    // Per-group layout stores one row of n scales per k-group; pick the group's row.
    realScaleOffset += (kInOffset / groupSize * n);
  }

  // copy scale and offset from GM (single row of curBaseN elements each)
  DataCopyPadParams padParams;
  DataCopyParams scaleParams;
  scaleParams.blockLen = curBaseN * dataTypeSize;  // bytes
  scaleParams.blockCount = 1;
  scaleParams.srcStride = 0;
  scaleParams.dstStride = 0;
  LocalTensor<T> scaleLocal = scaleInQueue.AllocTensor<T>();
  DataCopyPad(scaleLocal, scaleWorkspaceGm[realScaleOffset], scaleParams, padParams);
  scaleInQueue.EnQue(scaleLocal);

  LocalTensor<T> offsetLocal = offsetInQueue.AllocTensor<T>();
  DataCopyPad(offsetLocal, offsetWorkspaceGm[realScaleOffset], scaleParams, padParams);
  offsetInQueue.EnQue(offsetLocal);

  // Dequeue immediately and publish via members for CastWeightCompute.
  scaleInUb = scaleInQueue.DeQue<T>();
  scaleInUb.SetSize(alignBaseN1);
  offsetInUb = offsetInQueue.DeQue<T>();
  offsetInUb.SetSize(alignBaseN1);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Copies one (curBaseK x curBaseN) quantized weight tile from GM into UB,
// antiquantizes it via CastWeightCompute using the scales/offsets already in
// scaleInUb/offsetInUb, and writes the T-typed result to the weight workspace.
// For int4 weights, GM offsets and lengths are divided by
// reciprocalOfOneByteMultiple (2) because two int4 values pack into one byte.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::DataCopyAndComputeW(
    uint32_t n, uint32_t curBaseN, uint32_t curBaseK, uint32_t alignBaseN1, uint32_t offsetN, uint32_t offsetK,
    uint64_t wInOffset, uint64_t wOutOffset, uint64_t scaleOffset, GlobalTensor<int8_t> weightGm,
    GlobalTensor<T> wWorkspaceGm, GlobalTensor<T> scaleWorkspaceGm, GlobalTensor<T> offsetWorkspaceGm) {
  DataCopyPadParams padParams;

  // copy the quantized weight tile from GM
  LocalTensor<int8_t> inLocal = vecInQueue.AllocTensor<int8_t>();
  DataCopyParams intriParams1;
  intriParams1.blockLen = curBaseN / reciprocalOfOneByteMultiple;  // int4 weight are copied based on int8 type
  intriParams1.blockCount = curBaseK;
  intriParams1.srcStride = (n - curBaseN) / reciprocalOfOneByteMultiple;  // skip the rest of each GM row
  intriParams1.dstStride = 0;
  DataCopyPad(inLocal, weightGm[(wInOffset + offsetK * n + offsetN) / reciprocalOfOneByteMultiple],
              intriParams1, padParams);
  vecInQueue.EnQue(inLocal);

  CastWeightCompute(curBaseN, scaleOffset, offsetN, curBaseK, alignBaseN1, scaleInUb,
                    offsetInUb);

  // ResultCopy2GM: casted tile into the weight workspace at the matching position
  LocalTensor<T> w1ResUb = vecOutQueue.DeQue<T>();

  DataCopyParams intriParams2;
  intriParams2.blockLen = curBaseN * dataTypeSize;  // bytes per row
  intriParams2.blockCount = curBaseK;
  intriParams2.srcStride = (alignBaseN1 - curBaseN) / GetNumInUbBlock<T>();  // UB padding per row, in blocks
  intriParams2.dstStride = (n - curBaseN) * dataTypeSize;                    // GM row-pitch remainder, in bytes
  DataCopyPad(wWorkspaceGm[wOutOffset + offsetK * n + offsetN], w1ResUb, intriParams2);
  vecOutQueue.FreeTensor(w1ResUb);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Per-channel antiquantization of this core's [curSingleK x curSingleN] weight
// slice. The scale/offset row is loaded once per n-tile and reused for every
// k-tile; k-tiles are interleaved between the two sub-blocks via subCoreCount
// parity so both vector units share the cast work.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::CastWeightNormal(
    uint32_t n, uint32_t curSingleK, uint32_t curSingleN, uint32_t kInOffset, uint64_t wInOffset,
    uint64_t wOutOffset, uint64_t scaleOffset, uint32_t groupNum, uint32_t groupSize, GlobalTensor<int8_t> weightGm,
    GlobalTensor<T> wWorkspaceGm, GlobalTensor<T> scaleWorkspaceGm, GlobalTensor<T> offsetWorkspaceGm) {
  uint32_t newBaseN1 = bestCopySize;
  // ensure when cast weight, newBaseN1 align to 32, compute size will not larger than ubCalSize
  uint32_t newBaseK = ubCalSize / newBaseN1;
  uint32_t curBaseN = newBaseN1;
  uint32_t subCoreCount = 0;

  for (uint32_t offsetN = 0; offsetN < curSingleN; offsetN += newBaseN1) {
    if (offsetN + newBaseN1 >= curSingleN) {curBaseN = curSingleN - offsetN;}  // n tail
    uint32_t alignBaseN1 = AlignUp(curBaseN, UB_BLOCK_UNIT_SIZE * reciprocalOfOneByteMultiple);
    // One scale/offset row covers all k-tiles of this n-tile (per-channel quant).
    DataCopyScaleAndOffset(n, curBaseN, alignBaseN1, scaleOffset, offsetN, 0, groupNum,
                               groupSize, scaleWorkspaceGm, offsetWorkspaceGm);
    uint32_t curBaseK = newBaseK;
    for (uint32_t offsetK = 0; offsetK < curSingleK; offsetK += curBaseK) {
      if (unlikely(offsetK + newBaseK >= curSingleK)) {
        curBaseK = curSingleK - offsetK;  // k tail
      }
      subCoreCount += 1;
      if (subBlockIdx == subCoreCount % 2) {continue;}  // enable both subcores to cast weight

      DataCopyAndComputeW(n, curBaseN, curBaseK, alignBaseN1, offsetN, offsetK, wInOffset, wOutOffset,
                          scaleOffset, weightGm, wWorkspaceGm, scaleWorkspaceGm, offsetWorkspaceGm);
    }
    scaleInQueue.FreeTensor(scaleInUb);
    offsetInQueue.FreeTensor(offsetInUb);
  }
  // Scalar pipe waits for the final MTE3 write so later scalar logic sees it done.
  set_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
  wait_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Per-group antiquantization of this core's [curSingleK x curSingleN] weight
// slice. Unlike the per-channel path, the k loop clips each tile to the next
// scale-group boundary and reloads the scale/offset row for every k-tile so
// that one tile never spans two quantization groups.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::CastWeightPerGroup(
    uint32_t n, uint32_t curSingleK, uint32_t curSingleN, uint32_t kInOffset, uint64_t wInOffset,
    uint64_t wOutOffset, uint64_t scaleOffset, uint32_t groupNum, uint32_t groupSize, GlobalTensor<int8_t> weightGm,
    GlobalTensor<T> wWorkspaceGm, GlobalTensor<T> scaleWorkspaceGm, GlobalTensor<T> offsetWorkspaceGm) {
  uint32_t newBaseN1 = bestCopySize;
  // ensure when cast weight, newBaseN1 align to 32, compute size will not larger than ubCalSize
  uint32_t newBaseK = Min(ubCalSize / newBaseN1, groupSize);
  uint32_t curBaseN = newBaseN1;
  // Absolute k coordinate of the first group boundary past kInOffset.
  uint32_t usedGroupSize = groupSize * (kInOffset / groupSize + 1);
  uint32_t subCoreCount = 0;
  uint32_t curBaseK;

  for (uint32_t offsetN = 0; offsetN < curSingleN; offsetN += newBaseN1) {
    if (offsetN + newBaseN1 >= curSingleN) {curBaseN = curSingleN - offsetN;}  // n tail
    uint32_t alignBaseN1 = AlignUp(curBaseN, UB_BLOCK_UNIT_SIZE * reciprocalOfOneByteMultiple);
    uint32_t curUsedGroupSize = usedGroupSize;
    for (uint32_t offsetK = 0; offsetK < curSingleK; offsetK += curBaseK) {
      if (unlikely(offsetK + newBaseK + kInOffset >= curUsedGroupSize)) {
        // Tile would cross a group boundary: clip to the boundary and advance it.
        curBaseK = curUsedGroupSize - offsetK - kInOffset;
        curUsedGroupSize += groupSize;
      } else if (unlikely(offsetK + newBaseK >= curSingleK)) {
        curBaseK = curSingleK - offsetK;  // k tail
      } else {
        curBaseK = newBaseK;
      }

      subCoreCount += 1;
      if (subBlockIdx == subCoreCount % 2) {continue;}  // enable both subcores to cast weight

      // Scale/offset row depends on the k-group, so it is reloaded per k-tile.
      DataCopyScaleAndOffset(n, curBaseN, alignBaseN1, scaleOffset, offsetN, kInOffset + offsetK, groupNum,
                              groupSize, scaleWorkspaceGm, offsetWorkspaceGm);
      DataCopyAndComputeW(n, curBaseN, curBaseK, alignBaseN1, offsetN, offsetK, wInOffset, wOutOffset,
                          scaleOffset, weightGm, wWorkspaceGm, scaleWorkspaceGm, offsetWorkspaceGm);
      scaleInQueue.FreeTensor(scaleInUb);
      offsetInQueue.FreeTensor(offsetInUb);
    }
  }
  // Scalar pipe waits for the final MTE3 write so later scalar logic sees it done.
  set_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
  wait_flag(PIPE_MTE3, PIPE_S, EVENT_ID7);
}

template <typename T, typename wT, typename mm1Type, typename mm2Type, typename c1T, typename c2T, typename biasT,
          bool isPerGroup>
// Antiquantizes one UB tile: dequeues the quantized (wT) tile, applies
// AscendAntiQuant with the given per-column scale/offset rows, and enqueues the
// resulting T tile for the caller to copy back to GM.
__aicore__ inline void FFNAntiQuant<T, wT, mm1Type, mm2Type, c1T, c2T, biasT, isPerGroup>::CastWeightCompute(
    uint32_t curBaseN, uint32_t scaleOffset, uint32_t offsetN, uint32_t curCalcK, uint32_t curCalcAlignN,
    LocalTensor<T> scaleInUb, LocalTensor<T> offsetInUb) {
  LocalTensor<wT> w1InUb = vecInQueue.DeQue<wT>();
  w1InUb.SetSize(curCalcK * curCalcAlignN);
  LocalTensor<T> w1ResUb = vecOutQueue.AllocTensor<T>();

  LocalTensor<uint8_t> tmpLocal = tmpUb.template ReinterpretCast<uint8_t>();
  // Scale/offset are a single row broadcast over curCalcK rows of curCalcAlignN columns.
  AntiQuantShapeInfo shapeInfo = {1, curCalcAlignN, 1, curCalcAlignN};
  // fp16 tempbuff is 0, bf16 tempbuff = offset.GetSize() * 2 * sizeof(float) + 64 * K * sizeof(float)
  AscendAntiQuant<wT, T, false>(w1ResUb, w1InUb, offsetInUb, scaleInUb, tmpLocal, curCalcK, shapeInfo);

  vecInQueue.FreeTensor(w1InUb);
  vecOutQueue.EnQue<T>(w1ResUb);
}
}  // namespace FFN

#endif  // FFN_ANTI_QUANT_CPP
