/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file ascend_antiquant_c220_impl.h
 * \brief
 */
#ifndef IMPL_QUANTIZATION_ANTIQUANT_ASCEND_ANTIQUANT_C220_IMPL_H
#define IMPL_QUANTIZATION_ANTIQUANT_ASCEND_ANTIQUANT_C220_IMPL_H

#include "kernel_tensor.h"
#include "kernel_operator_intf.h"
#include "kernel_pop_stack_buffer.h"
#include "ascend_antiquant_common.h"

namespace AscendC {
template <typename InputDataType, bool withOffset = true>
__aicore__ inline void AntiQuantInnerLoop(const LocalTensor<bfloat16_t> &dst, const LocalTensor<InputDataType> &src,
    const LocalTensor<bfloat16_t> &offset, const LocalTensor<bfloat16_t> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t calCount)
{
    // Computes dst = scale * (src + offset); the offset step is skipped when !withOffset.
    // Pipeline: src -> fp16 -> fp32, add fp32 offset, multiply fp32 scale, round to bf16.
    // sharedTmpBuffer layout (byte offsets):
    //   [0, calCount * 4)             fp32 result buffer
    //   [calCount * 2, calCount * 4)  fp16 staging buffer -- deliberately the second half
    //                                 of the fp32 result region; the fp16->fp32 cast reads
    //                                 element i at byte calCount*2 + 2*i and writes byte
    //                                 4*i, so the write never passes the read position
    //   [calCount * 4, ...)           fp32 buffer for the casted offset / scale vectors
    uint32_t srcFp16Pos = calCount * sizeof(bfloat16_t);
    uint32_t offsetFp32Pos = calCount * sizeof(float);
    auto fp16TmpBuffer = sharedTmpBuffer[srcFp16Pos].ReinterpretCast<half>();
    auto offsetBuffer = sharedTmpBuffer[offsetFp32Pos].ReinterpretCast<float>();
    auto resultBuffer = sharedTmpBuffer.ReinterpretCast<float>();

    // Source repeat stride depends on the input element width: int8 rows take half the
    // default stride, int4 rows a quarter.
    UnaryRepeatParams src2f16unaryParams;
    if constexpr(IsSameType<InputDataType, int8_t>::value) {
        src2f16unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    } else {
        src2f16unaryParams.srcRepStride = ONE_FOURTH_DEFAULT_REPEAT_STRIDE;
    }
    // BUGFIX: an unconditional `srcRepStride = ONE_FOURTH_DEFAULT_REPEAT_STRIDE;` used to
    // follow this branch, clobbering the int8_t stride chosen above (making that branch
    // dead code). It was removed so each input type keeps its correct stride, matching
    // the scalar-offset overload below.
    UnaryRepeatParams unaryParams;
    unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams f322f16Params;
    f322f16Params.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    BinaryRepeatParams binaryParams;

    SetVectorMask<float, MaskMode::COUNTER>(0, calCount);
    Cast<half, InputDataType, false>(fp16TmpBuffer, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, src2f16unaryParams);
    PipeBarrier<PIPE_V>();
    Cast<float, half, false>(resultBuffer, fp16TmpBuffer, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
    PipeBarrier<PIPE_V>();
    if constexpr (withOffset) {
        Cast<float, bfloat16_t, false>(offsetBuffer, offset, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
        PipeBarrier<PIPE_V>();
        Add<float, false>(resultBuffer, resultBuffer, offsetBuffer, MASK_PLACEHOLDER, 1, binaryParams);
        PipeBarrier<PIPE_V>();
    }
    // scale reuses offsetBuffer: the casted offset values are no longer needed here
    Cast<float, bfloat16_t, false>(offsetBuffer, scale, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
    PipeBarrier<PIPE_V>();
    Mul<float, false>(resultBuffer, resultBuffer, offsetBuffer, MASK_PLACEHOLDER, 1, binaryParams);
    PipeBarrier<PIPE_V>();
    Cast<bfloat16_t, float, false>(dst, resultBuffer, RoundMode::CAST_RINT, MASK_PLACEHOLDER, 1, f322f16Params);
    PipeBarrier<PIPE_V>();
}

template <typename InputDataType, bool withOffset = true>
__aicore__ inline void AntiQuantInnerLoop(const LocalTensor<bfloat16_t> &dst, const LocalTensor<InputDataType> &src,
    const bfloat16_t offset, const bfloat16_t scale, const LocalTensor<uint8_t> &sharedTmpBuffer,
    const uint32_t calCount)
{
    // Scalar-parameter variant: dst = scale * (src + offset), with the offset step
    // compiled out when withOffset is false. The fp16 staging area occupies the second
    // half of the fp32 result area inside sharedTmpBuffer (same overlap trick as the
    // tensor-parameter overload above).
    const uint32_t halfStagePos = calCount * sizeof(bfloat16_t);
    auto halfStageBuf = sharedTmpBuffer[halfStagePos].ReinterpretCast<half>();
    auto f32ResultBuf = sharedTmpBuffer.ReinterpretCast<float>();

    // Source repeat stride follows the input element width (int8: half default stride;
    // int4: quarter default stride).
    UnaryRepeatParams castInParams;
    if constexpr (IsSameType<InputDataType, int8_t>::value) {
        castInParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    } else {
        castInParams.srcRepStride = ONE_FOURTH_DEFAULT_REPEAT_STRIDE;
    }
    UnaryRepeatParams halfToFloatParams;
    halfToFloatParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams floatToBf16Params;
    floatToBf16Params.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams scalarOpParams;

    SetVectorMask<float, MaskMode::COUNTER>(0, calCount);
    Cast<half, InputDataType, false>(halfStageBuf, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, castInParams);
    PipeBarrier<PIPE_V>();
    Cast<float, half, false>(f32ResultBuf, halfStageBuf, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, halfToFloatParams);
    PipeBarrier<PIPE_V>();
    if constexpr (withOffset) {
        Adds<float, false>(f32ResultBuf, f32ResultBuf, ToFloat(offset), MASK_PLACEHOLDER, 1, scalarOpParams);
        PipeBarrier<PIPE_V>();
    }
    Muls<float, false>(f32ResultBuf, f32ResultBuf, ToFloat(scale), MASK_PLACEHOLDER, 1, scalarOpParams);
    PipeBarrier<PIPE_V>();
    Cast<bfloat16_t, float, false>(dst, f32ResultBuf, RoundMode::CAST_RINT, MASK_PLACEHOLDER, 1, floatToBf16Params);
    PipeBarrier<PIPE_V>();
}

template <typename InputDataType>
__aicore__ inline void AscendAntiQuantNoTransposePerformance(const LocalTensor<bfloat16_t> &dst,
    const LocalTensor<InputDataType> &src, const LocalTensor<bfloat16_t> &offset, const LocalTensor<bfloat16_t> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t K, const uint32_t N)
{
    // Performance path for the non-transposed layout (src: K rows x N columns,
    // offset / scale: shape [N]). Works in column slices of ANTIQUANT_SINGLE_N_SIZE_BF16:
    // cast slice to fp16 then fp32, add the per-column fp32 offset, multiply by the
    // per-column fp32 scale, round back to bf16.
    // NOTE(review): assumes PreCast() already stored the fp32 offset at sharedTmpBuffer[0]
    // and the fp32 scale at byte offset N * sizeof(float) -- confirm against caller.
    uint32_t posOffsetScale = N * sizeof(float) * ANTIQUANT_TWO;
    // fp16 staging begins halfway into the fp32 working tile: 64 * K halves occupy half
    // the bytes of 64 * K floats (same overlap layout used elsewhere in this file).
    uint32_t posCast = posOffsetScale + ANTIQUANT_SINGLE_N_SIZE_BF16 * K * sizeof(half);
    auto fp16TmpBuffer = sharedTmpBuffer[posCast].ReinterpretCast<half>();
    auto resultBuffer = sharedTmpBuffer[posOffsetScale].ReinterpretCast<float>();

    // int4 source: one input row is N / 2 bytes
    UnaryRepeatParams s42f16unaryParams;
    s42f16unaryParams.srcRepStride = N / ANTIQUANT_TWO / ONE_BLK_SIZE;
    s42f16unaryParams.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    // int8 source: one input row is N bytes
    UnaryRepeatParams s82f16unaryParams;
    s82f16unaryParams.srcRepStride = N * sizeof(int8_t) / ONE_BLK_SIZE;
    s82f16unaryParams.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams f162f32unaryParams;
    f162f32unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    // src1RepStride = 0: every repeat (row) reuses the same offset / scale values
    BinaryRepeatParams binaryParams;
    binaryParams.src1RepStride = 0;
    // dst rows are N bf16 elements apart
    UnaryRepeatParams f322f16Params;
    f322f16Params.dstRepStride = N * sizeof(half) / ONE_BLK_SIZE;

    for (uint32_t i = 0; i < N / ANTIQUANT_SINGLE_N_SIZE_BF16; i++) {
        SetMaskNorm();
        SetVectorMask<half, MaskMode::NORMAL>(ANTIQUANT_SINGLE_N_SIZE_BF16);
        if constexpr (IsSameType<InputDataType, int4b_t>::value) {
            // 1.cast 64K to fp16, use norm mode
            Cast<half, int4b_t, false>(fp16TmpBuffer, src[ANTIQUANT_SINGLE_N_SIZE_BF16 * i], RoundMode::CAST_NONE,
                MASK_PLACEHOLDER, K, s42f16unaryParams);
        } else {
            // 1.cast 64K to fp16, use norm mode
            Cast<half, int8_t, false>(fp16TmpBuffer, src[ANTIQUANT_SINGLE_N_SIZE_BF16 * i], RoundMode::CAST_NONE,
                MASK_PLACEHOLDER, K, s82f16unaryParams);
        }
        PipeBarrier<PIPE_V>();
        // cast 64K to fp32, use count mode
        SetMaskCount();
        SetVectorMask<float, MaskMode::COUNTER>(0, ANTIQUANT_SINGLE_N_SIZE_BF16 * K);
        Cast<float, half, false>(resultBuffer, fp16TmpBuffer, RoundMode::CAST_NONE,
            MASK_PLACEHOLDER, K, f162f32unaryParams);
        PipeBarrier<PIPE_V>();
        // 2.add offset (fp32 offset for this column slice, staged at the buffer head)
        auto offsetBuffer = sharedTmpBuffer[ANTIQUANT_SINGLE_N_SIZE_BF16 * i * sizeof(float)].ReinterpretCast<float>();
        Add<float, false>(resultBuffer, resultBuffer, offsetBuffer, MASK_PLACEHOLDER, K, binaryParams);
        PipeBarrier<PIPE_V>();
        // 3.mul scale (fp32 scale region starts N floats into the buffer)
        auto scaleBuffer = sharedTmpBuffer[N * sizeof(float) +
            ANTIQUANT_SINGLE_N_SIZE_BF16 * i * sizeof(float)].ReinterpretCast<float>();
        Mul<float, false>(resultBuffer, resultBuffer, scaleBuffer, MASK_PLACEHOLDER, K, binaryParams);
        PipeBarrier<PIPE_V>();
        // 4.cast back to bf16
        Cast<bfloat16_t, float, false>(dst[ANTIQUANT_SINGLE_N_SIZE_BF16 * i], resultBuffer,
            RoundMode::CAST_RINT, MASK_PLACEHOLDER, K, f322f16Params);
        PipeBarrier<PIPE_V>();
    }
    // restore the default norm mask mode for subsequent ops
    SetMaskNorm();
    ResetMask();
}

template <typename InputDataType>
__aicore__ inline void AscendAntiQuantNoTransposePerformanceTail(const LocalTensor<bfloat16_t> &dst,
    const LocalTensor<InputDataType> &src, const LocalTensor<bfloat16_t> &offset, const LocalTensor<bfloat16_t> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t K, const uint32_t N, const uint32_t mask)
{
    // Tail path of the non-transposed performance implementation: handles the last
    // (N % ANTIQUANT_SINGLE_N_SIZE_BF16) columns; their count is passed in `mask`.
    // NOTE(review): `src` / `dst` are presumably already offset to the first tail column
    // by the caller, and PreCast() has stored the fp32 offset at sharedTmpBuffer[0] and
    // the fp32 scale at byte offset N * sizeof(float) -- confirm against caller.
    uint32_t index = N / ANTIQUANT_SINGLE_N_SIZE_BF16 * ANTIQUANT_SINGLE_N_SIZE_BF16; // first tail column
    uint32_t posOffset = N * sizeof(float);
    uint32_t posOffsetScale = posOffset * ANTIQUANT_TWO;
    // fp16 staging starts halfway into the fp32 working tile (same overlap layout as the
    // non-tail path).
    uint32_t posCast = posOffsetScale + ANTIQUANT_SINGLE_N_SIZE_BF16 * K * sizeof(half);
    auto fp16TmpBuffer = sharedTmpBuffer[posCast].ReinterpretCast<half>();
    auto resultBuffer = sharedTmpBuffer[posOffsetScale].ReinterpretCast<float>();
    auto offsetBuffer = sharedTmpBuffer[index * sizeof(float)].ReinterpretCast<float>();
    auto scaleBuffer = sharedTmpBuffer[posOffset + index * sizeof(float)].ReinterpretCast<float>();

    UnaryRepeatParams s42f16unaryParams;
    s42f16unaryParams.srcRepStride = N / ANTIQUANT_TWO / ONE_BLK_SIZE;
    // FIX: the int4 path previously left dstRepStride at its default, so fp16 staging
    // rows were written with a different stride than f162f32unaryParams reads them with.
    // Set it to match both the int8 path below and the non-tail implementation.
    s42f16unaryParams.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams s82f16unaryParams;
    s82f16unaryParams.srcRepStride = N * sizeof(int8_t) / ONE_BLK_SIZE;
    s82f16unaryParams.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams f162f32unaryParams;
    f162f32unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    // src1RepStride = 0: every repeat (row) reuses the same offset / scale values
    BinaryRepeatParams binaryParams;
    binaryParams.src1RepStride = 0;
    // dst rows are N bf16 elements apart
    UnaryRepeatParams f322f16Params;
    f322f16Params.dstRepStride = N * sizeof(bfloat16_t) / ONE_BLK_SIZE;

    // 1.cast the tail columns (K repeats of `mask` elements) to fp16, use norm mode
    SetMaskNorm();
    SetVectorMask<half, MaskMode::NORMAL>(mask);
    if constexpr (IsSameType<InputDataType, int4b_t>::value) {
        Cast<half, int4b_t, false>(fp16TmpBuffer, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, K, s42f16unaryParams);
    } else {
        Cast<half, int8_t, false>(fp16TmpBuffer, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, K, s82f16unaryParams);
    }
    PipeBarrier<PIPE_V>();

    // cast the staged fp16 values to fp32
    Cast<float, half, false>(resultBuffer, fp16TmpBuffer, RoundMode::CAST_NONE,
                             MASK_PLACEHOLDER, K, f162f32unaryParams);
    PipeBarrier<PIPE_V>();
    // 2.add the per-column fp32 offset
    Add<float, false>(resultBuffer, resultBuffer, offsetBuffer, MASK_PLACEHOLDER, K, binaryParams);
    PipeBarrier<PIPE_V>();
    // 3.multiply by the per-column fp32 scale
    Mul<float, false>(resultBuffer, resultBuffer, scaleBuffer, MASK_PLACEHOLDER, K, binaryParams);
    PipeBarrier<PIPE_V>();
    // 4.cast back to bf16
    Cast<bfloat16_t, float, false>(dst, resultBuffer, RoundMode::CAST_RINT, MASK_PLACEHOLDER, K, f322f16Params);
    PipeBarrier<PIPE_V>();
    ResetMask();
}

template <typename InputDataType>
__aicore__ inline void PreCast(const LocalTensor<bfloat16_t> &dst, const LocalTensor<InputDataType> &src,
    const LocalTensor<bfloat16_t> &offset, const LocalTensor<bfloat16_t> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t K)
{
    // Pre-casts the bf16 offset and scale vectors to fp32 into sharedTmpBuffer:
    // offset occupies [0, N * 4) bytes, scale the following N * 4 bytes.
    // (dst / src / K are unused here; the signature mirrors the other helpers.)
    const uint32_t elemCount = offset.GetSize();
    const uint32_t scalePos = elemCount * sizeof(float);
    const uint32_t elemsPerRepeat = ONE_REPEAT_BYTE_SIZE / sizeof(bfloat16_t);
    // ceil(elemCount / elemsPerRepeat)
    const uint32_t repeatTimes = elemCount / elemsPerRepeat + (elemCount % elemsPerRepeat != 0 ? 1 : 0);
    auto offsetF32 = sharedTmpBuffer.ReinterpretCast<float>();
    auto scaleF32 = sharedTmpBuffer[scalePos].ReinterpretCast<float>();

    UnaryRepeatParams castParams;
    castParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;

    SetMaskCount();
    SetVectorMask<half, MaskMode::COUNTER>(0, elemCount);
    Cast<float, bfloat16_t, false>(
        offsetF32, offset, RoundMode::CAST_NONE, MASK_PLACEHOLDER, repeatTimes, castParams);
    PipeBarrier<PIPE_V>();
    Cast<float, bfloat16_t, false>(
        scaleF32, scale, RoundMode::CAST_NONE, MASK_PLACEHOLDER, repeatTimes, castParams);
    PipeBarrier<PIPE_V>();
}

template <typename OutputDataType>
__aicore__ inline bool AntiQuantCheckPerformanceMode(const LocalTensor<OutputDataType> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t K)
{
    // The performance path only applies to bf16 output; it needs temporary space for
    // 2 * N fp32 values (offset + scale) plus a 64-column fp32 working tile of K rows.
    if constexpr (!IsSameType<OutputDataType, bfloat16_t>::value) {
        return true;
    } else {
        const uint32_t requiredSize =
            scale.GetSize() * ANTIQUANT_TWO * sizeof(float) + ANTIQUANT_SINGLE_N_SIZE_BF16 * K * sizeof(float);
        return sharedTmpBuffer.GetSize() >= requiredSize;
    }
}

// scale * (src + offset)   src: N * K, scale: N, offset: N  NOffset: offset used for tmpTensorOffset, tmpTensorScale
// For now, calCount must equal to N * K then can use brcb
// Requires K to be a multiple of ANTIQUANT_SINGLE_N_SIZE (K = n * 64) and
// params.tempTensorOffset / tempTensorScale to already hold the brcb-broadcast fp32
// offset / scale values (see CastAndBrcb).
template <typename InputDataType, typename OutputDataType, bool isOffset>
__aicore__ inline void CalculationMax(const LocalTensor<InputDataType> &src, const LocalTensor<OutputDataType> &dst,
    AntiquantParams<float> &params, const uint32_t calCount, const uint32_t N, const uint32_t K, const uint32_t NOffset)
{
    // store FP16 result in second half of FP32 tmpTensor to avoid input FP16 being replaced
    uint32_t srcFp16Pos = calCount / ANTIQUANT_TWO; // therefore start from (calCount / 2)th FP32 tmpTensor
    auto fp16TmpBuffer = params.tempTensorInput[srcFp16Pos].ReinterpretCast<half>();

    UnaryRepeatParams unaryParams;
    unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    UnaryRepeatParams f322f16Params;
    f322f16Params.dstRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    uint32_t count = K / ANTIQUANT_SINGLE_N_SIZE; // times of for loop   K = n * 64
    // src1BlkStride = 0: need same line for add and mul
    // src1RepStride = 1: 1 block for 64 num calculation
    // dst, src0RepStride = count * 8: one repeat calculate 64 num, need to jump n * 8 block
    BinaryRepeatParams binaryParams(1, 1, 0, count * DEFAULT_REPEAT_STRIDE, count * DEFAULT_REPEAT_STRIDE, 1);

    SetVectorMask<half, MaskMode::COUNTER>(0, calCount);
    // INT8 -> FP16
    Cast<half, int8_t, false>(fp16TmpBuffer, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
    PipeBarrier<PIPE_V>();
    // FP16 -> FP32
    Cast<float, half, false>(params.tempTensorInput, fp16TmpBuffer, RoundMode::CAST_NONE,
        MASK_PLACEHOLDER, 1, unaryParams);
    PipeBarrier<PIPE_V>();

    SetVectorMask<float, MaskMode::COUNTER>(0, ANTIQUANT_SINGLE_N_SIZE * N); // brcb  src1 has N line, 1 line has 64 num
    for (uint32_t i = 0; i < count; i++) {
        // scale * (src + offset)
        uint32_t curOffset = i * ANTIQUANT_SINGLE_N_SIZE;
        // calculate the first group (0 ~ 64) in first loop, second group (64 ~ 128) in second loop
        if constexpr (isOffset) {
            Add<float, false>(params.tempTensorInput[curOffset], params.tempTensorInput[curOffset],
                params.tempTensorOffset[NOffset], MASK_PLACEHOLDER, N, binaryParams);
            PipeBarrier<PIPE_V>();
        }
        Mul<float, false>(params.tempTensorInput[curOffset], params.tempTensorInput[curOffset],
            params.tempTensorScale[NOffset], MASK_PLACEHOLDER, N, binaryParams);
        PipeBarrier<PIPE_V>();
    }

    // FP32 -> BF16
    SetVectorMask<float, MaskMode::COUNTER>(0, calCount);
    Cast<bfloat16_t, float, false>(dst, params.tempTensorInput, RoundMode::CAST_RINT,
        MASK_PLACEHOLDER, 1, f322f16Params);
    PipeBarrier<PIPE_V>();
}

// Brcb version: partitions the caller-supplied stack buffer into working regions.
template <typename OutputDataType>
__aicore__ inline void GetAntiquantTensorInfo(const LocalTensor<OutputDataType> &scale,
    const LocalTensor<float> &stackBuffer, AntiquantParams<float> &params)
{
    // scale / offset have shape [N]; each region holds the value broadcast 8x by Brcb.
    const uint32_t n = scale.GetSize();
    const uint32_t broadcastLen = ANTIQUANT_BRCB_BASE * n;            // N values -> 8 * N fp32
    params.tempTensorOffset = stackBuffer[0];                         // 8 * N fp32
    params.tempTensorScale = stackBuffer[broadcastLen];               // 8 * N fp32
    params.tempTensorInput = stackBuffer[broadcastLen * ANTIQUANT_TWO]; // fp32 input work area
}

// 1. BF16 / FP16 -> cast -> FP32      2. N -> brcb -> 8 * N
// nLength means shape [N] for offset and scale
template <typename OutputDataType, bool withOffset = true>
__aicore__ inline void CastAndBrcb(const LocalTensor<OutputDataType> &offset, const LocalTensor<OutputDataType> &scale,
    AntiquantParams<float> &params, const uint32_t nLength)
{
    // Casts the bf16/fp16 offset and scale vectors to fp32, then broadcasts each value
    // into a full 8-element block via Brcb. The fp32 cast result is staged at the tail
    // of each brcb destination region (element 8 * N - nLength) so the broadcast writes
    // from the head of the same region.
    // Expects counter mask mode on entry; switches to norm mode for Brcb and restores
    // counter mode before returning.
    UnaryRepeatParams unaryParams;
    unaryParams.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;
    uint32_t N = offset.GetSize();

    // shape [N]  BF16/ FP16 offset, scale -> cast -> FP32
    SetVectorMask<half, MaskMode::COUNTER>(0, nLength);
    if constexpr (withOffset) {
        Cast<float, OutputDataType, false>(params.tempTensorOffset[ANTIQUANT_BRCB_BASE * N - nLength], offset,
            RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
    }
    Cast<float, OutputDataType, false>(params.tempTensorScale[ANTIQUANT_BRCB_BASE * N - nLength], scale,
        RoundMode::CAST_NONE, MASK_PLACEHOLDER, 1, unaryParams);
    PipeBarrier<PIPE_V>();

    constexpr uint16_t brcbDstBlkStride = 1;                   // 1 num -> 8 num(1 block)
    constexpr uint16_t brcbDstRepStride = ANTIQUANT_BRCB_BASE; // 1 brcb: 8 num -> 64 num
    const uint8_t repeatTimes = nLength / ANTIQUANT_BRCB_BASE; // 1 brcb cmd needs 8 input num
    BrcbRepeatParams brcbParams(brcbDstBlkStride, brcbDstRepStride);

    SetMaskNorm();
    ResetMask();
    // brcb: 1 FP32 A -> 1 block contains 8 FP32 A, after 1 block, do the same to the next FP32 B
    if constexpr (withOffset) {
        Brcb(params.tempTensorOffset, params.tempTensorOffset[ANTIQUANT_BRCB_BASE * N - nLength],
            repeatTimes, brcbParams);
        PipeBarrier<PIPE_V>();
    }
    Brcb(params.tempTensorScale, params.tempTensorScale[ANTIQUANT_BRCB_BASE * N - nLength], repeatTimes, brcbParams);
    PipeBarrier<PIPE_V>();
    SetMaskCount();
}

// scale * (src + offset)   src: N * K, scale: N, offset: N
// For now, calCount must equal to N * K then can use brcb   calCount: 64 * N
// Processes one N x 64 column slice of src. params.tempTensorOffset / tempTensorScale must
// already hold the brcb-broadcast fp32 offset / scale (see CastAndBrcb).
// srcN: number of source rows (src.GetSize() / K).
template <typename InputDataType, typename OutputDataType, bool withOffset>
__aicore__ inline void CalculationMin(const LocalTensor<InputDataType> &src, const LocalTensor<OutputDataType> &dst,
    AntiquantParams<float> &params, const uint32_t calCount, const uint32_t N, const uint32_t srcN, const uint32_t K)
{
    // store FP16 result in second half of FP32 tmpTensor to avoid input FP16 being replaced
    uint32_t srcFp16Pos = calCount / ANTIQUANT_TWO; // therefore start from (calCount / 2)th FP32 tmpTensor
    uint32_t n = K / ANTIQUANT_SINGLE_N_SIZE; // K = 64 * n
    UnaryRepeatParams unaryParamsInt8Fp16;
    unaryParamsInt8Fp16.srcRepStride = ANTIQUANT_TWO * n; // K(num) / 32(num per block)
    // one repeat calculate 64 int8 -> 64 fp16, 4 block
    unaryParamsInt8Fp16.dstRepStride = ANTIQUANT_SINGLE_N_SIZE / (ONE_BLK_SIZE / sizeof(half));
    UnaryRepeatParams unaryParamsFp16Fp32;
    unaryParamsFp16Fp32.srcRepStride = HALF_DEFAULT_REPEAT_STRIDE;

    // Must use NORM for calculation instead of counter
    SetMaskNorm();
    SetVectorMask<half, MaskMode::NORMAL>(0, FULL_MASK); // the first 64 num for calculation
    // INT8 -> FP16
    auto fp16TmpBuffer = params.tempTensorInput[srcFp16Pos].ReinterpretCast<half>();
    Cast<half, int8_t, false>(fp16TmpBuffer, src, RoundMode::CAST_NONE, MASK_PLACEHOLDER, N, unaryParamsInt8Fp16);
    PipeBarrier<PIPE_V>();
    // FP16 -> FP32
    Cast<float, half, false>(
        params.tempTensorInput, fp16TmpBuffer, RoundMode::CAST_NONE, MASK_PLACEHOLDER, N, unaryParamsFp16Fp32);
    PipeBarrier<PIPE_V>();

    SetMaskCount();
    BinaryRepeatParams binaryParams;
    binaryParams.src1BlkStride = 0; // same line for add and mul
    binaryParams.src1RepStride = 1; // one line for 64 num calculation

    SetVectorMask<float, MaskMode::COUNTER>(0, ANTIQUANT_SINGLE_N_SIZE * N);
    // scale * (src + offset)
    if constexpr (withOffset) {
        Add<float, false>(
            params.tempTensorInput, params.tempTensorInput, params.tempTensorOffset, MASK_PLACEHOLDER, N, binaryParams);
        PipeBarrier<PIPE_V>();
    }
    Mul<float, false>(
        params.tempTensorInput, params.tempTensorInput, params.tempTensorScale, MASK_PLACEHOLDER, N, binaryParams);
    PipeBarrier<PIPE_V>();

    // FP32 -> BF16
    SetMaskNorm();
    SetVectorMask<float, MaskMode::NORMAL>(0, FULL_MASK);
    UnaryRepeatParams f322f16Params;
    // dst rows are K (= 64 * n) half-size elements apart
    f322f16Params.dstRepStride = ANTIQUANT_SINGLE_N_SIZE * n / (ONE_BLK_SIZE / sizeof(half));
    Cast<OutputDataType, float, false>(
        dst, params.tempTensorInput, RoundMode::CAST_RINT, MASK_PLACEHOLDER, srcN, f322f16Params);
    PipeBarrier<PIPE_V>();
}

// Method2: brcb-based antiquant with offset; minimum working slice is N * 64.
template <typename InputDataType, typename OutputDataType>
__aicore__ inline void CalculateByBrcbMin(const LocalTensor<OutputDataType> &dst, const LocalTensor<InputDataType> &src,
    const LocalTensor<OutputDataType> &offset, const LocalTensor<OutputDataType> &scale,
    const LocalTensor<float> &stackBuffer, const uint32_t calCount, const uint32_t N, const uint32_t K)
{
    AntiquantParams<float> workBuffers;
    GetAntiquantTensorInfo<OutputDataType>(scale, stackBuffer, workBuffers);

    SetMaskCount();
    // cast offset / scale to fp32 and broadcast each value across a full block
    CastAndBrcb<OutputDataType, true>(offset, scale, workBuffers, N);

    const uint32_t sliceCount = K / ANTIQUANT_SINGLE_N_SIZE;
    const uint32_t srcRows = src.GetSize() / K;
    // process the N x K input as consecutive N x 64 column slices
    for (uint32_t sliceIdx = 0; sliceIdx < sliceCount; ++sliceIdx) {
        const uint32_t sliceOffset = ANTIQUANT_SINGLE_N_SIZE * sliceIdx;
        CalculationMin<InputDataType, OutputDataType, true>(src[sliceOffset], dst[sliceOffset], workBuffers,
            ANTIQUANT_SINGLE_N_SIZE * N, N, srcRows, K);
    }
}

// Scale-only variant: brcb-based antiquant without an offset vector.
template <typename InputDataType, typename OutputDataType>
__aicore__ inline void CalculateByBrcbMin(const LocalTensor<OutputDataType> &dst, const LocalTensor<InputDataType> &src,
    const LocalTensor<OutputDataType> &scale, const LocalTensor<float> &stackBuffer,
    const uint32_t calCount, const uint32_t N, const uint32_t K)
{
    AntiquantParams<float> workBuffers;
    GetAntiquantTensorInfo<OutputDataType>(scale, stackBuffer, workBuffers);

    SetMaskCount();
    // no offset tensor: pass scale for both slots, CastAndBrcb<.., false> only uses scale
    CastAndBrcb<OutputDataType, false>(scale, scale, workBuffers, N);

    const uint32_t sliceCount = K / ANTIQUANT_SINGLE_N_SIZE;
    const uint32_t srcRows = src.GetSize() / K;
    // process the N x K input as consecutive N x 64 column slices
    for (uint32_t sliceIdx = 0; sliceIdx < sliceCount; ++sliceIdx) {
        const uint32_t sliceOffset = ANTIQUANT_SINGLE_N_SIZE * sliceIdx;
        CalculationMin<InputDataType, OutputDataType, false>(src[sliceOffset], dst[sliceOffset], workBuffers,
            ANTIQUANT_SINGLE_N_SIZE * N, N, srcRows, K);
    }
}

// int4b_t input is not supported by the brcb path (scale-only overload); callers are
// expected to route int4 input to the scalar implementation instead.
template <typename OutputDataType>
__aicore__ inline void CalculateByBrcbMin(const LocalTensor<OutputDataType> &dst, const LocalTensor<int4b_t> &src,
    const LocalTensor<OutputDataType> &scale, const LocalTensor<float> &stackBuffer,
    const uint32_t calCount, const uint32_t N, const uint32_t K)
{
    ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR, "unsupported type: int4b_t for AntiQuant"); });
}

// int4b_t input is not supported by the brcb path (offset + scale overload); callers are
// expected to route int4 input to the scalar implementation instead.
template <typename OutputDataType>
__aicore__ inline void CalculateByBrcbMin(const LocalTensor<OutputDataType> &dst, const LocalTensor<int4b_t> &src,
    const LocalTensor<OutputDataType> &offset, const LocalTensor<OutputDataType> &scale,
    const LocalTensor<float> &stackBuffer, const uint32_t calCount, const uint32_t N, const uint32_t K)
{
    ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR, "unsupported type: int4b_t for AntiQuant"); });
}

template <bool withOffset = true>
__aicore__ inline void AntiQuantFp16Brcb(const LocalTensor<half> &scale, const LocalTensor<half> &offset,
    AntiquantParams<half> &params, const uint32_t scaleN)
{
    // Broadcast each fp16 scale (and, when withOffset, offset) value into one full block
    // of params.tempTensorScale / tempTensorOffset via Brcb.
    BrcbRepeatParams brcbParams(DEFAULT_BLK_STRIDE, DEFAULT_REPEAT_STRIDE);
    const uint8_t repeatTimes = scaleN / BRCB_BROADCAST_NUMBER; // one Brcb repeat consumes 8 inputs
    SetMaskNorm();
    ResetMask();
    if constexpr (withOffset) {
        Brcb(params.tempTensorOffset, offset, repeatTimes, brcbParams);
        PipeBarrier<PIPE_V>();
    }
    Brcb(params.tempTensorScale, scale, repeatTimes, brcbParams);
    PipeBarrier<PIPE_V>();
}

template <typename InputDataType, typename OutputDataType>
__aicore__ inline void AscendAntiQuantTranspose(const LocalTensor<OutputDataType> &dst,
    const LocalTensor<InputDataType> &src, const LocalTensor<OutputDataType> &offset,
    const LocalTensor<OutputDataType> &scale, const LocalTensor<uint8_t> &sharedTmpBuffer,
    const uint32_t K, const AntiQuantShapeInfo& shapeInfo = {})
{
    // Antiquant for the transposed layout with an offset vector (src: N x K,
    // offset / scale: shape [N]).
    const uint32_t elementCount = src.GetSize();
    const uint32_t nSize = offset.GetSize();
    // half output and int4 input always use the scalar implementation
    if constexpr (IsSameType<OutputDataType, half>::value || IsSameType<InputDataType, int4b_t>::value) {
        return AntiQuantImplScalar(dst, src, offset, scale, sharedTmpBuffer, elementCount, K, shapeInfo);
    }
    // the brcb fast path needs K to be a multiple of 64 and within the supported maximum
    if (K > ANTIQUANT_MAX_K * ANTIQUANT_BRCB_BASE || (K % ANTIQUANT_SINGLE_N_SIZE != 0)) {
        return AntiQuantImplScalar(dst, src, offset, scale, sharedTmpBuffer, elementCount, K, shapeInfo);
    }

    auto f32Stack = sharedTmpBuffer.ReinterpretCast<float>();
    // work area: N * 64 fp32 input slice + 2 * (8 * N) fp32 for broadcast offset / scale
    const uint32_t workAreaSize = nSize * ANTIQUANT_SINGLE_N_SIZE + nSize * ANTIQUANT_BRCB_BASE * ANTIQUANT_TWO;
    f32Stack.SetSize(workAreaSize);
    CalculateByBrcbMin(dst, src, offset, scale, f32Stack, elementCount, nSize, K);
}

template <typename InputDataType, typename OutputDataType>
__aicore__ inline void AscendAntiQuantTranspose(const LocalTensor<OutputDataType> &dst,
    const LocalTensor<InputDataType> &src, const LocalTensor<OutputDataType> &scale,
    const LocalTensor<uint8_t> &sharedTmpBuffer, const uint32_t K, const AntiQuantShapeInfo& shapeInfo = {})
{
    // Scale-only antiquant for the transposed layout (src: N x K, scale: shape [N]).
    uint32_t calCount = src.GetSize();
    uint32_t N = scale.GetSize();
    // half output and int4 input always use the scalar implementation
    if constexpr (IsSameType<OutputDataType, half>::value || IsSameType<InputDataType, int4b_t>::value) {
        return AntiQuantImplScalar(dst, src, scale, sharedTmpBuffer, calCount, K, shapeInfo);
    }
    // The brcb fast path needs K to be a multiple of 64 and within the supported maximum.
    // (A redundant runtime IsSameType<InputDataType, int4b_t> check was removed here: the
    // int4 case is already handled by the constexpr branch above, matching the
    // offset-variant overload.)
    if (K > ANTIQUANT_MAX_K * ANTIQUANT_BRCB_BASE || (K % ANTIQUANT_SINGLE_N_SIZE != 0)) {
        return AntiQuantImplScalar(dst, src, scale, sharedTmpBuffer, calCount, K, shapeInfo);
    }

    auto stackBuffer = sharedTmpBuffer.ReinterpretCast<float>();
    // work area: N * 64 fp32 input slice + 2 * (8 * N) fp32 for broadcast offset / scale
    uint32_t stackBufferSize = N * ANTIQUANT_SINGLE_N_SIZE + N * ANTIQUANT_BRCB_BASE * ANTIQUANT_TWO;
    stackBuffer.SetSize(stackBufferSize);
    CalculateByBrcbMin(dst, src, scale, stackBuffer, calCount, N, K);
}

} // namespace AscendC
#endif // IMPL_QUANTIZATION_ANTIQUANT_ASCEND_ANTIQUANT_C220_IMPL_H