/**
 * @file add_custom.cpp
 *
 * Copyright (C) 2022-2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "kernel_operator.h"
constexpr int32_t BUFFER_NUM = 2; // buffers per queue: double buffering so DataCopy of tile i+1 overlaps compute of tile i

/**
 * Element-wise add kernel: z[i] = x[i] + y[i].
 *
 * Work is split across AI cores using a big-core/small-core tiling scheme:
 * the first `tailBlockNum` cores ("big" cores) each process `bigCoreDataNum`
 * elements, the remaining cores process `smallCoreDataNum` elements. Each
 * core streams its slice through the unified buffer in tiles of
 * `tileDataNum` elements, double-buffered (BUFFER_NUM) so copy-in, compute
 * and copy-out of consecutive tiles overlap.
 */
template<typename TYPE_X, typename TYPE_Y, typename TYPE_Z> class KernelAdd {
    using T = TYPE_X;
public:
    __aicore__ inline KernelAdd() {}

    /**
     * @brief Bind the per-core global-memory slices and allocate local buffers.
     *
     * @param x, y, z           GM base addresses of the two inputs and the output.
     * @param smallCoreDataNum  elements processed by each small core
     * @param bigCoreDataNum    elements processed by each big core
     * @param finalBigTileNum   tile count for a big core (incl. tail tile)
     * @param finalSmallTileNum tile count for a small core (incl. tail tile)
     * @param tileDataNum       elements per full tile (one vector pass)
     * @param smallTailDataNum  elements in a small core's last (tail) tile
     * @param bigTailDataNum    elements in a big core's last (tail) tile
     * @param tailBlockNum      number of big cores
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z, uint32_t smallCoreDataNum,
        uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
        uint32_t finalSmallTileNum, uint32_t tileDataNum, 
        uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
        uint32_t tailBlockNum)
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // Index of the current AI core (NOT the core count): cores
        // [0, tailBlockNum) are big cores, the rest are small cores.
        uint32_t coreIdx = AscendC::GetBlockIdx();
        // Start offset of this core's slice, first assuming every preceding
        // core were a big core; corrected below for small cores.
        uint32_t globalBufferIndex = bigCoreDataNum * coreIdx;
        this->tileDataNum = tileDataNum; // elements handled per vector pass
        if (coreIdx < tailBlockNum) { // big core
          this->coreDataNum = bigCoreDataNum;
          this->tileNum = finalBigTileNum;
          this->tailDataNum = bigTailDataNum;
        }
        else { // small core: remove the over-count from the preceding small cores
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = finalSmallTileNum;
          this->tailDataNum = smallTailDataNum;
          globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (coreIdx - tailBlockNum);
        }
        // Bind this core's contiguous slice (coreDataNum elements) of each GM tensor.
        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + globalBufferIndex, this->coreDataNum);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + globalBufferIndex, this->coreDataNum);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z*)z + globalBufferIndex, this->coreDataNum);
        // One tile worth of local memory per buffer; BUFFER_NUM buffers per queue.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(inQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Y));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Z));
        if constexpr (std::is_same_v<T, int8_t>) {
            // Scratch buffers are only needed for the int8 path, where inputs
            // are widened to half for the add (see Compute). Skipping them for
            // other dtypes leaves more of the unified buffer available.
            pipe.InitBuffer(tmp1, this->tileDataNum * sizeof(half));
            pipe.InitBuffer(tmp2, this->tileDataNum * sizeof(half));
        }
    }

    /**
     * @brief Main loop: stream all tiles of this core's slice through
     *        CopyIn -> Compute -> CopyOut. The last tile may be shorter
     *        (tailDataNum elements).
     */
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
              this->processDataNum = this->tailDataNum; // shorter tail tile
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    /// Stage 1: copy tile `progress` of x and y from GM into local buffers
    /// and enqueue them for the compute stage.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        AscendC::LocalTensor<TYPE_Y> yLocal = inQueueY.AllocTensor<TYPE_Y>();
        AscendC::DataCopy(xLocal, xGm[progress * this->tileDataNum], this->processDataNum);
        AscendC::DataCopy(yLocal, yGm[progress * this->tileDataNum], this->processDataNum);
        inQueueX.EnQue(xLocal);
        inQueueY.EnQue(yLocal);
    }

    /// Stage 2: z = x + y for the current tile.
    /// For int8 inputs the vector unit has no native add, so the tile is
    /// widened to half, added, then truncated back to the int8 value range
    /// via an int16 shift pair before casting down to the output type.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Y> yLocal = inQueueY.DeQue<TYPE_Y>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
        if constexpr (std::is_same_v<T, int8_t>) {
          auto p1 = tmp1.Get<half>();
          auto p2 = tmp2.Get<half>();
          // Widen int8 -> half (CAST_NONE: plain value conversion).
          AscendC::Cast(p1, xLocal, AscendC::RoundMode::CAST_NONE, this->processDataNum);
          AscendC::Cast(p2, yLocal, AscendC::RoundMode::CAST_NONE, this->processDataNum);
          AscendC::Add(p2, p1, p2, this->processDataNum);
          // half -> int16 (round to nearest), then shift left/right by 8:
          // the arithmetic right shift sign-extends bit 7, wrapping the sum
          // into the int8 range (mod-256 semantics, as int8 overflow would).
          AscendC::Cast(p1.ReinterpretCast<int16_t>(), p2, AscendC::RoundMode::CAST_RINT, this->processDataNum);
          AscendC::ShiftLeft(p1.ReinterpretCast<int16_t>(), p1.ReinterpretCast<int16_t>(), int16_t(8), this->processDataNum); 
          AscendC::ShiftRight(p1.ReinterpretCast<int16_t>(), p1.ReinterpretCast<int16_t>(), int16_t(8), this->processDataNum);
          // Narrow back: int16 -> half -> int8 output.
          AscendC::Cast(p2, p1.ReinterpretCast<int16_t>(), AscendC::RoundMode::CAST_NONE, this->processDataNum);
          AscendC::Cast(zLocal, p2, AscendC::RoundMode::CAST_NONE, this->processDataNum);
        }
        else { // all other dtypes: native vector add
          AscendC::Add(zLocal, xLocal, yLocal, this->processDataNum);
        }
        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
        inQueueY.FreeTensor(yLocal);
    }

    /// Stage 3: copy the computed tile from the local buffer back to GM.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();  
        AscendC::DataCopy(zGm[progress * this->tileDataNum], zLocal, this->processDataNum);
        outQueueZ.FreeTensor(zLocal);
    }

private:
        AscendC::TPipe pipe;
        // Input/output queues synchronize the copy and compute stages.
        AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY;
        AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
        // half-precision scratch, allocated only for the int8 specialization.
        AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp1, tmp2;
        AscendC::GlobalTensor<TYPE_X> xGm;
        AscendC::GlobalTensor<TYPE_Y> yGm;
        AscendC::GlobalTensor<TYPE_Z> zGm;
        uint32_t coreDataNum;    // total elements this core processes
        uint32_t tileNum;        // number of tiles (incl. tail)
        uint32_t tileDataNum;    // elements per full tile
        uint32_t tailDataNum;    // elements in the last tile
        uint32_t processDataNum; // elements in the tile currently in flight
};

/**
 * Kernel entry point: element-wise add z = x + y.
 * Reads host-prepared tiling parameters and drives the KernelAdd pipeline
 * on the current AI core.
 */
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling)
{
    // Deserialize the tiling struct written by the host-side tiling function.
    GET_TILING_DATA(tiling_data, tiling);
    // Instantiate the kernel for the dtypes chosen at compile time.
    KernelAdd<DTYPE_X, DTYPE_Y, DTYPE_Z> kernel;
    kernel.Init(x, y, z,
                tiling_data.smallCoreDataNum, tiling_data.bigCoreDataNum,
                tiling_data.finalBigTileNum, tiling_data.finalSmallTileNum,
                tiling_data.tileDataNum, tiling_data.smallTailDataNum,
                tiling_data.bigTailDataNum, tiling_data.tailBlockNum);
    kernel.Process();
}

