/**
 * @file add_custom.cpp
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "add_custom_tiling.h"
#include "kernel_operator.h"

constexpr int32_t BUFFER_NUM = 2; // tensors per queue (double buffering)
constexpr int32_t LINE_EACH = 16; // < 32

// Round `value` up to the nearest multiple of `alignment`.
// Relies on the C++ identity (a / b) * b == a - a % b for integral types.
template <class T>
__aicore__ constexpr T RoundUp(const T &value, const T alignment)
{
    const T bumped = value + alignment - 1;
    return bumped - bumped % alignment;
}

// Half-precision matrix-vector product: for each of the m rows of the m x n
// matrix A, Y[row] = sum over col of A[row][col] * X[col].
// Rows are split across AI cores: each core handles its own `mBlock`
// consecutive rows, streaming them LINE_EACH rows at a time through
// double-buffered input/output queues.
class KernelAdd
{
public:
    __aicore__ inline KernelAdd() {}
    // Bind global-memory views for this core and reserve local buffers.
    //   A: m x n input matrix; this core sees its mBlock x n slice.
    //   X: length-n input vector, read in full by every core.
    //   Y: length-m output vector; this core writes its mBlock slice.
    //   tiling: host-computed shape/partition parameters.
    __aicore__ inline void Init(GM_ADDR A, GM_ADDR X, GM_ADDR Y, AddCustomTilingData &tiling)
    {
        this->blockNum = tiling.blockNum;
        this->mBlock = tiling.mBlock;
        this->m = tiling.m;
        this->n = tiling.n;
        // Offset A and Y by this core's block index; X is shared unoffset.
        AGm.SetGlobalBuffer((__gm__ half *)A + mBlock * n * AscendC::GetBlockIdx(), mBlock * n);
        XGm.SetGlobalBuffer((__gm__ half *)X, n);
        YGm.SetGlobalBuffer((__gm__ half *)Y + mBlock * AscendC::GetBlockIdx(), mBlock);
        // Queue buffers hold LINE_EACH rows of A in, LINE_EACH results out;
        // X gets a single un-queued scratch buffer since it never changes.
        pipe.InitBuffer(inQueueA, BUFFER_NUM, LINE_EACH * n * sizeof(half));
        pipe.InitBuffer(inBufX, n * sizeof(half));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, LINE_EACH * sizeof(half));
    }

    // Main loop: load X once, then process LINE_EACH rows of A per iteration.
    // NOTE(review): loopCount truncates — if mBlock is not a multiple of
    // LINE_EACH the remainder rows are silently skipped; confirm the host
    // tiling guarantees mBlock % LINE_EACH == 0.
    __aicore__ inline void Process()
    {
        // X is copied into a plain TBuf, so no queue inserts an MTE2->V sync
        // for it. Presumably the first CopyIn's EnQue/DeQue sync also covers
        // this copy because MTE2 instructions issue in order (the commented
        // flags below suggest this was deliberate) — TODO confirm.
        XLocal = inBufX.Get<half>();
        AscendC::DataCopy(XLocal, XGm, n);
        int32_t loopCount = mBlock / LINE_EACH;
        for (int32_t i = 0; i < loopCount; i++)
        {
            CopyIn(i * LINE_EACH);
            // AscendC::SetFlag<AscendC::HardEvent::MTE2_V>(0);
            // AscendC::WaitFlag<AscendC::HardEvent::MTE2_V>(0);
            Compute(i * LINE_EACH);
            // AscendC::SetFlag<AscendC::HardEvent::V_MTE3>(0);
            // AscendC::WaitFlag<AscendC::HardEvent::V_MTE3>(0);
            CopyOut(i * LINE_EACH);
        }
    }

private:
    // Stage LINE_EACH rows of A (starting at row `line`) into local memory.
    __aicore__ inline void CopyIn(int32_t line)
    {
        AscendC::LocalTensor<half> ALocal = inQueueA.AllocTensor<half>();
        AscendC::DataCopy(ALocal, AGm[line * n], LINE_EACH * n);
        inQueueA.EnQue(ALocal);
    }
    // For the current LINE_EACH rows: multiply element-wise by X, accumulate
    // every 128-column chunk of each row into that row's first chunk, then
    // reduce each row's first chunk to a single half.
    __aicore__ inline void Compute(int32_t line)
    {
        AscendC::LocalTensor<half> ALocal = inQueueA.DeQue<half>();
        AscendC::LocalTensor<half> YLocal = outQueueY.AllocTensor<half>();

        constexpr uint32_t num_per_datablock = 32 / sizeof(half); // 16 halfs per 32B datablock
        constexpr uint32_t num_per_repeat = num_per_datablock * 8; // 128 halfs per vector repeat
        // NOTE(review): repeat_num truncates, dropping any partial final
        // chunk, yet the strides below use RoundUp(n, num_per_repeat) —
        // confirm the host guarantees n % num_per_repeat == 0.
        uint32_t repeat_num = n / num_per_repeat;

        // One repeat iteration covers the same 128-column chunk of the NEXT
        // row: dst/src0 step a full row (n halfs, expressed in datablocks)
        // per repeat, while src1 (X) re-reads the same chunk (stride 0).
        AscendC::BinaryRepeatParams params;
        params.dstBlkStride = 1;
        params.src0BlkStride = 1;
        params.src1BlkStride = 1;
        params.dstRepStride = RoundUp(n, num_per_repeat) / num_per_datablock;
        params.src0RepStride = RoundUp(n, num_per_repeat) / num_per_datablock;
        params.src1RepStride = 0;

        // Counter-mode mask: each vector call processes LINE_EACH * 128
        // elements — one 128-column chunk across every row of the tile.
        AscendC::SetMaskCount();
        AscendC::SetVectorMask<half, AscendC::MaskMode::COUNTER>(LINE_EACH * num_per_repeat);
        for (uint32_t i = 0; i < repeat_num; i++)
        {
            uint32_t offset = i * num_per_repeat;
            if (i == 0)
            {
                // First chunk: ALocal[:, 0:128] *= X[0:128], kept in place.
                AscendC::Mul<half, false>(
                    ALocal,
                    ALocal,
                    XLocal,
                    AscendC::MASK_PLACEHOLDER,
                    1,
                    params);
            }
            else
            {
                // Later chunks: ALocal[:, 0:128] += ALocal[:, off:off+128] * X[off:off+128].
                AscendC::MulAddDst<half, half, false>(
                    ALocal,
                    ALocal[offset],
                    XLocal[offset],
                    AscendC::MASK_PLACEHOLDER,
                    1,
                    params);
            }
            // Each chunk accumulates into the same destination lanes, so the
            // vector pipeline must drain between iterations.
            AscendC::PipeBarrier<PIPE_V>();
        }
        // Leave counter mode and restore the full mask for the reduce below.
        AscendC::SetMaskNorm();
        AscendC::ResetMask();
        AscendC::PipeBarrier<PIPE_V>();

        // Per-row reduction: YLocal[r] = sum(ALocal[r, 0:num_per_repeat]),
        // one repeat per row, source stepping one row per repeat.
        // Presumably the LINE_EACH < 32 constraint (see top of file) exists
        // to satisfy this call's repeat/stride limits — TODO confirm.
        AscendC::WholeReduceSum<half, true>(
            YLocal,
            ALocal,
            num_per_repeat,
            LINE_EACH,
            1,
            1,
            RoundUp(n, num_per_repeat) / num_per_datablock);

        // if (AscendC::GetBlockIdx() == 0)
        // {
        //     AscendC::printf("the RoundUp(n, num_per_repeat) / num_per_datablock is %d\n", RoundUp(n, num_per_repeat) / num_per_datablock);
        // }

        outQueueY.EnQue<half>(YLocal);
        inQueueA.FreeTensor(ALocal);
    }
    // Copy LINE_EACH finished results back to this core's slice of Y.
    __aicore__ inline void CopyOut(int32_t line)
    {
        AscendC::LocalTensor<half> YLocal = outQueueY.DeQue<half>();
        AscendC::DataCopy(YGm[line], YLocal, LINE_EACH);
        outQueueY.FreeTensor(YLocal);
        // if(AscendC::GetBlockIdx() == 0) {
        //     AscendC::printf("YLocal[0]: %f gm[%d]: %f\n", (float)YLocal.GetValue(0), line, (float)YGm[line].GetValue(0));
        // }
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueA;   // staged rows of A
    AscendC::TBuf<AscendC::TPosition::VECCALC> inBufX;                 // resident copy of X
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY; // finished results
    AscendC::GlobalTensor<half> AGm;
    AscendC::GlobalTensor<half> XGm;
    AscendC::GlobalTensor<half> YGm;
    AscendC::LocalTensor<half> XLocal;
    uint32_t blockNum; // number of AI cores participating
    uint32_t mBlock;   // rows handled by this core
    uint32_t m;        // total rows of A
    uint32_t n;        // columns of A / length of X
};

// Device entry point: instantiate the operator, bind the global-memory
// operands and host tiling, then run the full per-core pipeline.
extern "C" __global__ __aicore__ void add_custom(GM_ADDR A, GM_ADDR X, GM_ADDR Y, AddCustomTilingData tiling)
{
    KernelAdd kernel;
    kernel.Init(A, X, Y, tiling);
    kernel.Process();
}
