/**
 * @file leakyrelu_custom.cpp
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "kernel_operator.h"

constexpr int32_t TOTAL_LENGTH = 8 * 2048;                            // total number of float elements processed
constexpr int32_t USE_CORE_NUM = 8;                                   // number of AI cores used
constexpr int32_t BLOCK_LENGTH = TOTAL_LENGTH / USE_CORE_NUM;         // elements computed by each core
constexpr int32_t TILE_NUM = 8;                                       // split each core's block into 8 tiles
constexpr int32_t BUFFER_NUM = 2;                                     // tensors per queue (double buffering)
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM; // each tile halved again for double buffer

// Guard against silent truncation: every division above must be exact,
// otherwise part of the data would never be processed.
static_assert(TOTAL_LENGTH % USE_CORE_NUM == 0, "TOTAL_LENGTH must be divisible by USE_CORE_NUM");
static_assert(BLOCK_LENGTH % (TILE_NUM * BUFFER_NUM) == 0,
              "BLOCK_LENGTH must be divisible by TILE_NUM * BUFFER_NUM");

/**
 * LeakyReLU kernel using the standard Ascend C three-stage pipeline
 * (CopyIn -> Compute -> CopyOut) with double buffering.
 *
 * Each core processes one contiguous BLOCK_LENGTH slice of the input,
 * selected by its block index; the slice is consumed TILE_LENGTH
 * elements at a time.
 */
class KernelLeakyRelu {
public:
    __aicore__ inline KernelLeakyRelu() {}

    /**
     * @brief Bind this core's slice of global memory and allocate queue buffers.
     * @param src           global-memory input pointer (float data)
     * @param dst           global-memory output pointer (float data)
     * @param negativeSlope multiplier applied to negative inputs
     */
    __aicore__ inline void Init(GM_ADDR src, GM_ADDR dst, float negativeSlope)
    {
        // Parameter is already float — no cast needed.
        this->negativeSlope = negativeSlope;

        // Offset by block index so each core owns a disjoint BLOCK_LENGTH range.
        srcGm.SetGlobalBuffer((__gm__ float *)src + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
        dstGm.SetGlobalBuffer((__gm__ float *)dst + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
        pipe.InitBuffer(inQueueSrc, BUFFER_NUM, TILE_LENGTH * sizeof(float));
        pipe.InitBuffer(tmpBuffer, TILE_LENGTH * sizeof(float));
        pipe.InitBuffer(outQueueDst, BUFFER_NUM, TILE_LENGTH * sizeof(float));
    }

    /** @brief Run the full pipeline over every tile of this core's block. */
    __aicore__ inline void Process()
    {
        // TILE_LENGTH was halved for double buffering, so iterate twice per tile.
        int32_t loopCount = TILE_NUM * BUFFER_NUM;
        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    /** @brief Stage 1: copy one tile from global memory into the input queue. */
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<float> srcLocal = inQueueSrc.AllocTensor<float>();
        AscendC::DataCopy(srcLocal, srcGm[progress * TILE_LENGTH], TILE_LENGTH);
        inQueueSrc.EnQue(srcLocal);
    }

    /** @brief Stage 2: compute LeakyReLU on one tile. */
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<float> srcLocal = inQueueSrc.DeQue<float>();
        AscendC::LocalTensor<float> dstLocal = outQueueDst.AllocTensor<float>();
        AscendC::LocalTensor<float> tmp = tmpBuffer.Get<float>();

        // LeakyReLU(x) computed as max(x, slope * x).
        // NOTE(review): this identity holds only for 0 <= negativeSlope <= 1
        // (for x < 0, slope*x > x requires slope < 1) — confirm callers never
        // pass a slope outside that range.
        AscendC::Muls(tmp, srcLocal, this->negativeSlope, TILE_LENGTH);
        AscendC::Max(dstLocal, tmp, srcLocal, TILE_LENGTH);

        outQueueDst.EnQue<float>(dstLocal);
        inQueueSrc.FreeTensor(srcLocal);
    }

    /** @brief Stage 3: copy one computed tile back to global memory. */
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<float> dstLocal = outQueueDst.DeQue<float>();
        AscendC::DataCopy(dstGm[progress * TILE_LENGTH], dstLocal, TILE_LENGTH);
        outQueueDst.FreeTensor(dstLocal);
    }

private:
    float negativeSlope;  // LeakyReLU coefficient for negative inputs

    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueSrc;    // GM -> UB input queue
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueDst;  // UB -> GM output queue
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmpBuffer;               // scratch for slope * x

    AscendC::GlobalTensor<float> srcGm;  // this core's input slice
    AscendC::GlobalTensor<float> dstGm;  // this core's output slice
};

/**
 * @brief Device-side kernel entry point for the LeakyReLU operator.
 *
 * Constructs the operator object, binds this core's slice of the input
 * and output tensors, then runs the tiled pipeline.
 */
extern "C" __global__ __aicore__ void leakyrelu_custom(GM_ADDR src, GM_ADDR dst, float negativeSlope)
{
    KernelLeakyRelu kernel;
    kernel.Init(src, dst, negativeSlope);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launch wrapper: dispatches the kernel on `blockDim` cores via the
// given stream. Compiled out for CPU-debug builds, which presumably call the
// kernel function directly — verify against the project's debug harness.
void leakyrelu_custom_do(uint32_t blockDim, void *stream, uint8_t *src, uint8_t *dst, float negativeSlope)
{
    leakyrelu_custom<<<blockDim, nullptr, stream>>>(src, dst, negativeSlope);
}
#endif
