/*
   @file reducesum_custom.cpp
*/

#include "kernel_operator.h"
// Ceiling division: smallest q such that q * b >= a.
// Written as a / b + (a % b != 0) rather than (a + b - 1) / b so the
// intermediate sum cannot wrap around uint32_t when a is near UINT32_MAX
// (the original form silently returned 0 for e.g. a = 0xFFFFFFFF, b = 2).
// Precondition: b != 0.
__aicore__ inline uint32_t Ceiling(uint32_t a, uint32_t b)
{
    return a / b + (a % b != 0 ? 1u : 0u);
}
// Per-core reduce-sum kernel: loads fp16 tiles from global memory, widens them
// to fp32 element-by-element on the scalar unit, reduces each 512-element tile
// with AscendC::ReduceSum, and copies the result tensor back out.
// Each vector core processes 16 tiles of 512 elements (see Process()).
class KernelReduceSum
{
public:
    __aicore__ inline KernelReduceSum() {}

    // Bind global-tensor views to this core's slice of the buffers and size
    // all unified-buffer queues.
    //   src     : fp16 input; this core reads from element offset block_id*16*512.
    //   dstGm   : fp32 output. NOTE(review): no per-core offset is applied here,
    //             so all cores address the same output region — confirm intended.
    //   scratch : fp32 staging area; one 512-element slot per core.
    __aicore__ inline void Init(__gm__ uint8_t *src, __gm__ uint8_t *dstGm, __gm__ uint8_t *scratch)
    {
        AscendC::printf("************testing init in************\n");

        int block_id = AscendC::GetBlockIdx();
        AscendC::printf("the block_id is : %d\n", block_id);
        // AscendC::printf("in copyin, the i is : %d\n", i);
        int srcOffset = block_id * 16 * 512; // 16 tiles of 512 half elements per core
        int scrOffset = block_id * 512;      // one 512-float scratch slot per core
        // srcGlobal.SetGlobalBuffer((__gm__ half *)src + srcOffset + i * 512, srcDataSize);
        srcGlobal.SetGlobalBuffer((__gm__ half *)src + srcOffset, srcDataSize);
        __dstGlobal.SetGlobalBuffer((__gm__ float *)dstGm, srcDataSize);
        scratchGlobal.SetGlobalBuffer((__gm__ float *)scratch + scrOffset, srcDataSize);

        // Size the ReduceSum work buffer: one fp32 partial result per repeat,
        // rounded up to a whole 32-byte unified-buffer block.
        repeat = srcDataSize / mask;
        int32_t elementsPerBlock = 32 / sizeof(float);
        int32_t repeatTimes = repeat;
        int32_t iter1OutputCount = repeatTimes;
        int iter1AlignEnd = Ceiling(iter1OutputCount, elementsPerBlock) * elementsPerBlock;
        int finalWorkLocalNeedSize = iter1AlignEnd; // for half this should be 64
        AscendC::printf("the Ceiling(iter1OutputCount, elementsPerBlock) is : %d\n", Ceiling(iter1OutputCount, elementsPerBlock));
        AscendC::printf("the elementsPerBlock is : %d\n", elementsPerBlock);
        AscendC::printf("the finalWorkLocalNeedSize is : %d\n", finalWorkLocalNeedSize);

        // NOTE(review): workQueue is declared VECOUT but its tensor is only ever
        // Alloc'd/Freed (never EnQue'd) — a TBuf would express pure scratch
        // usage more directly; confirm against the AscendC memory model.
        pipe.InitBuffer(workQueue, 1, finalWorkLocalNeedSize * sizeof(float));

        pipe.InitBuffer(fp32Queue, 1, srcDataSize * sizeof(float));
        pipe.InitBuffer(inQueueSrc, 1, srcDataSize * sizeof(half));
        pipe.InitBuffer(outQueueDst, 1, dstDataSize * sizeof(float));

        AscendC::printf("************testing init out************\n");
    }
    // Drive the 16-tile pipeline: for each tile, stage it into the unified
    // buffer, reduce it, and copy the result back to global memory.
    // The src/dstGm/scratch parameters are forwarded but unused by CopyIn
    // beyond its signature (Init already bound the global tensors).
    __aicore__ inline void Process(__gm__ uint8_t *src, __gm__ uint8_t *dstGm, __gm__ uint8_t *scratch)
    {
        AscendC::printf("************testing process in************\n");

        for (int i{0}; i < 16; i++)
        {
            AscendC::printf("in reducesum process, the i is : %d\n", i);
            CopyIn(i, src, dstGm, scratch);
            Compute();
            CopyOut(i);
        }

        AscendC::printf("************testing process out************\n");
    }

private:
    // Stage tile `i` (512 half elements) from global memory into the unified
    // buffer and produce a widened fp32 copy alongside it.
    __aicore__ inline void CopyIn(int i, __gm__ uint8_t *src, __gm__ uint8_t *dstGm, __gm__ uint8_t *scratch)
    {
        AscendC::printf("************testing copyin in************\n");

        int block_id = AscendC::GetBlockIdx();
        // if (i == 0)

        AscendC::LocalTensor<half> srcLocal = inQueueSrc.AllocTensor<half>();
        AscendC::LocalTensor<float> srcLocalfp32 = fp32Queue.AllocTensor<float>();

        AscendC::DataCopy(srcLocal, srcGlobal[i * 512], srcDataSize);

        // Element-wise half -> float widening on the scalar unit.
        // NOTE(review): the loop variable shadows the tile-index parameter `i`,
        // and GetValue/SetValue per element is scalar-unit work — AscendC::Cast
        // would do this as a vector op; confirm before changing.
        for (int i = 0; i < srcLocal.GetSize(); i++)
        {
            srcLocalfp32.SetValue(i, static_cast<float>(srcLocal.GetValue(i)));
        }
        // AscendC::DataCopy(scratchGlobal, srcLocalfp32, srcDataSize);

        fp32Queue.EnQue(srcLocalfp32);
        inQueueSrc.EnQue(srcLocal);

        AscendC::printf("************testing copyin out************\n");
    }
    // Reduce the current fp32 tile to per-repeat sums via AscendC::ReduceSum.
    __aicore__ inline void Compute()
    {
        AscendC::printf("************testing compute in************\n");

        // srcLocal is dequeued only for the debug prints below; the reduction
        // itself consumes the fp32 copy.
        AscendC::LocalTensor<half> srcLocal = inQueueSrc.DeQue<half>();
        AscendC::LocalTensor<float> srcLocalfp32 = fp32Queue.DeQue<float>();
        AscendC::LocalTensor<float> dstLocal = outQueueDst.AllocTensor<float>();
        AscendC::LocalTensor<float> workLocal = workQueue.AllocTensor<float>();

        auto pos = srcLocal.GetPosition();
        AscendC::printf("the pos is :%d\n", static_cast<int>(pos));

        uint64_t size = srcLocal.GetSize();
        AscendC::printf("the size of srcLocal is : %d\n", size);

        // mask = 64 elements per repeat; the literal 8s are repeatTimes and
        // srcRepStride. NOTE(review): 8 equals `repeat` (512/64) only for the
        // current srcDataSize/mask — prefer passing `repeat` to keep them in sync.
        AscendC::ReduceSum<float>(dstLocal, srcLocalfp32, workLocal, mask, 8, 8);

        // AscendC::DataCopy(scratchGlobal, dstLocal, srcDataSize);

        outQueueDst.EnQue<float>(dstLocal);
        inQueueSrc.FreeTensor(srcLocal);
        fp32Queue.FreeTensor(srcLocalfp32);
        workQueue.FreeTensor(workLocal);

        AscendC::printf("************testing compute out************\n");
    }
    // Copy the reduction result for tile `i` back to global memory.
    // NOTE(review): this copies srcDataSize (512) floats starting at element
    // offset `i`, so consecutive iterations overlap by 511 elements, and
    // __dstGlobal carries no per-core offset — multiple cores write the same
    // region. Looks like debug-stage code; confirm the intended output layout.
    __aicore__ inline void CopyOut(int i)
    {
        AscendC::printf("************testing copyout in************\n");

        AscendC::LocalTensor<float> dstLocal = outQueueDst.DeQue<float>();
        AscendC::DataCopy(scratchGlobal[i], dstLocal, srcDataSize);
        AscendC::DataCopy(__dstGlobal[i], dstLocal, srcDataSize);

        outQueueDst.FreeTensor(dstLocal);

        AscendC::printf("************testing copyout out************\n");
    }

private:
    AscendC::TPipe pipe;                                    // owns all UB allocations below
    AscendC::TQue<AscendC::TPosition::VECIN, 1> inQueueSrc; // fp16 input tile
    AscendC::TQue<AscendC::TPosition::VECIN, 1> fp32Queue;  // widened fp32 tile
    AscendC::TQue<AscendC::TPosition::VECOUT, 1> outQueueDst; // reduction result
    AscendC::TQue<AscendC::TPosition::VECOUT, 1> workQueue;   // ReduceSum scratch (never enqueued)

    AscendC::GlobalTensor<half> srcGlobal;     // this core's input slice
    AscendC::GlobalTensor<float> __dstGlobal;  // shared output buffer
    AscendC::GlobalTensor<float> scratchGlobal; // this core's scratch slot

    int repeat = 0;        // repeats per tile = srcDataSize / mask (set in Init)
    int srcDataSize = 512; // elements per tile
    int dstDataSize = 512; // output-queue capacity in floats
    int mask = 64;         // elements consumed per ReduceSum repeat
};

// Kernel entry point: validates the three global-memory pointers, then runs
// the per-core reduce-sum pipeline (Init binds the buffers, Process iterates
// over the 16 tiles).
extern "C" __global__ __aicore__ void reducesum_custom(GM_ADDR src, GM_ADDR dstGm, GM_ADDR scratch)
{
    AscendC::printf("************testing kernel in************\n");

    // Guard: bail out early if any of the buffers was not provided.
    const bool anyNull = (src == nullptr) || (dstGm == nullptr) || (scratch == nullptr);
    if (anyNull)
    {
        AscendC::printf("Invalid pointer in Init\n");
        return;
    }

    KernelReduceSum kernel;
    kernel.Init(src, dstGm, scratch);
    kernel.Process(src, dstGm, scratch);

    AscendC::printf("************testing kernel out \n");
}
