#include "kernel_operator.h"

constexpr int32_t MATRIX_SIZE = 10;                                      // square matrix dimension (each matrix is 10x10)
constexpr int32_t MATRIX_NUM = 8;                                        // number of matrices in the batch
constexpr int32_t TOTAL_LENGTH = MATRIX_NUM * MATRIX_SIZE * MATRIX_SIZE; // total input elements (derived, = 800)
constexpr int32_t USE_CORE_NUM = MATRIX_NUM;                             // one AI core per matrix (core id == matrix index)
constexpr int32_t BLOCK_LENGTH = TOTAL_LENGTH / USE_CORE_NUM;            // elements handled by each core (one whole matrix)
constexpr int32_t TILE_NUM = 8;                                          // split each core's data into 8 tiles
constexpr int32_t BUFFER_NUM = 2;                                        // tensors per queue (double buffering)
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM;    // per-tile length; NOTE: 100/8/2 truncates to 6

class KernelTraceCustom
{
public:
    __aicore__ inline KernelTraceCustom() {}
    /**
     * @brief Bind global-memory buffers for input and output.
     * @param x input: MATRIX_NUM contiguous half-precision matrices,
     *          each MATRIX_SIZE x MATRIX_SIZE (TOTAL_LENGTH elements)
     * @param y output: one half-precision trace value per matrix (MATRIX_NUM elements)
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y)
    {
        xGm.SetGlobalBuffer((__gm__ half *)x, TOTAL_LENGTH);
        yGm.SetGlobalBuffer((__gm__ half *)y, MATRIX_NUM);
        // Queue buffer reserved for a tiled copy-in/copy-out pipeline;
        // the current Process() uses scalar GetValue/SetValue and never
        // dequeues from it.
        pipe.InitBuffer(queBind, BUFFER_NUM, TILE_LENGTH * sizeof(half));
    }
    /**
     * @brief Compute the trace of this core's matrix and write it to y[coreId].
     *
     * Each core owns one MATRIX_SIZE x MATRIX_SIZE matrix starting at
     * coreId * BLOCK_LENGTH. Accumulation is done in float to limit
     * half-precision rounding error, then converted back to half on store.
     */
    __aicore__ inline void Process()
    {
        int32_t coreId = static_cast<int32_t>(AscendC::GetBlockIdx());
        int32_t matrixOffset = coreId * BLOCK_LENGTH;
        float traceSum = 0.0f;
        // Only the diagonal contributes to the trace, so read just those
        // MATRIX_SIZE elements instead of scanning the full matrix
        // (the original looped over all MATRIX_SIZE^2 entries and
        // discarded everything with row != col).
        for (int32_t i = 0; i < MATRIX_SIZE; i++)
        {
            int32_t diagIdx = matrixOffset + i * (MATRIX_SIZE + 1);
            traceSum += static_cast<float>(xGm.GetValue(diagIdx));
        }
        // Bug fix: yGm holds half, so convert the float accumulator to half
        // explicitly. The original cast the float to float (a no-op) and
        // relied on an implicit narrowing conversion inside SetValue.
        yGm.SetValue(coreId, static_cast<half>(traceSum));
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQueBind<AscendC::QuePosition::VECIN, AscendC::QuePosition::VECOUT, BUFFER_NUM> queBind;
    AscendC::GlobalTensor<half> xGm;
    AscendC::GlobalTensor<half> yGm;
};

/**
 * @brief Kernel entry point: each AI core computes the trace of one matrix.
 * @param x device address of the batched input matrices
 * @param y device address of the per-matrix trace results
 */
extern "C" __global__ __aicore__ void trace_custom(GM_ADDR x, GM_ADDR y)
{
    KernelTraceCustom kernel;
    kernel.Init(x, y);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher for the NPU build: dispatches trace_custom across
// blockDim AI cores on the given stream (second launch argument is the
// l2ctrl pointer, unused here). Excluded from the CPU-debug build, where
// the kernel is invoked through the CPU simulation harness instead.
void trace_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *y)
{
    trace_custom<<<blockDim, nullptr, stream>>>(x, y);
}
#endif