#include "kernel_operator.h"
#include "trace_custom_tiling.h"

// Single buffering: one in-flight tensor per queue (no ping-pong/double-buffer overlap).
constexpr int32_t BUFFER_NUM = 1;

/// Computes the trace (sum of main-diagonal elements) of a batch of half-precision
/// matrices. One AI core (block) processes exactly one matrix and writes one
/// scalar result, so no cross-core synchronization or atomics are required.
class KernelTraceCustom {
public:
    /// Initialize per-core state and UB buffers.
    /// @param x  GM input: matrixNum matrices, each rowLength x columnLength halves.
    /// @param y  GM output: one half (the trace) per matrix.
    /// @param td host-computed tiling data (sizes and alignment unit).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, const TraceCustomTilingData &td)
    {
        tiling = td;
        blockMatrixIdx = AscendC::GetBlockIdx();
        // Cores beyond the matrix count have no work; bail out early.
        if (blockMatrixIdx >= tiling.matrixNum) { validBlock = false; return; }
        rawLen     = tiling.lastTileLength;            // diagonal length to read (= rowLength)
        alignNum   = tiling.alignNum;                  // alignment unit in elements (e.g. 16 halves = 32B)
        alignedLen = ((rawLen + alignNum - 1) / alignNum) * alignNum; // round up to alignment
        xGm.SetGlobalBuffer((__gm__ half*)x,
                            tiling.matrixNum * tiling.rowLength * tiling.columnLength);
        yGm.SetGlobalBuffer((__gm__ half*)y, tiling.matrixNum);
        pipe.InitBuffer(diagQueue, BUFFER_NUM, alignedLen * sizeof(half));
        pipe.InitBuffer(workQueue, BUFFER_NUM, alignedLen * sizeof(half));
        // FIX: UB queue buffers must be 32-byte aligned; the previous
        // sizeof(half) (2 bytes) undersized the allocation.
        pipe.InitBuffer(outQueue,  BUFFER_NUM, 32);
    }

    /// Full pipeline for this core's matrix: gather diagonal -> reduce -> store.
    __aicore__ inline void Process()
    {
        if (!validBlock) return;
        CopyIn();
        Compute();
        CopyOut();
    }

private:
    /// Gather the main diagonal of this core's matrix into UB.
    /// Diagonal elements are strided by (columnLength + 1) in GM, so they are
    /// read one-by-one with scalar accesses (exact but slow); the aligned tail
    /// is zero-filled so it does not perturb the subsequent sum.
    __aicore__ inline void CopyIn()
    {
        AscendC::LocalTensor<half> diagLocal = diagQueue.AllocTensor<half>();

        uint32_t matrixIdx = blockMatrixIdx;
        uint64_t matrixBase = (uint64_t)matrixIdx * tiling.rowLength * tiling.columnLength;

        // The diagonal of a non-square matrix has min(rows, cols) elements;
        // clamp in case tiling.lastTileLength exceeds that.
        uint32_t maxDiag = (tiling.rowLength < tiling.columnLength)
                         ? tiling.rowLength : tiling.columnLength;
        uint32_t valid = (rawLen > maxDiag) ? maxDiag : rawLen;

        // Element-wise exact read: index into the aligned base, then pick the
        // lane offset, since GetValue addresses relative to an aligned tensor start.
        for (uint32_t i = 0; i < valid; ++i) {
            uint64_t pos  = matrixBase + (uint64_t)i * (tiling.columnLength + 1);
            uint64_t base = (pos / alignNum) * alignNum;
            uint32_t lane = (uint32_t)(pos - base);
            half v = xGm[base].GetValue(lane);
            diagLocal.SetValue(i, v);
        }
        // Zero padding: ReduceSum below runs over alignedLen elements.
        for (uint32_t i = valid; i < alignedLen; ++i) {
            diagLocal.SetValue(i, (half)0.0f);
        }
        diagQueue.EnQue(diagLocal);
    }

    /// Sum the gathered diagonal into a single half. The zero-padded tail
    /// contributes nothing, so reducing over alignedLen is safe.
    __aicore__ inline void Compute()
    {
        AscendC::LocalTensor<half> diagLocal = diagQueue.DeQue<half>();
        AscendC::LocalTensor<half> workLocal = workQueue.AllocTensor<half>();
        AscendC::LocalTensor<half> yLocal    = outQueue.AllocTensor<half>();
        AscendC::ReduceSum<half>(yLocal, diagLocal, workLocal, alignedLen);
        outQueue.EnQue(yLocal);
        diagQueue.FreeTensor(diagLocal);
        workQueue.FreeTensor(workLocal);
    }

    /// Write the single-scalar result to GM at y[blockMatrixIdx].
    __aicore__ inline void CopyOut()
    {
        AscendC::LocalTensor<half> yLocal = outQueue.DeQue<half>();
#ifdef ASCENDC_CPU_DEBUG
        yGm[blockMatrixIdx].SetValue(0, yLocal.GetValue(0));
#else
        // FIX: plain DataCopy transfers in 32-byte granules; copying "1"
        // element would actually write a full 32-byte block and clobber 15
        // neighbouring outputs. DataCopyPad performs the exact 2-byte store.
        // This core owns the output slot exclusively, so no atomic is needed.
        AscendC::DataCopyExtParams copyParams{1, static_cast<uint32_t>(sizeof(half)), 0, 0, 0};
        AscendC::DataCopyPad(yGm[blockMatrixIdx], yLocal, copyParams);
#endif
        outQueue.FreeTensor(yLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN,  BUFFER_NUM> diagQueue;
    AscendC::TQue<AscendC::TPosition::VECIN,  BUFFER_NUM> workQueue;  // ReduceSum scratch
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueue;
    AscendC::GlobalTensor<half> xGm;
    AscendC::GlobalTensor<half> yGm;

    TraceCustomTilingData tiling;
    uint32_t rawLen;          // requested diagonal length
    uint32_t alignedLen;      // rawLen rounded up to alignNum
    uint32_t alignNum;        // element alignment unit
    uint32_t blockMatrixIdx;  // which matrix this core handles
    bool validBlock = true;   // false when this core has no matrix assigned
};

#ifdef ASCENDC_CPU_DEBUG
/// CPU-debug entry point: the host harness passes tiling data by value.
extern "C" __global__ __aicore__
void trace_custom(GM_ADDR x, GM_ADDR y, TraceCustomTilingData tiling)
{
    KernelTraceCustom op;
    op.Init(x, y, tiling);
    op.Process();
}
#else
/// NPU entry point: tiling data arrives serialized in global memory.
extern "C" __global__ __aicore__
void trace_custom(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tilingGm)
{
    (void)workspace;
    // FIX: AscendC::DataCopy moves data between GM and UB tensors; casting a
    // stack object's address to __gm__ and "copying" into it is invalid and
    // leaves `tiling` garbage on device. Use the GET_TILING_DATA macro
    // (generated by REGISTER_TILING_DATA_CLASS in trace_custom_tiling.h),
    // which performs the correct GM read of the tiling struct.
    GET_TILING_DATA(tiling, tilingGm);
    KernelTraceCustom op;
    op.Init(x, y, tiling);
    op.Process();
}
#endif