#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 1;

class KernelReduceLogSumExp0 { // float & dim size = 1
public:
    __aicore__ inline KernelReduceLogSumExp0() {}
    // Bind input/output global memory and size the UB queues.
    // coreLength        - total number of elements this core touches in x/y
    // xiLength          - length of one slice along the reduced axis
    // xiLengthAligned   - aligned variant of xiLength (stored but not used in this class)
    // groupNum          - number of outer groups to process
    // groupLength       - stride (in elements) between consecutive groups in x
    // xiNum             - number of slices that are reduced (summed) into one output slice
    // tileNum/tileLength/lastTileLength - tiling of one slice; the final tile may be shorter
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                int32_t coreLength, int32_t xiLength, int32_t xiLengthAligned,
                                int32_t groupNum, int32_t groupLength, int32_t xiNum,
                                int32_t tileNum, int32_t tileLength, int32_t lastTileLength) {

        this->coreLength = coreLength;
        this->groupNum = groupNum;
        this->groupLength = groupLength;
        this->xiLength = xiLength;
        this->xiLengthAligned = xiLengthAligned;
        this->xiNum = xiNum;
        this->tileNum = tileNum;
        this->tileLength = tileLength;
        this->lastTileLength = lastTileLength;

        xGm.SetGlobalBuffer((__gm__ float *)x, this->coreLength);
        yGm.SetGlobalBuffer((__gm__ float *)y, this->coreLength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(float));
    }
    // Computes y = ln(sum_over_xi(exp(x))) tile by tile.
    // dimInd selects whether this pass applies exp() before accumulating
    // (dimInd == 0: first reduced dimension) or accumulates raw values.
    __aicore__ inline void Process(int32_t dimInd) {
        int32_t loopCount = this->tileNum * BUFFER_NUM;
        // Process each group independently.
        int32_t groupOffset;
        // Example: shape=[2,3,4,5], dim=[1,3], handling the first reduced dimension
        // (original dim=1). Three nested loops:
        //   outer : groups    (groupI: 0→1, 2 groups)
        //   middle: tiles     (i: 0→0, 1 tile)
        //   inner : xi slices (xi: 0→2, 3 slices accumulated per tile)
        for(int32_t groupI = 0; groupI < groupNum; ++groupI){
            groupOffset = groupI * this->groupLength;
            for (int32_t i = 0; i < loopCount; i++) {     // tile-wise; the last tile uses lastTileLength
                // sumX doubles as the output tile: zero it, accumulate into it, then enqueue it.
                this->sumX = outQueueY.AllocTensor<float>();
                Duplicate(this->sumX, 0.0f, this->tileLength);
                for(int32_t xi = 0; xi < xiNum; ++xi){
                    if(i == loopCount - 1){
                        CopyIn(i, this->lastTileLength, xi, groupOffset);
                        Compute(i, this->lastTileLength, dimInd);
                    }else{
                        CopyIn(i, this->tileLength, xi, groupOffset);
                        Compute(i, this->tileLength, dimInd);
                    }
                }
                // NOTE(review): Ln and the output DataCopy below always run over tileLength,
                // even on the last tile where only lastTileLength elements were accumulated.
                // The tail then holds ln(0) = -inf and is copied out — presumably the tiling
                // guarantees the tail stays inside padded/owned GM; confirm against the
                // host-side tiling function.
                Ln(this->sumX, this->sumX, this->tileLength);
                outQueueY.EnQue(this->sumX);
                this->sumX = outQueueY.DeQue<float>();
                DataCopy(yGm[groupI * this->xiLength + i * this->tileLength], this->sumX, this->tileLength);
                outQueueY.FreeTensor(this->sumX);
            }
        }
    }

private:
    // Copies one tile of slice `xi` from global memory into the input queue.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length, int32_t xi, int32_t groupOffset) {
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();

        DataCopy(xLocal[0], xGm[groupOffset + xi * this->xiLength + progress * this->tileLength], length);
    
        inQueueX.EnQue(xLocal);
    }

    // Accumulates the dequeued tile into sumX; applies exp() first on the
    // initial reduced dimension (dimInd == 0).
    __aicore__ inline void Compute(int32_t progress, uint32_t length, int32_t dimInd) {
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();

        if(dimInd == 0){  // first dimension being reduced: compute e^x
            Exp(xLocal, xLocal, length);
        }
        // Later dimensions are already exp-ed; accumulate directly.
        Add(this->sumX, this->sumX, xLocal, length);  // sumX += xLocal
        inQueueX.FreeTensor(xLocal);  // release the input tile
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuffer1;  // NOTE(review): never InitBuffer'd or Get'd in this class
    GlobalTensor<float> xGm;
    GlobalTensor<float> yGm;
    LocalTensor<float> sumX;  // running accumulator, re-bound per tile from outQueueY
    int32_t coreLength, xiLength, xiLengthAligned, xiNum, tileNum;
    int32_t tileLength, lastTileLength, groupNum, groupLength, dimSize;
};


class KernelReduceLogSumExp2 { // half & dim size = 1
public:
    __aicore__ inline KernelReduceLogSumExp2() {}
    // Bind input/output global memory and size the UB queues/scratch buffers.
    // Parameters mirror KernelReduceLogSumExp0::Init; the element type is half,
    // but exp/add/ln are evaluated in float for precision.
    // coreLength        - total number of elements this core touches in x/y
    // xiLength          - length of one slice along the reduced axis
    // xiLengthAligned   - aligned variant of xiLength (stored but not used in this class)
    // groupNum/groupLength - number of outer groups / stride between them in x
    // xiNum             - number of slices reduced (summed) into one output slice
    // tileNum/tileLength/lastTileLength - tiling of one slice; the final tile may be shorter
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                int32_t coreLength, int32_t xiLength, int32_t xiLengthAligned,
                                int32_t groupNum, int32_t groupLength, int32_t xiNum,
                                int32_t tileNum, int32_t tileLength, int32_t lastTileLength) {
        this->coreLength = coreLength;
        this->groupNum = groupNum;
        this->groupLength = groupLength;
        this->xiLength = xiLength;
        this->xiLengthAligned = xiLengthAligned;
        this->xiNum = xiNum;
        this->tileNum = tileNum;
        this->tileLength = tileLength;
        this->lastTileLength = lastTileLength;

        xGm.SetGlobalBuffer((__gm__ half *)x, this->coreLength);
        yGm.SetGlobalBuffer((__gm__ half *)y, this->coreLength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(half));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(half));
        // Float scratch for the half->float->half precision round-trip.
        // (A third half-sized scratch buffer previously allocated here was never
        // actually used: the tensor fetched from it was immediately overwritten
        // by outQueueY.AllocTensor in Process. It has been removed to free UB.)
        pipe.InitBuffer(tmpBuffer2, this->tileLength * sizeof(float));
        pipe.InitBuffer(tmpBuffer3, this->tileLength * sizeof(float));
    }
    // Computes y = ln(sum_over_xi(exp(x))) tile by tile, accumulating in float.
    // dimInd == 0 applies exp() before accumulating (first reduced dimension).
    __aicore__ inline void Process(int32_t dimInd) {
        int32_t loopCount = this->tileNum * BUFFER_NUM;
        this->temp1 = tmpBuffer2.Get<float>();
        this->temp2 = tmpBuffer3.Get<float>();
        // Process each group independently.
        int32_t groupOffset;
        for(int32_t groupI = 0; groupI < groupNum; ++groupI){
            groupOffset = groupI * this->groupLength;
            for (int32_t i = 0; i < loopCount; i++) {
                // sumX doubles as the output tile: zero it, accumulate, enqueue.
                this->sumX = outQueueY.AllocTensor<half>();
                Duplicate(this->sumX, ZERO, this->tileLength);
                for(int32_t xi = 0; xi < xiNum; ++xi){
                    if(i == loopCount - 1){
                        CopyIn(i, this->lastTileLength, xi, groupOffset);
                        Compute(i, this->lastTileLength, dimInd);
                    }else{
                        CopyIn(i, this->tileLength, xi, groupOffset);
                        Compute(i, this->tileLength, dimInd);
                    }
                }
                // ln() is evaluated in float, then cast back to half.
                // NOTE(review): these ops and the DataCopy below always run over
                // tileLength, even on the last (shorter) tile — presumably the
                // host-side tiling pads the tail; confirm.
                Cast(temp1, this->sumX, RoundMode::CAST_NONE, this->tileLength);
                Ln(temp1, temp1, this->tileLength);
                Cast(this->sumX, temp1, RoundMode::CAST_NONE, this->tileLength);

                outQueueY.EnQue(this->sumX);
                this->sumX = outQueueY.DeQue<half>();
                DataCopy(yGm[groupI * this->xiLength + i * this->tileLength], this->sumX, this->tileLength);
                outQueueY.FreeTensor(this->sumX);
            }
        }
    }

private:
    // Copies one tile of slice `xi` from global memory into the input queue.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length, int32_t xi, int32_t groupOffset) {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();

        DataCopy(xLocal[0], xGm[groupOffset + xi * this->xiLength + progress * this->tileLength], length);

        inQueueX.EnQue(xLocal);
    }

    // Accumulates the dequeued tile into sumX with all arithmetic done in float;
    // applies exp() first on the initial reduced dimension (dimInd == 0).
    __aicore__ inline void Compute(int32_t progress, uint32_t length, int32_t dimInd) {
        LocalTensor<half> xLocal = inQueueX.DeQue<half>();

        if(dimInd == 0){
            Cast(temp1, xLocal, RoundMode::CAST_NONE, length);   // half -> float
            Exp(temp1, temp1, length);                           // e^x
            Cast(temp2, this->sumX, RoundMode::CAST_NONE, length);
            Add(temp1, temp1, temp2, length);                    // sum += e^x
            Cast(this->sumX, temp1, RoundMode::CAST_NONE, length);
        }else{
            // Later dimensions are already exp-ed; accumulate directly (in float).
            Cast(temp1, xLocal, RoundMode::CAST_NONE, length);
            Cast(temp2, this->sumX, RoundMode::CAST_NONE, length);
            Add(temp1, temp1, temp2, length);
            Cast(this->sumX, temp1, RoundMode::CAST_NONE, length);
        }

        inQueueX.FreeTensor(xLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuffer2, tmpBuffer3;
    GlobalTensor<half> xGm;
    GlobalTensor<half> yGm;
    LocalTensor<half> sumX;          // running accumulator, re-bound per tile from outQueueY
    LocalTensor<float> temp1, temp2; // float scratch views over tmpBuffer2/tmpBuffer3
    int32_t coreLength, xiLength, xiLengthAligned, xiNum, tileNum;
    int32_t tileLength, lastTileLength, groupNum, groupLength, dimSize;
    half ZERO = 0;
};

// Kernel entry point: selects the implementation by tiling key
// (1 = float input, 2 = half input) and forwards the tiling fields.
extern "C" __global__ __aicore__ void reduce_log_sum_exp(GM_ADDR x, GM_ADDR axes, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    if (TILING_KEY_IS(1)) { // float path
        GET_TILING_DATA(td, tiling);

        KernelReduceLogSumExp0 op;
        op.Init(x, y,
                td.coreLength, td.xiLength, td.xiLengthAligned,
                td.groupNum, td.groupLength, td.xiNum,
                td.tileNum, td.tileLength, td.lastTileLength);
        op.Process(0);
    } else if (TILING_KEY_IS(2)) { // half path
        GET_TILING_DATA(td, tiling);

        KernelReduceLogSumExp2 op;
        op.Init(x, y,
                td.coreLength, td.xiLength, td.xiLengthAligned,
                td.groupNum, td.groupLength, td.xiNum,
                td.tileNum, td.tileLength, td.lastTileLength);
        op.Process(0);
    }
}
