#include "kernel_operator.h"

using namespace AscendC;

// Number of buffers per TQue: 1 = single buffering, i.e. no double-buffer
// pipelining between the copy-in, compute, and copy-out stages.
constexpr int32_t BUFFER_NUM = 1; 

// LogSumExp kernel: y = ln(sum(exp(x))) reduced along one or more axes.
// Several Init*/Process* pairs implement different reduction layouts and are
// selected by the tiling key in the kernel entry function:
//   - Init / Process:                 reduce over the contiguous last dimension
//   - InitVertical / ProcessVertical: reduce over a single non-last axis
//   - InitVMulti / ProcessVMulti:     reduce over multiple axes (last dim kept)
//   - InitVMultiWL / ProcessVMultiWL: reduce over multiple axes including the
//                                     last dimension (WL = "with last")
template<typename typeT>
class KernelLogSumExp {
public:
    __aicore__ inline KernelLogSumExp() {}
    // Initialize for last-dimension reduction (tiling keys 1 and 6).
    //   x, y         : global-memory input/output addresses
    //   totallength  : total number of input elements
    //   outputlength : number of output elements
    //   tilelength   : tile size in elements (stored; not used in this path)
    //   dim          : index of the reduction axis (stored; not used here)
    //   lastdimnum   : length of the last dimension (elements reduced per output)
    //   loopcount    : number of reductions (rows) to perform
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t outputlength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t loopcount)
    {
        this->totallength = totallength;
        this->outputlength = outputlength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->loopcount = loopcount;
        // alignnum = elements per 32-byte block for this dtype.
        uint32_t alignnum = 32 / sizeof(typeT);
        // Round lastdimnum up to a 32-byte-aligned element count for UB buffers.
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        // GM view is padded by one block so the aligned DataCopy in CopyIn can
        // safely read past the logical end of the last row.
        uint32_t bufferlength = totallength + alignnum;

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, outputlength);
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(QueueTmpX, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilebufferlength * sizeof(float));

        int typeSize = 32 / alignnum;                           // bytes per element: 2 for half, 4 for float
        // Two derived units based on the data type:
        int elementsPerBlock = 32 / typeSize;       // elements held in one 32-byte block
        int elementsPerRepeat = 256 / typeSize;     // elements processed per 256-byte repeat

        // First-round max repeat count, used to size ReduceSum's work buffer.
        // For whole-tensor ("first n data") compute APIs this is count/elementsPerRepeat
        // (count/128 for half, count/64 for float); when count < elementsPerRepeat it
        // should be 1.
        // NOTE(review): the division below is not clamped, so firstMaxRepeat can be 0
        // when tilebufferlength < elementsPerRepeat, making the work buffer size 0 --
        // confirm tiling guarantees tilebufferlength >= elementsPerRepeat.
        int firstMaxRepeat = this->tilebufferlength / elementsPerRepeat;

        int iter1OutputCount = firstMaxRepeat;                                              // elements produced by the first reduction round
        int iter1AlignEnd = RoundUp(iter1OutputCount, elementsPerBlock) * elementsPerBlock; // round that up to block alignment
        int finalWorkLocalNeedSize = iter1AlignEnd;   
        pipe.InitBuffer(workQueue, BUFFER_NUM, finalWorkLocalNeedSize * sizeof(float));  

    }        
    // Run one last-dimension reduction per loop iteration. The scalar result is
    // written to GM inside Compute (via yGm.SetValue), so CopyOut is unused.
    __aicore__ inline void Process()
    {

        for (int32_t i = 0; i < this->loopcount; i++) {
            CopyIn(i, this->lastdimnum);
            Compute(i, this->lastdimnum);
            //CopyOut(i, this->lastdimnum);
        }

    }
    //
    // for tiling_key 3, vertical 
    //
    // Initialize for reduction over a single non-last ("vertical") axis.
    // The v* parameters describe the strided iteration over the input:
    // loop counts and element gaps for each nesting level of the traversal.
    // NOTE(review): unlike Init, the UB buffers here are sized with tilelength,
    // while firstMaxRepeat below still uses tilebufferlength -- confirm this
    // mismatch is intentional for this tiling path.
     __aicore__ inline void InitVertical(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t outputlength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t loopcount,
                                         int32_t v1stlevelloop, int32_t v2ndlevleloop, int32_t v1stlevelgap, int32_t v2ndlevelgap, int32_t v3rdlevleloop)
    {
        this->totallength = totallength;
        this->outputlength = outputlength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->loopcount = loopcount;
        this->v1stlevelloop = v1stlevelloop;
        this->v2ndlevleloop = v2ndlevleloop;
        this->v1stlevelgap = v1stlevelgap;
        this->v2ndlevelgap = v2ndlevelgap;
        this->v3rdlevleloop = v3rdlevleloop;
        // alignnum = elements per 32-byte block for this dtype.
        uint32_t alignnum = 32 / sizeof(typeT);
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        // One block of GM padding for aligned over-read (see Init).
        uint32_t bufferlength = totallength + alignnum;

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, outputlength);
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilelength * sizeof(float));
        pipe.InitBuffer(QueueTmpX, this->tilelength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilelength * sizeof(float));

        int typeSize = 32 / alignnum;                           // bytes per element: 2 for half, 4 for float
        // Two derived units based on the data type:
        int elementsPerBlock = 32 / typeSize;       // elements held in one 32-byte block
        int elementsPerRepeat = 256 / typeSize;     // elements processed per 256-byte repeat

        // First-round max repeat count for ReduceSum work-buffer sizing; see the
        // NOTE(review) in Init about the unclamped division.
        int firstMaxRepeat = this->tilebufferlength / elementsPerRepeat;

        int iter1OutputCount = firstMaxRepeat;                                              // elements produced by the first reduction round
        int iter1AlignEnd = RoundUp(iter1OutputCount, elementsPerBlock) * elementsPerBlock; // round that up to block alignment
        int finalWorkLocalNeedSize = iter1AlignEnd;   
        pipe.InitBuffer(workQueue, BUFFER_NUM, finalWorkLocalNeedSize * sizeof(float));  
        // Running float accumulator for the exp-sum, backed by tmpBuffer.
        this->sumtmp = tmpBuffer.Get<float>();
    } 
    // Vertical reduction driver: for each output row, accumulate exp(x) across
    // the strided input rows (GetSum), then take ln and copy out (GetDiv).
    __aicore__ inline void ProcessVertical()
    {
        this->progresswhole = 0;
        for (int32_t i = 0; i < this->v1stlevelloop; i++) {
            for (int32_t m = 0; m < this->v3rdlevleloop; m++) {
                int32_t startpos = i * this->v1stlevelgap + m * this->lastdimnum;
                GetSum(startpos);
                GetDiv(startpos);
            }
        }

    }
    // Initialize for multi-axis reduction where the last dimension is kept
    // (tiling key 902). ss1/ss2/ss3 carry per-axis element strides, axis
    // lengths, and outer gaps (copied into the fixed-size member arrays).
    __aicore__ inline void InitVMulti(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t outputlength, uint32_t tilelength, int32_t lastdimnum, 
                                         int32_t axisDumNum, int32_t loopgap, int32_t ss1[], int32_t ss2[], int32_t ss3[])
    {
        this->totallength = totallength;
        this->outputlength = outputlength;
        this->tilelength = tilelength;
        this->lastdimnum = lastdimnum;
        this->axisDumNum = axisDumNum;
        this->loopgap = loopgap;

        uint32_t alignnum = 32 / sizeof(typeT);
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        // NOTE(review): bufferlength is computed (totallength + 16) but never
        // used; xGm below is sized with totallength only, relying on
        // DataCopyPad in CopyInVMulti to avoid over-reads.
        uint32_t bufferlength = totallength + 16;
        for(int32_t k = 0; k < this->axisDumNum; k++) {
            // The (int32_t *) casts are no-ops on these int32_t arrays.
            ((int32_t *)this->stepGapPerAxis)[k] = ss1[k];
            ((int32_t *)this->axDirNumPerAxis)[k] = ss2[k];
            ((int32_t *)this->outterGapPerAxis)[k] = ss3[k];
        }

        xGm.SetGlobalBuffer((__gm__ typeT*)x, totallength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, outputlength);
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpX, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilebufferlength * sizeof(float));

        // Running float accumulator for the exp-sum, backed by tmpBuffer.
        this->sumtmp = tmpBuffer.Get<float>();

    } 
    // Multi-axis (last dim kept) driver: for each output row, accumulate
    // exp(x) over all reduced positions, then ln + copy out via GetDiv.
    __aicore__ inline void ProcessVMulti()
    {
        this->progresswhole = 0;
        // Number of output rows and number of input rows folded into each.
        int32_t loopcountVerti = this->outputlength / this->lastdimnum;
        int32_t accumcount = this->totallength / this->outputlength;
        for (int32_t i = 0; i < loopcountVerti; i++) { 
            for(int32_t k = 0; k < accumcount; k++) {
                int32_t startpos = GetStartPos(i, k);
                AccumUp(startpos, k);
            }
            // Output position is tracked by progresswhole inside CopyOutVertical,
            // so the argument here is unused.
            GetDiv(0);
        }    
    }
    // Initialize for multi-axis reduction that includes the last dimension
    // (tiling key 901). Same layout parameters as InitVMulti, plus the
    // ReduceSum work buffer needed by GetLnWL.
    __aicore__ inline void InitVMultiWL(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t outputlength, uint32_t tilelength, int32_t lastdimnum, 
                                         int32_t axisDumNum, int32_t loopgap, int32_t ss1[], int32_t ss2[], int32_t ss3[])
    {
        this->totallength = totallength;
        this->outputlength = outputlength;
        this->tilelength = tilelength;
        this->lastdimnum = lastdimnum;
        this->axisDumNum = axisDumNum;
        this->loopgap = loopgap;

        uint32_t alignnum = 32 / sizeof(typeT);
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        // NOTE(review): unused, as in InitVMulti.
        uint32_t bufferlength = totallength + 16;
        for(int32_t k = 0; k < this->axisDumNum; k++) {
            ((int32_t *)this->stepGapPerAxis)[k] = ss1[k];
            ((int32_t *)this->axDirNumPerAxis)[k] = ss2[k];
            ((int32_t *)this->outterGapPerAxis)[k] = ss3[k];
        }

        xGm.SetGlobalBuffer((__gm__ typeT*)x, totallength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, outputlength);
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpX, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilebufferlength * sizeof(float));

        // Running float accumulator for the exp-sum, backed by tmpBuffer.
        this->sumtmp = tmpBuffer.Get<float>();

        int typeSize = 32 / alignnum;                           // bytes per element: 2 for half, 4 for float
        // Two derived units based on the data type:
        int elementsPerBlock = 32 / typeSize;       // elements held in one 32-byte block
        int elementsPerRepeat = 256 / typeSize;     // elements processed per 256-byte repeat

        // First-round max repeat count for ReduceSum work-buffer sizing; see the
        // NOTE(review) in Init about the unclamped division.
        int firstMaxRepeat = this->tilebufferlength / elementsPerRepeat;

        int iter1OutputCount = firstMaxRepeat;                                              // elements produced by the first reduction round
        int iter1AlignEnd = RoundUp(iter1OutputCount, elementsPerBlock) * elementsPerBlock; // round that up to block alignment
        int finalWorkLocalNeedSize = iter1AlignEnd;   
        pipe.InitBuffer(workQueue, BUFFER_NUM, finalWorkLocalNeedSize * sizeof(float));  
    } 
    // Multi-axis (including last dim) driver: accumulate exp(x) rows into
    // sumtmp, then reduce that row to a scalar and take ln (GetLnWL).
    __aicore__ inline void ProcessVMultiWL()
    {
        this->progresswhole = 0;
        // One scalar output per outer iteration; each folds accumcount rows
        // of length lastdimnum.
        int32_t loopcountVerti = this->outputlength;
        int32_t accumcount = this->totallength / this->outputlength / this->lastdimnum;
        for (int32_t i = 0; i < loopcountVerti; i++) { 
            for(int32_t k = 0; k < accumcount; k++) {
                int32_t startpos = GetStartPosWL(i, k);
                AccumUp(startpos, k);
            }
            GetLnWL(i);
        }    
    }    
  
private:
    // Stage a row of `length` logical elements into the input queue.
    // Copies tilebufferlength (block-aligned) elements, which may over-read
    // past the row end; the GM padding added in Init makes this safe.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        DataCopy(xLocal, xGm[progress * length], this->tilebufferlength);
        inQueueX.EnQue(xLocal);

    }

    // Unused in the current Process loop (Compute writes the scalar result
    // directly); kept for reference. Copies one 32-byte block to yGm[progress].
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> yLocal = outQueueY.DeQue<typeT>();
        //DataCopy(yGm[progress * length], yLocal, this->tilebufferlength);
        DataCopy(yGm[progress], yLocal, 32 / sizeof(typeT));
        outQueueY.FreeTensor(yLocal);
    }
    // Last-dimension reduction: ln(sum(exp(row))) -> scalar written straight
    // to yGm[progress]. half inputs are computed in float for accuracy, then
    // cast back; workLocal is the ReduceSum scratch buffer.
    __aicore__ inline void Compute(int32_t progress, uint32_t length) 
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        //LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        LocalTensor<typeT> tmp = tmpBuffer.Get<typeT>();
        LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        if constexpr (std::is_same_v<typeT, half>) {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            ReduceSum(tmpX, tmpX, workLocal, length);
            Ln(tmpX, tmpX, 1);
            // float sum = tmpY.GetValue(0);
            // Duplicate(tmpY, sum, length);
            // Div(tmpY, tmpX, tmpY, length);
            Cast(xLocal, tmpX, RoundMode::CAST_RINT, length);
            typeT resulttmp = xLocal.GetValue(0);
            yGm.SetValue(progress, resulttmp);
        } else {
            Exp(xLocal, xLocal, length);
            ReduceSum(xLocal, xLocal, workLocal, length);
            Ln(xLocal, xLocal, 1);
            typeT resulttmp = xLocal.GetValue(0);
            yGm.SetValue(progress, resulttmp);
            // typeT sum = yLocal.GetValue(0);
            // Duplicate(yLocal, sum, length);
            // Div(yLocal, xLocal, yLocal, length);
        }

        inQueueX.FreeTensor(xLocal);
        workQueue.FreeTensor(workLocal);
    }    
    // Ceiling division: returns ceil(a/b). Despite the name it returns the
    // quotient only; callers multiply by b to get the rounded-up value.
    __aicore__ inline int RoundUp(int a, int b)
    { 
    	return (a + b - 1) / b;
    }
    // GM start offset for the accumcount-th reduced row of output row
    // loopcount (VMulti path, last dim kept).
    __aicore__ inline int32_t GetStartPos(int32_t loopcount, int32_t accumcount) {

        int32_t pos = GetAxisPos(accumcount) + (loopcount * this->loopgap);
        return pos;
    }
    // Same as GetStartPos but for the "with last dim" path, where the last
    // axis is part of the reduction and excluded from the decomposition.
    __aicore__ inline int32_t GetStartPosWL(int32_t loopcount, int32_t accumcount) {

        int32_t pos = GetAxisPosWL(accumcount) + (loopcount * this->loopgap);
        return pos;
    }
    /* recursive call not allowed
    __aicore__ inline int32_t GetAxisPos(int32_t accumcount, int32_t dim){
        int32_t gap = 1;
        int32_t pos = 0;
        for(int32_t d = dim + 1; d < this->axisDumNum; d++) {
            gap *= axDirNumPerAxis[d];
        }
        if(dim == this->axisDumNum - 1) {
            int32_t tmpdim = axDirNumPerAxis[dim];
            pos = accumcount * stepGapPerAxis[tmpdim];
            return pos;
        } else {
            int32_t tmpdim = axDirNumPerAxis[dim];
            pos += accumcount / gap * stepGapPerAxis[tmpdim];
            accumcount = accumcount % gap;
            pos += GetAxisPos(accumcount, dim + 1);
            return pos;
        }
    }
    */
    // Iterative decomposition of a flat reduced-axis index into a GM offset:
    // peel off each axis coordinate (mixed-radix over axDirNumPerAxis) and
    // weight it by that axis' element stride.
    __aicore__ inline int32_t GetAxisPos(int32_t accumcount){
        int32_t pos = 0;
        for(int32_t i = 0; i < this->axisDumNum; i++) {
            // gap = product of the remaining (inner) axis lengths.
            int32_t gap = 1;
            for(int32_t d = i + 1; d < this->axisDumNum; d++) {
                gap *= axDirNumPerAxis[d];
            }
            if(i == this->axisDumNum - 1) {
                //int32_t tmpdim = axDirNumPerAxis[i];
                pos += accumcount * stepGapPerAxis[i];
            } else {
                //int32_t tmpdim = axDirNumPerAxis[i];
                pos += accumcount / gap * stepGapPerAxis[i];
                accumcount = accumcount % gap;
            }
        }
        return pos;
    } 
    // As GetAxisPos, but skips the final axis (the last dimension, which is
    // reduced wholesale in the WL path rather than indexed).
    __aicore__ inline int32_t GetAxisPosWL(int32_t accumcount){
        int32_t pos = 0;
        for(int32_t i = 0; i < this->axisDumNum - 1; i++) {
            int32_t gap = 1;
            for(int32_t d = i + 1; d < this->axisDumNum - 1; d++) {
                gap *= axDirNumPerAxis[d];
            }
            if(i == this->axisDumNum - 2) {
                //int32_t tmpdim = axDirNumPerAxis[i];
                pos += accumcount * stepGapPerAxis[i];
            } else {
                //int32_t tmpdim = axDirNumPerAxis[i];
                pos += accumcount / gap * stepGapPerAxis[i];
                accumcount = accumcount % gap;
            }
        }
        return pos;
    }    
    // Accumulate exp(x) of all strided rows for one vertical output into sumtmp.
    __aicore__ inline void GetSum(int32_t startpos) {
        for(int k = 0; k < v2ndlevleloop; k++) {
            int32_t pos = startpos + k * this->v2ndlevelgap;
            CopyInVertical(pos, this->lastdimnum);
            ComputeSumVertical(k, this->lastdimnum);
        }
    }
    // Finish one vertical output: take ln of the accumulated exp-sum and copy
    // the row out. (Name is historical -- a former Div-based normalization; the
    // current implementation applies Ln, see ComputeDivVertical.)
    __aicore__ inline void GetDiv(int32_t startpos) {
        // for(int k = 0; k < v2ndlevleloop; k++) {
        //     int32_t pos = startpos + k * this->v2ndlevelgap;
        //     //CopyInVertical(pos, this->lastdimnum);
        //     ComputeDivVertical(k, this->lastdimnum);
        //     CopyOutVertical(pos, this->lastdimnum);
        // }
        ComputeDivVertical(0, this->lastdimnum);
        CopyOutVertical(startpos, this->lastdimnum);
    }
    // WL path finish: reduce the accumulated exp-sum row (length lastdimnum)
    // to a scalar, take ln, and write it to yGm[progress]. workLocal is the
    // ReduceSum scratch buffer.
    __aicore__ inline void GetLnWL(int32_t progress) {
        LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        if constexpr (std::is_same_v<typeT, half>) {
            // LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            // Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            // Exp(tmpX, tmpX, length);
            // ReduceSum(tmpX, tmpX, workLocal, length);
            // Ln(tmpX, tmpX, 1);
            // Cast(xLocal, tmpX, RoundMode::CAST_RINT, length);
            // typeT resulttmp = xLocal.GetValue(0);
            // yGm.SetValue(progress, resulttmp);

            ReduceSum(sumtmp, sumtmp, workLocal, this->lastdimnum);
            Ln(sumtmp, sumtmp, 1);
            float resulttmp = sumtmp.GetValue(0);
            // Narrowing float -> half conversion for the output dtype.
            half resulttmpfp16 = resulttmp;
            yGm.SetValue(progress, resulttmpfp16);
        } else {
            ReduceSum(sumtmp, sumtmp, workLocal, this->lastdimnum);
            Ln(sumtmp, sumtmp, 1);
            typeT resulttmp = sumtmp.GetValue(0);
            yGm.SetValue(progress, resulttmp);
            // typeT sum = yLocal.GetValue(0);
            // Duplicate(yLocal, sum, length);
            // Div(yLocal, xLocal, yLocal, length);
        }
        workQueue.FreeTensor(workLocal);

    }
    // Stage a block-aligned row starting at GM offset startpos into inQueueX.
    // May over-read past the row end (covered by the GM padding from InitVertical).
    __aicore__ inline void CopyInVertical(int32_t startpos, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        DataCopy(xLocal, xGm[startpos], this->tilebufferlength);
        inQueueX.EnQue(xLocal);

    }
    // Copy one finished output row to GM. The destination is sequential
    // (progresswhole * length), independent of startpos; DataCopyPad handles
    // the possibly non-block-aligned tail.
    __aicore__ inline void CopyOutVertical(int32_t startpos, uint32_t length)
    {
        LocalTensor<typeT> yLocal = outQueueY.DeQue<typeT>();
        int32_t realpos = this->progresswhole * length;
        this->progresswhole += 1;

        uint32_t blockLen = length * sizeof(typeT);
        DataCopyExtParams copyParams{1, blockLen, 0, 0, 0};
        DataCopyPad(yGm[realpos], yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }
    // Elementwise accumulate exp(row) into sumtmp. progress == 0 initializes
    // the accumulator (copy via Adds(+0)); later rows are added in. half
    // inputs are cast to float first so accumulation stays in float.
    __aicore__ inline void ComputeSumVertical(int32_t progress, uint32_t length) 
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        if constexpr (std::is_same_v<typeT, float>) {
            Exp(xLocal, xLocal, length);
            if(progress == 0) {
                Adds(this->sumtmp, xLocal, typeT(0), length);
            } else {
                Add(this->sumtmp, xLocal, this->sumtmp, length);
            }
        } else {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            if(progress == 0) {
                Adds(this->sumtmp, tmpX, float(0), length);
            } else {
                Add(this->sumtmp, tmpX, this->sumtmp, length);
            }
        }   

        inQueueX.FreeTensor(xLocal);
    }    
    // Produce one output row: yLocal = ln(sumtmp), cast back to half if
    // needed, and enqueue for CopyOutVertical. (Name is historical -- the
    // earlier Div-based normalization is left commented out below.)
    __aicore__ inline void ComputeDivVertical(int32_t progress, uint32_t length) 
    {
        //LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        if constexpr (std::is_same_v<typeT, float>) {
            // Exp(xLocal, xLocal, length);
            // Div(yLocal, xLocal, sumtmp, length);
            Ln(yLocal, sumtmp, length);
        } else {
            //LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            LocalTensor<float> tmpY = QueueTmpY.Get<float>();
            //Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            // Exp(tmpX, tmpX, length);
            // Div(tmpY, tmpX, sumtmp, length);
            Ln(tmpY, sumtmp, length);
            Cast(yLocal, tmpY, RoundMode::CAST_RINT, length);
        }   

        outQueueY.EnQue<typeT>(yLocal);
        //inQueueX.FreeTensor(xLocal);
    } 
   
    // Copy in one reduced row at startpos and fold exp(row) into sumtmp
    // (progress == 0 resets the accumulator via ComputeSumVertical).
    __aicore__ inline void AccumUp(int32_t startpos, int32_t progress) {
        // for(int32_t k = 0; k < axDirNum; k++) {
        //     CopyInVMulti(startpos);
        //     ComputeSumVertical(k, this->lastdimnum);
        // }
        CopyInVMulti(startpos);
        ComputeSumVertical(progress, this->lastdimnum);
    }
    // Stage exactly lastdimnum elements from GM via DataCopyPad -- unlike
    // CopyIn/CopyInVertical this never over-reads, which matches the unpadded
    // GM buffer set up in InitVMulti/InitVMultiWL.
    __aicore__ inline void CopyInVMulti(int32_t startpos)
    {
        // LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        // DataCopy(xLocal, xGm[startpos], this->tilebufferlength);
        // inQueueX.EnQue(xLocal);
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        uint32_t blockLen = this->lastdimnum * sizeof(typeT);
        DataCopyExtParams copyParams{1, blockLen, 0, 0, 0}; 
        DataCopyPadExtParams<typeT> padParams{false, 0, 0, 0};
        DataCopyPad(xLocal, xGm[startpos], copyParams, padParams);
        inQueueX.EnQue(xLocal);
    }
   
private:
    TPipe pipe;
    TBuf<QuePosition::VECCALC> tmpBuffer;                // backs sumtmp (float accumulator)
    TBuf<QuePosition::VECCALC> QueueTmpX, QueueTmpY;     // float scratch for half<->float casts
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;       // staged input rows
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;     // finished output rows
    TQue<QuePosition::VECOUT, 1> workQueue;              // ReduceSum scratch buffer
    GlobalTensor<typeT> xGm;                             // global input
    GlobalTensor<typeT> yGm;                             // global output
    LocalTensor<float> sumtmp;                           // running exp-sum accumulator
    LocalTensor<typeT> ones;                             // unused
    uint32_t totallength;            // total input elements
    uint32_t outputlength;           // total output elements
    uint32_t tilelength;             // tile size (vertical path buffer sizing)
    int32_t dim;                     // reduction axis index (stored, unused)
    int32_t lastdimnum;              // last-dimension length
    int32_t loopcount;               // reductions per Process
    uint32_t tilebufferlength;       // lastdimnum rounded up to block alignment
    int32_t v1stlevelloop;           // vertical path: level-1 loop count
    int32_t v2ndlevleloop;           // vertical path: level-2 loop count (rows summed per output)
    int32_t v1stlevelgap;            // vertical path: level-1 element stride
    int32_t v2ndlevelgap;            // vertical path: level-2 element stride
    int32_t v3rdlevleloop;           // vertical path: level-3 loop count
    int32_t connectnum;              // unused
    int32_t progresswhole;           // sequential output-row counter (CopyOutVertical)
    int32_t axisDumNum;              // number of reduced axes (VMulti paths)
    int32_t loopgap;                 // element gap between consecutive outputs
    int32_t stepGapPerAxis[64];      // per-axis element stride
    int32_t axDirNumPerAxis[64];     // per-axis length
    int32_t outterGapPerAxis[64];    // per-axis outer gap (stored, unused here)

};

// Kernel entry point: unpack the tiling data and dispatch to the LogSumExp
// variant selected by the tiling key. Unrecognized keys are a no-op.
extern "C" __global__ __aicore__ void log_sum_exp(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelLogSumExp<DTYPE_X> op;
    if (TILING_KEY_IS(1) || TILING_KEY_IS(6)) {
        // Keys 1 and 6: reduction over the contiguous last dimension.
        op.Init(x, y, tiling_data.totallength, tiling_data.outputlength, tiling_data.tilelength,
                tiling_data.dim, tiling_data.lastdimnum, tiling_data.loopcount);
        op.Process();
    } else if (TILING_KEY_IS(3)) {
        // Key 3: reduction over a single non-last ("vertical") axis.
        op.InitVertical(x, y, tiling_data.totallength, tiling_data.outputlength, tiling_data.tilelength,
                        tiling_data.dim, tiling_data.lastdimnum, tiling_data.loopcount,
                        tiling_data.v1stlevelloop, tiling_data.v2ndlevleloop, tiling_data.v1stlevelgap,
                        tiling_data.v2ndlevelgap, tiling_data.v3rdlevleloop);
        op.ProcessVertical();
    } else if (TILING_KEY_IS(901)) {
        // Key 901: multi-axis reduction including the last dimension.
        op.InitVMultiWL(x, y, tiling_data.totallength, tiling_data.outputlength, tiling_data.tilelength,
                        tiling_data.lastdimnum, tiling_data.axisDumNum, tiling_data.loopgap,
                        tiling_data.stepGapPerAxis, tiling_data.axDirNumPerAxis, tiling_data.outterGapPerAxis);
        op.ProcessVMultiWL();
    } else if (TILING_KEY_IS(902)) {
        // Key 902: multi-axis reduction with the last dimension kept.
        op.InitVMulti(x, y, tiling_data.totallength, tiling_data.outputlength, tiling_data.tilelength,
                      tiling_data.lastdimnum, tiling_data.axisDumNum, tiling_data.loopgap,
                      tiling_data.stepGapPerAxis, tiling_data.axDirNumPerAxis, tiling_data.outterGapPerAxis);
        op.ProcessVMulti();
    }
}