#include "kernel_operator.h"

using namespace AscendC;

// Number of buffers per queue: 2 enables double buffering so that MTE data
// transfer and vector compute can overlap across loop iterations.
constexpr int32_t BUFFER_NUM = 2; 

template<typename typeT>
class KernelSoftmax {
public:
    __aicore__ inline KernelSoftmax() {}

    /// Tiling key 1: softmax over the last dimension, processing exactly one
    /// row (lastdimnum elements) per loop iteration.
    /// @param x / y        global-memory input / output addresses
    /// @param totallength  total element count of the tensor
    /// @param tilelength   tile size chosen by the host tiling function
    /// @param dim          softmax dimension index (kept for bookkeeping)
    /// @param lastdimnum   extent of the last (softmax) dimension
    /// @param loopcount    number of rows to process
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t loopcount)
    {
        this->totallength = totallength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->loopcount = loopcount;
        uint32_t alignnum = 32 / sizeof(typeT);   // elements per 32-byte block
        // One row rounded up to a whole 32-byte block so DataCopy stays aligned.
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        // alignnum elements of GM headroom: aligned copies may touch memory
        // just past the logical end of the tensor.
        uint32_t bufferlength = totallength + alignnum;

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, bufferlength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(QueueTmpX, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilebufferlength * sizeof(float));

        InitReduceWorkspace(alignnum, this->tilebufferlength);
    }

    /// Drives tiling key 1: one CopyIn/Compute/CopyOut round per row.
    __aicore__ inline void Process()
    {
        for (int32_t i = 0; i < this->loopcount; i++) {
            CopyIn(i, this->lastdimnum);
            Compute(i, this->lastdimnum);
            CopyOut(i, this->lastdimnum);
        }
    }

    /// Tiling key 3: softmax over a non-last ("vertical") dimension.
    /// The three v*loop/v*gap parameters describe a strided walk over GM:
    /// the sum of exp() is accumulated across v2ndlevleloop strided slices
    /// (GetSum), then each slice is divided by that sum (GetDiv).
    __aicore__ inline void InitVertical(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t loopcount,
                                        int32_t v1stlevelloop, int32_t v2ndlevleloop, int32_t v1stlevelgap, int32_t v2ndlevelgap, int32_t v3rdlevleloop)
    {
        this->totallength = totallength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->loopcount = loopcount;
        this->v1stlevelloop = v1stlevelloop;
        this->v2ndlevleloop = v2ndlevleloop;
        this->v1stlevelgap = v1stlevelgap;
        this->v2ndlevelgap = v2ndlevelgap;
        this->v3rdlevleloop = v3rdlevleloop;
        uint32_t alignnum = 32 / sizeof(typeT);   // elements per 32-byte block
        this->tilebufferlength = ((lastdimnum % alignnum) == 0) ? lastdimnum : (lastdimnum + alignnum - 1) / alignnum * alignnum;
        uint32_t bufferlength = totallength + alignnum;   // GM headroom, as in Init()

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, bufferlength);

        // NOTE(review): these buffers are sized by tilelength while
        // CopyInVertical copies tilebufferlength elements — assumes
        // tilelength >= tilebufferlength; confirm against the host tiling.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilelength * sizeof(float));
        pipe.InitBuffer(QueueTmpX, this->tilelength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilelength * sizeof(float));

        InitReduceWorkspace(alignnum, this->tilebufferlength);
        // Persistent fp32 accumulator for the running exp() sums.
        this->sumtmp = tmpBuffer.Get<float>();
    }

    /// Drives tiling key 3: for every (outer, third-level) position, first
    /// accumulate the exp() sums, then produce the normalized outputs.
    __aicore__ inline void ProcessVertical()
    {
        for (int32_t i = 0; i < this->v1stlevelloop; i++) {
            for (int32_t m = 0; m < this->v3rdlevleloop; m++) {
                int32_t startpos = i * this->v1stlevelgap + m * this->lastdimnum;
                GetSum(startpos);
                GetDiv(startpos);
            }
        }
    }

    /// Tiling key 5: the softmax dimension has extent 1, so every output
    /// element is exactly 1 — no input read is needed.
    __aicore__ inline void InitSpOne(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t tilelength)
    {
        this->totallength = totallength;
        this->tilelength = tilelength;
        uint32_t alignnum = 32 / sizeof(typeT);
        uint32_t bufferlength = totallength + alignnum;   // GM headroom

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, bufferlength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilelength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilelength * sizeof(typeT));

        // A reusable tile of ones, written out repeatedly by ProcessSpOne.
        this->ones = tmpBuffer.Get<typeT>();
        Duplicate(this->ones, typeT(1), this->tilelength);
    }

    /// Drives tiling key 5: stream the ones-tile across the whole output,
    /// with an exact-length padded copy for the tail.
    __aicore__ inline void ProcessSpOne()
    {
        int32_t loopcount = this->totallength / this->tilelength;
        for (int32_t i = 0; i < loopcount; i++) {
            CopyOutOnes(i * this->tilelength, this->tilelength);
        }
        if (this->totallength % this->tilelength != 0) {
            int32_t taillength = this->totallength % this->tilelength;
            CopyOutOnes(loopcount * this->tilelength, taillength);
        }
    }

    /// Tiling key 6: batch `connectnum` rows per tile (tile = lastdimnum *
    /// connectnum elements) and reduce each row separately in Compute.
    __aicore__ inline void InitK6(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t connectnum)
    {
        this->totallength = totallength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->connectnum = connectnum;
        uint32_t alignnum = 32 / sizeof(typeT);
        this->tilebufferlength = lastdimnum * connectnum;
        uint32_t bufferlength = totallength + alignnum;   // GM headroom

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, bufferlength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(tmpBuffer, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(QueueTmpX, this->tilebufferlength * sizeof(float));
        pipe.InitBuffer(QueueTmpY, this->tilebufferlength * sizeof(float));

        InitReduceWorkspace(alignnum, this->tilebufferlength);
    }

    /// Drives tiling key 6: full tiles first, then a shorter tail tile with a
    /// proportionally reduced row count.
    __aicore__ inline void ProcessK6()
    {
        int32_t loopk6 = this->totallength / this->tilebufferlength;
        for (int32_t i = 0; i < loopk6; i++) {
            CopyInK6(i, this->tilebufferlength);
            ComputeK6(i, this->tilebufferlength, this->connectnum);
            CopyOutK6(i, this->tilebufferlength);
        }
        if (this->totallength % this->tilebufferlength != 0) {
            int32_t taillength = this->totallength % this->tilebufferlength;
            int32_t tailconnectnum = taillength / this->lastdimnum;
            CopyInK6(loopk6, taillength);
            ComputeK6(loopk6, taillength, tailconnectnum);
            CopyOutK6(loopk6, taillength);
        }
    }

    /// Tiling key 7: same batched layout as key 6, but compute uses the
    /// Ascend C high-level SoftMax API driven by host-prepared SoftMaxTiling.
    __aicore__ inline void InitK7(GM_ADDR x, GM_ADDR y, uint32_t totallength, uint32_t tilelength, int32_t dim, int32_t lastdimnum, int32_t connectnum,
                                  const SoftMaxTiling &tilingData)
    {
        this->totallength = totallength;
        this->tilelength = tilelength;
        this->dim = dim;
        this->lastdimnum = lastdimnum;
        this->connectnum = connectnum;
        tiling = tilingData;
        uint32_t alignnum = 32 / sizeof(typeT);
        this->tilebufferlength = lastdimnum * connectnum;
        uint32_t bufferlength = totallength + alignnum;   // GM headroom

        xGm.SetGlobalBuffer((__gm__ typeT*)x, bufferlength);
        yGm.SetGlobalBuffer((__gm__ typeT*)y, bufferlength);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tilebufferlength * sizeof(typeT));
        // Per-row max / sum scratch required by the SoftMax API: one aligned
        // block per row.
        pipe.InitBuffer(maxQueue, 1, connectnum * alignnum * sizeof(typeT));
        pipe.InitBuffer(sumQueue, 1, connectnum * alignnum * sizeof(typeT));
    }

    /// Drives tiling key 7: identical tile walk to ProcessK6, with the
    /// high-level SoftMax compute step.
    __aicore__ inline void ProcessK7()
    {
        int32_t loopk7 = this->totallength / this->tilebufferlength;
        for (int32_t i = 0; i < loopk7; i++) {
            CopyInK6(i, this->tilebufferlength);
            ComputeK7(i, this->tilebufferlength, this->connectnum);
            CopyOutK6(i, this->tilebufferlength);
        }
        if (this->totallength % this->tilebufferlength != 0) {
            int32_t taillength = this->totallength % this->tilebufferlength;
            int32_t tailconnectnum = taillength / this->lastdimnum;
            CopyInK6(loopk7, taillength);
            ComputeK7(loopk7, taillength, tailconnectnum);
            CopyOutK6(loopk7, taillength);
        }
    }
private:
    /// Ceiling division: (a + b - 1) / b. Note: callers that want "round up
    /// to a multiple of b" multiply the result by b themselves.
    __aicore__ inline int RoundUp(int a, int b)
    {
        return (a + b - 1) / b;
    }

    /// Sizes the float workspace that ReduceSum needs (shared by tiling keys
    /// 1, 3 and 6): one float per repeat of the first reduction pass, rounded
    /// up to a whole 32-byte block. Previously duplicated at three call sites.
    /// @param alignnum  elements of typeT per 32-byte block (32 / sizeof(typeT))
    /// @param elemCount number of elements the largest reduction covers
    __aicore__ inline void InitReduceWorkspace(uint32_t alignnum, uint32_t elemCount)
    {
        int typeSize = 32 / alignnum;               // bytes per typeT element (2 for half, 4 for float)
        int elementsPerBlock = 32 / typeSize;       // elements in one 32-byte block
        int elementsPerRepeat = 256 / typeSize;     // elements in one 256-byte repeat
        // Ceiling division so that elemCount < elementsPerRepeat still yields
        // one repeat; the previous floor division produced a zero-sized
        // workspace in that case.
        int firstMaxRepeat = RoundUp(elemCount, elementsPerRepeat);
        // Output count of the first reduction pass, rounded up to a block.
        int iter1AlignEnd = RoundUp(firstMaxRepeat, elementsPerBlock) * elementsPerBlock;
        // NOTE(review): workQueue is declared with queue depth 1 but
        // initialized with BUFFER_NUM buffers; kept as in the original —
        // confirm against the TQue/InitBuffer documentation.
        pipe.InitBuffer(workQueue, BUFFER_NUM, iter1AlignEnd * sizeof(float));
    }

    /// Stage one aligned row (tilebufferlength elements) from GM at row
    /// `progress` of logical width `length`; the over-read past the row end is
    /// covered by the GM headroom reserved in Init().
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        DataCopy(xLocal, xGm[progress * length], this->tilebufferlength);
        inQueueX.EnQue(xLocal);
    }

    /// Write one computed row back to GM (aligned length; see CopyIn).
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> yLocal = outQueueY.DeQue<typeT>();
        DataCopy(yGm[progress * length], yLocal, this->tilebufferlength);
        outQueueY.FreeTensor(yLocal);
    }

    /// Tiling key 1 compute: y = exp(x) / sum(exp(x)) over one row.
    /// half inputs are promoted to fp32 for exp/sum/div to limit precision
    /// loss, then cast back with round-to-nearest-even.
    __aicore__ inline void Compute(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        if constexpr (std::is_same_v<typeT, half>) {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            LocalTensor<float> tmpY = QueueTmpY.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            ReduceSum(tmpY, tmpX, workLocal, length);
            float sum = tmpY.GetValue(0);           // scalar row sum
            Duplicate(tmpY, sum, length);           // broadcast for vector Div
            Div(tmpY, tmpX, tmpY, length);
            Cast(yLocal, tmpY, RoundMode::CAST_RINT, length);
        } else {
            Exp(xLocal, xLocal, length);
            ReduceSum(yLocal, xLocal, workLocal, length);
            typeT sum = yLocal.GetValue(0);
            Duplicate(yLocal, sum, length);
            Div(yLocal, xLocal, yLocal, length);
        }

        outQueueY.EnQue<typeT>(yLocal);
        inQueueX.FreeTensor(xLocal);
        workQueue.FreeTensor(workLocal);
    }

    /// Pass 1 of the vertical softmax: accumulate exp() of every strided
    /// slice into the fp32 accumulator sumtmp.
    __aicore__ inline void GetSum(int32_t startpos) {
        for (int k = 0; k < v2ndlevleloop; k++) {
            int32_t pos = startpos + k * this->v2ndlevelgap;
            CopyInVertical(pos, this->lastdimnum);
            ComputeSumVertical(k, this->lastdimnum);
        }
    }

    /// Pass 2 of the vertical softmax: re-read each slice, divide its exp()
    /// by the accumulated sums, and write the result back.
    __aicore__ inline void GetDiv(int32_t startpos) {
        for (int k = 0; k < v2ndlevleloop; k++) {
            int32_t pos = startpos + k * this->v2ndlevelgap;
            CopyInVertical(pos, this->lastdimnum);
            ComputeDivVertical(k, this->lastdimnum);
            CopyOutVertical(pos, this->lastdimnum);
        }
    }

    /// Stage one vertical slice from GM. Copies the aligned tilebufferlength
    /// regardless of `length`; the over-read is covered by GM headroom.
    __aicore__ inline void CopyInVertical(int32_t startpos, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        DataCopy(xLocal, xGm[startpos], this->tilebufferlength);
        inQueueX.EnQue(xLocal);
    }

    /// Write one vertical slice back with an exact byte count (DataCopyPad),
    /// so neighboring slices in GM are not clobbered by alignment padding.
    __aicore__ inline void CopyOutVertical(int32_t startpos, uint32_t length)
    {
        LocalTensor<typeT> yLocal = outQueueY.DeQue<typeT>();
        uint32_t blockLen = length * sizeof(typeT);
        DataCopyExtParams copyParams{1, blockLen, 0, 0, 0};
        DataCopyPad(yGm[startpos], yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }

    /// Accumulate exp(slice) into sumtmp; `progress == 0` initializes the
    /// accumulator, later slices add to it. half inputs go through fp32.
    __aicore__ inline void ComputeSumVertical(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        if constexpr (std::is_same_v<typeT, float>) {
            Exp(xLocal, xLocal, length);
            if (progress == 0) {
                Adds(this->sumtmp, xLocal, typeT(0), length);   // sumtmp = exp(x)
            } else {
                Add(this->sumtmp, xLocal, this->sumtmp, length);
            }
        } else {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            if (progress == 0) {
                Adds(this->sumtmp, tmpX, float(0), length);     // sumtmp = exp(x)
            } else {
                Add(this->sumtmp, tmpX, this->sumtmp, length);
            }
        }
        inQueueX.FreeTensor(xLocal);
    }

    /// Divide exp(slice) by the accumulated sums in sumtmp (elementwise).
    __aicore__ inline void ComputeDivVertical(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        if constexpr (std::is_same_v<typeT, float>) {
            Exp(xLocal, xLocal, length);
            Div(yLocal, xLocal, sumtmp, length);
        } else {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            LocalTensor<float> tmpY = QueueTmpY.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            Div(tmpY, tmpX, sumtmp, length);
            Cast(yLocal, tmpY, RoundMode::CAST_RINT, length);
        }

        outQueueY.EnQue<typeT>(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    /// Tiling key 5 output: write `length` ones from the precomputed tile,
    /// using DataCopyPad for an exact (possibly unaligned) byte count.
    __aicore__ inline void CopyOutOnes(int32_t startpos, uint32_t length)
    {
        uint32_t blockLen = length * sizeof(typeT);
        DataCopyExtParams copyParams{1, blockLen, 0, 0, 0};
        DataCopyPad(yGm[startpos], this->ones, copyParams);
    }

    /// Stage one batched tile (up to connectnum rows) from GM.
    __aicore__ inline void CopyInK6(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> xLocal = inQueueX.AllocTensor<typeT>();
        DataCopy(xLocal, xGm[progress * this->tilebufferlength], length);
        inQueueX.EnQue(xLocal);
    }

    /// Write one batched tile back to GM.
    __aicore__ inline void CopyOutK6(int32_t progress, uint32_t length)
    {
        LocalTensor<typeT> yLocal = outQueueY.DeQue<typeT>();
        DataCopy(yGm[progress * this->tilebufferlength], yLocal, length);
        outQueueY.FreeTensor(yLocal);
    }

    /// Tiling key 6 compute: per-row softmax over `connectnum` rows of
    /// lastdimnum elements, with a fast path for constant input.
    __aicore__ inline void ComputeK6(int32_t progress, uint32_t length, int32_t connectnum)
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        // Fast path: if every element in the tile is identical, each row's
        // softmax is the uniform distribution 1/lastdimnum.
        ReduceMax(yLocal, xLocal, yLocal, connectnum * this->lastdimnum, false);
        float tmpmax = yLocal.GetValue(0);
        ReduceMin(yLocal, xLocal, yLocal, connectnum * this->lastdimnum, false);
        float tmpmin = yLocal.GetValue(0);
        if (tmpmax == tmpmin) {
            // Fixed: the original `1 / (connectnum * lastdimnum)` was integer
            // division (always 0) and used the wrong denominator — each row of
            // lastdimnum equal elements softmaxes to 1/lastdimnum.
            typeT tmpmean = static_cast<typeT>(1.0f / this->lastdimnum);
            Duplicate(yLocal, tmpmean, connectnum * this->lastdimnum);
            outQueueY.EnQue<typeT>(yLocal);
            inQueueX.FreeTensor(xLocal);
            workQueue.FreeTensor(workLocal);
            return;
        }
        if constexpr (std::is_same_v<typeT, half>) {
            LocalTensor<float> tmpX = QueueTmpX.Get<float>();
            LocalTensor<float> tmpY = QueueTmpY.Get<float>();
            Cast(tmpX, xLocal, RoundMode::CAST_NONE, length);
            Exp(tmpX, tmpX, length);
            // One ReduceSum + broadcast per row.
            for (int32_t k = 0; k < connectnum; k++) {
                ReduceSum(tmpY[k * this->lastdimnum], tmpX[k * this->lastdimnum], workLocal, this->lastdimnum);
                float sum = tmpY.GetValue(k * this->lastdimnum);
                Duplicate(tmpY[k * this->lastdimnum], sum, this->lastdimnum);
            }
            Div(tmpY, tmpX, tmpY, length);
            Cast(yLocal, tmpY, RoundMode::CAST_RINT, length);
        } else {
            Exp(xLocal, xLocal, length);
            // One ReduceSum + broadcast per row.
            for (int32_t k = 0; k < connectnum; k++) {
                ReduceSum(yLocal[k * this->lastdimnum], xLocal[k * this->lastdimnum], workLocal, this->lastdimnum);
                typeT sum = yLocal.GetValue(k * this->lastdimnum);
                Duplicate(yLocal[k * this->lastdimnum], sum, this->lastdimnum);
            }
            Div(yLocal, xLocal, yLocal, length);
        }

        outQueueY.EnQue<typeT>(yLocal);
        inQueueX.FreeTensor(xLocal);
        workQueue.FreeTensor(workLocal);
    }

    /// Tiling key 7 compute: delegate the whole tile to the Ascend C
    /// high-level SoftMax API (shape: connectnum rows x lastdimnum columns).
    __aicore__ inline void ComputeK7(int32_t progress, uint32_t length, int32_t connectnum)
    {
        LocalTensor<typeT> xLocal = inQueueX.DeQue<typeT>();
        LocalTensor<typeT> yLocal = outQueueY.AllocTensor<typeT>();
        LocalTensor<typeT> sumTempLocal = sumQueue.AllocTensor<typeT>();
        LocalTensor<typeT> maxTempLocal = maxQueue.AllocTensor<typeT>();
        uint32_t heighttmp = connectnum;
        uint32_t widthtmp = this->lastdimnum;
        SoftMaxShapeInfo srcShape = {heighttmp, widthtmp, heighttmp, widthtmp};
        SoftMax(yLocal, sumTempLocal, maxTempLocal, xLocal, tiling, srcShape);

        outQueueY.EnQue<typeT>(yLocal);
        maxQueue.FreeTensor(maxTempLocal);
        sumQueue.FreeTensor(sumTempLocal);
        inQueueX.FreeTensor(xLocal);
    }
private:
    TPipe pipe;
    TBuf<QuePosition::VECCALC> tmpBuffer;             // scratch / ones / vertical accumulator
    TBuf<QuePosition::VECCALC> QueueTmpX, QueueTmpY;  // fp32 staging for half inputs
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TQue<QuePosition::VECOUT, 1> workQueue;           // ReduceSum workspace
    TQue<QuePosition::VECIN, 1> maxQueue;             // SoftMax API per-row max scratch
    TQue<QuePosition::VECIN, 1> sumQueue;             // SoftMax API per-row sum scratch
    GlobalTensor<typeT> xGm;
    GlobalTensor<typeT> yGm;
    LocalTensor<float> sumtmp;                        // vertical-path exp() accumulator
    LocalTensor<typeT> ones;                          // tiling key 5 constant tile
    uint32_t totallength;
    uint32_t tilelength;
    int32_t dim;
    int32_t lastdimnum;
    int32_t loopcount;
    uint32_t tilebufferlength;
    int32_t v1stlevelloop;
    int32_t v2ndlevleloop;
    int32_t v1stlevelgap;
    int32_t v2ndlevelgap;
    int32_t v3rdlevleloop;
    int32_t connectnum;
    SoftMaxTiling tiling;                             // host-side tiling for the SoftMax API
};

/// Kernel entry point: decodes the host tiling data and dispatches to the
/// KernelSoftmax variant selected by the tiling key.
extern "C" __global__ __aicore__ void softmax(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelSoftmax<DTYPE_X> op;
    // Key 1: generic last-dimension softmax, one row per loop.
    if (TILING_KEY_IS(1)) {
        op.Init(x, y, tiling_data.totallength, tiling_data.tilelength, tiling_data.dim, tiling_data.lastdimnum, tiling_data.loopcount);
        op.Process();
        return;
    }
    // Key 3: softmax over a non-last dimension (strided "vertical" walk).
    if (TILING_KEY_IS(3)) {
        op.InitVertical(x, y, tiling_data.totallength, tiling_data.tilelength, tiling_data.dim, tiling_data.lastdimnum, tiling_data.loopcount,
                        tiling_data.v1stlevelloop, tiling_data.v2ndlevleloop, tiling_data.v1stlevelgap, tiling_data.v2ndlevelgap, tiling_data.v3rdlevleloop);
        op.ProcessVertical();
        return;
    }
    // Key 5: softmax dimension of extent 1 — the output is all ones.
    if (TILING_KEY_IS(5)) {
        op.InitSpOne(x, y, tiling_data.totallength, tiling_data.tilelength);
        op.ProcessSpOne();
        return;
    }
    // Key 6: batched rows per tile with per-row reductions.
    if (TILING_KEY_IS(6)) {
        op.InitK6(x, y, tiling_data.totallength, tiling_data.tilelength, tiling_data.dim, tiling_data.lastdimnum, tiling_data.connectnum);
        op.ProcessK6();
        return;
    }
    // Key 7: batched rows via the high-level SoftMax API.
    if (TILING_KEY_IS(7)) {
        op.InitK7(x, y, tiling_data.totallength, tiling_data.tilelength, tiling_data.dim, tiling_data.lastdimnum, tiling_data.connectnum, tiling_data.softmaxTilingData);
        op.ProcessK7();
        return;
    }
    // Unknown key: intentionally a no-op, matching the original fall-through.
}