#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 1;
// GLU JVP kernel, float dtype (TILING_KEY 1): recomputes sigmoid from `input`.
// Each logical row of `input`/`v` has 2*interval elements split into halves
// (a, b); the output row has `interval` elements:
//   jvp = sigmoid(b) * (v_a + a * (1 - sigmoid(b)) * v_b)
// Two (a, b) row pairs are staged and computed per Process() step.
class KernelGluJvp_float {
public:
    __aicore__ inline KernelGluJvp_float() {}
    // glu_out is accepted for a uniform kernel signature but is not read by
    // this variant (the sigmoid is recomputed from `input`).
    // Core split: the first `bigNum` cores each handle `bigLength` rows, the
    // remaining cores handle `smallLength` rows.
    __aicore__ inline void Init(GM_ADDR glu_out, GM_ADDR input, GM_ADDR v, GM_ADDR jvp_out, 
                                uint32_t bigNum, uint32_t bigLength, uint32_t smallLength, 
                                uint32_t interval, TPipe* pipeIn) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        uint32_t coreDataNum;
        if (GetBlockIdx() < bigNum) {
            coreDataNum = bigLength;
            inputGm.SetGlobalBuffer((__gm__ float*)input + bigLength * interval * 2 * GetBlockIdx());
            vGm.SetGlobalBuffer((__gm__ float*)v + bigLength * interval * 2 * GetBlockIdx());
            outGm.SetGlobalBuffer((__gm__ float*)jvp_out + bigLength * interval * GetBlockIdx());
        } else {
            coreDataNum = smallLength;
            inputGm.SetGlobalBuffer((__gm__ float*)input + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            vGm.SetGlobalBuffer((__gm__ float*)v + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            outGm.SetGlobalBuffer((__gm__ float*)jvp_out + bigLength * interval * bigNum + smallLength * interval  * (GetBlockIdx() - bigNum));
        }
        this->processDataLenght = interval;
        this->repeat = coreDataNum;
        this->interval = interval;
        // Row length in elements, rounded up so each staged row starts on a
        // 32-byte UB boundary.
        this->interval_32 = (interval * sizeof(float) + 31)/32*32 /sizeof(float);
        // Input/tangent buffers hold four rows (a0, b0, a1, b1); the output
        // buffer holds the two corresponding result rows.
        pipeIn->InitBuffer(inQueueInput1, BUFFER_NUM, this->interval_32 * 4 * sizeof(float));
        pipeIn->InitBuffer(inQueueV1, BUFFER_NUM, this->interval_32 * 4 * sizeof(float));
        pipeIn->InitBuffer(outQueueOut, BUFFER_NUM, this->interval_32 * 2 * sizeof(float));
    }
    __aicore__ inline void Process() {
        // Two rows per iteration; assumes `repeat` is even for this tiling
        // key — presumably guaranteed by the host tiling, TODO confirm.
        for (int32_t i = 0; i < this->repeat; i+=2) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }
private:
    __aicore__ inline void CopyIn(int32_t progress) {
        // Four contiguous GM bursts of `interval` floats (a0, b0, a1, b1);
        // DataCopyPad places each burst on a 32-byte UB boundary, i.e. at
        // multiples of interval_32 elements.
        DataCopyExtParams copyParams{4, static_cast<uint32_t>(this->interval * sizeof(float)), 0, 0, 0};
        DataCopyPadExtParams<float> padParams{false, 0, 0, 0};
        LocalTensor<float> input1Local = inQueueInput1.AllocTensor<float>();
        LocalTensor<float> v1Local = inQueueV1.AllocTensor<float>();
        DataCopyPad(input1Local, inputGm[progress * this->interval * 2], copyParams, padParams);
        DataCopyPad(v1Local, vGm[progress * this->interval * 2], copyParams, padParams);
        inQueueInput1.EnQue(input1Local);
        inQueueV1.EnQue(v1Local);
    }
    __aicore__ inline void Compute(int32_t progress) {
        // First row pair: a at offset 0, b at offset interval_32.
        LocalTensor<float> input1Local = inQueueInput1.DeQue<float>();
        LocalTensor<float> input2Local = input1Local[this->interval_32];
        LocalTensor<float> v1Local = inQueueV1.DeQue<float>();
        LocalTensor<float> v2Local = v1Local[this->interval_32];
        LocalTensor<float> outLocal = outQueueOut.AllocTensor<float>();
        // input2Local <- sigmoid(b) = 1 / (1 + exp(-b))
        Muls(input2Local, input2Local, (float)(-1), this->processDataLenght);
        Exp(input2Local, input2Local, this->processDataLenght);
        Adds(input2Local, input2Local, (float)(1), this->processDataLenght);
        Duplicate(outLocal, (float)(1), this->processDataLenght);
        Div(input2Local, outLocal, input2Local, this->processDataLenght);
        // input1Local <- a * v_b
        Mul(input1Local, input1Local, v2Local, this->processDataLenght);
        // v2Local <- 1 - sigmoid(b)
        Muls(outLocal, input2Local, (float)(-1), this->processDataLenght);
        Adds(v2Local, outLocal, (float)(1), this->processDataLenght);
        // FusedMulAdd: dst <- dst * src0 + src1, so
        // input1Local <- a * v_b * (1 - sigmoid(b)) + v_a
        FusedMulAdd(input1Local, v2Local, v1Local, this->processDataLenght);
        // outLocal <- sigmoid(b) * (v_a + a * (1 - sigmoid(b)) * v_b)
        Mul(outLocal, input2Local, input1Local, this->processDataLenght);
        // Second row pair (offsets 2*interval_32 / 3*interval_32), written to
        // the second output row; identical computation.
        LocalTensor<float> input1Local1 = input1Local[this->interval_32*2];
        LocalTensor<float> input2Local1 = input1Local1[this->interval_32];
        LocalTensor<float> v1Local1 = v1Local[this->interval_32*2];
        LocalTensor<float> v2Local1 = v1Local1[this->interval_32];
        LocalTensor<float> outLocal1 = outLocal[this->interval_32];
        Muls(input2Local1, input2Local1, (float)(-1), this->processDataLenght);
        Exp(input2Local1, input2Local1, this->processDataLenght);
        Adds(input2Local1, input2Local1, (float)(1), this->processDataLenght);
        Duplicate(outLocal1, (float)(1), this->processDataLenght);
        Div(input2Local1, outLocal1, input2Local1, this->processDataLenght);
        Mul(input1Local1, input1Local1, v2Local1, this->processDataLenght);
        Muls(outLocal1, input2Local1, (float)(-1), this->processDataLenght);
        Adds(v2Local1, outLocal1, (float)(1), this->processDataLenght);
        FusedMulAdd(input1Local1, v2Local1, v1Local1, this->processDataLenght);
        Mul(outLocal1, input2Local1, input1Local1, this->processDataLenght);
        inQueueInput1.FreeTensor(input1Local);
        inQueueV1.FreeTensor(v1Local);
        outQueueOut.EnQue<float>(outLocal);
    }
    __aicore__ inline void CopyOut(int32_t progress) {
        LocalTensor<float> outLocal = outQueueOut.DeQue<float>();
        // Two bursts of `interval` floats back to GM; the UB pad lanes
        // between rows are skipped by DataCopyPad.
        DataCopyExtParams copyParams{2, static_cast<uint32_t>(this->processDataLenght * sizeof(float)), 0, 0, 0}; 
        DataCopyPad(outGm[progress * this->interval], outLocal, copyParams);
        outQueueOut.FreeTensor(outLocal);
    }
private:
    // NOTE: the previously declared inQueueV2/inQueueInput2 queues were dead
    // (never initialized or used) and have been removed.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueInput1, inQueueV1;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    GlobalTensor<float> inputGm;
    GlobalTensor<float> vGm;
    GlobalTensor<float> outGm;
    uint32_t repeat;            // number of (a, b) rows this core processes
    uint32_t interval;          // half-row length in elements
    uint32_t interval_32;       // interval rounded up to a 32-byte multiple
    uint32_t processDataLenght; // per-call vector length (== interval here)
};
// GLU JVP kernel, float dtype (TILING_KEY 2): multi-row tiles.
// Unlike KernelGluJvp_float, this variant reuses the forward result
// `glu_out` (= a * sigmoid(b)) instead of recomputing the exponential:
//   sigmoid(b) = glu / a,   a * (1 - sigmoid(b)) = a - glu
//   jvp = (glu / a) * (v_a + (a - glu) * v_b)
// NOTE(review): the Div by `a` assumes a != 0 on every lane — presumably
// guaranteed upstream or tolerated; TODO confirm.
class KernelGluJvp_float1 {
public:
    __aicore__ inline KernelGluJvp_float1() {}
    // Core split: the first `bigNum` cores each handle `bigLength` rows, the
    // remaining cores handle `smallLength` rows. Rows are processed in tiles
    // of `tileDataLenght` rows plus one tail tile.
    __aicore__ inline void Init(GM_ADDR glu_out, GM_ADDR input, GM_ADDR v, GM_ADDR jvp_out, 
                                uint32_t bigNum, uint32_t bigLength, uint32_t smallLength, 
                                uint32_t interval, TPipe* pipeIn) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        uint32_t coreDataNum;
        if (GetBlockIdx() < bigNum) {
            coreDataNum = bigLength;
            gluGm.SetGlobalBuffer((__gm__ float*)glu_out + bigLength * interval * GetBlockIdx());
            inputGm.SetGlobalBuffer((__gm__ float*)input + bigLength * interval * 2 * GetBlockIdx());
            vGm.SetGlobalBuffer((__gm__ float*)v + bigLength * interval * 2 * GetBlockIdx());
            outGm.SetGlobalBuffer((__gm__ float*)jvp_out + bigLength * interval * GetBlockIdx());
        } else {
            coreDataNum = smallLength;
            gluGm.SetGlobalBuffer((__gm__ float*)glu_out + bigLength * interval * bigNum + smallLength * interval  * (GetBlockIdx() - bigNum));
            inputGm.SetGlobalBuffer((__gm__ float*)input + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            vGm.SetGlobalBuffer((__gm__ float*)v + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            outGm.SetGlobalBuffer((__gm__ float*)jvp_out + bigLength * interval * bigNum + smallLength * interval  * (GetBlockIdx() - bigNum));
        }
        // Row length in elements, rounded up to a 32-byte UB boundary.
        this->interval_32 = (interval * sizeof(float) + 31)/32*32 /sizeof(float);
        this->repeat = coreDataNum;
        this->interval = interval;
        // 9824 floats is the per-queue UB buffer capacity below; a tile is as
        // many padded rows as fit into one buffer.
        this->tileDataLenght = 9824/this->interval_32;
        this->loopCount = this->repeat/this->tileDataLenght;
        this->tailDataLenght = this->repeat - this->tileDataLenght*this->loopCount;
        // An exact multiple of the tile size: fold the last full tile into
        // the tail so Process() always runs one tail step.
        if(this->tailDataLenght == 0){this->tailDataLenght = this->tileDataLenght; this->loopCount--;}
        pipeIn->InitBuffer(inQueueGlu, BUFFER_NUM, 9824 * sizeof(float));
        pipeIn->InitBuffer(inQueueInput, BUFFER_NUM, 9824 * sizeof(float));
        pipeIn->InitBuffer(inQueueV1, BUFFER_NUM, 9824 * sizeof(float));
        pipeIn->InitBuffer(inQueueV2, BUFFER_NUM, 9824 * sizeof(float));
        pipeIn->InitBuffer(outQueueOut, BUFFER_NUM, 9824 * sizeof(float));
    }
    __aicore__ inline void Process() {
        // Full tiles, then exactly one tail tile of tailDataLenght rows.
        this->processDataLenght = this->tileDataLenght;
        for (int32_t i = 0; i < this->loopCount; i++) {
            CopyIn(i);
            Compute(this->processDataLenght * this->interval_32);
            CopyOut(i);
        }
        this->processDataLenght = this->tailDataLenght;
        CopyIn(this->loopCount);
        Compute(this->processDataLenght * this->interval_32);
        CopyOut(this->loopCount);
    }
private:
    __aicore__ inline void CopyIn(int32_t progress) {
        // glu/out rows are contiguous in GM (srcStride 0); input/v rows are
        // interleaved (a, b) pairs, so the a-rows (and b-rows) are gathered
        // with a srcStride of one row between bursts.
        DataCopyExtParams copyParams{static_cast<uint16_t>(this->processDataLenght), static_cast<uint32_t>(this->interval * sizeof(float)), 0, 0, 0};
        DataCopyExtParams copyParams1{static_cast<uint16_t>(this->processDataLenght), static_cast<uint32_t>(this->interval * sizeof(float)), static_cast<uint32_t>(this->interval * sizeof(float)), 0, 0};
        DataCopyPadExtParams<float> padParams{false, 0, 0, 0};
        LocalTensor<float> gluLocal = inQueueGlu.AllocTensor<float>();
        LocalTensor<float> inputLocal = inQueueInput.AllocTensor<float>();
        LocalTensor<float> v1Local = inQueueV1.AllocTensor<float>();
        LocalTensor<float> v2Local = inQueueV2.AllocTensor<float>();
        DataCopyPad(gluLocal, gluGm[progress * this->interval * this->tileDataLenght], copyParams, padParams);
        DataCopyPad(inputLocal, inputGm[progress * this->interval * this->tileDataLenght * 2], copyParams1, padParams);
        DataCopyPad(v1Local, vGm[progress * this->interval * this->tileDataLenght * 2], copyParams1, padParams);
        DataCopyPad(v2Local, vGm[progress * this->interval * this->tileDataLenght * 2 + this->interval], copyParams1, padParams);
        inQueueGlu.EnQue(gluLocal);
        inQueueInput.EnQue(inputLocal);
        inQueueV1.EnQue(v1Local);
        inQueueV2.EnQue(v2Local);
    }
    // `lenght` covers all padded rows of the tile (rows * interval_32); the
    // pad lanes compute garbage that CopyOut never writes back.
    __aicore__ inline void Compute(int32_t lenght) {
        LocalTensor<float> gluLocal = inQueueGlu.DeQue<float>();
        LocalTensor<float> inputLocal = inQueueInput.DeQue<float>();
        LocalTensor<float> v1Local = inQueueV1.DeQue<float>();
        LocalTensor<float> v2Local = inQueueV2.DeQue<float>();
        LocalTensor<float> outLocal = outQueueOut.AllocTensor<float>();
        // outLocal <- a - glu = a * (1 - sigmoid(b))
        Sub(outLocal, inputLocal, gluLocal, lenght);
        // gluLocal <- glu / a = sigmoid(b)
        Div(gluLocal, gluLocal, inputLocal, lenght);
        // outLocal <- (a - glu) * v_b + v_a
        FusedMulAdd(outLocal, v2Local, v1Local, lenght);
        // outLocal <- sigmoid(b) * (v_a + (a - glu) * v_b)
        Mul(outLocal, outLocal, gluLocal, lenght);
        inQueueGlu.FreeTensor(gluLocal);
        inQueueInput.FreeTensor(inputLocal);
        inQueueV1.FreeTensor(v1Local);
        inQueueV2.FreeTensor(v2Local);
        outQueueOut.EnQue<float>(outLocal);
    }
    __aicore__ inline void CopyOut(int32_t progress) {
        LocalTensor<float> outLocal = outQueueOut.DeQue<float>();
        // One `interval`-float burst per row; UB pad lanes are skipped.
        DataCopyExtParams copyParams{static_cast<uint16_t>(this->processDataLenght), static_cast<uint32_t>(this->interval * sizeof(float)), 0, 0, 0}; 
        DataCopyPad(outGm[progress * this->interval * this->tileDataLenght], outLocal, copyParams);
        outQueueOut.FreeTensor(outLocal);
    }
private:
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueInput, inQueueV1, inQueueV2, inQueueGlu;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    GlobalTensor<float> gluGm;
    GlobalTensor<float> inputGm;
    GlobalTensor<float> vGm;
    GlobalTensor<float> outGm;
    // NOTE: the previously declared tailDataLenght_32 member was dead (never
    // written or read in this class) and has been removed.
    uint32_t repeat;            // rows this core processes
    uint32_t interval;          // half-row length in elements
    uint32_t interval_32;       // interval rounded up to a 32-byte multiple
    uint32_t loopCount;         // number of full tiles before the tail
    uint32_t tileDataLenght;    // rows per full tile
    uint32_t tailDataLenght;    // rows in the tail tile (1..tileDataLenght)
    uint32_t processDataLenght; // rows in the tile currently being processed
};
// GLU JVP kernel, half dtype (TILING_KEY 3). All arithmetic is done in
// float for precision: inputs are Cast up, the result is Cast back with
// round-to-nearest-even. One (a, b) row is processed per outer step, split
// into column chunks of tileDataLenght elements.
// To save UB, the half input/tangent buffers are reused as float scratch
// via ReinterpretCast once their half contents have been consumed
// (tileDataLenght*2 halves == tileDataLenght floats, so sizes match).
class KernelGluJvp_half {
public:
    __aicore__ inline KernelGluJvp_half() {}
    // glu_out is accepted for a uniform kernel signature but is not read by
    // this variant (the sigmoid is recomputed from `input`).
    // Core split: the first `bigNum` cores each handle `bigLength` rows, the
    // remaining cores handle `smallLength` rows.
    __aicore__ inline void Init(GM_ADDR glu_out, GM_ADDR input, GM_ADDR v, GM_ADDR jvp_out, 
                                uint32_t bigNum, uint32_t bigLength, uint32_t smallLength, 
                                uint32_t interval, TPipe* pipeIn) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        uint32_t coreDataNum;
        if (GetBlockIdx() < bigNum) {
            coreDataNum = bigLength;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + bigLength * interval * 2 * GetBlockIdx());
            vGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)v + bigLength * interval * 2 * GetBlockIdx());
            outGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)jvp_out + bigLength * interval * GetBlockIdx());
        } else {
            coreDataNum = smallLength;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            vGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)v + bigLength * interval * 2 * bigNum + smallLength * interval * 2 * (GetBlockIdx() - bigNum));
            outGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)jvp_out + bigLength * interval * bigNum + smallLength * interval  * (GetBlockIdx() - bigNum));
        }
        this->repeat = coreDataNum;
        this->interval = interval;
        // Column-chunk size in elements; 10880 halves is 32-byte aligned.
        this->tileDataLenght = 10880;
        this->loopCount = this->interval/this->tileDataLenght;
        this->tailDataLenght = this->interval - this->tileDataLenght*this->loopCount;
        // Exact multiple: fold the last full chunk into the tail step.
        if(this->tailDataLenght == 0){this->tailDataLenght = this->tileDataLenght; this->loopCount--;}
        // Tail length rounded up to a 32-byte multiple for plain DataCopy.
        this->tailDataLenght_32 = (this->tailDataLenght * sizeof(DTYPE_INPUT) + 31)/32*32 /sizeof(DTYPE_INPUT);
        pipeIn->InitBuffer(inQueueInput, BUFFER_NUM, this->tileDataLenght *2 * sizeof(DTYPE_INPUT));
        pipeIn->InitBuffer(inQueueV, BUFFER_NUM, this->tileDataLenght *2 * sizeof(DTYPE_INPUT));
        pipeIn->InitBuffer(outQueueOut, BUFFER_NUM, this->tileDataLenght * sizeof(DTYPE_INPUT));
        pipeIn->InitBuffer(QueueTmp1, this->tileDataLenght * sizeof(float));
        pipeIn->InitBuffer(QueueTmp2, this->tileDataLenght * sizeof(float));
    }
    __aicore__ inline void Process() {
        for (int32_t i = 0; i < this->repeat; i++) {
            // Full column chunks of the current row, then one tail chunk.
            this->processDataLenght = this->tileDataLenght;
            for(int32_t j=0; j<this->loopCount; j++)
            {
                CopyIn(i, j);
                Compute(i);
                CopyOut(i, j);
            }
            this->processDataLenght = this->tailDataLenght;
            CopyInTail(i, this->loopCount);
            Compute(i);
            CopyOut(i, this->loopCount);
        }
    }
private:
    __aicore__ inline void CopyIn(int32_t progress, int32_t step) {
        // Stage the a-chunk at offset 0 and the b-chunk at offset
        // tileDataLenght of each half buffer (same layout for v).
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.AllocTensor<DTYPE_INPUT>();
        LocalTensor<DTYPE_INPUT> vLocal = inQueueV.AllocTensor<DTYPE_INPUT>();
        DataCopy(inputLocal, inputGm[progress * this->interval * 2 + step*this->tileDataLenght], this->processDataLenght);
        DataCopy(inputLocal[this->tileDataLenght], inputGm[progress * this->interval * 2 + this->interval + step*this->tileDataLenght], this->processDataLenght);
        DataCopy(vLocal, vGm[progress * this->interval * 2 + step*this->tileDataLenght], this->processDataLenght);
        DataCopy(vLocal[this->tileDataLenght], vGm[progress * this->interval * 2 + this->interval + step*this->tileDataLenght], this->processDataLenght);
        inQueueInput.EnQue(inputLocal);
        inQueueV.EnQue(vLocal);
    }
    __aicore__ inline void CopyInTail(int32_t progress, int32_t step) {
        // Plain DataCopy needs a 32-byte-aligned length, so the tail copies
        // tailDataLenght_32 elements. NOTE(review): this over-reads GM past
        // the tail — presumably safe given the tensor layout/padding, but
        // worth confirming for the very last row of the last core.
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.AllocTensor<DTYPE_INPUT>();
        LocalTensor<DTYPE_INPUT> vLocal = inQueueV.AllocTensor<DTYPE_INPUT>();
        DataCopy(inputLocal, inputGm[progress * this->interval * 2 + step*this->tileDataLenght], this->tailDataLenght_32);
        DataCopy(inputLocal[this->tileDataLenght], inputGm[progress * this->interval * 2 + this->interval + step*this->tileDataLenght], this->tailDataLenght_32);
        DataCopy(vLocal, vGm[progress * this->interval * 2 + step*this->tileDataLenght], this->tailDataLenght_32);
        DataCopy(vLocal[this->tileDataLenght], vGm[progress * this->interval * 2 + this->interval + step*this->tileDataLenght], this->tailDataLenght_32);
        inQueueInput.EnQue(inputLocal);
        inQueueV.EnQue(vLocal);
    }
    // Computes jvp = sigmoid(b) * (v_a + a * (1 - sigmoid(b)) * v_b) in
    // float, then casts the result back to half. `progress` is unused.
    __aicore__ inline void Compute(int32_t progress) {
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.DeQue<DTYPE_INPUT>();
        LocalTensor<DTYPE_INPUT> vLocal = inQueueV.DeQue<DTYPE_INPUT>();
        LocalTensor<DTYPE_INPUT> outLocal = outQueueOut.AllocTensor<DTYPE_INPUT>();
        auto tmp1 = QueueTmp1.Get<float>();
        auto tmp2 = QueueTmp2.Get<float>();
        // tmp1 <- sigmoid(b) = 1 / (1 + exp(-b))
        Cast(tmp1, inputLocal[this->tileDataLenght], RoundMode::CAST_NONE, this->processDataLenght);
        Muls(tmp1, tmp1, (float)(-1), this->processDataLenght);
        Exp(tmp1, tmp1, this->processDataLenght);
        Adds(tmp1, tmp1, (float)(1), this->processDataLenght);
        Duplicate(tmp2, (float)(1), this->processDataLenght);
        Div(tmp1, tmp2, tmp1, this->processDataLenght);
        // tmp2 <- float(a); the half a/b data is no longer needed, so
        // inputLocal is reused as float scratch for float(v_a).
        Cast(tmp2, inputLocal, RoundMode::CAST_NONE, this->processDataLenght);
        Cast(inputLocal.ReinterpretCast<float>(), vLocal, RoundMode::CAST_NONE, this->processDataLenght);
        // NOTE(review): this widening Cast writes floats over the front of
        // vLocal while reading halves from its back half — for a full chunk
        // the ranges overlap in UB; confirm the Cast address-overlap rules
        // permit this on the target chip.
        Cast(vLocal.ReinterpretCast<float>(), vLocal[this->tileDataLenght], RoundMode::CAST_NONE, this->processDataLenght);
        // tmp2 <- a * v_b
        Mul(tmp2, tmp2, vLocal.ReinterpretCast<float>(), this->processDataLenght);
        // vLocal(float) <- 1 - sigmoid(b)
        Muls(vLocal.ReinterpretCast<float>(), tmp1, (float)(-1), this->processDataLenght);
        Adds(vLocal.ReinterpretCast<float>(), vLocal.ReinterpretCast<float>(), (float)(1), this->processDataLenght);
        // tmp2 <- a * v_b * (1 - sigmoid(b)) + v_a
        Mul(tmp2, tmp2, vLocal.ReinterpretCast<float>(), this->processDataLenght);
        Add(tmp2, tmp2, inputLocal.ReinterpretCast<float>(), this->processDataLenght);
        // tmp1 <- sigmoid(b) * (...); cast back to half (round to nearest).
        Mul(tmp1, tmp1, tmp2, this->processDataLenght);
        Cast(outLocal, tmp1, RoundMode::CAST_RINT, this->processDataLenght);
        inQueueInput.FreeTensor(inputLocal);
        inQueueV.FreeTensor(vLocal);
        outQueueOut.EnQue<DTYPE_INPUT>(outLocal);
    }
    __aicore__ inline void CopyOut(int32_t progress, int32_t step) {
        LocalTensor<DTYPE_INPUT> outLocal = outQueueOut.DeQue<DTYPE_INPUT>();
        // DataCopyPad handles the (possibly unaligned) tail length exactly.
        DataCopyExtParams copyParams{1, static_cast<uint32_t>(this->processDataLenght * sizeof(DTYPE_INPUT)), 0, 0, 0}; 
        DataCopyPad(outGm[progress * this->interval + step*this->tileDataLenght], outLocal, copyParams);
        outQueueOut.FreeTensor(outLocal);
    }
private:
    // NOTE: the previously declared gluGm, inQueueGlu and QueueTmp3/4/5
    // members were dead (never used by this variant) and have been removed.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueInput, inQueueV;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    TBuf<QuePosition::VECCALC> QueueTmp1, QueueTmp2;
    GlobalTensor<DTYPE_INPUT> inputGm;
    GlobalTensor<DTYPE_INPUT> vGm;
    GlobalTensor<DTYPE_INPUT> outGm;
    uint32_t repeat;            // rows this core processes
    uint32_t interval;          // half-row length in elements
    uint32_t loopCount;         // full column chunks per row
    uint32_t tileDataLenght;    // column-chunk size in elements
    uint32_t tailDataLenght;    // tail-chunk size (1..tileDataLenght)
    uint32_t tailDataLenght_32; // tail size rounded up to a 32-byte multiple
    uint32_t processDataLenght; // size of the chunk currently processed
};
// Kernel entry point: reads the host tiling data and dispatches to the
// matching GLU-JVP implementation.
//   key 1 -> float, per-row-pair pipeline (KernelGluJvp_float)
//   key 2 -> float, multi-row tiles reusing glu_out (KernelGluJvp_float1)
//   key 3 -> half, column-chunked float-precision pipeline (KernelGluJvp_half)
extern "C" __global__ __aicore__ void glu_jvp(GM_ADDR glu_out, GM_ADDR input, GM_ADDR v, GM_ADDR jvp_out, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    TPipe pipe;
    if (TILING_KEY_IS(1)) {
        KernelGluJvp_float kernel;
        kernel.Init(glu_out, input, v, jvp_out, tiling_data.bigNum, tiling_data.bigLength,
                    tiling_data.smallLength, tiling_data.interval, &pipe);
        kernel.Process();
    } else if (TILING_KEY_IS(2)) {
        KernelGluJvp_float1 kernel;
        kernel.Init(glu_out, input, v, jvp_out, tiling_data.bigNum, tiling_data.bigLength,
                    tiling_data.smallLength, tiling_data.interval, &pipe);
        kernel.Process();
    } else if (TILING_KEY_IS(3)) {
        KernelGluJvp_half kernel;
        kernel.Init(glu_out, input, v, jvp_out, tiling_data.bigNum, tiling_data.bigLength,
                    tiling_data.smallLength, tiling_data.interval, &pipe);
        kernel.Process();
    }
}