#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;
// constexpr half param1 = -1.702;
// constexpr half param2 = 0.851;
// constexpr half param3 = 1.0;

// FastGeluGrad kernel: computes dx = dy * d/dx(FastGelu(x)) element-wise.
// With a = 1.702 the gradient used here is
//   g(x) = (e^(-a|x|) + a*x*e^(-a|x|) + e^(a*(x - |x|))) / (e^(-a|x|) + 1)^2
// Data is streamed from global memory tile by tile with BUFFER_NUM-way
// double buffering. x and dy share one input queue: x occupies the first
// tileLength elements of each tile buffer, dy the second tileLength.
template<typename T>
class KernelFastgelugrad{
public:
    __aicore__ inline KernelFastgelugrad() {}

    // Sets up the global-memory views and local queues/buffers for this core.
    // tilingKey == 1: every core processes an identical blockLength slice.
    // tilingKey == 2: the first formerNum cores each process formerLength
    // elements ("former"/big cores); the remaining cores process tailLength.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR dy, GM_ADDR out,
                              uint32_t blockLength,
                              uint32_t tileNum, uint32_t tileLength,
                              uint32_t lasttileLength, uint32_t formerNum,
                              uint32_t formerLength, uint32_t formertileNum,
                              uint32_t formertileLength,
                              uint32_t formerlasttileLength, uint32_t tailNum,
                              uint32_t tailLength, uint32_t tailtileNum,
                              uint32_t tailtileLength,
                              uint32_t taillasttileLength, uint32_t tilingKey) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        if (tilingKey == 1) {
            this->blockLength = blockLength;
            // Fixed: a missing ';' previously fused this assignment with the
            // following ASSERT into one ill-formed statement.
            this->tileNum = tileNum;
            ASSERT(tileNum != 0 && "tile num can not be zero!");
            this->tileLength = tileLength / BUFFER_NUM;
            this->lasttileLength = lasttileLength;

            xGm.SetGlobalBuffer((__gm__ T*)x + this->blockLength * GetBlockIdx(),
                                this->blockLength);
            dyGm.SetGlobalBuffer((__gm__ T*)dy + this->blockLength * GetBlockIdx(),
                                this->blockLength);
            outGm.SetGlobalBuffer(
                (__gm__ T*)out + this->blockLength * GetBlockIdx(),
                this->blockLength);
        }

        if (tilingKey == 2) {
            this->formerNum = formerNum;
            this->formerLength = formerLength;
            this->formertileNum = formertileNum;
            this->formertileLength = formertileLength;
            this->formerlasttileLength = formerlasttileLength;

            this->tailNum = tailNum;
            this->tailLength = tailLength;
            this->tailtileNum = tailtileNum;
            this->tailtileLength = tailtileLength;
            this->taillasttileLength = taillasttileLength;

            if (GetBlockIdx() < this->formerNum) {  // big ("former") cores
                this->tileLength = this->formertileLength / BUFFER_NUM;
                this->lasttileLength = this->formerlasttileLength;
                // NOTE(review): tileNum already carries the BUFFER_NUM factor
                // here, and Process() multiplies by BUFFER_NUM again — confirm
                // against the host tiling that formertileNum is pre-divided
                // accordingly. Behavior preserved as-is.
                this->tileNum = this->formertileNum * BUFFER_NUM;
                xGm.SetGlobalBuffer(
                    (__gm__ T*)x + this->formerLength * GetBlockIdx(),
                    this->formerLength);
                dyGm.SetGlobalBuffer(
                    (__gm__ T*)dy + this->formerLength * GetBlockIdx(),
                    this->formerLength);
                outGm.SetGlobalBuffer(
                    (__gm__ T*)out + this->formerLength * GetBlockIdx(),
                    this->formerLength);
            } else {  // tail cores: process fewer elements than the big cores
                this->tileLength = this->tailtileLength / BUFFER_NUM;
                this->lasttileLength = this->taillasttileLength;
                this->tileNum = this->tailtileNum * BUFFER_NUM;
                // Tail cores start right after all former-core data.
                xGm.SetGlobalBuffer(
                    (__gm__ T*)x + this->formerLength * this->formerNum +
                        this->tailLength * (GetBlockIdx() - this->formerNum),
                    this->tailLength);
                dyGm.SetGlobalBuffer(
                    (__gm__ T*)dy + this->formerLength * this->formerNum +
                        this->tailLength * (GetBlockIdx() - this->formerNum),
                    this->tailLength);
                outGm.SetGlobalBuffer(
                    (__gm__ T*)out + this->formerLength * this->formerNum +
                        this->tailLength * (GetBlockIdx() - this->formerNum),
                    this->tailLength);
            }
        }

        // One shared input queue holds x and dy back to back (2 * tileLength).
        pipe.InitBuffer(inQueueIN, BUFFER_NUM, this->tileLength * 2 * sizeof(T));
        pipe.InitBuffer(outQueueOUT, BUFFER_NUM, this->tileLength * sizeof(T));

        // Scratch buffers for intermediate vector results.
        pipe.InitBuffer(divDownBuf, this->tileLength * sizeof(T));
        pipe.InitBuffer(divUpBuf, this->tileLength * sizeof(T));
        pipe.InitBuffer(cBuf, this->tileLength * sizeof(T));
        pipe.InitBuffer(tmpBuf, this->tileLength * sizeof(T));
        pipe.InitBuffer(resBuf, this->tileLength * sizeof(T));
    }

    // Drives the copy-in / compute / copy-out pipeline over all tiles.
    __aicore__ inline void Process() {
        int32_t loopCount = this->tileNum * BUFFER_NUM;
        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Copies one tile of x and dy from global memory into the input queue:
    // x lands at local offset 0, dy at local offset tileLength.
    __aicore__ inline void CopyIn(int32_t progress) {
        LocalTensor<T> inLocal = inQueueIN.AllocTensor<T>();
        if (BUFFER_NUM == 1) {
            if (progress == this->tileNum - 1) {
                if (progress == 0) {
                    // Only one tile: copy from offset 0; tileLength is the
                    // actual amount of data in this block.
                    DataCopy(inLocal[0], xGm[0], this->tileLength);
                    DataCopy(inLocal[this->tileLength], dyGm[0], this->tileLength);
                } else {
                    // Last tile: shift the start back by
                    // (tileLength - lasttileLength) so a full aligned
                    // tileLength copy still ends exactly at the data end.
                    DataCopy(
                        inLocal[0],
                        xGm[(progress - 1) * this->tileLength + this->lasttileLength],
                        this->tileLength);
                    DataCopy(
                        inLocal[this->tileLength],
                        dyGm[(progress - 1) * this->tileLength + this->lasttileLength],
                        this->tileLength);
                }
            } else {
                DataCopy(inLocal[0], xGm[progress * this->tileLength],
                        this->tileLength);
                DataCopy(inLocal[this->tileLength], dyGm[progress * this->tileLength],
                        this->tileLength);
            }
        }
        if (BUFFER_NUM == 2) {
            // With double buffering each tile is half the size it would be
            // without it, so the last TWO tiles both need their start address
            // shifted back by (tileLength - lasttileLength): the second-to-last
            // tile moves back, and the last tile follows from that base.
            if ((progress == (this->tileNum * BUFFER_NUM - 2)) ||
                (progress == (this->tileNum * BUFFER_NUM - 1))) {
                DataCopy(
                    inLocal[0],
                    xGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
                    (this->tileLength));
                DataCopy(
                    inLocal[this->tileLength],
                    dyGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
                    (this->tileLength));
            } else {
                DataCopy(inLocal[0], xGm[progress * (this->tileLength)],
                        (this->tileLength));
                DataCopy(inLocal[this->tileLength], dyGm[progress * (this->tileLength)],
                        (this->tileLength));
            }
        }

        inQueueIN.EnQue(inLocal);
    }

    // Computes one tile: y = dy * g(x) with g as documented on the class.
    __aicore__ inline void Compute(int32_t progress) {
        LocalTensor<T> inLocal = inQueueIN.DeQue<T>();
        // Fixed: CopyIn stores x at offset 0 and dy at offset tileLength, but
        // the labels were swapped here, so the gradient was computed from dy
        // and multiplied by x. FastGeluGrad is not symmetric in (x, dy).
        LocalTensor<T> xLocal = inLocal;
        LocalTensor<T> dyLocal = inLocal[this->tileLength];
        LocalTensor<T> yLocal = outQueueOUT.AllocTensor<T>();

        LocalTensor<T> divDownLocal = divDownBuf.Get<T>();
        LocalTensor<T> tmpLocal = tmpBuf.Get<T>();
        Abs(divDownLocal, xLocal, this->tileLength);                    // |x|
        Muls(divDownLocal, divDownLocal, (attr_opp), this->tileLength); // -a*|x|
        Exp(divDownLocal, divDownLocal, this->tileLength);              // exp_x = e^(-a|x|)

        Mul(tmpLocal, divDownLocal, xLocal, this->tileLength);          // x * exp_x
        Muls(tmpLocal, tmpLocal, (attr), this->tileLength);             // add_2 = a*x*exp_x

        // exp_pn_x = e^(a*(x - |x|)); x - |x| is 0 for x >= 0 and 2x for x < 0.
        // Vectorized (Abs + Sub) in place of the former per-element
        // GetValue/SetValue loop, which was slow and — being scalar writes
        // followed by vector reads — needed an explicit S->V sync it lacked.
        LocalTensor<T> cLocal = cBuf.Get<T>();
        LocalTensor<T> divUpLocal = divUpBuf.Get<T>();
        Abs(divUpLocal, xLocal, this->tileLength);                      // scratch: |x|
        Sub(cLocal, xLocal, divUpLocal, this->tileLength);              // x - |x|
        Muls(cLocal, cLocal, (attr), this->tileLength);
        Exp(cLocal, cLocal, this->tileLength);                          // exp_pn_x

        Add(divUpLocal, divDownLocal, tmpLocal, this->tileLength);      // exp_x + add_2
        Add(divUpLocal, divUpLocal, cLocal, this->tileLength);          // ... + exp_pn_x
        Adds(divDownLocal, divDownLocal, (param), this->tileLength);    // exp_x + 1
        Mul(divDownLocal, divDownLocal, divDownLocal, this->tileLength); // (exp_x + 1)^2

        LocalTensor<T> resLocal = resBuf.Get<T>();
        Div(resLocal, divUpLocal, divDownLocal, this->tileLength);      // g(x)
        Mul(yLocal, dyLocal, resLocal, this->tileLength);               // dy * g(x)
        outQueueOUT.EnQue<T>(yLocal);
        inQueueIN.FreeTensor(inLocal);
    }

    // Copies one result tile back to global memory, mirroring the address
    // adjustments made in CopyIn for the final tile(s).
    __aicore__ inline void CopyOut(int32_t progress) {
        LocalTensor<T> outLocal = outQueueOUT.DeQue<T>();
        if (BUFFER_NUM == 1) {
            if (progress == this->tileNum - 1) {
                if (progress == 0) {
                    // Only one tile: write from offset 0.
                    DataCopy(outGm[0], outLocal, this->tileLength);
                } else {
                    // Last tile: start shifted back by
                    // (tileLength - lasttileLength), matching CopyIn.
                    DataCopy(
                        outGm[(progress - 1) * this->tileLength + this->lasttileLength],
                        outLocal, this->tileLength);
                }
            } else {
                DataCopy(outGm[progress * this->tileLength], outLocal,
                        this->tileLength);
            }
        }
        if (BUFFER_NUM == 2) {
            // With double buffering the last TWO half-sized tiles use the
            // shifted start addresses, matching CopyIn.
            if ((progress == (this->tileNum * BUFFER_NUM - 2)) ||
                (progress == (this->tileNum * BUFFER_NUM - 1))) {
                DataCopy(
                    outGm[(progress - 2) * (this->tileLength) + this->lasttileLength],
                    outLocal, (this->tileLength));
            } else {
                DataCopy(outGm[progress * (this->tileLength)], outLocal,
                        (this->tileLength));
            }
        }

        outQueueOUT.FreeTensor(outLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueIN;    // per tile: [x | dy]
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOUT;
    TBuf<TPosition::VECCALC> divDownBuf, divUpBuf, cBuf, tmpBuf, resBuf;

    GlobalTensor<T> xGm;
    GlobalTensor<T> dyGm;
    GlobalTensor<T> outGm;

    T attr = 1.702;       // FastGelu coefficient a
    T attr_opp = -1.702;  // -a
    T param = 1.0;        // additive constant in the denominator

    uint32_t blockLength;
    uint32_t tileNum;
    uint32_t tileLength;
    uint32_t lasttileLength;
    uint32_t formerNum;
    uint32_t formerLength;
    uint32_t formertileNum;
    uint32_t formertileLength;
    uint32_t formerlasttileLength;
    uint32_t tailNum;
    uint32_t tailLength;
    uint32_t tailtileNum;
    uint32_t tailtileLength;
    uint32_t taillasttileLength;
};

// Kernel entry point for FastGeluGrad.
// tiling_data.datatype selects the element type (1 = fp16/half, 0 = fp32);
// the tiling key selects the split strategy (1 = even split across cores,
// 2 = former/tail split). Any other datatype value is a no-op.
extern "C" __global__ __aicore__ void fast_gelu_grad(GM_ADDR x, GM_ADDR dy, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    // The tiling key is independent of the dtype, so resolve it once here
    // instead of duplicating the selection inside every dtype branch.
    uint32_t tilingKey = 1;
    if (TILING_KEY_IS(2)) {
        tilingKey = 2;
    }
    if (tiling_data.datatype == 1) {  // fp16 (half)
        KernelFastgelugrad<half> op;
        op.Init(x, dy, z, tiling_data.blockLength,
                tiling_data.tileNum, tiling_data.tileLength,
                tiling_data.lasttileLength, tiling_data.formerNum,
                tiling_data.formerLength, tiling_data.formertileNum,
                tiling_data.formertileLength, tiling_data.formerlasttileLength,
                tiling_data.tailNum, tiling_data.tailLength, tiling_data.tailtileNum,
                tiling_data.tailtileLength, tiling_data.taillasttileLength,
                tilingKey);
        op.Process();
    } else if (tiling_data.datatype == 0) {  // fp32
        KernelFastgelugrad<float> op;
        op.Init(x, dy, z, tiling_data.blockLength,
                tiling_data.tileNum, tiling_data.tileLength,
                tiling_data.lasttileLength, tiling_data.formerNum,
                tiling_data.formerLength, tiling_data.formertileNum,
                tiling_data.formertileLength, tiling_data.formerlasttileLength,
                tiling_data.tailNum, tiling_data.tailLength, tiling_data.tailtileNum,
                tiling_data.tailtileLength, tiling_data.taillasttileLength,
                tilingKey);
        op.Process();
    }
}

#ifndef __CCE_KT_TEST__
// Host-side launch helper: enqueues the fast_gelu_grad kernel on `stream`
// with `blockDim` AI cores via the <<<...>>> launch syntax. `l2ctrl` is
// passed through to the launch configuration unchanged. Excluded from
// kernel unit-test builds (__CCE_KT_TEST__), where no device launch exists.
void fast_gelu_grad_do(uint32_t blockDim, void* l2ctrl, void* stream,
                       uint8_t* x, uint8_t* dy,  uint8_t* out,
                       uint8_t* workspace, uint8_t* tiling) {
  fast_gelu_grad<<<blockDim, l2ctrl, stream>>>(x, dy, out, workspace, tiling);
}
#endif