#include "kernel_operator.h"
#include<type_traits>
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;  // tensors per queue (double buffering)

// Abramowitz & Stegun 7.1.26 polynomial coefficients for approximating
// erf(z) on z >= 0 (max absolute error ~1.5e-7). Literals carry an 'f'
// suffix so no silent double->float narrowing occurs.
constexpr float a1 = 0.254829592f;
constexpr float a2 = -0.284496736f;
constexpr float a3 = 1.421413741f;
constexpr float a4 = -1.453152027f;
constexpr float a5 = 1.061405429f;

constexpr float p = 0.3275911f;      // A&S 7.1.26 rational-term parameter
constexpr float _pi = 0.707106781f;  // 1/sqrt(2): scales x to the erf argument

class KernelGelu {
    // Float GELU kernel: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), with
    // erf evaluated via the Abramowitz & Stegun 7.1.26 polynomial on |x|
    // and odd symmetry restored through a per-element sign vector.
private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;    // double-buffered input queue
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;  // double-buffered output queue
    // Scratch buffers: constant 0s/1s, compare bitmask, per-element sign,
    // and two polynomial temporaries.
    TBuf<QuePosition::VECCALC> All0, BITS, Sign, All1, tmp5, tmp6;

    GlobalTensor<float> xGm;  // this core's input slice in global memory
    GlobalTensor<float> yGm;  // this core's output slice in global memory

    uint32_t ALIGN_NUM;    // element alignment unit
    uint32_t totalLength;  // total element count of the whole operator
    uint32_t tileLength;   // elements processed per tile
    uint32_t blockLength;  // elements handled by this core (rounded up to ALIGN_NUM)
    uint32_t tileNum;      // number of tiles this core loops over

public:
    __aicore__ inline KernelGelu(){}

    // Bind the global buffers and compute the per-core tiling.
    // NOTE: tiling_size is unused; it is kept so both dtype kernels share
    // one Init signature.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totalLength, uint32_t ALIGN_NUM, uint32_t block_size, uint32_t tiling_size, uint32_t core_size, uint32_t core_remain) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->totalLength = totalLength;
        // Elements this core processes; the last core also takes the remainder.
        this->blockLength = core_size + (GetBlockNum() == GetBlockIdx() + 1 ? core_remain : 0);
        this->tileLength = block_size;
        this->ALIGN_NUM = ALIGN_NUM;
        // Round up to a multiple of ALIGN_NUM so DataCopy lengths stay aligned.
        this->blockLength = this->blockLength + (this->blockLength % ALIGN_NUM ? ALIGN_NUM - this->blockLength % ALIGN_NUM : 0);

        auto startPointer = GetBlockIdx() * core_size;
        auto bufferlength = this->blockLength;

        xGm.SetGlobalBuffer((__gm__ float*)x + startPointer, bufferlength);
        yGm.SetGlobalBuffer((__gm__ float*)y + startPointer, bufferlength);

        // Loop count = ceil(blockLength / tileLength).
        this->tileNum = this->blockLength / this->tileLength + (this->blockLength % this->tileLength > 0);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(float));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(float));

        pipe.InitBuffer(All0, this->tileLength * sizeof(float));
        pipe.InitBuffer(BITS, this->tileLength * sizeof(uint8_t));
        pipe.InitBuffer(Sign, this->tileLength * sizeof(float));
        pipe.InitBuffer(All1, this->tileLength * sizeof(float));
        pipe.InitBuffer(tmp5, this->tileLength * sizeof(float));
        pipe.InitBuffer(tmp6, this->tileLength * sizeof(float));
    }

    // Fill the constant buffers with 0.0 and 1.0.
    // BUGFIX: the previous code "zeroed" the freshly-allocated buffers with
    // Muls(buf, buf, 0.0) on uninitialized memory; if that memory happened
    // to contain NaN/Inf, 0 * NaN == NaN and the constants (and therefore
    // the sign Select and the Div reciprocal) were corrupted. Duplicate
    // writes the scalar unconditionally, independent of prior contents.
    __aicore__ inline void InitBuf()
    {
        LocalTensor<float> all0 = All0.Get<float>();    // 0 0 0 ...
        Duplicate(all0, (float)0.0, tileLength);

        LocalTensor<float> all1 = All1.Get<float>();    // 1 1 1 ...
        Duplicate(all1, (float)1.0, tileLength);
    }

    // Main pipeline: process full tiles, then the (possibly shorter) tail.
    __aicore__ inline void Process() {
        InitBuf();
        int32_t loopCount = this->tileNum;
        for (int32_t i = 0; i < loopCount - 1; i++)
        {
            CopyIn(i, this->tileLength);
            Compute(i, this->tileLength);
            CopyOut(i, this->tileLength);
        }

        // Tail tile: whatever remains of the aligned block.
        uint32_t length = this->blockLength - this->tileLength * (loopCount - 1);
        CopyIn(loopCount - 1, length);
        Compute(loopCount - 1, length);
        CopyOut(loopCount - 1, length);
    }

private:
    // Stage `length` elements of tile `progress` from global to local memory.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length) {
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        DataCopy(xLocal, xGm[progress * this->tileLength], length);
        inQueueX.EnQue(xLocal);
    }

    // Compute gelu over one tile. erf is evaluated on z = |x|/sqrt(2) as
    //   erf(z) ~= 1 - (((((a5*t + a4)*t + a3)*t + a2)*t + a1) * t) * exp(-z^2),
    //   t = 1 / (1 + p*z),
    // then odd symmetry is restored with sign(x).
    __aicore__ inline void Compute(int32_t progress, uint32_t length) {
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();

        LocalTensor<float> all0 = All0.Get<float>();
        LocalTensor<uint8_t> bits = BITS.Get<uint8_t>();
        LocalTensor<float> sign = Sign.Get<float>();
        LocalTensor<float> all1 = All1.Get<float>();

        // sign[i] = x[i] > 0 ? 1.0 : -1.0
        Compare(bits, xLocal, all0, CMPMODE::GT, length);     // e.g. 1 1 0 0 1 0 1
        Select(sign, bits, all1, (float)-1.0, SELMODE::VSEL_TENSOR_SCALAR_MODE, length);

        // z = |x| / sqrt(2)
        Abs(yLocal, xLocal, length);
        Muls(yLocal, yLocal, _pi, length);

        // t = 1.0 / (1.0 + p * z)
        LocalTensor<float> t5 = tmp5.Get<float>();
        Muls(t5, yLocal, p, length);
        Adds(t5, t5, (float)1.0, length);
        Div(t5, all1, t5, length);

        // Horner evaluation of ((((a5*t + a4)*t + a3)*t + a2)*t + a1) * t
        LocalTensor<float> t6 = tmp6.Get<float>();
        Muls(t6, t5, a5, length);
        Adds(t6, t6, a4, length);
        Mul(t6, t6, t5, length);
        Adds(t6, t6, a3, length);
        Mul(t6, t6, t5, length);
        Adds(t6, t6, a2, length);
        Mul(t6, t6, t5, length);
        Adds(t6, t6, a1, length);
        Mul(t6, t6, t5, length);

        // exp(-z^2), reusing t5 as scratch
        Muls(t5, yLocal, (float)-1.0, length);
        Mul(t5, t5, yLocal, length);
        Exp(t5, t5, length);

        // erf(z) = 1 - poly * exp(-z^2)
        Mul(t6, t6, t5, length);
        Muls(t6, t6, (float)-1.0, length);
        Adds(t6, t6, (float)1.0, length);

        // 0.5 * (1.0 + sign * erf)
        Mul(t6, sign, t6, length);
        Adds(t6, t6, (float)1.0, length);
        Muls(t6, t6, (float)0.5, length);

        // y = x * 0.5 * (1 + erf(x / sqrt(2)))
        Mul(yLocal, xLocal, t6, length);

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Stage `length` results of tile `progress` back to global memory.
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length) {
        LocalTensor<float> yLocal = outQueueY.DeQue<float>();
        DataCopy(yGm[progress * this->tileLength], yLocal, length);
        outQueueY.FreeTensor(yLocal);
    }
};


class KernelGeluFP16 {
    // Half-precision GELU kernel using the sigmoid form of the tanh fast
    // approximation:
    //   gelu(x) ~= x / (1 + exp(-1.5957691 * (x + 0.044715 * x^3)))
    // which equals 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))) since
    // 0.5*(1 + tanh(z)) == 1/(1 + exp(-2z)) and 2*sqrt(2/pi) ~= 1.59577.

    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;    // double-buffered input queue
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;  // double-buffered output queue


    GlobalTensor<half> xGm;  // this core's input slice in global memory
    GlobalTensor<half> yGm;  // this core's output slice in global memory

    uint32_t ALIGN_NUM;    // element alignment unit
    uint32_t totalLength;  // total element count of the whole operator
    uint32_t tileLength;   // elements processed per tile
    uint32_t blockLength;  // elements handled by this core (rounded up to ALIGN_NUM)
    uint32_t tileNum;      // number of tiles this core loops over

public:
    __aicore__ inline KernelGeluFP16(){}
    // Bind the global buffers and compute the per-core tiling.
    // NOTE: tiling_size is unused; kept so both dtype kernels share one signature.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, uint32_t totalLength, uint32_t ALIGN_NUM, uint32_t block_size, uint32_t tiling_size, uint32_t core_size, uint32_t core_remain) {

        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->totalLength = totalLength;
        // Elements this core processes; the last core also takes the remainder.
        this->blockLength = core_size + (GetBlockNum() == GetBlockIdx() + 1 ? core_remain : 0);
        // Elements per tile (host-side tiling chose block_size).
        this->tileLength = block_size;
        this->ALIGN_NUM = ALIGN_NUM;
        // Round up to a multiple of ALIGN_NUM so DataCopy lengths stay aligned.
        this->blockLength = this->blockLength + (this->blockLength % ALIGN_NUM ? ALIGN_NUM - this->blockLength % ALIGN_NUM : 0);

        auto startPointer = GetBlockIdx() * core_size;
        auto bufferlength = this->blockLength;


        xGm.SetGlobalBuffer((__gm__ half*)x + startPointer, bufferlength);
        yGm.SetGlobalBuffer((__gm__ half*)y + startPointer, bufferlength);

        // Loop count = ceil(blockLength / tileLength).
        this->tileNum = this->blockLength / this->tileLength + (this->blockLength % this->tileLength > 0);
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileLength * sizeof(half));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(half));
    }

    // Main pipeline: process full tiles, then the (possibly shorter) tail.
    __aicore__ inline void Process() {
        int32_t loopCount = this->tileNum;
        for (int32_t i = 0; i < loopCount - 1; i++)
        {
            CopyIn(i, this->tileLength);
            Compute(i, this->tileLength);
            CopyOut(i, this->tileLength);
        }

        // Tail tile: whatever remains of the aligned block.
        uint32_t length = this->blockLength - this->tileLength * (loopCount - 1);
        CopyIn(loopCount - 1, length);
        Compute(loopCount - 1, length);
        CopyOut(loopCount - 1, length);
        
    }

private:
    // Stage `length` elements of tile `progress` from global to local memory.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length) {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopy(xLocal, xGm[progress * this->tileLength], length);
        inQueueX.EnQue(xLocal);
    }


    // Compute the fast-GELU approximation over one tile; all intermediate
    // results accumulate in place in yLocal.
    __aicore__ inline void Compute(int32_t progress, uint32_t length) {
        
        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();

        Mul(yLocal, xLocal, xLocal, length);   // x^2
        Mul(yLocal, yLocal, xLocal, length); //x^3

        // u = x + 0.044715 * x^3
        Muls(yLocal, yLocal, half(0.044715), length);
        Add(yLocal, yLocal, xLocal, length);

        // -2 * sqrt(2/pi) * u
        Muls(yLocal, yLocal, half(-1.59576912), length);

        // 1 + exp(-1.59576912 * u)
        Exp(yLocal, yLocal, length);
        Adds(yLocal, yLocal, (half)1.0, length);

        // y = x / (1 + exp(-1.59576912 * u))
        Div(yLocal, xLocal, yLocal, length);
        
        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }


    // Stage `length` results of tile `progress` back to global memory.
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length) {
        LocalTensor<half> yLocal = outQueueY.DeQue<half>();
        DataCopy(yGm[progress * this->tileLength], yLocal, length);
        outQueueY.FreeTensor(yLocal);
    }

};





// Kernel entry point: dispatch on the dtype the host-side tiling selected.
// Tiling key 1 -> half-precision kernel, key 2 -> single-precision kernel;
// the keys are mutually exclusive, so branch order is irrelevant.
extern "C" __global__ __aicore__ void gelu(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    if (TILING_KEY_IS(2)) {
        // float path
        KernelGelu kernel;
        kernel.Init(x, y, tiling_data.total_length, tiling_data.ALIGN_NUM,
                    tiling_data.block_size, tiling_data.tiling_size,
                    tiling_data.core_size, tiling_data.core_remain);
        kernel.Process();
    } else if (TILING_KEY_IS(1)) {
        // half path
        KernelGeluFP16 kernel;
        kernel.Init(x, y, tiling_data.total_length, tiling_data.ALIGN_NUM,
                    tiling_data.block_size, tiling_data.tiling_size,
                    tiling_data.core_size, tiling_data.core_remain);
        kernel.Process();
    }
}