#include "kernel_operator.h"
#include "cmath"
// tensor num for each queue
constexpr int32_t BUFFER_NUM = 2;

const unsigned pow2_max = 0x7f000000;

// Element-wise power kernel: z[i] = x[i] ^ y[i].
//
// TYPE_X / TYPE_Y are the two input dtypes, TYPE_Z the output dtype.
// When an input dtype differs from TYPE_Z, that input is promoted
// (Cast) to float32 in a scratch UB buffer before AscendC::Power runs.
// IsExistBigCore selects the big-core/small-core tiling split.
template <typename TYPE_X, typename TYPE_Y, typename TYPE_Z, bool IsExistBigCore>
class KernelPower
{
public:
    __aicore__ inline KernelPower() {}

    // Bind global memory, compute this core's data range from the tiling
    // parameters, and allocate the UB queues/scratch buffers.
    //
    // x, y, z              : GM addresses of the two inputs and the output.
    // *DataNum / *LoopNum  : per-core element counts and tile loop counts
    //                        produced by the host-side tiling function.
    // ubPartDataNum        : elements processed per tile (UB chunk size).
    // tailBlockNum         : number of "big" cores (cores with index below
    //                        this value get the larger share of data).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z, uint64_t smallCoreDataNum,
                                uint64_t bigCoreDataNum, uint64_t bigCoreLoopNum,
                                uint64_t smallCoreLoopNum, uint64_t ubPartDataNum,
                                uint64_t smallCoreTailDataNum, uint64_t bigCoreTailDataNum,
                                uint64_t tailBlockNum)
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        uint64_t blockIdx = AscendC::GetBlockIdx();
        // Start offset assuming every core ahead of us was a big core ...
        uint64_t globalBufferIndex = bigCoreDataNum * blockIdx;
        this->ubPartDataNum = ubPartDataNum;
        if constexpr (IsExistBigCore)
        {
            if (blockIdx < tailBlockNum)
            {
                this->coreDataNum = bigCoreDataNum;
                this->tileNum = bigCoreLoopNum;
                this->tailDataNum = bigCoreTailDataNum;
            }
            else
            {
                this->coreDataNum = smallCoreDataNum;
                this->tileNum = smallCoreLoopNum;
                this->tailDataNum = smallCoreTailDataNum;
                // ... then subtract the over-count for the small cores passed.
                globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
            }
        }
        else
        {
            // Uniform split: every core handles the small-core share.
            this->coreDataNum = smallCoreDataNum;
            this->tileNum = smallCoreLoopNum;
            this->tailDataNum = smallCoreTailDataNum;
            globalBufferIndex = smallCoreDataNum * blockIdx;
        }

        xGm.SetGlobalBuffer((__gm__ TYPE_X *)x + globalBufferIndex, this->coreDataNum);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y *)y + globalBufferIndex, this->coreDataNum);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z *)z + globalBufferIndex, this->coreDataNum);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->ubPartDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(inQueueY, BUFFER_NUM, this->ubPartDataNum * sizeof(TYPE_Y));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->ubPartDataNum * sizeof(TYPE_Z));

        // Scratch buffer(s) for dtype promotion. tmp1 serves whichever input
        // needs a cast; tmp2 is only needed when BOTH inputs differ from
        // TYPE_Z (previously only one input was cast in that case — bug).
        if constexpr ((!std::is_same_v<TYPE_X, TYPE_Z>) || (!std::is_same_v<TYPE_Y, TYPE_Z>))
        {
            pipe.InitBuffer(tmp1, this->ubPartDataNum * sizeof(float32_t));
        }
        if constexpr ((!std::is_same_v<TYPE_X, TYPE_Z>) && (!std::is_same_v<TYPE_Y, TYPE_Z>))
        {
            pipe.InitBuffer(tmp2, this->ubPartDataNum * sizeof(float32_t));
        }
    }

    // Drive the tile pipeline: full-size tiles first, then the tail tile.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        if (loopCount <= 0)
        {
            return; // nothing assigned to this core
        }
        this->processDataNum = this->ubPartDataNum;
        for (int32_t i = 0; i < loopCount - 1; i++)
        {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
        // Last tile may be shorter than ubPartDataNum.
        this->processDataNum = this->tailDataNum;
        CopyIn(loopCount - 1);
        Compute(loopCount - 1);
        CopyOut(loopCount - 1);
    }

private:
    // Stage GM -> UB for tile `progress` of both inputs.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        AscendC::LocalTensor<TYPE_Y> yLocal = inQueueY.AllocTensor<TYPE_Y>();
        AscendC::DataCopy(xLocal, xGm[progress * this->ubPartDataNum], this->processDataNum);
        AscendC::DataCopy(yLocal, yGm[progress * this->ubPartDataNum], this->processDataNum);
        inQueueX.EnQue(xLocal);
        inQueueY.EnQue(yLocal);
    }

    // Compute z = x ^ y for one tile, promoting inputs to float32 when
    // their dtype differs from TYPE_Z.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Y> yLocal = inQueueY.DeQue<TYPE_Y>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        // Default: view the inputs directly as TYPE_Z (valid when dtypes match).
        AscendC::LocalTensor<TYPE_Z> p1, p2;
        p1 = xLocal.template ReinterpretCast<TYPE_Z>();
        p2 = yLocal.template ReinterpretCast<TYPE_Z>();

        // Automatic type promotion: cast each mismatched input to float32.
        // The two branches are independent so that BOTH inputs get promoted
        // when both differ from TYPE_Z (the old if/else cast only one).
        if constexpr (!std::is_same_v<TYPE_X, TYPE_Z>)
        {
            p1 = tmp1.Get<float32_t>();
            AscendC::Cast(p1, xLocal, AscendC::RoundMode::CAST_NONE, this->processDataNum);
        }
        if constexpr (!std::is_same_v<TYPE_Y, TYPE_Z>)
        {
            if constexpr (!std::is_same_v<TYPE_X, TYPE_Z>)
            {
                p2 = tmp2.Get<float32_t>(); // tmp1 already holds the cast x
            }
            else
            {
                p2 = tmp1.Get<float32_t>();
            }
            AscendC::Cast(p2, yLocal, AscendC::RoundMode::CAST_NONE, this->processDataNum);
        }

        /* Manual alternative kept for reference:
           AscendC::Ln(zLocal, p1, this->processDataNum);
           AscendC::Mul(zLocal, zLocal, p2, this->processDataNum);
           AscendC::Exp(zLocal, zLocal, this->processDataNum);
           ... would still need special handling for zero inputs.
        */
        AscendC::Power(zLocal, p1, p2, this->processDataNum);

        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
        inQueueY.FreeTensor(yLocal);
    }

    // Stage UB -> GM for tile `progress` of the output.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();
        AscendC::DataCopy(zGm[progress * this->ubPartDataNum], zLocal, this->processDataNum);
        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueY;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp1; // promotion scratch for x (or y if only y differs)
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp2; // promotion scratch for y when both inputs differ
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Y> yGm;
    AscendC::GlobalTensor<TYPE_Z> zGm;
    uint64_t coreDataNum;    // total elements handled by this core
    uint64_t tileNum;        // number of tiles (loops) for this core
    uint64_t ubPartDataNum;  // elements per full tile
    uint64_t tailDataNum;    // elements in the last (tail) tile
    uint64_t processDataNum; // elements in the tile currently in flight
};

extern "C" __global__ __aicore__ void power_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    if (TILING_KEY_IS(1))
    {
        KernelPower<DTYPE_INPUT, DTYPE_OTHER, DTYPE_OUT, 1> op;
        op.Init(x, y, z, tiling_data.smallCoreDataNum,
                tiling_data.bigCoreDataNum, tiling_data.bigCoreLoopNum,
                tiling_data.smallCoreLoopNum, tiling_data.ubPartDataNum,
                tiling_data.smallCoreTailDataNum, tiling_data.bigCoreTailDataNum,
                tiling_data.tailBlockNum);
        op.Process();
    }
    else if (TILING_KEY_IS(0))
    {
        KernelPower<DTYPE_INPUT, DTYPE_OTHER, DTYPE_OUT, 0> op;
        op.Init(x, y, z, tiling_data.smallCoreDataNum,
                tiling_data.bigCoreDataNum, tiling_data.bigCoreLoopNum,
                tiling_data.smallCoreLoopNum, tiling_data.ubPartDataNum,
                tiling_data.smallCoreTailDataNum, tiling_data.bigCoreTailDataNum,
                tiling_data.tailBlockNum);
        op.Process();
    }
    else if (TILING_KEY_IS(2))
    {
        // NEED BOARDCAST; todo
    }
}