#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;      // Ascend double-buffering (ping-pong) queue depth

class KernelAsinh {
public:
    __aicore__ inline KernelAsinh() {}

    /**
     * Bind global-memory tensors and initialize queues/scratch buffers.
     *
     * @param x            device address of the input tensor
     * @param y            device address of the output tensor
     * @param CoreDataNum  total number of elements processed by this core
     * @param finalTileNum number of tiles (loop iterations) for this core
     * @param tileDataNum  elements in a full tile
     * @param TailDataNum  elements in the last, possibly partial, tile
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,  uint32_t CoreDataNum, uint32_t finalTileNum, uint32_t tileDataNum, uint32_t TailDataNum)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->coreDataNum = CoreDataNum;
        this->tileNum = finalTileNum;
        this->tileDataNum = tileDataNum;
        this->tailDataNum = TailDataNum;

        xGm.SetGlobalBuffer((__gm__ DTYPE_X*)x, this->coreDataNum);
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y*)y, this->coreDataNum);

        // Double-buffered (ping-pong) input/output queues.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, (this->tileDataNum)  * sizeof(DTYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, (this->tileDataNum) * sizeof(DTYPE_Y));

        // Scratch: a zero tile, a per-element sign mask, and a temporary for
        // the negative-branch result.  Mask/tmp are padded so the Compare()
        // element count can be rounded up to a 256-element boundary.
        pipe.InitBuffer(QueueZero, (this->tileDataNum)  * sizeof(DTYPE_X));
        pipe.InitBuffer(QueueMask, (this->tileDataNum + 256) * sizeof(uint8_t));
        pipe.InitBuffer(QueueTmpY, (this->tileDataNum + 256) * sizeof(DTYPE_Y));
    }

    /** Tile loop: CopyIn -> Compute -> CopyOut; the final tile may be partial. */
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;  // tiling already accounts for double buffering
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
                this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    /** Stage one tile of input from global to local memory. */
    __aicore__ inline void CopyIn(int32_t progress)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();
        // tileDataNum >= tailDataNum, so copying processDataNum elements
        // always fits the staged buffer; no special tail case is needed here.
        DataCopy(xLocal, xGm[progress * this->tileDataNum], this->processDataNum);
        inQueueX.EnQue(xLocal);
    }

    /**
     * Element-wise asinh over one tile:
     *   y =  ln(x + sqrt(x^2 + 1))   for x >= 0
     *   y = -ln(sqrt(x^2 + 1) - x)   for x <  0
     * The two forms are mathematically equivalent for every x; choosing by
     * sign avoids catastrophic cancellation for large-magnitude inputs.
     *
     * BUG FIX: the branch used to be chosen once per tile from
     * xLocal.GetValue(0), so a tile mixing positive and negative values got
     * the unstable formula for part of its elements (and the scalar read
     * forced a pipeline sync).  A per-element Compare/Select mask — the
     * author's previously commented-out draft — now picks the stable form
     * for each lane.
     */
    __aicore__ inline void Compute(int32_t progress)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        LocalTensor<DTYPE_Y> yLocal = outQueueY.AllocTensor<DTYPE_Y>();

        auto m_zero = QueueZero.Get<DTYPE_X>();
        auto m_mask = QueueMask.Get<uint8_t>();
        auto m_tmpy = QueueTmpY.Get<DTYPE_Y>();

        Duplicate(m_zero, (DTYPE_X)(0), this->processDataNum);
        // Bitmask, one bit per element: 1 where x < 0.  Compare requires the
        // element count rounded up to a 256-element boundary; extra lanes
        // produce mask bits that Select() never consumes.
        Compare(m_mask, xLocal, m_zero, CMPMODE::LT, (this->processDataNum + 255) / 256 * 256);

        // Shared subexpression s = sqrt(x^2 + 1).
        DTYPE_X scalar = 1.00;
        Mul(yLocal, xLocal, xLocal, this->processDataNum);
        Adds(yLocal, yLocal, scalar, this->processDataNum);
        Sqrt(yLocal, yLocal, this->processDataNum);

        // Negative branch: -ln(s - x), computed into the temporary.
        Sub(m_tmpy, yLocal, xLocal, this->processDataNum);
        Ln(m_tmpy, m_tmpy, this->processDataNum);
        Sub(m_tmpy, m_zero, m_tmpy, this->processDataNum);
        // Positive branch: ln(s + x), computed in place.
        Add(yLocal, yLocal, xLocal, this->processDataNum);
        Ln(yLocal, yLocal, this->processDataNum);
        // Per-element pick: negative-branch result where the mask bit is set.
        Select(yLocal, m_mask, m_tmpy, yLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, this->processDataNum);

        outQueueY.EnQue<DTYPE_Y>(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    /** Write one computed tile back to global memory. */
    __aicore__ inline void CopyOut(int32_t progress)
    {
        LocalTensor<DTYPE_Y> yLocal = outQueueY.DeQue<DTYPE_Y>();
        DataCopy(yGm[progress * this->tileDataNum], yLocal, this->processDataNum);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    // Input/output queues; depth equals the double-buffer count.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;
    TBuf<QuePosition::VECCALC> QueueZero;   // constant-zero tile
    TBuf<QuePosition::VECCALC> QueueMask;   // per-element sign mask (x < 0)
    TBuf<QuePosition::VECCALC> QueueTmpY;   // negative-branch scratch result

    // Tiling parameters received from the host.
    uint32_t coreDataNum;
    uint32_t tileNum;
    uint32_t tileDataNum;
    uint32_t tailDataNum;
    uint32_t processDataNum;   // elements in the tile currently being processed
};

extern "C" __global__ __aicore__ void asinh(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    // Unpack the tiling parameters prepared on the host side.
    GET_TILING_DATA(tiling_data, tiling);
    // Construct the operator, bind buffers, then run the tile loop.
    KernelAsinh kernel;
    kernel.Init(x, y, tiling_data.CoreDataNum, tiling_data.finalTileNum, tiling_data.tileDataNum, tiling_data.TailDataNum);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// call of kernel function
// Host-side launch helper: enqueues the asinh kernel on the given stream
// with `blockDim` cores (NPU build only; excluded under CPU debug).
void asinh_do(uint32_t blockDim, void *l2ctrl, void *stream, uint8_t *x, uint8_t *y, 
                   uint8_t *workspace, uint8_t *tiling)
{
    asinh<<<blockDim, l2ctrl, stream>>>(x, y, workspace, tiling);
}
#endif
