#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;      // Ascend double-buffering: two in-flight buffers per queue so copy and compute overlap

class KernelNotEqual {
public:
    __aicore__ inline KernelNotEqual() {}

    /// Bind the global-memory tensors and allocate all local queues/buffers.
    /// @param x1, x2        GM addresses of the two input tensors
    /// @param y             GM address of the output tensor
    /// @param CoreDataNum   number of elements handled by this core
    /// @param finalTileNum  number of tiles to process (double buffering is
    ///                      already folded into the tiling — see Process())
    /// @param tileDataNum   elements in a full tile
    /// @param TailDataNum   elements in the last (possibly short) tile
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y,  uint32_t CoreDataNum, uint32_t finalTileNum, uint32_t tileDataNum, uint32_t TailDataNum)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->coreDataNum = CoreDataNum;
        this->tileNum = finalTileNum;
        this->tileDataNum = tileDataNum;
        this->tailDataNum = TailDataNum;

        // The +32 / +256 element slack keeps block-aligned DataCopy and the
        // 256-element-aligned Compare() inside the buffers when the tail tile
        // is not itself aligned.
        xGm1.SetGlobalBuffer((__gm__ DTYPE_X1*)x1, this->coreDataNum + 32);
        xGm2.SetGlobalBuffer((__gm__ DTYPE_X2*)x2, this->coreDataNum + 32);
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y*)y, this->coreDataNum + 32);

        pipe.InitBuffer(inQueueX1, BUFFER_NUM, (this->tileDataNum + 256) * sizeof(DTYPE_X1));
        pipe.InitBuffer(inQueueX2, BUFFER_NUM, (this->tileDataNum + 256) * sizeof(DTYPE_X2));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, (this->tileDataNum + 256) * sizeof(DTYPE_Y));
        if constexpr (std::is_same_v<DTYPE_X1, int32_t>)
        {
            // int32 inputs are compared after casting to float: stage buffers.
            pipe.InitBuffer(QueueFloat1, (this->tileDataNum + 256) * sizeof(float));
            pipe.InitBuffer(QueueFloat2, (this->tileDataNum + 256) * sizeof(float));
        }
        else if constexpr (std::is_same_v<DTYPE_X1, int8_t>)
        {
            // int8 inputs are compared after casting to half: stage buffers.
            pipe.InitBuffer(QueueHalf1, (this->tileDataNum + 256) * sizeof(half));
            pipe.InitBuffer(QueueHalf2, (this->tileDataNum + 256) * sizeof(half));
        }
        // Bit mask produced by Compare(): one bit per element (over-allocated).
        pipe.InitBuffer(QueueMask, (this->tileDataNum + 256) * sizeof(DTYPE_X1));
    }

    /// Process every tile assigned to this core: CopyIn -> Compute -> CopyOut.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;  // tiling already accounts for double buffering
        this->processDataNum = this->tileDataNum;
        for (int32_t i = 0; i < loopCount; i++) {
            if (i == this->tileNum - 1) {
                this->processDataNum = this->tailDataNum;  // last tile may be short
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:

    /// Bit-level float NaN test: exponent bits all ones and mantissa non-zero.
    __aicore__ inline int isnan(float num)
    {
        unsigned int bits = *(unsigned int*)&num;
        return (bits & 0x7F800000) == 0x7F800000 && (bits & 0x007FFFFF) != 0;
    }

    /// Stage one tile of both inputs from GM into the input queues.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        LocalTensor<DTYPE_X1> xLocal1 = inQueueX1.AllocTensor<DTYPE_X1>();
        LocalTensor<DTYPE_X2> xLocal2 = inQueueX2.AllocTensor<DTYPE_X2>();
        // tileDataNum >= tailDataNum and the buffers carry slack, so a single
        // copy of processDataNum elements is safe for every tile.
        DataCopy(xLocal1, xGm1[progress * this->tileDataNum], this->processDataNum);
        DataCopy(xLocal2, xGm2[progress * this->tileDataNum], this->processDataNum);
        inQueueX1.EnQue(xLocal1);
        inQueueX2.EnQue(xLocal2);
    }

    /// Elementwise y = (x1 != x2) for one tile.
    __aicore__ inline void Compute(int32_t progress)
    {
        LocalTensor<DTYPE_X1> xLocal1 = inQueueX1.DeQue<DTYPE_X1>();
        LocalTensor<DTYPE_X2> xLocal2 = inQueueX2.DeQue<DTYPE_X2>();
        LocalTensor<DTYPE_Y> yLocal   = outQueueY.AllocTensor<DTYPE_Y>();

        auto m_mask = QueueMask.Get<int8_t>();

        // Compare() needs the element count rounded up to a multiple of 256;
        // the padded buffers absorb the overrun past processDataNum.
        uint32_t cmpCount = (this->processDataNum + 255) / 256 * 256;
        if constexpr (std::is_same_v<DTYPE_X1, int32_t>)
        {
            auto m_float1 = QueueFloat1.Get<float>();
            auto m_float2 = QueueFloat2.Get<float>();
            Cast(m_float1, xLocal1, RoundMode::CAST_NONE, this->processDataNum);
            Cast(m_float2, xLocal2, RoundMode::CAST_NONE, this->processDataNum);
            Compare(m_mask, m_float1, m_float2, CMPMODE::NE, cmpCount);
        }
        else if constexpr (std::is_same_v<DTYPE_X1, int8_t>)
        {
            auto m_half1 = QueueHalf1.Get<half>();
            auto m_half2 = QueueHalf2.Get<half>();
            Cast(m_half1, xLocal1, RoundMode::CAST_NONE, this->processDataNum);
            Cast(m_half2, xLocal2, RoundMode::CAST_NONE, this->processDataNum);
            Compare(m_mask, m_half1, m_half2, CMPMODE::NE, cmpCount);
        }
        else
        {
            Compare(m_mask, xLocal1, xLocal2, CMPMODE::NE, cmpCount);
        }

        // Unpack the bit mask (1 bit per compared element) into one output
        // element each.
        size_t m_size = cmpCount / 8;  // number of mask bytes produced by Compare
        for (size_t i = 0; i < m_size; ++i) {
            uint8_t value = m_mask.GetValue(i);
            for (size_t j = 0; j < 8; ++j) {
                size_t idx = i * 8 + j;
                yLocal(idx) = ((value >> j) & 1);
                // IEEE 754: a comparison involving NaN is unordered, so
                // NotEqual must report true when EITHER operand is NaN.
                // Bug fix: the original tested xLocal1 twice (never xLocal2).
                if (isnan(xLocal1(idx)) || isnan(xLocal2(idx)))
                {
                    yLocal(idx) = 1;
                }
            }
        }

        outQueueY.EnQue<DTYPE_Y>(yLocal);
        inQueueX1.FreeTensor(xLocal1);
        inQueueX2.FreeTensor(xLocal2);
    }

    /// Copy one computed tile from the output queue back to GM.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        LocalTensor<DTYPE_Y> yLocal2 = outQueueY.DeQue<DTYPE_Y>();
        DataCopy(yGm[progress * this->tileDataNum], yLocal2, this->processDataNum);
        outQueueY.FreeTensor(yLocal2);
    }

private:
    TPipe pipe;
    // Input queues; depth equals the double-buffer count.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX1;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX2;
    // Output queue; depth equals the double-buffer count.
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<DTYPE_X1> xGm1;
    GlobalTensor<DTYPE_X2> xGm2;
    GlobalTensor<DTYPE_Y> yGm;
    // Scratch: Compare() bit mask plus cast staging for integer input types.
    TBuf<QuePosition::VECCALC> QueueMask, QueueHalf1, QueueHalf2, QueueFloat1, QueueFloat2;

    uint32_t coreDataNum;     // total elements for this core
    uint32_t tileNum;         // number of tiles
    uint32_t tileDataNum;     // elements per full tile
    uint32_t tailDataNum;     // elements in the last tile
    uint32_t processDataNum;  // elements in the tile currently being processed
};

extern "C" __global__ __aicore__ void not_equal(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    // Fetch per-core tiling parameters, then run the NotEqual kernel.
    GET_TILING_DATA(tilingData, tiling);
    KernelNotEqual kernel;
    kernel.Init(x1, x2, y, tilingData.CoreDataNum, tilingData.finalTileNum,
                tilingData.tileDataNum, tilingData.TailDataNum);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher: dispatches the not_equal kernel onto the given stream
// with blockDim cores (not compiled in CPU-debug builds).
void not_equal_do(uint32_t blockDim, void *l2ctrl, void *stream, uint8_t *x1, uint8_t *x2, uint8_t *y, 
                   uint8_t *workspace, uint8_t *tiling)
{
    not_equal<<<blockDim, l2ctrl, stream>>>(x1, x2, y, workspace, tiling);
}
#endif
