#include "kernel_operator.h"
#include <type_traits>
using namespace AscendC;
//constexpr int32_t BUFFER_NUM = 2; 

// Element-wise isclose kernel (tiling key 1, same-shape inputs).
// For each element computes: close = |x1 - x2| <= atol + rtol * |x2|, writing 1/0 into y.
// Each core processes a contiguous slice of the flattened tensors with a double-buffered
// CopyIn -> Compute -> CopyOut pipeline.
// NOTE(review): equal_nan is stored but never consulted in Compute(); NaN inputs always
// come out "not close" regardless of the flag -- confirm this is intended.
template<typename TYPE_X1, typename TYPE_X2, typename TYPE_Y> class KernelIsClose {
    using T = TYPE_X1;
public:
    __aicore__ inline KernelIsClose() {}
    // Bind this core's slice of global memory and allocate all local (UB) workspace.
    //   totalLength : total element count (unused here; the per-core split arrives pre-computed)
    //   ALIGN_NUM   : element alignment unit; blockLength is rounded up to a multiple of it
    //   block_size  : tile length in elements
    //   core_size   : elements per core; the last core additionally absorbs core_remain
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, uint32_t totalLength, uint32_t ALIGN_NUM, uint32_t block_size, uint32_t core_size, uint32_t core_remain, float rtol, float atol, bool equal_nan) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        int32_t BUFFER_NUM = 2; 
        // Last core takes the remainder, then the slice is padded up to ALIGN_NUM elements.
        this->blockLength = core_size + (GetBlockNum() == GetBlockIdx() + 1 ? core_remain : 0);
        this->tileLength = block_size;
        this->blockLength = this->blockLength + (this->blockLength % ALIGN_NUM ? ALIGN_NUM - this->blockLength % ALIGN_NUM : 0);

        auto startPointer = core_size * GetBlockIdx();
        auto bufferlength = this->blockLength;
        this->rtol = rtol;
        this->atol = atol;
        this->equal_nan = equal_nan;

        Gm_x1.SetGlobalBuffer((__gm__ TYPE_X1*)x1 + startPointer, bufferlength);
        Gm_x2.SetGlobalBuffer((__gm__ TYPE_X2*)x2 + startPointer, bufferlength);
        Gm_y.SetGlobalBuffer((__gm__ TYPE_Y*)y + startPointer, bufferlength);

        // Ceiling division: number of tiles covering this core's slice.
        this->tileNum = this->blockLength / this->tileLength + (this->blockLength % this->tileLength > 0);

        // Double-buffered in/out queues plus shared scratch buffers for the compute stage.
        pipe.InitBuffer(Q_x1, BUFFER_NUM, this->tileLength * sizeof(TYPE_X1));
        pipe.InitBuffer(Q_x2, BUFFER_NUM, this->tileLength * sizeof(TYPE_X2));
        pipe.InitBuffer(Q_y, BUFFER_NUM, this->tileLength * sizeof(TYPE_Y));
        pipe.InitBuffer(B_bits, this->tileLength * sizeof(uint8_t));
        pipe.InitBuffer(B_result, this->tileLength * sizeof(half));
        pipe.InitBuffer(B_zero, this->tileLength * sizeof(half));
        // Constant half-typed 0-vector: the "selected" branch of Select() in Compute().
        this->zero = B_zero.Get<half>();
        Duplicate(this->zero, half(0), this->tileLength);
        // Per-dtype scratch: each path needs float working buffers, and the last TBuf of each
        // group holds a float 0-vector for the final Compare-against-zero.
        if constexpr (std::is_same_v<T, int32_t>) {
            pipe.InitBuffer(B_x1, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x2, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x3, this->tileLength * sizeof(float));
            auto float0 = B_x3.Get<float>();
            Duplicate(float0, float(0), this->tileLength);
        }
        else if constexpr (std::is_same_v<T, float>) {
            pipe.InitBuffer(B_x1, this->tileLength * sizeof(float));
            auto float0 = B_x1.Get<float>();
            Duplicate(float0, float(0), this->tileLength);
        }
        else if constexpr (std::is_same_v<T, uint8_t>) {
            pipe.InitBuffer(B_x1, this->tileLength * sizeof(half));
            pipe.InitBuffer(B_x2, this->tileLength * sizeof(half));
            pipe.InitBuffer(B_x3, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x4, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x5, this->tileLength * sizeof(float));
            auto float0 = B_x5.Get<float>();
            Duplicate(float0, float(0), this->tileLength);
        }
        else{
            // Fallback group -- in practice this is the half path used by Compute().
            pipe.InitBuffer(B_x1, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x2, this->tileLength * sizeof(float));
            pipe.InitBuffer(B_x3, this->tileLength * sizeof(float));
            auto float0 = B_x3.Get<float>();
            Duplicate(float0, float(0), this->tileLength);
        }
    }
    // Drive the tile pipeline. The last tile may be shorter; its CopyOut length is rounded
    // up to a 32-byte boundary as DataCopy requires.
    __aicore__ inline void Process() {
        int32_t loopCount = this->tileNum;
        for (int32_t i = 0; i < loopCount-1; i++) {
            CopyIn(i, this->tileLength);
            Compute(i, this->tileLength);
            CopyOut(i, this->tileLength);
        }
        uint32_t length = this->blockLength - this->tileLength * (loopCount - 1);
        CopyIn(loopCount - 1, length);
        Compute(loopCount - 1, length);
        CopyOut(loopCount - 1, (length + 31) / 32 * 32);
    }
private:
    // Stage 1: stream one tile of each input from global memory into the VECIN queues.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length) {
        LocalTensor<TYPE_X1> x1 = Q_x1.AllocTensor<TYPE_X1>();
        LocalTensor<TYPE_X2> x2 = Q_x2.AllocTensor<TYPE_X2>();
        DataCopy(x1, Gm_x1[progress * this->tileLength], length);
        DataCopy(x2, Gm_x2[progress * this->tileLength], length);
        Q_x1.EnQue(x1);
        Q_x2.EnQue(x2);
    }
    // Stage 2: per-tile isclose. The test |x1-x2| <= atol + rtol*|x2| is evaluated as
    //   d = |x1-x2|; bound = atol + rtol*|x2|; close <=> d - min(d, bound) == 0
    // Compare(..., NE) sets a mask bit where the element is NOT close; Select then maps
    // set bits to 0 and clear bits to 1, and the final Cast writes the 0/1 result into y.
    // NOTE(review): only int32_t/float/half/uint8_t are handled; any other T would leave
    // `bits` unwritten before the Select -- confirm instantiations are limited to these.
    __aicore__ inline void Compute(int32_t progress, uint32_t length) {
        LocalTensor<TYPE_X1> x1 = Q_x1.DeQue<TYPE_X1>();
        LocalTensor<TYPE_X2> x2 = Q_x2.DeQue<TYPE_X2>();
        LocalTensor<TYPE_Y> y = Q_y.AllocTensor<TYPE_Y>();
        float rtoltmp = this->rtol;
        float atoltmp = this->atol;
        bool equal_nantmp = this->equal_nan;  // NOTE(review): currently unused (see class note)

        auto bits = B_bits.Get<uint8_t>();
        auto result = B_result.Get<half>();
        auto inty = y.template ReinterpretCast<uint8_t>();
        if constexpr (std::is_same_v<T, uint8_t>) {
            // uint8 path: widen to half for the subtract/abs, then to float for the
            // tolerance arithmetic (rtol/atol are float scalars).
            auto half_x1 = B_x1.Get<half>();
            auto half_x2 = B_x2.Get<half>();
            auto float_x1 = B_x3.Get<float>();
            auto float_x2 = B_x4.Get<float>();
            auto float_zero = B_x5.Get<float>();
            Cast(half_x1, x1, RoundMode::CAST_NONE, length);
            Cast(half_x2, x2, RoundMode::CAST_NONE, length);
            Sub(half_x1, half_x1, half_x2, length);
            Abs(half_x1, half_x1, length);
            Abs(half_x2, half_x2, length);

            Cast(float_x1, half_x1, RoundMode::CAST_NONE, length);
            Cast(float_x2, half_x2, RoundMode::CAST_NONE, length);
            Muls(float_x2, float_x2, rtoltmp, length);
            Adds(float_x2, float_x2, atoltmp, length);
            Min(float_x2, float_x1, float_x2, length);
            Sub(float_x1, float_x1, float_x2, length);

            Compare(bits, float_x1, float_zero, CMPMODE::NE, length);
            Select(result, bits, zero, half(1), SELMODE::VSEL_TENSOR_SCALAR_MODE, length);
            Cast(inty, result, RoundMode::CAST_NONE, length);
        } 
        else {
            // Common prefix: x1 <- x1 - x2 in the native type.
            Sub(x1, x1, x2, length);
            if constexpr (std::is_same_v<T, int32_t>) {
                // int32 path: widen the difference and x2 to float before the tolerance math.
                auto val1 = B_x1.Get<float>();
                auto val2 = B_x2.Get<float>();
                auto float_zero = B_x3.Get<float>();
                Cast(val1, x1, RoundMode::CAST_NONE, length);
                Cast(val2, x2, RoundMode::CAST_NONE, length);

                Abs(val1, val1, length);
                Abs(val2, val2, length);
                Muls(val2, val2, rtoltmp, length);
                Adds(val2, val2, atoltmp, length);
                Min(val2, val1, val2, length);
                Sub(val1, val1, val2, length);
                Compare(bits, val1, float_zero, CMPMODE::NE, length);
            }
            else if constexpr (std::is_same_v<T, float>) {
                // float path: everything computed in place, only a zero vector needed.
                auto float_zero = B_x1.Get<float>();
                Abs(x1, x1, length);
                Abs(x2, x2, length);
                Muls(x2, x2, rtoltmp, length);
                Adds(x2, x2, atoltmp, length);
                Min(x2, x1, x2, length);
                Sub(x1, x1, x2, length);
                Compare(bits, x1, float_zero, CMPMODE::NE, length);
            }
            else if constexpr (std::is_same_v<T, half>){ //half
                // half path: abs in half, then widen to float for the tolerance math.
                auto tmp1 = B_x1.Get<float>();
                auto tmp2 = B_x2.Get<float>();
                auto float_zero = B_x3.Get<float>();
                Abs(x1, x1, length);
                Abs(x2, x2, length);
                Cast(tmp1, x1, RoundMode::CAST_NONE, length);
                Cast(tmp2, x2, RoundMode::CAST_NONE, length);
                Muls(tmp2, tmp2, rtoltmp, length);
                Adds(tmp2, tmp2, atoltmp, length);
                Min(tmp2, tmp1, tmp2, length);
                Sub(tmp1, tmp1, tmp2, length);
                Compare(bits, tmp1, float_zero, CMPMODE::NE, length);
            }
            Select(result, bits, zero, half(1), SELMODE::VSEL_TENSOR_SCALAR_MODE, length);
            Cast(inty, result, RoundMode::CAST_NONE, length);
        }
        Q_x1.FreeTensor(x1);
        Q_x2.FreeTensor(x2);
        Q_y.EnQue<TYPE_Y>(y);
    }
    // Stage 3: write one result tile back to global memory.
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length) {
        LocalTensor<TYPE_Y> y = Q_y.DeQue<TYPE_Y>();
        DataCopy(Gm_y[progress * this->tileLength], y, length);
        Q_y.FreeTensor(y);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, 2> Q_x1, Q_x2;   // double-buffered input queues
    TQue<QuePosition::VECOUT, 2> Q_y;          // double-buffered output queue
    TBuf<QuePosition::VECCALC> B_result, B_zero, B_bits;
    TBuf<QuePosition::VECCALC> B_x1, B_x2,B_x3,B_x4,B_x5;  // dtype-dependent scratch (see Init)
    LocalTensor<half> zero;                    // half-typed 0-vector for Select
    GlobalTensor<TYPE_X1> Gm_x1;
    GlobalTensor<TYPE_X2> Gm_x2;
    GlobalTensor<TYPE_Y> Gm_y;
    uint32_t blockLength;   // elements this core handles (ALIGN_NUM-padded)
    uint32_t tileNum;       // number of tiles (ceil(blockLength / tileLength))
    uint32_t tileLength;    // elements per tile
    float rtol;
    float atol;
    bool equal_nan;         // stored but not used by Compute() (see class note)
};


// Scalar broadcast isclose kernel (tiling key 2): input shapes differ and are broadcast
// against the output shape, so each output element is computed individually with scalar
// reads/writes on global memory.
//
// Shape metadata (example: y = {69,33,4,3}, x2 = {1,33,4,3}):
//   y_dimensional : number of output dims (must be <= 19; the shape arrays hold 20 entries)
//   *_ndarray     : per-dim extents of y / x1 / x2
//   *_sumndarray  : prefix products of extents; index k holds the product of the first k
//                   extents, so y_sumndarray[y_dimensional] is the total element count
//                   (e.g. y_sumndarray = {1, 69, 2277, 9108, 27324}).
template<typename TYPE_X1, typename TYPE_X2, typename TYPE_Y> class KernelIsClose_Broadcast {
    using T = TYPE_X1;
public:
    __aicore__ inline KernelIsClose_Broadcast() {}
    // Store tolerances and shape metadata, and bind the global buffers.
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, int32_t y_dimensional, 
                                int32_t *y_ndarray, int32_t *x1_ndarray, int32_t *x2_ndarray, 
                                int32_t *y_sumndarray, int32_t *x1_sumndarray, int32_t *x2_sumndarray, float rtol, float atol, bool equal_nan) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->y_dimensional = y_dimensional;
        this->rtol = rtol;
        this->atol = atol;
        this->equal_nan = equal_nan;

        // Copy dim+1 entries: sumndarray has one more slot than there are dims
        // (the last entry is the total element count).
        for (int k = 0; k <= y_dimensional; k++) {
            this->y_ndarray[k] = y_ndarray[k];
            this->x1_ndarray[k] = x1_ndarray[k];
            this->x2_ndarray[k] = x2_ndarray[k];
            this->y_sumndarray[k] = y_sumndarray[k];
            this->x1_sumndarray[k] = x1_sumndarray[k];
            this->x2_sumndarray[k] = x2_sumndarray[k];
        }

        // Lengths passed to SetGlobalBuffer are nominal; access is per-element via
        // GetValue/SetValue with explicitly computed offsets.
        x1Gm.SetGlobalBuffer((__gm__ TYPE_X1*)x1, 1);
        x2Gm.SetGlobalBuffer((__gm__ TYPE_X2*)x2, 1);
        yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y, 1);
    }
    // For each output element j: map j onto the (possibly broadcast) input offsets,
    // then evaluate torch.isclose semantics:
    //   close = |x1 - x2| <= atol + rtol * |x2|, with the NaN rule controlled by equal_nan
    //   (both operands NaN counts as close iff equal_nan is set).
    __aicore__ inline void Process() {
        const int dim = this->y_dimensional;
        const float rt = this->rtol;
        const float at = this->atol;
        const bool nan_eq = this->equal_nan;

        for (int j = 0; j < this->y_sumndarray[dim]; j++) {
            // Decompose the linear index j into per-dim coordinates; dims of extent 1
            // are broadcast and contribute nothing to that input's offset.
            int x1_start = 0, x2_start = 0;
            for (int k = 0; k < dim; k++) {
                const int coord = j / this->y_sumndarray[k] % this->y_ndarray[k];
                if (this->x1_ndarray[k] != 1) {
                    x1_start += this->x1_sumndarray[k] * coord;
                }
                if (this->x2_ndarray[k] != 1) {
                    x2_start += this->x2_sumndarray[k] * coord;
                }
            }

            // Promote both operands to float (half has no direct scalar arithmetic here;
            // integer types are widened the same way).
            float a, b;
            if constexpr (std::is_same_v<T, half>) {
                a = static_cast<float>(x1Gm.GetValue(x1_start));
                b = static_cast<float>(x2Gm.GetValue(x2_start));
            } else {
                a = static_cast<float>(x1Gm.GetValue(x1_start));
                b = static_cast<float>(x2Gm.GetValue(x2_start));
            }

            const float abs_error = (a >= b) ? (a - b) : (b - a);
            const float rel_error = rt * (b >= 0 ? b : -b);
            // NaN compares false, so any NaN operand falls through to the equal_nan rule.
            bool y_val = abs_error <= (at + rel_error);
            if (nan_eq && (a != a) && (b != b)) {
                y_val = true;  // both operands NaN
            }

            // Cast through the template output type (previously used the global DTYPE_Y
            // macro, which broke the class's genericity).
            yGm.SetValue(j, (TYPE_Y)y_val);
        }
    }

private:
    GlobalTensor<TYPE_X1> x1Gm;
    GlobalTensor<TYPE_X2> x2Gm;
    GlobalTensor<TYPE_Y> yGm;

    int32_t y_dimensional;       // number of output dims (< 20)
    int32_t y_ndarray[20];       // output extents
    int32_t x1_ndarray[20];      // x1 extents (1 => broadcast along that dim)
    int32_t x2_ndarray[20];      // x2 extents (1 => broadcast along that dim)

    int32_t y_sumndarray[20];    // output prefix strides; [dim] = total element count
    int32_t x1_sumndarray[20];   // x1 prefix strides
    int32_t x2_sumndarray[20];   // x2 prefix strides
    float rtol;
    float atol;
    bool equal_nan;
};



// Kernel entry point: dispatch on the tiling key chosen by the host-side tiling function.
//   key 1 -> same-shape inputs, tiled vector pipeline (KernelIsClose)
//   key 2 -> broadcast shapes, per-element scalar path (KernelIsClose_Broadcast)
extern "C" __global__ __aicore__ void is_close(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    if (TILING_KEY_IS(1)) {
        KernelIsClose<DTYPE_X1, DTYPE_X2, DTYPE_Y> op;
        op.Init(x1, x2, y,
                tiling_data.totalLength, tiling_data.ALIGN_NUM, tiling_data.block_size,
                tiling_data.core_size, tiling_data.core_remain,
                tiling_data.rtol, tiling_data.atol, tiling_data.equal_nan);
        op.Process();
    } else if (TILING_KEY_IS(2)) {
        KernelIsClose_Broadcast<DTYPE_X1, DTYPE_X2, DTYPE_Y> op;
        op.Init(x1, x2, y, tiling_data.y_dimensional,
                tiling_data.y_ndarray, tiling_data.x1_ndarray, tiling_data.x2_ndarray,
                tiling_data.y_sumndarray, tiling_data.x1_sumndarray, tiling_data.x2_sumndarray,
                tiling_data.rtol, tiling_data.atol, tiling_data.equal_nan);
        op.Process();
    }
}