// Must be defined before kernel_operator.h; presumably disables the
// shape-dim arrays in the generated tiling struct — TODO confirm.
#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
using namespace AscendC;
// Double buffering for all VECIN/VECOUT queues.
constexpr int32_t BUFFER_NUM = 2;
// Constants for collapsing "any positive value" to exactly 1.0 with only
// Mins/Muls: clamp to eps, then multiply back up so that eps * scale^k == 1.
// fp16: 2^-24 * 2^12 * 2^12 == 1 (two Muls by MAX_MUL_FP16).
constexpr float MIN_ACCURACY_FP16 = 0.00000005960464477539063F;
constexpr float MAX_MUL_FP16 = 4096;
// fp32: 2^-126 (FLT_MIN) * 2^126 == 1 (one Muls by MAX_MUL_FP32).
constexpr float MIN_ACCURACY_FP32 = 1.1754943508222875e-38;
constexpr float MAX_MUL_FP32 = 8.507059173023462e+37;
// Heaviside step function, element-wise, for the case where `values` is a
// single scalar broadcast against `input` (tiling key 1):
//   out[i] = 0       if input[i] < 0
//   out[i] = values  if input[i] == 0
//   out[i] = 1       if input[i] > 0
// Each core owns one contiguous slice of `input`, processed as two
// half-tiles double-buffered through VECIN/VECOUT queues.
class KernelHeaviside_one {
public:
    __aicore__ inline KernelHeaviside_one() {}
    // input/values/out: global-memory base addresses.
    // formerNum cores get formerLength elements each; the remaining cores get
    // tailLength each (tailNum is unused here — kept for a uniform signature).
    // `pipe` is owned by the caller (the kernel entry function).
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR values, GM_ADDR out,
                                uint32_t formerNum, uint32_t tailNum, uint32_t formerLength, uint32_t tailLength,
                                TPipe* pipe) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        // Each core processes its slice in two tiles of tileLength elements.
        // NOTE(review): an odd formerLength/tailLength would drop the last
        // element (integer division by 2) — presumably the host-side tiling
        // guarantees even lengths; confirm against the tiling code.
        if (GetBlockIdx() < formerNum) {
            this->tileLength = formerLength/2;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + formerLength * GetBlockIdx(), this->tileLength);
            outGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)out + formerLength * GetBlockIdx(), this->tileLength);
        } else {
            this->tileLength = tailLength/2;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + formerLength * formerNum + tailLength * (GetBlockIdx() - formerNum), this->tileLength);
            outGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)out + formerLength * formerNum + tailLength * (GetBlockIdx() - formerNum), this->tileLength);
        }
        valuesGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)values, 1);  // scalar `values`
        pipe->InitBuffer(inQueueInput, BUFFER_NUM, this->tileLength * sizeof(DTYPE_INPUT));
        pipe->InitBuffer(outQueueOut, BUFFER_NUM, this->tileLength * sizeof(DTYPE_OUT));
        // One byte of compare-mask scratch per element.
        pipe->InitBuffer(QueueTmp1, this->tileLength * sizeof(uint8_t));
    }
    __aicore__ inline void Process() {
        // Two tiles per core; the scalar `values` is re-read for each tile.
        CopyIn(0);
        Compute(valuesGm.GetValue(0));
        CopyOut(0);
        CopyIn(this->tileLength);
        Compute(valuesGm.GetValue(0));
        CopyOut(this->tileLength);
    }
private:
    __aicore__ inline void CopyIn(int32_t offset) {
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.AllocTensor<DTYPE_INPUT>();
        DataCopy(inputLocal, inputGm[offset], this->tileLength);
        inQueueInput.EnQue(inputLocal);
    }
    // data: the scalar `values`, substituted wherever input == 0.
    __aicore__ inline void Compute(DTYPE_OUT data) {
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.DeQue<DTYPE_INPUT>();
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.AllocTensor<DTYPE_OUT>();
        auto tmp1 = QueueTmp1.Get<uint8_t>();
        // tmp1 = (input != 0): later selects between the 0/1 step and `data`.
        CompareScalar(tmp1, inputLocal, (DTYPE_INPUT)0, CMPMODE::NE, this->tileLength);
        // Clamp negatives to 0 ...
        Maxs(outLocal, inputLocal, (DTYPE_INPUT)0, this->tileLength);
        inQueueInput.FreeTensor(inputLocal);

        // ... then collapse every strictly-positive value to exactly 1:
        // min(x, eps) * scale [* scale] == 1 because eps * scale^k == 1 by
        // construction of the constants (2^-24 * 2^12 * 2^12 for fp16,
        // 2^-126 * 2^126 for fp32); 0 stays 0.
        if constexpr (std::is_same_v<DTYPE_INPUT, half>) {
            Mins(outLocal, outLocal, (DTYPE_INPUT)MIN_ACCURACY_FP16, this->tileLength);
            Muls(outLocal, outLocal, (DTYPE_INPUT)MAX_MUL_FP16, this->tileLength);
            Muls(outLocal, outLocal, (DTYPE_INPUT)MAX_MUL_FP16, this->tileLength);
        } else {
            Mins(outLocal, outLocal, (DTYPE_INPUT)MIN_ACCURACY_FP32, this->tileLength);
            Muls(outLocal, outLocal, (DTYPE_INPUT)MAX_MUL_FP32, this->tileLength);
        }
        // out = (input != 0) ? step(input) : values
        Select(outLocal, tmp1, outLocal, data, SELMODE::VSEL_TENSOR_SCALAR_MODE, this->tileLength);
        outQueueOut.EnQue<DTYPE_OUT>(outLocal);
    }
    __aicore__ inline void CopyOut(int32_t offset) {
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.DeQue<DTYPE_OUT>();
        DataCopy(outGm[offset], outLocal, this->tileLength);
        outQueueOut.FreeTensor(outLocal);
    }
private:
    // Unused members removed: a private TPipe (Init uses the caller's pipe),
    // inQueueValues, and QueueTmp2..5 were declared but never used.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueInput;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    TBuf<QuePosition::VECCALC> QueueTmp1;

    GlobalTensor<DTYPE_INPUT> inputGm;
    GlobalTensor<DTYPE_INPUT> valuesGm;
    GlobalTensor<DTYPE_INPUT> outGm;

    uint32_t tileLength;  // elements per tile (half of this core's slice)
};
// Heaviside step function, element-wise, for same-shape `input` and `values`
// (tiling key 2):
//   out[i] = 0          if input[i] < 0
//   out[i] = values[i]  if input[i] == 0
//   out[i] = 1          if input[i] > 0
// Each core handles one contiguous tile in a single CopyIn/Compute/CopyOut pass.
class KernelHeaviside {
public:
    __aicore__ inline KernelHeaviside() {}
    // The first formerNum cores own formerLength elements each; the remaining
    // cores own tailLength each (tailNum is unused — kept for a uniform signature).
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR values, GM_ADDR out,
                                uint32_t formerNum, uint32_t tailNum, uint32_t formerLength, uint32_t tailLength) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        if (GetBlockIdx() < formerNum) {
            this->tileLength = formerLength;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + formerLength * GetBlockIdx(), formerLength);
            valuesGm.SetGlobalBuffer((__gm__ DTYPE_VALUES*)values + formerLength * GetBlockIdx(), formerLength);
            outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + formerLength * GetBlockIdx(), formerLength);
        } else {
            this->tileLength = tailLength;
            inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input + formerLength * formerNum + tailLength * (GetBlockIdx() - formerNum), tailLength);
            valuesGm.SetGlobalBuffer((__gm__ DTYPE_VALUES*)values + formerLength * formerNum + tailLength * (GetBlockIdx() - formerNum), tailLength);
            outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out + formerLength * formerNum + tailLength * (GetBlockIdx() - formerNum), tailLength);
        }
        pipe.InitBuffer(inQueueInput, BUFFER_NUM, this->tileLength * sizeof(DTYPE_INPUT));
        pipe.InitBuffer(inQueueValues, BUFFER_NUM, this->tileLength * sizeof(DTYPE_VALUES));
        pipe.InitBuffer(outQueueOut, BUFFER_NUM, this->tileLength * sizeof(DTYPE_OUT));
        // One byte of compare-mask scratch per element.
        pipe.InitBuffer(QueueTmp1, this->tileLength * sizeof(uint8_t));
    }
    __aicore__ inline void Process() {
        CopyIn();
        Compute();
        CopyOut();
    }
private:
    __aicore__ inline void CopyIn() {
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.AllocTensor<DTYPE_INPUT>();
        LocalTensor<DTYPE_VALUES> valuesLocal = inQueueValues.AllocTensor<DTYPE_VALUES>();
        DataCopy(inputLocal, inputGm, this->tileLength);
        DataCopy(valuesLocal, valuesGm, this->tileLength);
        inQueueInput.EnQue(inputLocal);
        inQueueValues.EnQue(valuesLocal);
    }
    // Two CompareScalar/Select passes build the piecewise result without
    // branches:
    //   pass 1: out = (input == 0) ? values : 0   (handles the x==0 and x<0 rows)
    //   pass 2: out = (input <= 0) ? out    : 1   (overwrites the x>0 rows with 1)
    __aicore__ inline void Compute() {
        LocalTensor<DTYPE_INPUT> inputLocal = inQueueInput.DeQue<DTYPE_INPUT>();
        LocalTensor<DTYPE_VALUES> valuesLocal = inQueueValues.DeQue<DTYPE_VALUES>();
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.AllocTensor<DTYPE_OUT>();

        auto tmp1 = QueueTmp1.Get<uint8_t>();

        CompareScalar(tmp1, inputLocal, (DTYPE_INPUT)0, CMPMODE::EQ, this->tileLength);
        Select(outLocal, tmp1, valuesLocal, (DTYPE_OUT)0, SELMODE::VSEL_TENSOR_SCALAR_MODE, this->tileLength);

        CompareScalar(tmp1, inputLocal, (DTYPE_INPUT)0, CMPMODE::LE, this->tileLength);
        Select(outLocal, tmp1, outLocal, (DTYPE_OUT)1, SELMODE::VSEL_TENSOR_SCALAR_MODE, this->tileLength);

        inQueueInput.FreeTensor(inputLocal);
        inQueueValues.FreeTensor(valuesLocal);
        outQueueOut.EnQue<DTYPE_OUT>(outLocal);
    }
    __aicore__ inline void CopyOut() {
        LocalTensor<DTYPE_OUT> outLocal = outQueueOut.DeQue<DTYPE_OUT>();
        DataCopy(outGm, outLocal, this->tileLength);
        outQueueOut.FreeTensor(outLocal);
    }
private:
    // Unused members removed: QueueTmp2..5 were declared but never used.
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueInput, inQueueValues;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueOut;
    TBuf<QuePosition::VECCALC> QueueTmp1;

    GlobalTensor<DTYPE_INPUT> inputGm;
    GlobalTensor<DTYPE_VALUES> valuesGm;
    GlobalTensor<DTYPE_OUT> outGm;

    uint32_t tileLength;  // elements owned by this core
};


// Heaviside step function with full NumPy-style broadcasting between `input`
// and `values` (tiling key 3). Scalar GetValue/SetValue fallback path — no
// vectorization, no local buffers.
//   out[i] = 0                      if input at i < 0
//   out[i] = values (broadcast) at i if input at i == 0
//   out[i] = 1                      if input at i > 0
class KernelHeaviside_broadcast {
public:
    __aicore__ inline KernelHeaviside_broadcast() {}
    // out_size/out_dim/out_shape describe the broadcast result; input_shape
    // and values_shape are the (already rank-aligned) operand shapes.
    // formerNum and tailLength are unused — kept for a uniform signature.
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR values, GM_ADDR out,
                                uint32_t out_size, uint32_t out_dim, uint32_t *input_shape, uint32_t *values_shape, uint32_t *out_shape, int32_t formerNum, int32_t tailLength) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->out_size = out_size;
        this->out_dim = out_dim;
        for(uint32_t i=0; i<this->out_dim; i++)
        {
            this->input_shape[i] = input_shape[i];
            this->values_shape[i] = values_shape[i];
            this->out_shape[i] = out_shape[i];
        }

        this->start = 0;
        inputGm.SetGlobalBuffer((__gm__ DTYPE_INPUT*)input, this->out_size);
        valuesGm.SetGlobalBuffer((__gm__ DTYPE_VALUES*)values, this->out_size);
        outGm.SetGlobalBuffer((__gm__ DTYPE_OUT*)out, this->out_size);
    }
    __aicore__ inline void Process() {
        // BUG FIX: the original body ran this exact per-element computation
        // twice per iteration — once inside an `if (values_shape[0] !=
        // input_shape[0])` branch and once unconditionally — doubling the
        // work (and the GM traffic) for identical results. Compute once.
        // NOTE(review): there is no per-core split here — every core walks
        // the full [start, out_size) range and writes the same results;
        // presumably this tiling key is launched with blockDim 1 — confirm.
        for(uint32_t i=this->start; i<this->out_size; i++)
        {
            DTYPE_INPUT input = inputGm.GetValue(get_broadcasted_index(i, input_shape, out_shape, out_dim));
            if((float)input == 0)
            {
                // H(0) = values, read through its own broadcast mapping.
                outGm.SetValue(i, valuesGm.GetValue(get_broadcasted_index(i, values_shape, out_shape, out_dim)));
            }
            else if((float)input < 0)
            {
                outGm.SetValue(i, (DTYPE_INPUT)0);
            }
            else
            {
                outGm.SetValue(i, (DTYPE_INPUT)1);
            }
        }
    }
private:
    // Map a flat index into the broadcast output back to the flat index of
    // the corresponding element in an operand of shape `original_shape`
    // (dims of size 1 are pinned to coordinate 0). Shapes are rank-aligned
    // to `ndim`, with ndim <= 8 (size of the shape[] scratch).
    __aicore__ inline uint32_t get_broadcasted_index(uint32_t indices, uint32_t *original_shape, uint32_t *broadcast_shape, uint32_t ndim)
    {
        uint32_t shape[8];   // per-dim coordinate in the operand's index space
        uint32_t original_indices = 0;
        int i = 0;
        // Peel coordinates from the innermost dim outward. Once `indices`
        // hits 0 every remaining (outer) coordinate is 0, so we can stop:
        // entries shape[0..i-1] stay uninitialized but are never read below.
        for(i=ndim-1; i>=0; i--)
        {
            shape[i] = 0;
            if(original_shape[i]!=1)
            {
                shape[i] = indices % broadcast_shape[i];
            }
            indices /= broadcast_shape[i];
            if(indices == 0) break;
        }
        // Re-linearize from dim i outward (row-major over original_shape).
        // For any valid flat index (< product of broadcast_shape) the loop
        // above always breaks, so i >= 0 here; if it ever did not break,
        // i == -1 compares as UINT_MAX against the unsigned ndim and this
        // loop is skipped, returning 0.
        for(; i<ndim; i++)
        {
            original_indices = shape[i] + original_indices * original_shape[i];
        }
        return original_indices;
    }
private:
    // Unused members removed: a TPipe, VECIN/VECOUT queues and QueueTmp1..5
    // were declared but this scalar path never uses them.
    GlobalTensor<DTYPE_INPUT> inputGm;
    GlobalTensor<DTYPE_VALUES> valuesGm;
    GlobalTensor<DTYPE_OUT> outGm;

    uint32_t out_size;   // number of elements in the broadcast output

    uint32_t start;      // first flat index this instance processes (always 0)

    uint32_t out_dim;    // rank of the (rank-aligned) shapes below
    uint32_t input_shape[8];
    uint32_t values_shape[8];
    uint32_t out_shape[8];
};
// Kernel entry point for the Heaviside operator. Dispatches on the tiling
// key chosen by the host-side tiling function:
//   1: `values` is a single scalar            -> KernelHeaviside_one
//   2: `input` and `values` are element-wise  -> KernelHeaviside
//   3: shapes require broadcasting            -> KernelHeaviside_broadcast
extern "C" __global__ __aicore__ void heaviside(GM_ADDR input, GM_ADDR values, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    if (TILING_KEY_IS(1)) {
        // KernelHeaviside_one borrows the pipe by pointer instead of owning one.
        TPipe pipe;
        KernelHeaviside_one op;
        op.Init(input, values, out,
                tiling_data.formerNum, tiling_data.tailNum, tiling_data.formerLength, tiling_data.tailLength, &pipe);
        op.Process();
    }
    else if (TILING_KEY_IS(2)) {
        KernelHeaviside op;
        op.Init(input, values, out,
                tiling_data.formerNum, tiling_data.tailNum, tiling_data.formerLength, tiling_data.tailLength);
        op.Process();
    }
    else if (TILING_KEY_IS(3)) {
        KernelHeaviside_broadcast op;
        op.Init(input, values, out,
                tiling_data.out_size, tiling_data.out_dim, tiling_data.input_shape, tiling_data.values_shape, tiling_data.out_shape, tiling_data.formerNum, tiling_data.tailLength);
        op.Process();
    }
}