
#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2; // double-buffering depth for the in/out queues
constexpr float POSITIVE_ONE_FP32 = 1.0F;
constexpr int32_t POSITIVE_ONE_I32 = 1;
// Smallest positive normal fp16 value (2^-24); used as the upper clamp so any
// positive difference survives the Mins step.
constexpr float MIN_ACCURACY_FP16 = 0.00000005960464477539063F;
// 2^12; applied twice: MIN_ACCURACY_FP16 * 4096 * 4096 == 1.0, saturating any
// positive clamped difference to exactly 1.
constexpr float MAX_MUL_FP16 = 4096;
// Smallest positive normal fp32 value (~2^-126), analogous clamp for float.
constexpr float MIN_ACCURACY_FP32 = 1.1754943508222875e-38;
// 2^50 and 2^26: MIN_ACCURACY_FP32 * (2^50)^2 * 2^26 == 1.0 (saturate to 1).
constexpr float MAX_MUL_1_FP32 = 1125899906842624;
constexpr float MAX_MUL_2_FP32 = 67108864;
// constexpr uint32_t BLOCK_SIZE = 32;
// NOTE(review): despite the "MAX" names these are the zero floors used by the
// Maxs step (clamp negatives to 0), not maxima of the type.
constexpr float MAX_F16 = 0.0;
constexpr float MAX_F32 = 0.0f;

// Flat copy of the host-side tiling data passed to both kernels.
// Filled in the kernel entry point from GET_TILING_DATA.
struct TilingParam
{
    uint32_t total_length;  // total element count of the (broadcast) output
    uint32_t x1_length;     // element count of input x1
    uint32_t x2_length;     // element count of input x2
    uint32_t ALIGN_NUM;     // elements per 32-byte block for this dtype
    uint32_t tiling_size;
    uint32_t block_size;    // elements per tile (single UB pass)
    uint32_t core_size;     // elements assigned to each core
    uint32_t core_remain;   // leftover elements, absorbed by the last core
    uint32_t shape[20];     // output shape, up to 20 dims (only first `dim` used)
    uint32_t reduce1[20];   // per-dim flag: 1 if x1 is broadcast along that dim
    uint32_t reduce2[20];   // per-dim flag: 1 if x2 is broadcast along that dim
    uint32_t dim;           // number of valid dims in shape/reduce arrays
};

// Element-wise Greater kernel: y[i] = (x1[i] > x2[i]) ? 1 : 0, emitted as int8.
// The comparison is computed arithmetically: d = x1 - x2 is clamped to
// [0, eps] (Mins then Maxs) and then scaled so that any positive difference
// saturates to exactly 1 while 0 stays 0; no Compare instruction is used.
template <typename T>
class KernelGreater
{
public:
    __aicore__ inline KernelGreater() {}

    // Binds this core's slice of global memory and initializes the
    // double-buffered queues plus scratch buffers sized for one tile.
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, TilingParam &paramList)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        // Per-core work split; the last core absorbs the remainder.
        this->blockLength = paramList.core_size + (GetBlockNum() == GetBlockIdx() + 1 ? paramList.core_remain : 0);
        this->tileLength = paramList.block_size;
        this->ALIGN_NUM = paramList.ALIGN_NUM;
        ASSERT(this->ALIGN_NUM != 0 && "ALIGN_NUM can not be zero!");

        // Round blockLength up to the alignment unit so DataCopy lengths are valid.
        this->blockLength = this->blockLength + (this->blockLength % this->ALIGN_NUM ? this->ALIGN_NUM - this->blockLength % this->ALIGN_NUM : 0);
        // this->blockLength = (blockLength + ALIGN_NUM - 1) / ALIGN_NUM * ALIGN_NUM;
        auto startPointer = paramList.core_size * GetBlockIdx();
        auto bufferlength = this->blockLength;

        x1Gm.SetGlobalBuffer((__gm__ T *)x1 + startPointer, bufferlength);
        x2Gm.SetGlobalBuffer((__gm__ T *)x2 + startPointer, bufferlength);
        yGm.SetGlobalBuffer((__gm__ int8_t *)y + startPointer, bufferlength);

        pipe.InitBuffer(x1_inque, BUFFER_NUM, this->tileLength * sizeof(T));
        pipe.InitBuffer(x2_inque, BUFFER_NUM, this->tileLength * sizeof(T));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(int8_t));
        pipe.InitBuffer(calc_buf_1, this->tileLength * sizeof(T));
        pipe.InitBuffer(calc_buf_2, this->tileLength * sizeof(half));
        pipe.InitBuffer(calc_buf_3, this->tileLength * sizeof(half));
        pipe.InitBuffer(calc_buf_4, this->tileLength * sizeof(float));

        // this->tileNum = blockLength / tileLength;
        // Ceiling division: one extra (shorter) tile when blockLength is not a
        // multiple of tileLength.
        this->tileNum = this->blockLength / this->tileLength + (this->blockLength % this->tileLength > 0);
    }

    // Main pipeline: full tiles first, then the (possibly shorter) tail tile.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        for (int32_t i = 0; i < loopCount - 1; ++i)
        {
            CopyIn(i, this->tileLength);
            Compute(i, this->tileLength);
            CopyOut(i, this->tileLength);
        }
        // Tail tile length; blockLength was rounded up to ALIGN_NUM in Init,
        // so this stays a valid DataCopy length.
        auto length = this->blockLength - this->tileLength * (loopCount - 1);
        CopyIn(loopCount - 1, length);
        Compute(loopCount - 1, length);
        CopyOut(loopCount - 1, length);
    }

private:
    // Stage tile `progress` of both inputs from GM into the VECIN queues.
    __aicore__ inline void CopyIn(int32_t progress, uint32_t length)
    {
        LocalTensor<T> x1Local = x1_inque.AllocTensor<T>();
        LocalTensor<T> x2Local = x2_inque.AllocTensor<T>();

        DataCopy(x1Local, x1Gm[progress * this->tileLength], length);
        DataCopy(x2Local, x2Gm[progress * this->tileLength], length);

        x1_inque.EnQue(x1Local);
        x2_inque.EnQue(x2Local);
    }

    // Per-dtype comparison; every branch ends with an int8 0/1 tile in y_local.
    __aicore__ inline void Compute(int32_t progress, uint32_t length)
    {
        LocalTensor<int8_t> y_local = outQueueY.AllocTensor<int8_t>();
        // NOTE(review): the bfloat16_t case dequeues as <half>, i.e. the raw
        // bf16 bits are reinterpreted as fp16 rather than converted — confirm
        // this is intended for the bf16 path.
        if constexpr (std::is_same_v<T, half> || std::is_same_v<T, bfloat16_t>)
        {
            LocalTensor<half> x1_local = x1_inque.DeQue<half>();
            LocalTensor<half> x2_local = x2_inque.DeQue<half>();

            LocalTensor<half> y_compute = calc_buf_1.Get<half>();
            // 1. diff = x1 - x2
            Sub(y_compute, x1_local, x2_local, length);

            // 2. Clamp from above to the smallest positive fp16 value.
            Mins(y_compute, y_compute, (half)MIN_ACCURACY_FP16, length);

            // 3. Clamp negatives (x1 <= x2) to zero.
            Maxs(y_compute, y_compute, (half)MAX_F16, length);

            // 4. Saturate any positive value to 1 (eps * 4096 * 4096 == 1).
            Muls(y_compute, y_compute, (half)MAX_MUL_FP16, length);
            Muls(y_compute, y_compute, (half)MAX_MUL_FP16, length);

            // 5. Cast the 0/1 fp16 result to the int8 output tile.
            Cast(y_local, y_compute, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, float>)
        {
            LocalTensor<float> x1_local = x1_inque.DeQue<float>();
            LocalTensor<float> x2_local = x2_inque.DeQue<float>();
            LocalTensor<float> y_compute = calc_buf_1.Get<float>();

            LocalTensor<half> y_fp16 = calc_buf_2.Get<half>();

            // 1. diff = x1 - x2
            Sub(y_compute, x1_local, x2_local, length);

            // 2. Clamp from above to the smallest positive fp32 value.
            Mins(y_compute, y_compute, (float)MIN_ACCURACY_FP32, length);

            // 3. Clamp negatives to zero.
            Maxs(y_compute, y_compute, (float)MAX_F32, length);

            // 4. Saturate any positive value to 1 (eps * 2^50 * 2^50 * 2^26 == 1).
            Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, length);
            Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, length);
            Muls(y_compute, y_compute, (float)MAX_MUL_2_FP32, length);

            // 5. fp32 -> fp16 -> int8 (no direct fp32->int8 cast path).
            Cast(y_fp16, y_compute, RoundMode::CAST_NONE, length);
            Cast(y_local, y_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>)
        {
            LocalTensor<int8_t> x1_local = x1_inque.DeQue<int8_t>();
            LocalTensor<int8_t> x2_local = x2_inque.DeQue<int8_t>();
            LocalTensor<int8_t> y_compute = calc_buf_1.Get<int8_t>();

            // Scratch fp16 tensors for the widened computation.
            LocalTensor<half> x1_local_fp16 = calc_buf_2.Get<half>();
            LocalTensor<half> x2_local_fp16 = calc_buf_3.Get<half>();
            LocalTensor<half> y_local_fp16 = calc_buf_4.Get<half>();

            // 1. Widen both int8 inputs to fp16.
            Cast(x1_local_fp16, x1_local, RoundMode::CAST_NONE, length);
            Cast(x2_local_fp16, x2_local, RoundMode::CAST_NONE, length);

            // 2. diff = x1 - x2 (exact in fp16 for 8-bit inputs).
            Sub(y_local_fp16, x1_local_fp16, x2_local_fp16, length);

            // 3. Integer diffs are >= 1 when x1 > x2, so clamping to [0, 1]
            //    directly yields the 0/1 result (no saturation multiply needed).
            Mins(y_local_fp16, y_local_fp16, (half)POSITIVE_ONE_FP32, length);

            Maxs(y_local_fp16, y_local_fp16, (half)MAX_F16, length);

            // 4. Cast the 0/1 fp16 result back to int8.
            Cast(y_local, y_local_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, int32_t> || std::is_same_v<T, int64_t>)
        {
            LocalTensor<int32_t> x1_local = x1_inque.DeQue<int32_t>();
            LocalTensor<int32_t> x2_local = x2_inque.DeQue<int32_t>();
            LocalTensor<int32_t> y_compute = calc_buf_1.Get<int32_t>();

            LocalTensor<half> y_fp16 = calc_buf_3.Get<half>();
            LocalTensor<float> y_fp32 = calc_buf_4.Get<float>();

            // 1. diff = x1 - x2
            // NOTE(review): for int64 inputs this dequeues as int32 — confirm
            // the int64 path is handled upstream (e.g. by tiling/dtype checks).
            Sub(y_compute, x1_local, x2_local, length);

            // 2./3. Clamp to [0, 1]: integer diffs >= 1 become 1, others 0.
            Mins(y_compute, y_compute, (int32_t)POSITIVE_ONE_I32, length);

            Maxs(y_compute, y_compute, 0, length);

            // 4. int32 -> float -> half -> int8 cast chain to reach the output dtype.
            Cast(y_fp32, y_compute, RoundMode::CAST_NONE, length);
            Cast(y_fp16, y_fp32, RoundMode::CAST_NONE, length);
            Cast(y_local, y_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        outQueueY.EnQue<int8_t>(y_local);
    }

    // Drain one result tile from the VECOUT queue back to GM.
    __aicore__ inline void CopyOut(int32_t progress, uint32_t length)
    {
        LocalTensor<int8_t> yLocal = outQueueY.DeQue<int8_t>();
        DataCopy(yGm[progress * this->tileLength], yLocal, length);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TBuf<TPosition::VECCALC> calc_buf_1, calc_buf_2, calc_buf_3, calc_buf_4;
    TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> x1_inque, x2_inque;
    TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<T> x1Gm, x2Gm;
    GlobalTensor<int8_t> yGm;

    uint32_t blockLength; // elements this core processes (aligned up)
    uint32_t tileNum;     // number of tiles (ceiling of blockLength/tileLength)
    uint32_t tileLength;  // elements per tile
    uint32_t ALIGN_NUM;   // alignment unit in elements
};

// Broadcast variant of the Greater kernel. The output is processed row by row
// (a "row" is the innermost dimension shape[dim-1]); for each output row j,
// CalculateStart maps j back to the source row offsets in x1/x2, skipping
// dimensions flagged as broadcast in reduce1/reduce2.
// The per-tile Compute logic is identical to KernelGreater.
template <typename T>
class KernelGreater_Broadcast
{
public:
    __aicore__ inline KernelGreater_Broadcast() {}
    // Binds global memory, copies tiling metadata and sizes the queues/buffers.
    // NOTE(review): reduce1/reduce2/shape are stored as raw pointers into
    // `paramList`; the caller's TilingParam must outlive this object (true for
    // the current entry point, where it is a stack local spanning Process()).
    __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, TilingParam &paramList)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->blockLength = paramList.core_size + (GetBlockNum() == GetBlockIdx() + 1 ? paramList.core_remain : 0);
        this->tileLength = paramList.block_size;
        this->ALIGN_NUM = paramList.ALIGN_NUM;
        ASSERT(ALIGN_NUM != 0 && "ALIGN_NUM can not be zero!");

        // Round up to the alignment unit so DataCopy lengths are valid.
        this->blockLength = this->blockLength + (this->blockLength % ALIGN_NUM ? ALIGN_NUM - this->blockLength % ALIGN_NUM : 0);
        auto startPointer = paramList.core_size * GetBlockIdx();
        auto bufferlength = this->blockLength;

        this->reduce1 = paramList.reduce1;
        this->reduce2 = paramList.reduce2;
        this->shape = paramList.shape;
        this->dim = paramList.dim;

        this->totalLength = paramList.total_length;
        this->x1Length = paramList.x1_length;
        this->x2Length = paramList.x2_length;

        // NOTE(review): the GM base pointers are offset by startPointer while
        // CopyIn/CopyOut also index with absolute offsets derived from the full
        // output (start1/start2, j*totalLength). Unless this kernel only runs
        // single-core (startPointer == 0), this looks like a double offset —
        // confirm against the tiling that sets core_size for the broadcast key.
        x1Gm.SetGlobalBuffer((__gm__ T *)x1 + startPointer, this->x1Length);
        x2Gm.SetGlobalBuffer((__gm__ T *)x2 + startPointer, this->x2Length);
        yGm.SetGlobalBuffer((__gm__ int8_t *)y + startPointer, bufferlength);

        pipe.InitBuffer(x1_inque, BUFFER_NUM, this->tileLength * sizeof(T));
        pipe.InitBuffer(x2_inque, BUFFER_NUM, this->tileLength * sizeof(T));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileLength * sizeof(int8_t));
        pipe.InitBuffer(calc_buf_1, this->tileLength * sizeof(T));
        pipe.InitBuffer(calc_buf_2, this->tileLength * sizeof(float));
        pipe.InitBuffer(calc_buf_3, this->tileLength * sizeof(float));
        pipe.InitBuffer(calc_buf_4, this->tileLength * sizeof(float));
    }

    // Iterates over all output rows; for each row, resolves the broadcast
    // source offsets and runs the tiled CopyIn/Compute/CopyOut pipeline.
    __aicore__ inline void Process()
    {
        int32_t count = this->totalLength / this->shape[this->dim - 1];
        uint32_t totalLength = this->shape[this->dim - 1]; // elements per row (shadows the member)
        this->tileNum = totalLength / this->tileLength + (totalLength % this->tileLength > 0);
        uint32_t d[21] = {0};   // d[k]  = stride of dim k in the full output
        uint32_t dn1[21] = {0}; // dn1[k] = stride of dim k in x1 (broadcast dims collapsed)
        uint32_t dn2[21] = {0}; // dn2[k] = stride of dim k in x2 (broadcast dims collapsed)

        auto dim = this->dim - 1;
        d[dim] = dn1[dim] = dn2[dim] = 1;
        // d[] is recomputed identically by both calls; harmless.
        InitializeDnArrays(d, dn1, this->reduce1, dim, this->shape);
        InitializeDnArrays(d, dn2, this->reduce2, dim, this->shape);
        for (int j = 0; j < count; j++)
        {
            uint32_t start1 = 0, start2 = 0;
            CalculateStart(j, start1, dn1, reduce1, d);
            CalculateStart(j, start2, dn2, reduce2, d);
            int32_t loopCount = this->tileNum;
            for (int32_t i = 0; i < loopCount - 1; i++)
            {
                CopyIn(start1 * totalLength, start2 * totalLength, i, this->tileLength);
                Compute(i, this->tileLength);
                CopyOut(j * totalLength, i, this->tileLength);
            }
            // Tail tile: copy in rounded up to the alignment unit, compute only
            // the valid elements, copy out rounded up to a 32-byte block.
            uint32_t length = totalLength - this->tileLength * (loopCount - 1);
            auto length_align = (length + this->ALIGN_NUM - 1) / this->ALIGN_NUM * this->ALIGN_NUM;
            CopyIn(start1 * totalLength, start2 * totalLength, loopCount - 1, length_align);
            Compute(loopCount - 1, length);
            // NOTE(review): the rounded-up tail write can spill a few bytes past
            // the row end into the next row's region; later rows overwrite it,
            // but the very last row may write past the tensor — confirm the GM
            // buffer is padded accordingly.
            CopyOut(j * totalLength, loopCount - 1, (length + 31) / 32 * 32);
        }
    }

private:
    // Stage one tile of each input, starting at the (broadcast-resolved)
    // row offsets start1/start2.
    __aicore__ inline void CopyIn(uint32_t start1, uint32_t start2, int32_t progress, uint32_t length)
    {
        {
            LocalTensor<T> x1Local = x1_inque.AllocTensor<T>();
            LocalTensor<T> x2Local = x2_inque.AllocTensor<T>();

            DataCopy(x1Local, x1Gm[start1 + progress * this->tileLength], length);
            DataCopy(x2Local, x2Gm[start2 + progress * this->tileLength], length);

            x1_inque.EnQue(x1Local);
            x2_inque.EnQue(x2Local);
        }
    }
    // Per-dtype comparison; identical logic to KernelGreater::Compute.
    // Produces an int8 0/1 tile via the clamp-and-saturate trick.
    __aicore__ inline void Compute(int32_t progress, uint32_t length)
    {
        LocalTensor<int8_t> y_local = outQueueY.AllocTensor<int8_t>();
        // NOTE(review): bfloat16_t is dequeued as <half> (bit reinterpretation,
        // not conversion) — confirm intended, same as in KernelGreater.
        if constexpr (std::is_same_v<T, half> || std::is_same_v<T, bfloat16_t>)
        {
            LocalTensor<half> x1_local = x1_inque.DeQue<half>();
            LocalTensor<half> x2_local = x2_inque.DeQue<half>();

            LocalTensor<half> y_compute = calc_buf_1.Get<half>();
            // 1. diff = x1 - x2
            Sub(y_compute, x1_local, x2_local, length);

            // 2. Clamp from above to the smallest positive fp16 value.
            Mins(y_compute, y_compute, (half)MIN_ACCURACY_FP16, length);

            // 3. Clamp negatives to zero.
            Maxs(y_compute, y_compute, (half)MAX_F16, length);

            // 4. Saturate any positive value to 1 (eps * 4096 * 4096 == 1).
            Muls(y_compute, y_compute, (half)MAX_MUL_FP16, length);
            Muls(y_compute, y_compute, (half)MAX_MUL_FP16, length);

            // 5. Cast the 0/1 fp16 result to the int8 output tile.
            Cast(y_local, y_compute, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, float>)
        {
            LocalTensor<float> x1_local = x1_inque.DeQue<float>();
            LocalTensor<float> x2_local = x2_inque.DeQue<float>();
            LocalTensor<float> y_compute = calc_buf_1.Get<float>();

            LocalTensor<half> y_fp16 = calc_buf_2.Get<half>();

            // 1. diff = x1 - x2
            Sub(y_compute, x1_local, x2_local, length);

            // 2. Clamp from above to the smallest positive fp32 value.
            Mins(y_compute, y_compute, (float)MIN_ACCURACY_FP32, length);

            // 3. Clamp negatives to zero.
            Maxs(y_compute, y_compute, (float)MAX_F32, length);

            // 4. Saturate any positive value to 1 (eps * 2^50 * 2^50 * 2^26 == 1).
            Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, length);
            Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, length);
            Muls(y_compute, y_compute, (float)MAX_MUL_2_FP32, length);

            // 5. fp32 -> fp16 -> int8 cast chain to the output dtype.
            Cast(y_fp16, y_compute, RoundMode::CAST_NONE, length);
            Cast(y_local, y_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>)
        {
            LocalTensor<int8_t> x1_local = x1_inque.DeQue<int8_t>();
            LocalTensor<int8_t> x2_local = x2_inque.DeQue<int8_t>();
            LocalTensor<int8_t> y_compute = calc_buf_1.Get<int8_t>();

            // Scratch fp16 tensors for the widened computation.
            LocalTensor<half> x1_local_fp16 = calc_buf_2.Get<half>();
            LocalTensor<half> x2_local_fp16 = calc_buf_3.Get<half>();
            LocalTensor<half> y_local_fp16 = calc_buf_4.Get<half>();

            // 1. Widen both int8 inputs to fp16.
            Cast(x1_local_fp16, x1_local, RoundMode::CAST_NONE, length);
            Cast(x2_local_fp16, x2_local, RoundMode::CAST_NONE, length);

            // 2. diff = x1 - x2 (exact in fp16 for 8-bit inputs).
            Sub(y_local_fp16, x1_local_fp16, x2_local_fp16, length);

            // 3. Integer diffs >= 1 when x1 > x2, so clamping to [0, 1]
            //    directly yields the 0/1 result.
            Mins(y_local_fp16, y_local_fp16, (half)POSITIVE_ONE_FP32, length);

            Maxs(y_local_fp16, y_local_fp16, (half)MAX_F16, length);

            // 4. Cast the 0/1 fp16 result back to int8.
            Cast(y_local, y_local_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        else if constexpr (std::is_same_v<T, int32_t> || std::is_same_v<T, int64_t>)
        {
            LocalTensor<int32_t> x1_local = x1_inque.DeQue<int32_t>();
            LocalTensor<int32_t> x2_local = x2_inque.DeQue<int32_t>();
            LocalTensor<int32_t> y_compute = calc_buf_1.Get<int32_t>();

            LocalTensor<half> y_fp16 = calc_buf_3.Get<half>();
            LocalTensor<float> y_fp32 = calc_buf_4.Get<float>();

            // 1. diff = x1 - x2
            Sub(y_compute, x1_local, x2_local, length);

            // 2./3. Clamp to [0, 1]: integer diffs >= 1 become 1, others 0.
            Mins(y_compute, y_compute, (int32_t)POSITIVE_ONE_I32, length);

            Maxs(y_compute, y_compute, 0, length);

            // 4. int32 -> float -> half -> int8 cast chain.
            Cast(y_fp32, y_compute, RoundMode::CAST_NONE, length);
            Cast(y_fp16, y_fp32, RoundMode::CAST_NONE, length);
            Cast(y_local, y_fp16, RoundMode::CAST_NONE, length);

            x1_inque.FreeTensor(x1_local);
            x2_inque.FreeTensor(x2_local);
        }
        outQueueY.EnQue<int8_t>(y_local);
    }

    // Drain one result tile back to GM at row offset `start`.
    __aicore__ inline void CopyOut(uint32_t start, uint32_t progress, uint32_t length)
    {
        LocalTensor<int8_t> yLocal = outQueueY.DeQue<int8_t>();
        DataCopy(yGm[start + progress * this->tileLength], yLocal, length);
        outQueueY.FreeTensor(yLocal);
    }
    // Maps output row index j to the source row index `start`: for each
    // non-broadcast dim, accumulate that dim's coordinate times the source
    // stride dn[]; broadcast dims (reduce[k] == 1) contribute nothing.
    __aicore__ inline void CalculateStart(int j, uint32_t &start, uint32_t *dn, uint32_t *reduce, uint32_t *d)
    {
        for (int k = dim - 1; k >= 0; k--)
        {
            uint32_t index = (j / d[k + 1] % shape[k]);

            if (reduce[k] == 0)
            {
                start += dn[k + 1] * index;
            }
        }
    }
    // Builds, from innermost dim outward:
    //   d[k]  — full-output stride of dim k (product of inner shape extents),
    //   dn[k] — source stride of dim k, where broadcast dims (reduce[k] == 1)
    //           do not advance the stride.
    __aicore__ inline void InitializeDnArrays(uint32_t *d, uint32_t *dn, uint32_t *reduce, uint32_t dim, uint32_t *shape)
    {
        d[dim] = dn[dim] = 1;
        for (int k = dim - 1; k >= 0; k--)
        {
            d[k] = d[k + 1] * shape[k];
            if (reduce[k] == 0)
            {
                dn[k] = dn[k + 1] * shape[k];
            }
            else
            {
                dn[k] = dn[k + 1];
            }
        }
    }

private:
    TPipe pipe;
    TBuf<TPosition::VECCALC> calc_buf_1, calc_buf_2, calc_buf_3, calc_buf_4;
    TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> x1_inque, x2_inque;
    TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<T> x1Gm, x2Gm;
    GlobalTensor<int8_t> yGm;

    uint32_t blockLength;  // elements this core processes (aligned up)
    uint32_t x1Length;     // total elements of x1
    uint32_t startPointer; // unused member — Init uses a local of the same name
    uint32_t x2Length;     // total elements of x2
    uint32_t tileNum;      // tiles per output row
    uint32_t tileLength;   // elements per tile
    // uint32_t position;
    uint32_t totalLength;  // total output elements (member; shadowed by a local in Process)
    uint32_t *reduce1;     // borrowed from TilingParam — see lifetime note in Init
    uint32_t *reduce2;     // borrowed from TilingParam
    uint32_t *shape;       // borrowed from TilingParam
    uint32_t dim;          // number of valid dims
    uint32_t ALIGN_NUM;    // alignment unit in elements
};
// Kernel entry point for the Greater operator.
// Unpacks the host tiling data into a TilingParam and dispatches on the tiling
// key: keys 0 and 1 both run the broadcast kernel (the original code had two
// byte-identical branches for them), any other key runs the element-wise kernel.
extern "C" __global__ __aicore__ void greater(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    // Flatten the tiling scalars into the shared parameter struct.
    TilingParam paramList = {
        .total_length = tiling_data.total_length,
        .x1_length = tiling_data.x1_length,
        .x2_length = tiling_data.x2_length,
        .ALIGN_NUM = tiling_data.ALIGN_NUM,
        .tiling_size = tiling_data.tiling_size,
        .block_size = tiling_data.block_size,
        .core_size = tiling_data.core_size,
        .core_remain = tiling_data.core_remain,
        .dim = tiling_data.dim};
    // Copy the shape / broadcast-flag arrays (fixed capacity of 20 dims).
    for (int i = 0; i < 20; ++i)
    {
        paramList.shape[i] = tiling_data.shape[i];
        paramList.reduce1[i] = tiling_data.reduce1[i];
        paramList.reduce2[i] = tiling_data.reduce2[i];
    }

    if (TILING_KEY_IS(0) || TILING_KEY_IS(1))
    {
        // Broadcast path: inputs may differ in shape per reduce1/reduce2.
        KernelGreater_Broadcast<DTYPE_X1> op;
        op.Init(x1, x2, y, paramList);
        op.Process();
    }
    else
    {
        // Same-shape path: straight element-wise comparison.
        KernelGreater<DTYPE_X1> op;
        op.Init(x1, x2, y, paramList);
        op.Process();
    }
}

// #include "kernel_operator.h"

// using namespace AscendC;
// constexpr int32_t BUFFER_NUM = 2;
// constexpr int32_t inputVarNum = 2;
// constexpr int32_t maxDimNum = 10;

// constexpr float POSITIVE_ONE_FP32 = 1.0F;
// constexpr int32_t POSITIVE_ONE_I32 = 1;
// constexpr float MIN_ACCURACY_FP16 = 0.00000005960464477539063F;
// constexpr float MAX_MUL_FP16 = 4096;
// constexpr float MIN_ACCURACY_FP32 = 1.1754943508222875e-38;
// constexpr float MAX_MUL_1_FP32 = 1125899906842624;
// constexpr float MAX_MUL_2_FP32 = 67108864;
// constexpr uint32_t BLOCK_SIZE = 32;
// constexpr float MAX_F16 = 0.0;
// constexpr float MAX_F32 = 0.0f;


// class TileVar{
// public:    
//     uint32_t x1_length;
//     uint32_t x2_length; 
//     uint8_t is_broadcast;
//     int64_t maxShapeDim; 
//     int64_t ss[inputVarNum * maxDimNum];
//     int64_t sf[maxDimNum];
// };

// class KernelGreater {
// public:
//     __aicore__ inline KernelGreater() {}
//     __aicore__ inline void Init(GM_ADDR x1, GM_ADDR x2, GM_ADDR y,
//                                 uint32_t smallCoreDataNum,
//                                 uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
//                                 uint32_t finalSmallTileNum, uint32_t tileDataNum, 
//                                 uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
//                                 uint32_t tailBlockNum, TileVar* tilevar) 
//     {
//         ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
//         uint32_t coreNum = GetBlockIdx();
//         uint32_t globalBufferIndex = bigCoreDataNum * GetBlockIdx();
//         // 单次搬运数据个数 tileDataNum
//         this->tileDataNum = tileDataNum;
//         if (coreNum < tailBlockNum) { 
//             // 每个大核要处理的总输入数据量 bigCoreDataNum*sizeof(half)
//             this->coreDataNum = bigCoreDataNum;
//             // 每个大核要搬运几次 finalBigTileNum 次
//             this->tileNum = finalBigTileNum;
//             // 每个大核最后一次搬运数据量 bigTailDataNum
//             this->tailDataNum = bigTailDataNum;
//         }
//         else { 
//             // 每个小核要处理的总输入数据量 smallCoreDataNum*sizeof(half)
//             this->coreDataNum = smallCoreDataNum;
//             // 每个小核要搬运几次 finalSmallTileNum 次
//             this->tileNum = finalSmallTileNum;
//             // 每个小核最后一次搬运数据量 smallTailDataNum
//             this->tailDataNum = smallTailDataNum;
//             globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (GetBlockIdx() - tailBlockNum);
//         }
//         this->maxShapeDim = tilevar->maxShapeDim;
//         for (int i = 0; i < this->maxShapeDim * inputVarNum; ++i) {
//             ((int64_t *)this->shape)[i] = tilevar->ss[i];
//         }
//         for(int i = 0; i < this->maxShapeDim; ++i) {
//             ((int64_t *)this->shapefull)[i] = tilevar->sf[i];
//         }
//         this->globalBufferIndex = globalBufferIndex;
//         this->is_broadcast = tilevar->is_broadcast;
//         x1Gm.SetGlobalBuffer((__gm__ DTYPE_X1*)x1);
//         x2Gm.SetGlobalBuffer((__gm__ DTYPE_X1*)x2);
//         yGm.SetGlobalBuffer((__gm__ int8_t*)y);
//         pipe.InitBuffer(x1_inque, BUFFER_NUM, this->tileDataNum * sizeof(DTYPE_X1));
//         pipe.InitBuffer(x2_inque, BUFFER_NUM, this->tileDataNum * sizeof(DTYPE_X1)); 
//         pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(int8_t));

//         pipe.InitBuffer(calc_buf_1, this->tileDataNum * sizeof(DTYPE_X1));
//         pipe.InitBuffer(calc_buf_2, this->tileDataNum * sizeof(half));
//         pipe.InitBuffer(calc_buf_3, this->tileDataNum * sizeof(half));
//         pipe.InitBuffer(calc_buf_4, this->tileDataNum * sizeof(float));
//     }
//     __aicore__ inline void Process()
//     {
//         int32_t loopCount = this->tileNum;
//         this->processDataNum = this->tileDataNum;
//         for (int32_t i = 0; i < loopCount; i++) {
//             if (i == this->tileNum - 1) {
//               this->processDataNum = this->tailDataNum;
//             }
//             CopyIn(i);
//             Compute(i);
//             CopyOut(i);
//         }
//     }

// private:
//     __aicore__ inline void CopyIn(int32_t progress)
//     {
//         LocalTensor<DTYPE_X1> x1Local = x1_inque.AllocTensor<DTYPE_X1>();
//         LocalTensor<DTYPE_X1> x2Local = x2_inque.AllocTensor<DTYPE_X1>();
//         // x0Gm broadCast to x0Local
//         // 搬运从globalBufferIndex + progress * this->tileDataNum起始索引开始，this->processDataNum长度的数据
//         if (this->is_broadcast == 0){
//             DataCopy(x1Local, x1Gm[progress * this->tileDataNum], this->processDataNum);
//             DataCopy(x2Local, x2Gm[progress * this->tileDataNum], this->processDataNum);
//         }
//         else{
//             BroadCINPUTX1(x1Local, globalBufferIndex + progress * this->tileDataNum, this->processDataNum);
//             BroadCINPUTX2(x2Local, globalBufferIndex + progress * this->tileDataNum, this->processDataNum);
//         }

//         // DumpTensor(x1Local, 1, this->processDataNum);
//         // DumpTensor(x2Local, 2, this->processDataNum);

//         x1_inque.EnQue(x1Local);
//         x2_inque.EnQue(x2Local);
//     }
//     __aicore__ inline void Compute(int32_t progress)
//     {
//         LocalTensor<int8_t> y_local = outQueueY.AllocTensor<int8_t>();
//         if constexpr (std::is_same_v<DTYPE_X1, half> || std::is_same_v<DTYPE_X1, bfloat16_t>)
//         {
//             LocalTensor<half> x1_local = x1_inque.DeQue<half>();
//             LocalTensor<half> x2_local = x2_inque.DeQue<half>();

//             LocalTensor<half> y_compute = calc_buf_1.Get<half>();
//             // 1、 计算差值
//             Sub(y_compute, x1_local, x2_local, this->tileDataNum);

//             // 2、误差容差处理，将小于误差值的差值设置为 精度差
//             Mins(y_compute, y_compute, (half)MIN_ACCURACY_FP16, this->tileDataNum);

//             // 3、和0比大小
//             Maxs(y_compute, y_compute, (half)MAX_F16, this->tileDataNum);

//             // 4、将所有非零值设置为 1
//             Muls(y_compute, y_compute, (half)MAX_MUL_FP16, this->tileDataNum);
//             Muls(y_compute, y_compute, (half)MAX_MUL_FP16, this->tileDataNum);

//             // 5、将结果转换为 FP16 类型并保存到输出张量
//             Cast(y_local, y_compute, RoundMode::CAST_NONE, this->tileDataNum);

            
//             x1_inque.FreeTensor(x1_local);
//             x2_inque.FreeTensor(x2_local);
//         }
//         else if constexpr (std::is_same_v<DTYPE_X1, float>)
//         {
//             LocalTensor<float> x1_local = x1_inque.DeQue<float>();
//             LocalTensor<float> x2_local = x2_inque.DeQue<float>();
//             LocalTensor<float> y_compute = calc_buf_1.Get<float>();

//             LocalTensor<half> y_fp16 = calc_buf_2.Get<half>();

//             // 1、计算差值
//             Sub(y_compute, x1_local, x2_local, this->tileDataNum);

//             // 2、误差容差处理，将小于误差值的差值设置为 精度差
//             Mins(y_compute, y_compute, (float)MIN_ACCURACY_FP32, this->tileDataNum);

//             // 3、和0比大小
//             Maxs(y_compute, y_compute, (float)MAX_F32, this->tileDataNum);

//             // 4、将所有非零值设置为 1
//             Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, this->tileDataNum);
//             Muls(y_compute, y_compute, (float)MAX_MUL_1_FP32, this->tileDataNum);
//             Muls(y_compute, y_compute, (float)MAX_MUL_2_FP32, this->tileDataNum);

//             // 将结果转换为 FP16 类型并保存到输出张量
//             Cast(y_fp16, y_compute, RoundMode::CAST_NONE, this->tileDataNum);
//             Cast(y_local, y_fp16, RoundMode::CAST_NONE, this->tileDataNum);

//             x1_inque.FreeTensor(x1_local);
//             x2_inque.FreeTensor(x2_local);
//         }
//         else if constexpr (std::is_same_v<DTYPE_X1, int8_t> || std::is_same_v<DTYPE_X1, uint8_t>)
//         {
//             LocalTensor<int8_t> x1_local = x1_inque.DeQue<int8_t>();
//             LocalTensor<int8_t> x2_local = x2_inque.DeQue<int8_t>();
//             LocalTensor<int8_t> y_compute = calc_buf_1.Get<int8_t>();

//             // 1、计算差值
//             LocalTensor<half> x1_local_fp16 = calc_buf_2.Get<half>();
//             LocalTensor<half> x2_local_fp16 = calc_buf_3.Get<half>();
//             LocalTensor<half> y_local_fp16 = calc_buf_4.Get<half>();

//             // 2、将输入张量转换为 FP16
//             Cast(x1_local_fp16, x1_local, RoundMode::CAST_NONE, this->tileDataNum);
//             Cast(x2_local_fp16, x2_local, RoundMode::CAST_NONE, this->tileDataNum);

//             // 3、计算差值
//             Sub(y_local_fp16, x1_local_fp16, x2_local_fp16, this->tileDataNum);

//             // 4、误差容差处理，将小于误差值的差值设置为 精度差
//             Mins(y_local_fp16, y_local_fp16, (half)POSITIVE_ONE_FP32, this->tileDataNum);

//             Maxs(y_local_fp16, y_local_fp16, (half)MAX_F16, this->tileDataNum);

//             // 5、将所有非零值设置为 1
//             Cast(y_local, y_local_fp16, RoundMode::CAST_NONE, this->tileDataNum);

//             x1_inque.FreeTensor(x1_local);
//             x2_inque.FreeTensor(x2_local);
//         }
//         else if constexpr (std::is_same_v<DTYPE_X1, int32_t> || std::is_same_v<DTYPE_X1, int64_t>)
//         {
//             LocalTensor<int32_t> x1_local = x1_inque.DeQue<int32_t>();
//             LocalTensor<int32_t> x2_local = x2_inque.DeQue<int32_t>();
//             LocalTensor<int32_t> y_compute = calc_buf_1.Get<int32_t>();

//             LocalTensor<half> y_fp16 = calc_buf_3.Get<half>();
//             LocalTensor<float> y_fp32 = calc_buf_4.Get<float>();

//             // 1、计算差值
//             Sub(y_compute, x1_local, x2_local, this->tileDataNum);
           
//             // 2、误差容差处理，将小于误差值的差值设置为 精度差
//             Mins(y_compute, y_compute, (int32_t)POSITIVE_ONE_I32, this->tileDataNum);

//             // 3、和0比大小
//             Maxs(y_compute, y_compute, 0, this->tileDataNum);

//             // 4、int32->float->half
//             Cast(y_fp32, y_compute, RoundMode::CAST_NONE, this->tileDataNum);
//             Cast(y_fp16, y_fp32, RoundMode::CAST_NONE, this->tileDataNum);
//             Cast(y_local, y_fp16, RoundMode::CAST_NONE, this->tileDataNum);

//             x1_inque.FreeTensor(x1_local);
//             x2_inque.FreeTensor(x2_local);
//         } 
//         // AscendC::Add(yLocal, x0Local, x1Local, this->processDataNum);
//         // AscendC::Add(yLocal, yLocal, x2Local, this->processDataNum);
//         outQueueY.EnQue<int8_t>(y_local);
       
//     }
//     __aicore__ inline void CopyOut(int32_t progress)
//     {
//       AscendC::LocalTensor<int8_t> yLocal = outQueueY.DeQue<int8_t>();  
//       AscendC::DataCopy(yGm[globalBufferIndex + progress * this->tileDataNum], yLocal, this->processDataNum);
//       outQueueY.FreeTensor(yLocal);
//     }
    
//     __aicore__ inline int GetPos(int target_index, int inputNo) {
//         int source_index = 0;  // 源Tensor的索引
//         int stride = 1;        // 当前维度的步长
//         // 从最低维度向最高维度遍历
//         for (int dim = this->maxShapeDim - 1; dim >= 0; --dim) {
//             int full_dim_size = shapefull[dim];  // 广播后当前维度的大小
//             int src_dim_size = shape[inputNo][dim];  // 源Tensor当前维度的大小

//             // 计算当前维度的坐标
//             int coord = target_index % full_dim_size;
//             target_index = target_index / full_dim_size;
            
//             // 如果源Tensor的当前维度不是1，则累加索引
//             if (src_dim_size > 1) {
//                 source_index += coord * stride;
//                 stride *= src_dim_size;
//             }
//             // 如果源Tensor的当前维度是1，则跳过（广播维度）
//         }

//         return source_index;
//     }
//     // 从offset索引开始到offset+length索引的全部数据，映射回原始输入x0
//     __aicore__ inline void BroadCINPUTX1(LocalTensor<DTYPE_X1> &dst, uint32_t offset, uint32_t length) {
//         // 对每一个数
//         for(uint32_t i = 0; i < length; i++) {
//             // 在dst中的索引位置 istart
//             int istart = i + offset;
//             // 在原src中的索引位置 idxtmp
//             int idxtmp = GetPos(istart, 0);
//             DTYPE_X1 tmp = x1Gm.GetValue(idxtmp);
//             dst.SetValue(i, tmp);
//         }
//     }
//     __aicore__ inline void BroadCINPUTX2(LocalTensor<DTYPE_X1> &dst, uint32_t offset, uint32_t length) {
//         // 对每一个数
//         for(uint32_t i = 0; i < length; i++) {
//             // 在dst中的索引位置 istart
//             int istart = i + offset;
//             // 在原src中的索引位置 idxtmp
//             int idxtmp = GetPos(istart, 1);
//             DTYPE_X1 tmp = x2Gm.GetValue(idxtmp);
//             dst.SetValue(i, tmp);
//         }
//     }

// private:
//     AscendC::TPipe pipe;
//     TBuf<TPosition::VECCALC> calc_buf_1, calc_buf_2, calc_buf_3, calc_buf_4;
//     TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> x1_inque, x2_inque;
//     TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY;
//     GlobalTensor<DTYPE_X1> x1Gm, x2Gm;
//     GlobalTensor<int8_t> yGm;
//     uint32_t coreDataNum;
//     uint32_t tileNum;
//     uint32_t tileDataNum;
//     uint32_t tailDataNum;
//     uint32_t processDataNum;
//     int64_t maxShapeDim;
//     int64_t shape[2][10];
//     int64_t shapefull[10];
//     uint32_t globalBufferIndex;
//     uint8_t is_broadcast;
// };
// extern "C" __global__ __aicore__ void greater(GM_ADDR x1, GM_ADDR x2, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
//     GET_TILING_DATA(tiling_data, tiling);
//     // TODO: user kernel impl
//     KernelGreater op;
//     TileVar tilevar; 
    
//     tilevar.x1_length = tiling_data.x1_length;
//     tilevar.x2_length =  tiling_data.x2_length;  
//     tilevar.maxShapeDim =  tiling_data.maxShapeDim;  
//     tilevar.is_broadcast = tiling_data.is_broadcast;
//     for(int32_t i = 0; i < inputVarNum * maxDimNum; i++) {
//         tilevar.ss[i] = tiling_data.shape[i];  
//     }
//     for(int32_t i = 0; i < maxDimNum; i++) {
//         tilevar.sf[i] = tiling_data.shapefull[i];  
//     }
//     op.Init(x1, x2, y, tiling_data.smallCoreDataNum,
//             tiling_data.bigCoreDataNum, tiling_data.finalBigTileNum,
//             tiling_data.finalSmallTileNum, tiling_data.tileDataNum,
//             tiling_data.smallTailDataNum, tiling_data.bigTailDataNum,
//             tiling_data.tailBlockNum, &tilevar);
//     op.Process();
// }