#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
#include <type_traits>
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 1;
constexpr int32_t JIASU_NUM = 4;
constexpr int32_t JIASU_NUM1 = 8;
// Fast path (tiling key 1) is specialized for float input with a fixed shape:
// compress1 == 511 elements per row; each core handles 12 or 13 rows (12/13 split).
// Fast-path ArgMaxWithValue kernel for a compile-time-fixed shape.
// Each "row" is compress1 contiguous floats; rows are split across AIV cores
// so the first compress0 cores process compress2+1 rows each and the rest
// process compress2 rows each. Per row it emits the max value and its index.
template<typename T> class KernelArgMaxWithValue_fast {
public:
    __aicore__ inline KernelArgMaxWithValue_fast() {}
    // Computes this core's row range, binds GM buffers at the per-core offset,
    // and allocates all queues/scratch from the caller-owned TPipe.
    //   x      : input tensor, read as float
    //   indice : per-row argmax output
    //   values : per-row max-value output
    //   pipeIn : externally owned TPipe used for every InitBuffer call
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR indice, GM_ADDR values, TPipe* pipeIn) {
        
        // this->compress[0] = compress[0];
        // this->compress1 = compress1;
        int32_t block_idx_user = GetBlockIdx();
        // pipe = pipeIn;

        if (block_idx_user < compress0)
        {
            // First compress0 cores absorb the remainder rows (one extra each).
            length = compress2 + 1;
            num_calcu = block_idx_user * length;
        }
        else
        {
            length = compress2;
            // num_calcu = num_calcu - block_idx_user + compress0;
            num_calcu = block_idx_user * compress2 + compress0; // skip the extra rows taken by earlier cores
        }
        // First input ELEMENT this core processes; *4 below converts to bytes (sizeof(float)).
        start_calcu = num_calcu * this->compress1;

        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(x + start_calcu*4));
        // xGm = xGm[start_calcu];
        // Input is staged in three chunks — rows [0,JIASU_NUM), [JIASU_NUM,JIASU_NUM1),
        // [JIASU_NUM1,length) — presumably so later copies overlap with compute on
        // the first chunk. The +32 floats are slack for DataCopyPad alignment.
        pipeIn->InitBuffer(inQueueX, BUFFER_NUM, (JIASU_NUM * this->compress1+32) * sizeof(float));
        pipeIn->InitBuffer(inQueueX1, BUFFER_NUM, ((JIASU_NUM1-JIASU_NUM) * this->compress1+32) * sizeof(float));
        pipeIn->InitBuffer(inQueueX2, BUFFER_NUM, ((length-JIASU_NUM1) * this->compress1+32) * sizeof(float));
        CopyIn(length, this->compress1);

        // Output offsets use num_calcu*4, i.e. this assumes sizeof(DTYPE_INDICE) == 4
        // (int32 index) — TODO confirm for other index dtypes.
        indiceGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_INDICE *>(indice + num_calcu*4));
        valuesGm.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(values + num_calcu*4));
        
        
        // indiceGm = indiceGm[num_calcu];
        // valuesGm = valuesGm[num_calcu];

        pipeIn->InitBuffer(outQueueIndice, BUFFER_NUM, (length+32) * sizeof(DTYPE_INDICE));
        pipeIn->InitBuffer(outQueueValues, BUFFER_NUM, (length+50) * sizeof(float));
        

        // Scratch: tmp1 holds the interleaved (value,index) pairs of the first
        // reduction stage (16 floats per row); tmp2/tmp3 hold de-interleaved
        // indices and index-offset tables (8 int32 per row). See Compute().
        pipeIn->InitBuffer(QueueTmp1, (length*16+50) * sizeof(float));
        pipeIn->InitBuffer(QueueTmp2, (length*8+50) * sizeof(float));
        pipeIn->InitBuffer(QueueTmp3, (length*8+50) * sizeof(float));

    }

    // Pipeline: reduce all rows, then drain values, then drain indices.
    __aicore__ inline void Process() {
        Compute(length);
        CopyOut2(length);
        CopyOut1(length);
    }
private:
     // Copies group_num rows of one_length floats from GM into the three input
     // queues, split at JIASU_NUM and JIASU_NUM1 rows (matching the buffers
     // sized in Init).
     __aicore__ inline void CopyIn(uint32_t group_num, uint32_t one_length)
    {
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        DataCopyExtParams copyParams {static_cast<uint16_t>(JIASU_NUM), static_cast<uint32_t>(one_length * sizeof(float)), 0, 0, 0}; // the last DataCopyExtParams member is a reserved (rsv) field
        DataCopyPadExtParams<float> padParams {false, 0, 1, 0};
        DataCopyPad(xLocal, xGm, copyParams, padParams);
        inQueueX.EnQue(xLocal);


        LocalTensor<float> xLocal1 = inQueueX1.AllocTensor<float>();
        DataCopyExtParams copyParams1 {static_cast<uint16_t>(JIASU_NUM1-JIASU_NUM), static_cast<uint32_t>(one_length * sizeof(float)), 0, 0, 0}; // the last DataCopyExtParams member is a reserved (rsv) field
        DataCopyPad(xLocal1, xGm[JIASU_NUM*one_length], copyParams1, padParams);
        inQueueX1.EnQue(xLocal1);

        LocalTensor<float> xLocal2 = inQueueX2.AllocTensor<float>();
        DataCopyExtParams copyParams2 {static_cast<uint16_t>(group_num-JIASU_NUM1), static_cast<uint32_t>(one_length * sizeof(float)), 0, 0, 0}; // the last DataCopyExtParams member is a reserved (rsv) field
        DataCopyPad(xLocal2, xGm[JIASU_NUM1*one_length], copyParams2, padParams);
        inQueueX2.EnQue(xLocal2);
    }
    // Two-level reduction per row:
    //   stage 1: WholeReduceMax over 64-lane segments (8 segments per row,
    //            8*64 = 512 >= compress1 == 511 — assumes rows fit 8 segments);
    //   stage 2: WholeReduceMax over each row's 8 partial results.
    // The winning in-row index is reconstructed from (segment base + in-segment
    // index) via GatherMask / Add / Gather.
    __aicore__ inline void Compute(uint32_t group_num)
    {
        LocalTensor<float> tmp1 = QueueTmp1.Get<float>();
        LocalTensor<int32_t> tmp2 = QueueTmp2.Get<int32_t>();
        LocalTensor<int32_t> tmp3 = QueueTmp3.Get<int32_t>();
        // tmp3[0..7]           = {0,64,...,448}: element base of each 64-lane segment.
        // tmp3[8..8+group_num) = {0,8,16,...}  : base of each row's 8 partials in tmp2.
        CreateVecIndex(tmp3, 0, 8);
        CreateVecIndex(tmp3[8], 0, group_num);
        Muls(tmp3, tmp3, 64, 8);
        Muls(tmp3[8], tmp3[8], 8, group_num);

        LocalTensor<float> valuesLocal = outQueueValues.AllocTensor<float>();
        LocalTensor<DTYPE_INDICE> indiceLocal = outQueueIndice.AllocTensor<DTYPE_INDICE>();

        // Stage 1 on each input chunk as it arrives; ORDER_VALUE_INDEX writes
        // interleaved (value, index) pairs into tmp1.
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        WholeReduceMax(tmp1, xLocal, 64, JIASU_NUM*8, 1, 1, 8, ReduceOrder::ORDER_VALUE_INDEX); // use the default order

        LocalTensor<float> xLocal1 = inQueueX1.DeQue<float>();
        WholeReduceMax(tmp1[JIASU_NUM*8*2], xLocal1, 64, 8*(JIASU_NUM1-JIASU_NUM), 1, 1, 8, ReduceOrder::ORDER_VALUE_INDEX); // use the default order

        LocalTensor<float> xLocal2 = inQueueX2.DeQue<float>();
        WholeReduceMax(tmp1[JIASU_NUM1*8*2], xLocal2, 64, 8*(group_num-JIASU_NUM1), 1, 1, 8, ReduceOrder::ORDER_VALUE_INDEX); // use the default order

        uint64_t rsvdCnt = 0; // count of elements kept by GatherMask
        // De-interleave: built-in pattern 2 keeps the odd lanes (indices) into
        // tmp2; pattern 1 keeps the even lanes (values) back into tmp1 in place.
        GatherMask(tmp2, tmp1.ReinterpretCast<int32_t>(), 2, true, 16*group_num, { 1, 1, 0, 0 }, rsvdCnt);
        GatherMask(tmp1, tmp1, 1, true, 16*group_num, { 1, 1, 0, 0 }, rsvdCnt);
        // Stage 2 (values): final max of each row's 8 partial maxima.
        WholeReduceMax(valuesLocal, tmp1, 8, group_num, 1, 1, 1, ReduceOrder::ORDER_ONLY_VALUE); // value-only result
        outQueueValues.EnQue(valuesLocal);

        // Make partial indices absolute within the row: add each segment's base
        // (tmp3[0..7]) to all rows' 8 partial indices.
        Add(tmp2,tmp2,tmp3, 8, group_num, { 1, 1, 1, 1, 1, 0 });
        // Stage 2 (indices): which of the 8 segments won, per row.
        WholeReduceMax(indiceLocal.ReinterpretCast<float>(), tmp1, 8, group_num, 1, 1, 1, ReduceOrder::ORDER_ONLY_INDEX);
        
        // Byte offsets into tmp2 for the winners: (row base + winning segment) * 4.
        Add(tmp3, tmp3[8], indiceLocal, group_num);
        Muls(tmp3, tmp3, 4, group_num);
        Gather(indiceLocal, tmp2, tmp3.ReinterpretCast<uint32_t>(), (uint32_t)0, group_num);

        outQueueIndice.EnQue(indiceLocal);
        
        inQueueX.FreeTensor(xLocal);
        inQueueX1.FreeTensor(xLocal1);
        inQueueX2.FreeTensor(xLocal2);
    }
    // Drains the index results to GM. NOTE(review): copyParams is sized with
    // sizeof(float), i.e. assumes sizeof(DTYPE_INDICE) == 4 — confirm.
    __aicore__ inline void CopyOut1(uint32_t group_num) {
        LocalTensor<DTYPE_INDICE> indiceLocal = outQueueIndice.DeQue<DTYPE_INDICE>();
        // LocalTensor<float> valuesLocal = outQueueValues.DeQue<float>();

        DataCopyExtParams copyParams {1, static_cast<uint32_t>(group_num * sizeof(float)), 0, 0, 0}; // the last DataCopyExtParams member is a reserved (rsv) field
        DataCopyPad(indiceGm, indiceLocal, copyParams);
        // DataCopyPad(valuesGm, valuesLocal, copyParams);

        outQueueIndice.FreeTensor(indiceLocal);
        // outQueueValues.FreeTensor(valuesLocal);
    }
    // Drains the max-value results to GM (one float per row).
    __aicore__ inline void CopyOut2(uint32_t group_num) {
        // LocalTensor<DTYPE_INDICE> indiceLocal = outQueueIndice.DeQue<DTYPE_INDICE>();
        LocalTensor<float> valuesLocal = outQueueValues.DeQue<float>();

        DataCopyExtParams copyParams {1, static_cast<uint32_t>(group_num * sizeof(float)), 0, 0, 0}; // the last DataCopyExtParams member is a reserved (rsv) field
        // DataCopyPad(indiceGm, indiceLocal, copyParams);
        DataCopyPad(valuesGm, valuesLocal, copyParams);

        // outQueueIndice.FreeTensor(indiceLocal);
        outQueueValues.FreeTensor(valuesLocal);
    }

private:
    TPipe* pipe; // unused — Init receives the pipe as a parameter instead (leftover; confirm before removing)
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX1;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX2;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueIndice;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueValues;
    TBuf<QuePosition::VECCALC> QueueTmp1,QueueTmp2,QueueTmp3;
    
    GlobalTensor<float> xGm;
    GlobalTensor<DTYPE_INDICE> indiceGm;
    GlobalTensor<float> valuesGm;

    // Compile-time shape of this fast path (see Init for how they are used).
    static constexpr uint16_t compress0 = 31;  // cores that take one extra row
    static constexpr uint16_t compress1 = 511; // elements per row
    static constexpr uint16_t compress2 = 12;  // base rows per core
    uint32_t start_calcu;  // first input element handled by this core
    uint32_t num_calcu;    // first row handled by this core
    int32_t length;        // number of rows handled by this core
};

// Generic (scalar) fallback kernel for ArgMaxWithValue, driven by tiling data.
// The input is read as compress0 blocks of compress1 x compress2 elements; for
// each of the compress0 * compress2 output slots it scans the compress1 samples
// spaced compress2 elements apart and writes the max value and its sample index.
//
// Previously the identical triple loop was duplicated four times (once per
// dtype branch), and the float/int32_t branches round-tripped every scalar
// through a one-element LocalTensor (SetValue/GetValue) — a no-op. The loop is
// now written once and the per-dtype conversion is isolated in ToComparable().
template<typename T> class KernelArgMaxWithValue {
public:
    __aicore__ inline KernelArgMaxWithValue() {}

    // Binds the GM buffers and allocates the one-element scratch tensors used
    // for scalar type conversion.
    //   x, indice, values          : GM addresses of input / index output / value output
    //   compress0/compress1/compress2 : shape parameters from tiling data (see class doc)
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR indice, GM_ADDR values, 
                            uint16_t compress0, uint16_t compress1, uint16_t compress2) {
        // ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->compress0 = compress0;
        this->compress1 = compress1;
        this->compress2 = compress2;

        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X *>(x), 1);
        indiceGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_INDICE *>(indice), 1);
        valuesGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_VALUES *>(values), 1);

        // One-element scratch tensors, used only by ToComparable() for casts.
        pipe.InitBuffer(QueueTmp1, 1 * sizeof(float));
        pipe.InitBuffer(QueueTmp2, 1 * sizeof(float));
        pipe.InitBuffer(QueueTmp3, 1 * sizeof(float));
    }

    // Scalar argmax over every output slot. Ties keep the first (lowest-index)
    // maximum, matching the strict '>' comparison of the original code.
    __aicore__ inline void Process() {
        int num = 0;   // linear output slot: i * compress2 + j
        int x_i = 0;   // start element of block i
        for (int i = 0; i < compress0; i++) {
            for (int j = 0; j < compress2; j++) {
                int x_z = x_i + j;                      // sample z == 0 of this slot
                DTYPE_INDICE indice_data = 0;
                DTYPE_VALUES values_data = xGm.GetValue(x_z);
                auto best = ToComparable(values_data);  // comparable copy of current max
                x_z += compress2;
                for (int z = 1; z < compress1; z++) {
                    DTYPE_X x_data = xGm.GetValue(x_z);
                    auto cand = ToComparable(x_data);
                    if (cand > best) {
                        best = cand;
                        values_data = x_data;
                        indice_data = z;
                    }
                    x_z += compress2;
                }
                indiceGm.SetValue(num, indice_data);
                valuesGm.SetValue(num, values_data);
                num++;
            }
            x_i += compress1 * compress2;               // advance to the next block
        }
    }
private:
    // Converts one raw element to a scalar type with a native '>' comparison:
    // float and int32_t pass through unchanged; half is widened to float with a
    // one-element tensor Cast; any other dtype (e.g. bfloat16) is widened in
    // two steps T -> half -> float, preserving the original conversion chain.
    __aicore__ inline auto ToComparable(DTYPE_X v) {
        if constexpr (std::is_same_v<T, float> || std::is_same_v<T, int32_t>) {
            // Natively comparable — no tensor round-trip needed.
            return v;
        } else if constexpr (std::is_same_v<T, half>) {
            auto tmpF = QueueTmp1.Get<float>();
            auto tmpX = QueueTmp2.Get<DTYPE_X>();
            tmpX.SetValue(0, v);
            Cast(tmpF, tmpX, RoundMode::CAST_NONE, 1);
            return tmpF.GetValue(0);
        } else {
            auto tmpF = QueueTmp1.Get<float>();
            auto tmpH = QueueTmp2.Get<half>();
            auto tmpX = QueueTmp3.Get<DTYPE_X>();
            tmpX.SetValue(0, v);
            Cast(tmpH, tmpX, RoundMode::CAST_NONE, 1);
            Cast(tmpF, tmpH, RoundMode::CAST_NONE, 1);
            return tmpF.GetValue(0);
        }
    }

    TPipe pipe;
    TBuf<QuePosition::VECCALC> QueueTmp1, QueueTmp2, QueueTmp3;
    
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_INDICE> indiceGm;
    GlobalTensor<DTYPE_VALUES> valuesGm;

    uint16_t compress0;  // number of outer blocks
    uint16_t compress1;  // samples reduced per output slot
    uint16_t compress2;  // slots per block (also the sample stride)
};
// Kernel entry point: dispatches on the tiling key between the fixed-shape
// fast path (key 1) and the generic tiling-driven scalar path (key 2).
extern "C" __global__ __aicore__ void arg_max_with_value(GM_ADDR x, GM_ADDR indice, GM_ADDR values, GM_ADDR workspace, GM_ADDR tiling) {
    KERNEL_TASK_TYPE_DEFAULT(KERNEL_TYPE_AIV_ONLY); // run on vector (AIV) cores only
    if (TILING_KEY_IS(1)) {
        // Fast path: shape is baked in at compile time; the pipe is created
        // here and handed to the kernel object.
        TPipe pipe;
        KernelArgMaxWithValue_fast<DTYPE_X> kernel;
        kernel.Init(x, indice, values, &pipe);
        kernel.Process();
    } else if (TILING_KEY_IS(2)) {
        // Generic path: shape parameters come from the tiling data.
        GET_TILING_DATA(tiling_data, tiling);
        KernelArgMaxWithValue<DTYPE_X> kernel;
        kernel.Init(x, indice, values,
                    tiling_data.compress0, tiling_data.compress1, tiling_data.compress2);
        kernel.Process();
    }
}