#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"


// Strict greater-than predicate used by the scalar argmax scan.
// Returns true when `lhs` must replace the current maximum.
template<typename U>
__aicore__ inline bool com(U lhs, U rhs) {
    return lhs > rhs;
}

// half specialization: widen both operands to float before comparing, since
// on-device half comparison is done via float arithmetic here.
template<>
__aicore__ inline bool com<half>(half lhs, half rhs) {
    return static_cast<float>(lhs) > static_cast<float>(rhs);
}

constexpr int32_t BUFFER_NUM = 2;
// Scalar (non-vectorized) argmax fallback, element-by-element via GetValue.
// Used by the kernel entry for uint8/half/int32 inputs.
template<typename T> class ArgMaxWithValuebase{
public:
    __aicore__ inline ArgMaxWithValuebase(){}
    // Configure global buffers and loop geometry.
    //   x        : input tensor in global memory
    //   values   : per-slot maxima (output)
    //   indice   : per-slot argmax indices (output)
    //   datatype : selects how (block_size, nxt_jump, nxt_idx) encode the
    //              iteration space (set by the host tiling code):
    //                1: block_size total elements, scanned in nxt_jump-strided groups
    //                2: block_size slots, nxt_jump elements each
    //                3: block_size slots, nxt_idx elements each
    //              TODO(review): exact layout semantics come from the tiling
    //              implementation — confirm against the host side.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR values, GM_ADDR indice, int32_t block_size, int32_t nxt_jump, int32_t nxt_idx , int32_t datatype) {
        if(datatype == 1){
            this->totalLength = block_size;
            this->block_size = nxt_jump;
            this->loopcount = block_size / nxt_jump;
        }else if(datatype == 2){
            this->totalLength = block_size * nxt_jump;
            this->block_size = block_size;
            this->loopcount = nxt_jump;
        }else if(datatype == 3){
            this->totalLength = block_size * nxt_idx;
            this->block_size = block_size;
            this->loopcount = nxt_idx;
        }else{
            // FIX: previously these members were left uninitialized for an
            // unexpected datatype, making Process() read indeterminate values.
            this->totalLength = 0;
            this->block_size = 0;
            this->loopcount = 0;
        }
        this->nxt_jump = nxt_jump;
        this->nxt_idx = nxt_idx;
        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ T*>(x), this->totalLength);
        valGm.SetGlobalBuffer(reinterpret_cast<__gm__ T*>(values), this->block_size);
        idxGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(indice), this->block_size);
     }
     // For each of block_size output slots: scan loopcount elements with
     // stride nxt_jump starting at slot*nxt_idx, keeping the max value and
     // the (0-based) position where it was first seen.
     __aicore__ inline void Process() {
        for(uint32_t count = 0; count < this->block_size; ++count){ // unsigned to match member types
            uint32_t base = count * this->nxt_idx;
            int32_t max_idx = 0;
            T max_val = xGm.GetValue(base);
            for(uint32_t j = 1; j < this->loopcount; ++j){
                T cur = xGm.GetValue(base + j * this->nxt_jump); // read each element once
                if(com(cur, max_val)){
                    max_val = cur;
                    max_idx = j;
                }
            }
            valGm.SetValue(count, max_val);
            idxGm.SetValue(count, max_idx);
        }
    }
private:
    AscendC::GlobalTensor<T> xGm;            // input
    AscendC::GlobalTensor<T> valGm;          // output maxima
    AscendC::GlobalTensor<int32_t> idxGm;    // output indices
    uint32_t totalLength;   // total input elements addressed
    uint32_t block_size;    // number of (value, index) output pairs
    uint32_t nxt_jump;      // stride between consecutive elements of one reduction
    uint32_t nxt_idx;       // stride between consecutive reduction slots
    uint32_t loopcount;     // elements scanned per reduction
};


// Vectorized float argmax over the last axis, 8 rows per pipeline iteration.
// Each AI core handles `block_size` rows starting at row GetBlockIdx()*block_size.
class ArgMaxWithValueMul{

public:
    __aicore__ inline ArgMaxWithValueMul(){}
    // Bind global buffers for this core's slice and size the UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR values, GM_ADDR indice, int32_t block_size, 
                                int32_t nxt_jump, int32_t nxt_idx, AscendC::TPipe* pipe) {
        uint32_t idx = AscendC::GetBlockIdx();
        this->block_size = block_size;
        this->align_n = (nxt_jump + 7) / 8 * 8; // row length rounded up to 8 floats (32B UB alignment)
        this->nxt_idx = 1;
        this->nxt_jump = nxt_jump;

        uint32_t idx_block_size = idx * block_size;
        xGm.SetGlobalBuffer  (reinterpret_cast<__gm__ float*>  (x) + idx_block_size * nxt_jump, block_size * nxt_jump);
        valGm.SetGlobalBuffer(reinterpret_cast<__gm__ float*>  (values) + idx_block_size, block_size);
        idxGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(indice)  + idx_block_size, block_size);

        pipe->InitBuffer(inQueueX, BUFFER_NUM, this->align_n * 32); // 8 rows * align_n floats * 4B
        pipe->InitBuffer(outQueueidx, BUFFER_NUM, 32);              // 8 int32 indices per iteration
        pipe->InitBuffer(outQueueval, BUFFER_NUM, 32);              // 8 float maxima per iteration
        pipe->InitBuffer(outBuf, 8);                                // ReduceMax {max, index-bits} pair
        // NOTE(review): 32B of work space may be insufficient for large align_n
        // per the ReduceMax work-space formula — confirm against the API doc.
        pipe->InitBuffer(workBuf, 32);
     }
    // Reduce 8 rows per iteration. NOTE(review): the tail iteration assumes
    // block_size is a multiple of 8; otherwise the final CopyOut overruns the
    // output buffers — confirm the tiling guarantees this.
    __aicore__ inline void Process(AscendC::TPipe* pipe) {
        int32_t loop = (this->block_size + 7) / 8;
        for(int32_t i = 0 ; i < loop ; ++i)
        {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }
private:
    // Stage in 8 rows; DataCopyPad pads each row to 32B in UB so Compute can
    // address row j at offset j * align_n.
    __aicore__ inline void CopyIn(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        // FIX: advance the source by the 8 rows consumed per iteration
        // (i * 8 * nxt_jump). The old offset `i * block_size * nxt_jump` only
        // matched CopyOut's `i * 8` row advance when block_size == 8.
        AscendC::DataCopyPad(xLocal, xGm[i * 8 * this->nxt_jump], {8, this->nxt_jump * 4, 0, 0, 0}, {true, 0, 0 , 0});
        inQueueX.EnQue<float>(xLocal);
    }
    __aicore__ inline void Compute(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        AscendC::LocalTensor<float> workLocal = workBuf.Get<float>();
        AscendC::LocalTensor<float> dstLocal  = outBuf.Get<float>();

        AscendC::LocalTensor<int32_t> idxLocal = outQueueidx.AllocTensor<int32_t>();
        // FIX: valLocal was allocated from outQueueidx but enqueued/freed via
        // outQueueval, corrupting both queues' buffer pools.
        AscendC::LocalTensor<float>   valLocal = outQueueval.AllocTensor<float>();

        for(int32_t j = 0 ; j < 8 ; ++j)
        {
            // calcIndex=true: dstLocal[0] = max value, dstLocal[1] = index bits.
            AscendC::ReduceMax<float>(dstLocal, xLocal[j * this->align_n], workLocal, this->align_n, true);
            float maxIndex = dstLocal.GetValue(1);
            // The index is stored as raw bits in a float slot; reinterpret, don't convert.
            idxLocal.SetValue(j, *reinterpret_cast<int32_t*>(&maxIndex));
            valLocal.SetValue(j, dstLocal.GetValue(0));
        }

        outQueueidx.EnQue<int32_t>(idxLocal);
        outQueueval.EnQue<float>(valLocal);
        inQueueX.FreeTensor(xLocal);
    }
    // Flush this iteration's 8 results to global memory.
    __aicore__ inline void CopyOut(int32_t i){
        AscendC::LocalTensor<int32_t> idxLocal = outQueueidx.DeQue<int32_t>();
        AscendC::LocalTensor<float>   valLocal = outQueueval.DeQue<float>();
        AscendC::DataCopy(valGm[i * 8], valLocal, 8);
        AscendC::DataCopy(idxGm[i * 8], idxLocal, 8);
        outQueueidx.FreeTensor(idxLocal);
        outQueueval.FreeTensor(valLocal);
    }
private:
    AscendC::GlobalTensor<float> xGm;        // this core's input slice
    AscendC::GlobalTensor<float> valGm;      // output maxima
    AscendC::GlobalTensor<int32_t> idxGm;    // output indices
    uint32_t align_n;      // nxt_jump rounded up to 8 floats
    uint32_t block_size;   // rows handled by this core
    uint32_t nxt_idx;
    uint32_t loopcount;
    uint32_t nxt_jump;     // elements per row
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueidx, outQueueval;
    AscendC::TBuf<AscendC::TPosition::VECOUT> outBuf;
    AscendC::TBuf<AscendC::TPosition::VECOUT> workBuf;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;

};

// Vectorized float argmax over the last axis, one row per pipeline iteration.
// Per-row results accumulate in UB (idxBuf/valBuf) and are flushed to GM once.
class ArgMaxWithValueMulV2{

public:
    __aicore__ inline ArgMaxWithValueMulV2(){}
    // Bind global buffers for this core's slice and size the UB buffers.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR values, GM_ADDR indice, int32_t block_size, 
                                int32_t nxt_jump, int32_t nxt_idx, AscendC::TPipe* pipe) {
        uint32_t idx = AscendC::GetBlockIdx();
        this->block_size = block_size;
        this->align_n = (nxt_jump + 7) / 8 * 8; // row length rounded up to 8 floats (32B UB alignment)
        this->nxt_idx = 1;
        this->nxt_jump = nxt_jump;

        uint32_t idx_block_size = idx * block_size;
        xGm.SetGlobalBuffer  (reinterpret_cast<__gm__ float*>  (x) + idx_block_size * nxt_jump, block_size * nxt_jump);
        valGm.SetGlobalBuffer(reinterpret_cast<__gm__ float*>  (values) + idx_block_size, block_size);
        idxGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(indice) + idx_block_size, block_size);

        pipe->InitBuffer(inQueueX, BUFFER_NUM, this->align_n * sizeof(float)); // one padded row
        pipe->InitBuffer(idxBuf , block_size * sizeof(int32_t));               // all indices for this core
        pipe->InitBuffer(valBuf , block_size * sizeof(float));                 // all maxima for this core
        pipe->InitBuffer(outBuf, 128);                                         // ReduceMax {max, index-bits} result
        // NOTE(review): 32B of work space may be insufficient for large align_n
        // per the ReduceMax work-space formula — confirm against the API doc.
        pipe->InitBuffer(workBuf, 32);
     }
    // Reduce each row, then flush the accumulated results in one copy.
    __aicore__ inline void Process(AscendC::TPipe* pipe) {
        int32_t loop = this->block_size;
        for(int32_t i = 0 ; i < loop ; ++i)
        {
            CopyIn(i);
            Compute(i);
        }

        AscendC::LocalTensor<int32_t> idxLocal = idxBuf.Get<int32_t>();
        AscendC::LocalTensor<float>   valLocal = valBuf.Get<float>();

        // FIX: removed leftover debug DumpTensor calls from the hot path.
        // NOTE(review): DataCopy expects a 32B-aligned element count; if
        // block_size is not a multiple of 8 this copy truncates/overruns —
        // confirm the tiling guarantees alignment.
        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, idxLocal, this->block_size); 
    }
private:
    // Stage in one row of nxt_jump floats (nxt_jump*4 bytes), padded to 32B in UB.
    __aicore__ inline void CopyIn(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        AscendC::DataCopyPad(xLocal, xGm[i * this->nxt_jump], {1, this->nxt_jump * 4, 0, 0, 0}, {true, 0, 0 , 0});
        inQueueX.EnQue<float>(xLocal);
    }
    __aicore__ inline void Compute(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        AscendC::LocalTensor<float> workLocal = workBuf.Get<float>();
        AscendC::LocalTensor<float> dstLocal  = outBuf.Get<float>();

        AscendC::LocalTensor<int32_t> idxLocal = idxBuf.Get<int32_t>();
        AscendC::LocalTensor<float>   valLocal = valBuf.Get<float>();

        // calcIndex=true: dstLocal[0] = max value, dstLocal[1] = index bits.
        AscendC::ReduceMax<float>(dstLocal, xLocal, workLocal, this->align_n, true);

        float maxIndex = dstLocal.GetValue(1);
        // The index is stored as raw bits in a float slot; reinterpret, don't convert.
        idxLocal.SetValue(i, *reinterpret_cast<int32_t*>(&maxIndex));
        valLocal.SetValue(i, dstLocal.GetValue(0));

        // FIX: removed per-iteration debug DumpTensor calls.
        inQueueX.FreeTensor(xLocal);
    }
private:
    AscendC::GlobalTensor<float> xGm;        // this core's input slice
    AscendC::GlobalTensor<float> valGm;      // output maxima
    AscendC::GlobalTensor<int32_t> idxGm;    // output indices
    uint32_t align_n;      // nxt_jump rounded up to 8 floats
    uint32_t block_size;   // rows handled by this core
    uint32_t nxt_idx;
    uint32_t loopcount;
    uint32_t nxt_jump;     // elements per row
    AscendC::TBuf<> valBuf , idxBuf;
    AscendC::TBuf<AscendC::TPosition::VECOUT> outBuf;
    AscendC::TBuf<AscendC::TPosition::VECOUT> workBuf;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;

};
// Vectorized float argmax over the last axis; this is the variant selected by
// the kernel entry point for data_type outside {1,2,3}. Each AI core handles
// `block_size` rows of `nxt_jump` floats starting at row GetBlockIdx()*block_size,
// staged in with one bulk DataCopyPad and reduced row-by-row with ReduceMax.
class ArgMaxWithValueMulV3{

public:
    __aicore__ inline ArgMaxWithValueMulV3(){}
    // Bind the per-core global-memory slices and size the UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR values, GM_ADDR indice, int32_t block_size, 
                                int32_t nxt_jump, int32_t nxt_idx, AscendC::TPipe* pipe) {
        uint32_t idx = AscendC::GetBlockIdx();
        // this->loopcount = (idx + 1) * block_size > nxt_idx ? nxt_idx - idx * block_size : block_size;
        this->block_size = block_size;
        // Row length rounded up to a multiple of 8 floats (32B UB alignment).
        this->align_n = (nxt_jump + 7) / 8 * 8;
        this->nxt_jump = nxt_jump;

        uint32_t idx_block_size = idx * block_size;
        xGm.SetGlobalBuffer  (reinterpret_cast<__gm__ float*>  (x) + idx_block_size * nxt_jump, block_size * nxt_jump);
        valGm.SetGlobalBuffer(reinterpret_cast<__gm__ float*>  (values) + idx_block_size, block_size);
        idxGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(indice) + idx_block_size, block_size);
        
        uint32_t base = 4 * this->block_size; // bytes for block_size float/int32 results
        pipe->InitBuffer(inQueueX, 1, base * this->align_n ); // block_size rows * align_n floats * 4B
        pipe->InitBuffer(outQueueVal, 1,base);   // maxima (32B-aligned size assumed — TODO confirm)
        pipe->InitBuffer(outQueueIdx, 1, base); // indices (same alignment assumption)
        pipe->InitBuffer(workQueue, 1, 32); // translated original note: minimum work space per the formula is 32, i.e. 64 bytes — verify for large align_n
        pipe->InitBuffer(outQueueDst, 1, 8); // one ReduceMax {max, index-bits} pair

        // AscendC::LocalTensor<float> valLocal = outQueueidx.AllocTensor<float>();
        // AscendC::LocalTensor<int32_t> idxLocal = outQueueval.AllocTensor<int32_t>();
        // outQueueidx.EnQue<float>(valLocal);
        // outQueueval.EnQue<int32_t>(idxLocal);

     }
    // Single-pass pipeline: one bulk copy-in, then compute (which also writes
    // the results back to GM itself — CopyOut is intentionally not called).
    __aicore__ inline void Process(AscendC::TPipe* pipe) {
        CopyIn(0);
        Compute(0);
        // CopyOut(0);
    }
    // Experimental non-pipelined variant (no EnQue/DeQue between copy and
    // compute); not called by the kernel entry. The commented-out SetFlag/
    // WaitFlag below suggests a known MTE2->MTE3 ordering concern —
    // NOTE(review): confirm synchronization before enabling this path.
    __aicore__ inline void Process1(AscendC::TPipe* pipe) {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        AscendC::DataCopyPad(xLocal, xGm, {static_cast<uint16_t>(this->block_size), this->nxt_jump * 4, 0, 0, 0}, {true, 0, 0 , 0});
        

        AscendC::LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        AscendC::LocalTensor<float> dstLocal  = outQueueDst.AllocTensor<float>();
        AscendC::LocalTensor<float> valLocal = outQueueVal.AllocTensor<float>();
        AscendC::LocalTensor<int32_t> idxLocal = outQueueIdx.AllocTensor<int32_t>();
        for(int32_t j = 0 ; j < this->block_size ; ++j)
        {
            // calcIndex=true: dstLocal[0] = max value, dstLocal[1] = index bits.
            AscendC::ReduceMax<float>(dstLocal, xLocal[j * this->align_n], workLocal, this->align_n, true);
            float maxIndex = dstLocal.GetValue(1);
            // The index is stored as raw bits in a float slot; reinterpret, don't convert.
            idxLocal.SetValue(j, *reinterpret_cast<int32_t*>(&maxIndex));
            valLocal.SetValue(j, dstLocal.GetValue(0));
        }

        // AscendC::SetFlag<AscendC::HardEvent::MTE2_MTE3>(0);
        // AscendC::WaitFlag<AscendC::HardEvent::MTE2_MTE3>(0);

        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, idxLocal, this->block_size); 

        inQueueX.FreeTensor(xLocal);
        outQueueDst.FreeTensor(dstLocal);
        workQueue.FreeTensor(workLocal);
        outQueueIdx.FreeTensor(idxLocal);
        outQueueVal.FreeTensor(valLocal);
    }

private:
    // Stage in all block_size rows at once; DataCopyPad pads each row to 32B
    // in UB, so row j lives at offset j * align_n.
    __aicore__ inline void CopyIn(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        AscendC::DataCopyPad(xLocal, xGm, {static_cast<uint16_t>(this->block_size), this->nxt_jump * 4, 0, 0, 0}, {true, 0, 0 , 0});
        inQueueX.EnQue<float>(xLocal);
    }
    // Reduce every row and write results straight to GM.
    // NOTE(review): valLocal/idxLocal are copied to GM without going through
    // EnQue/DeQue, so there is no queue-inserted sync between the scalar
    // SetValue writes and the MTE3 DataCopy — confirm ordering is safe.
    __aicore__ inline void Compute(int32_t i)
    {  
        int32_t loop = this->block_size;
        AscendC::LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        AscendC::LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        AscendC::LocalTensor<float> dstLocal  = outQueueDst.AllocTensor<float>();
        AscendC::LocalTensor<float> valLocal = outQueueVal.AllocTensor<float>();
        AscendC::LocalTensor<int32_t> idxLocal = outQueueIdx.AllocTensor<int32_t>();
        for(int32_t j = 0 ; j < loop ; ++j)
        {
            // calcIndex=true: dstLocal[0] = max value, dstLocal[1] = index bits.
            AscendC::ReduceMax<float>(dstLocal, xLocal[j * this->align_n], workLocal, this->align_n, true);
            float maxIndex = dstLocal.GetValue(1);
            // The index is stored as raw bits in a float slot; reinterpret, don't convert.
            idxLocal.SetValue(j, *reinterpret_cast<int32_t*>(&maxIndex));
            valLocal.SetValue(j, dstLocal.GetValue(0));
        }
        // outQueueIdx.EnQue<int32_t>(idxLocal);
        // outQueueVal.EnQue<float>(valLocal);
        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, idxLocal, this->block_size); 
        inQueueX.FreeTensor(xLocal);
        outQueueDst.FreeTensor(dstLocal);
        workQueue.FreeTensor(workLocal);
        outQueueIdx.FreeTensor(idxLocal);
        outQueueVal.FreeTensor(valLocal);
    }
    // Unused in the current Process(): only valid if Compute() enqueues the
    // idx/val tensors instead of copying them out and freeing them itself.
    __aicore__ inline void CopyOut(int32_t i){
        AscendC::LocalTensor<int32_t> idxLocal = outQueueIdx.DeQue<int32_t>();
        AscendC::LocalTensor<float>   valLocal = outQueueVal.DeQue<float>();
        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, idxLocal, this->block_size); 
        outQueueIdx.FreeTensor(idxLocal);
        outQueueVal.FreeTensor(valLocal);
    }
private:
    AscendC::GlobalTensor<float> xGm;        // this core's input slice
    AscendC::GlobalTensor<float> valGm;      // output maxima
    AscendC::GlobalTensor<int32_t> idxGm;    // output indices
    uint32_t align_n;      // nxt_jump rounded up to 8 floats
    uint32_t block_size;   // rows handled by this core
    uint32_t nxt_idx;      // unused in this variant
    uint32_t loopcount;    // unused in this variant
    uint32_t nxt_jump;     // elements per row
    // AscendC::LocalTensor<float> valLocal;
    // AscendC::LocalTensor<int32_t> idxLocal;
    // AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueidx, outQueueval;
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> outQueueDst;
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> workQueue;
    AscendC::TQue<AscendC::QuePosition::VECIN, 1> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> outQueueIdx , outQueueVal;
};

// Vectorized float argmax: one bulk CopyIn, then ReduceMax per row into a
// packed {value, index-bits} buffer that GatherMask de-interleaves into the
// value and index outputs in one shot (no per-row scalar SetValue).
class ArgMaxWithValueMulV4{

public:
    __aicore__ inline ArgMaxWithValueMulV4(){}
    // Bind the per-core global-memory slices and size the UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR values, GM_ADDR indice, int32_t block_size, 
                                int32_t nxt_jump, int32_t nxt_idx, AscendC::TPipe* pipe) {
        uint32_t idx = AscendC::GetBlockIdx();
        this->block_size = block_size;
        this->align_n = (nxt_jump + 7) / 8 * 8; // row length rounded up to 8 floats (32B UB alignment)
        this->nxt_jump = nxt_jump;

        uint32_t idx_block_size = idx * block_size;
        xGm.SetGlobalBuffer  (reinterpret_cast<__gm__ float*>  (x) + idx_block_size * nxt_jump, block_size * nxt_jump);
        valGm.SetGlobalBuffer(reinterpret_cast<__gm__ float*>  (values) + idx_block_size, block_size);
        idxGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(indice) + idx_block_size, block_size);

        uint32_t base = 4 * this->block_size;                 // bytes for block_size float/int32 results
        pipe->InitBuffer(inQueueX, 1, base * this->align_n ); // block_size rows * align_n floats * 4B
        pipe->InitBuffer(outQueueVal, 1, base);               // de-interleaved maxima
        pipe->InitBuffer(outQueueIdx, 1, base);               // de-interleaved index bits
        // NOTE(review): 32B of work space may be insufficient for large align_n
        // per the ReduceMax work-space formula — confirm against the API doc.
        pipe->InitBuffer(workQueue, 1, 32);
        pipe->InitBuffer(outQueueDst, 1, base * 2);           // {val, idx-bits} pair per row
     }
    __aicore__ inline void Process(AscendC::TPipe* pipe) {
        CopyIn(0);
        Compute(0);
        // FIX: the previous CopyOut(0) call dequeued outQueueIdx/outQueueVal,
        // but Compute() never enqueues them (and already copies the results to
        // GM and frees the tensors itself) — DeQue of never-enqueued tensors.
    }

private:
    // Stage in all block_size rows at once; DataCopyPad pads each row to 32B
    // in UB, so row j lives at offset j * align_n.
    __aicore__ inline void CopyIn(int32_t i)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        AscendC::DataCopyPad(xLocal, xGm, {static_cast<uint16_t>(this->block_size), this->nxt_jump * 4, 0, 0, 0}, {true, 0, 0 , 0});
        inQueueX.EnQue<float>(xLocal);
    }
    __aicore__ inline void Compute(int32_t i)
    {
        int32_t loop = this->block_size;
        AscendC::LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        AscendC::LocalTensor<float> workLocal = workQueue.AllocTensor<float>();
        AscendC::LocalTensor<float> dstLocal  = outQueueDst.AllocTensor<float>();
        AscendC::LocalTensor<float> valLocal = outQueueVal.AllocTensor<float>();
        AscendC::LocalTensor<float> idxLocal = outQueueIdx.AllocTensor<float>();
        for(int32_t j = 0 ; j < loop ; ++j)
        {
            // calcIndex=true packs {max, index-bits} into dstLocal[2j], dstLocal[2j+1].
            AscendC::ReduceMax<float>(dstLocal[2 * j], xLocal[j * this->align_n], workLocal, this->align_n, true);
        }

        // FIX: removed leftover debug DumpTensor calls.
        uint32_t mask = 0;       // mask must be 0 in normal (built-in pattern) mode
        uint64_t rsvdCnt = 0;    // receives the number of elements the gather kept
        uint8_t src1Pattern = 2; // built-in pattern: odd elements (the index bits)
        uint8_t src2Pattern = 1; // built-in pattern: even elements (the max values)
        AscendC::GatherMask(idxLocal, dstLocal, src1Pattern, false, mask, { 1, 1, 0, 0 }, rsvdCnt);
        AscendC::GatherMask(valLocal, dstLocal, src2Pattern, false, mask, { 1, 1, 0, 0 }, rsvdCnt);
        // The gathered index slots hold raw int32 bits; reinterpret, don't convert.
        AscendC::LocalTensor<int32_t> interpreTensor = idxLocal.ReinterpretCast<int32_t>();
        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, interpreTensor, this->block_size); 
        inQueueX.FreeTensor(xLocal);
        outQueueDst.FreeTensor(dstLocal);
        workQueue.FreeTensor(workLocal);
        outQueueIdx.FreeTensor(idxLocal);
        outQueueVal.FreeTensor(valLocal);
    }
    // Unused: only valid if Compute() enqueues the idx/val tensors instead of
    // copying them out and freeing them itself.
    __aicore__ inline void CopyOut(int32_t i){
        AscendC::LocalTensor<int32_t> idxLocal = outQueueIdx.DeQue<int32_t>();
        AscendC::LocalTensor<float>   valLocal = outQueueVal.DeQue<float>();
        AscendC::DataCopy(valGm, valLocal, this->block_size);
        AscendC::DataCopy(idxGm, idxLocal, this->block_size); 
        outQueueIdx.FreeTensor(idxLocal);
        outQueueVal.FreeTensor(valLocal);
    }
private:
    AscendC::GlobalTensor<float> xGm;        // this core's input slice
    AscendC::GlobalTensor<float> valGm;      // output maxima
    AscendC::GlobalTensor<int32_t> idxGm;    // output indices
    uint32_t align_n;      // nxt_jump rounded up to 8 floats
    uint32_t block_size;   // rows handled by this core
    uint32_t nxt_idx;      // unused in this variant
    uint32_t loopcount;    // unused in this variant
    uint32_t nxt_jump;     // elements per row
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> outQueueDst;
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> workQueue;
    AscendC::TQue<AscendC::QuePosition::VECIN, 1> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, 1> outQueueIdx , outQueueVal;
};


// Kernel entry point: unpack the host-side tiling data and dispatch on
// data_type — 1/2/3 select the scalar fallback specialized for uint8/half/
// int32 respectively; anything else takes the vectorized float path (V3).
extern "C" __global__ __aicore__ void arg_max_with_value(GM_ADDR x, GM_ADDR indice, GM_ADDR values, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling); // shape/split parameters computed on the host
    switch (tiling_data.data_type) {
    case 1: {
        ArgMaxWithValuebase<uint8_t> op;
        op.Init(x, values, indice, tiling_data.block_size, tiling_data.nxt_jump, tiling_data.nxt_idx, tiling_data.data_type);
        op.Process();
        break;
    }
    case 2: {
        ArgMaxWithValuebase<half> op;
        op.Init(x, values, indice, tiling_data.block_size, tiling_data.nxt_jump, tiling_data.nxt_idx, tiling_data.data_type);
        op.Process();
        break;
    }
    case 3: {
        ArgMaxWithValuebase<int32_t> op;
        op.Init(x, values, indice, tiling_data.block_size, tiling_data.nxt_jump, tiling_data.nxt_idx, tiling_data.data_type);
        op.Process();
        break;
    }
    default: {
        AscendC::TPipe pipe;
        ArgMaxWithValueMulV3 op;
        op.Init(x, values, indice, tiling_data.block_size, tiling_data.nxt_jump, tiling_data.nxt_idx, &pipe);
        op.Process(&pipe);
        break;
    }
    }
}
