#include "kernel_operator.h"
#pragma GCC optimize("O3")
#pragma GCC optimize("unroll-loops")

using namespace AscendC;


#define BUFFER_NUM 2
template <typename T>
class SIGNKernalFast
{

private:
    TQue<QuePosition::VECIN, 2> inX1;   // input queue (double-buffered)
    TQue<QuePosition::VECOUT, 2> outY;  // output queue (double-buffered)
    TBuf<QuePosition::VECCALC> tmp;     // scratch buffer holding the compare bitmask
    GlobalTensor<T> x1Gm, yGm;          // global-memory views, pre-offset to this core's slice

    uint32_t L, R;                      // L: local base index (always 0 after Init); R: element count of this core's slice
    TPipe *pipe;

#define BUF_SZ 64

public:
    __aicore__ inline SIGNKernalFast() {}

    /// Bind the global buffers and compute this core's slice of the work.
    /// @param input  GM address of the input tensor
    /// @param out    GM address of the output tensor
    /// @param size   total number of elements across all cores
    /// @param length nominal number of elements assigned to each core
    /// @param PIPE   pipe used to initialize the local queues/buffers
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR out, uint32_t size, uint32_t length, TPipe *PIPE)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->pipe = PIPE;

        // This core processes elements [begin, end) of the flat tensor.
        const uint32_t begin = GetBlockIdx() * length;
        uint32_t end = begin + length;
        if (end > size)
            end = size;  // the last core may receive a short slice

        // Offset the GM views so all local indexing starts at 0.
        x1Gm.SetGlobalBuffer((__gm__ T *)input + begin);
        yGm.SetGlobalBuffer((__gm__ T *)out + begin);

        this->R = end - begin;  // number of elements this core owns
        this->L = 0;            // local base; kept for symmetry, always 0

        pipe->InitBuffer(inX1, BUFFER_NUM, BUF_SZ * sizeof(T));
        pipe->InitBuffer(outY, BUFFER_NUM, BUF_SZ * sizeof(T));
        // The compare mask needs BUF_SZ/8 bytes; BUF_SZ bytes is comfortably enough.
        pipe->InitBuffer(tmp, BUF_SZ);
    }

    /// Stage sz elements starting at local offset i from GM into the input queue.
    /// NOTE(review): DataCopy generally requires a 32-byte-aligned copy size; the
    /// tail chunk (sz < BUF_SZ) may violate this for some dtypes — confirm the
    /// host tiling guarantees an aligned total length.
    __aicore__ inline void CopyIn(int i, int sz)
    {
        LocalTensor<T> src0Local = inX1.AllocTensor<T>();
        DataCopy(src0Local, x1Gm[i], sz);
        inX1.EnQue(src0Local);
    }

    /// Compute sign(x) element-wise for one BUF_SZ-sized chunk:
    /// +1 for x > 0, 0 for x == 0, -1 for x < 0.
    __aicore__ inline void Compute(int i, int sz)
    {
        LocalTensor<T> x1Local = inX1.DeQue<T>();
        LocalTensor<T> yLocal = outY.AllocTensor<T>();
        LocalTensor<uint8_t> maskLocal = tmp.Get<uint8_t>();

        // Start from all zeros so that sign(0) == 0.
        Duplicate(yLocal, (T)0, BUF_SZ);

        // Select semantics (VSEL_TENSOR_SCALAR_MODE): mask bit 1 -> src0 tensor,
        // mask bit 0 -> scalar. So comparing with LE keeps the current value (0)
        // where x <= 0 and writes +1 where x > 0.
        CompareScalar(maskLocal, x1Local, (T)0, CMPMODE::LE, BUF_SZ);
        Select(yLocal, maskLocal, yLocal, (T)1, SELMODE::VSEL_TENSOR_SCALAR_MODE, BUF_SZ);

        // Likewise, GE keeps the current value (0 or +1) where x >= 0 and
        // writes -1 where x < 0.
        CompareScalar(maskLocal, x1Local, (T)0, CMPMODE::GE, BUF_SZ);
        Select(yLocal, maskLocal, yLocal, (T)-1, SELMODE::VSEL_TENSOR_SCALAR_MODE, BUF_SZ);

        inX1.FreeTensor(x1Local);
        outY.EnQue<T>(yLocal);
    }

    /// Drain one result chunk from the output queue back to GM at local offset i.
    __aicore__ inline void CopyOut(int i, int sz)
    {
        LocalTensor<T> yoLocal = outY.DeQue<T>();
        DataCopy(yGm[i], yoLocal, sz);
        outY.FreeTensor(yoLocal);
    }

    /// Walk this core's slice in BUF_SZ chunks; the final (possibly short)
    /// chunk is handled separately. Compute always works on BUF_SZ lanes —
    /// lanes past sz hold garbage but are never copied out.
    __aicore__ inline void Process()
    {
        uint32_t i = 0;
        for (; i + BUF_SZ < R; i += BUF_SZ)
        {
            CopyIn(i, BUF_SZ);
            Compute(i, BUF_SZ);
            CopyOut(i, BUF_SZ);
        }
        if (i < R)
        {
            uint32_t sz = R - i;
            CopyIn(i, sz);
            Compute(i, sz);
            CopyOut(i, sz);
        }
    }

};

// Kernel entry point: element-wise sign over a float32 tensor.
// `workspace` is unused; tiling carries the per-core split (size, length).
extern "C" __global__ __aicore__ void sign_custom(GM_ADDR input, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling)
{
    // Unpack the tiling parameters prepared on the host side.
    GET_TILING_DATA(tiling_data, tiling);

    TPipe sharedPipe;
    SIGNKernalFast<float32_t> kernel;
    kernel.Init(input, out, tiling_data.size, tiling_data.length, &sharedPipe);
    kernel.Process();
}