#include "kernel_operator.h"
#pragma GCC optimize("O3")
#pragma GCC optimize("unroll-loops")

using namespace AscendC;


#define BUFFER_NUM 2
template <typename T>
class CBRTKernalFast
{
    // Element-wise cube root kernel: for each input element A, computes
    // cbrt(A) by Newton-Raphson iteration on f(z) = z^3 - A, pipelined
    // with double-buffered GM <-> UB copies (CopyIn / Compute / CopyOut).

private:
    TQue<QuePosition::VECIN, 2> inX1;            // input staging queue (double buffered)
    TQue<QuePosition::VECOUT, 2> outY;           // output staging queue (double buffered)
    TBuf<QuePosition::VECCALC> tmp2buf, tmp3buf; // scratch: y^2 term and y^3 term
    GlobalTensor<T> x1Gm, yGm;                   // GM views rebased to this core's slice

    uint32_t L, R;                               // slice-relative range [L, R); L is always 0
    TPipe *pipe;                                 // caller-owned pipe, not freed here

#define BUF_SZ 64                                // elements per tile

public:
    __aicore__ inline CBRTKernalFast() {}

    /// Bind GM buffers for this core's slice and allocate queues/scratch.
    /// @param input  GM address of the input tensor
    /// @param out    GM address of the output tensor
    /// @param size   total element count across all cores
    /// @param length element count assigned to each core
    /// @param PIPE   caller-owned TPipe used for InitBuffer
    __aicore__ inline void Init(GM_ADDR input, GM_ADDR out, uint32_t size, uint32_t length, TPipe *PIPE)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->pipe = PIPE;

        // This core's [start, end) slice of the flat input; the last core's
        // slice is clipped when size is not a multiple of length.
        // (Named start/end so they do not shadow the members L/R.)
        const uint32_t start = GetBlockIdx() * length;
        uint32_t end = start + length;
        if (end > size)
            end = size;

        // Rebase the GM views so all later offsets are slice-relative.
        x1Gm.SetGlobalBuffer((__gm__ T *)input + start);
        yGm.SetGlobalBuffer((__gm__ T *)out + start);

        this->R = end - start; // element count for this core
        this->L = 0;           // offsets below are relative to the rebased views

        pipe->InitBuffer(inX1, BUFFER_NUM, BUF_SZ * sizeof(T));
        pipe->InitBuffer(outY, BUFFER_NUM, BUF_SZ * sizeof(T));
        pipe->InitBuffer(tmp2buf, BUF_SZ * sizeof(T));
        pipe->InitBuffer(tmp3buf, BUF_SZ * sizeof(T));
    }

    /// Stage sz elements at slice offset i from GM into the input queue.
    /// NOTE(review): DataCopy typically requires 32-byte-aligned counts; the
    /// tail tile (sz < BUF_SZ) may violate this — confirm the host tiling
    /// guarantees the total size is suitably padded/aligned.
    __aicore__ inline void CopyIn(int i, int sz)
    {
        LocalTensor<T> src0Local = inX1.AllocTensor<T>();
        DataCopy(src0Local, x1Gm[i], sz);
        inX1.EnQue(src0Local);
    }

    /// Newton-Raphson cube root on one tile:
    ///   z_{k+1} = z_k - (z_k^3 - A) / (3 z_k^2)
    /// starting from z_0 = A itself.
    /// NOTE(review): A == 0 yields 0/0 (NaN) since the update divides by
    /// 3*z^2 with no guard — confirm whether zero inputs can occur.
    __aicore__ inline void Compute(int i, int sz)
    {
        LocalTensor<T> x1Local = inX1.DeQue<T>();
        LocalTensor<T> yLocal = outY.AllocTensor<T>();
        LocalTensor<T> tmp2 = tmp2buf.Get<T>();
        LocalTensor<T> tmp3 = tmp3buf.Get<T>();

        DataCopy(yLocal, x1Local, sz); // initial guess z_0 = A

        // Fixed iteration count; no early-exit convergence test is available
        // per-lane, so we run enough steps for the full float range.
        // (Renamed from `i`, which shadowed the method parameter.)
        for (int iter = 0; iter < 128; iter++)
        {
            Mul(tmp2, yLocal, yLocal, sz);  // tmp2 = y^2
            Mul(tmp3, yLocal, tmp2, sz);    // tmp3 = y^3
            Muls(tmp2, tmp2, (T)3, sz);     // tmp2 = 3 * y^2
            Sub(tmp3, tmp3, x1Local, sz);   // tmp3 = y^3 - A
            Div(tmp3, tmp3, tmp2, sz);      // tmp3 = (y^3 - A) / (3 * y^2)
            Sub(yLocal, yLocal, tmp3, sz);  // y   -= (y^3 - A) / (3 * y^2)
        }

        // Disabled sign-handling path kept for reference: it would clamp the
        // result via compare/select instead of relying on Newton convergence.
        //CompareScalar(tmpLocal, x1Local, (T)0, CMPMODE::GT, BUF_SZ);
        //Select(yLocal, tmpLocal, yLocal, (T)-1, SELMODE::VSEL_TENSOR_SCALAR_MODE,BUF_SZ);
        //CompareScalar(tmpLocal, x1Local, (T)0, CMPMODE::LT, BUF_SZ);
        //Select(yLocal, tmpLocal, yLocal, (T)1, SELMODE::VSEL_TENSOR_SCALAR_MODE, BUF_SZ);

        inX1.FreeTensor(x1Local);
        outY.EnQue<T>(yLocal);
    }

    /// Drain one computed tile from the output queue back to GM at offset i.
    __aicore__ inline void CopyOut(int i, int sz)
    {
        LocalTensor<T> yoLocal = outY.DeQue<T>();
        DataCopy(yGm[i], yoLocal, sz);
        outY.FreeTensor(yoLocal);
    }

    /// Walk this core's R elements in BUF_SZ tiles; the final tile (which
    /// handles any remainder, or the last full tile when R is an exact
    /// multiple of BUF_SZ) goes through the tail path below.
    __aicore__ inline void Process()
    {
        uint32_t i = 0;
        for (; i + BUF_SZ < R; i += BUF_SZ)
        {
            CopyIn(i, BUF_SZ);
            Compute(i, BUF_SZ);
            CopyOut(i, BUF_SZ);
        }
        if (i < R)
        {
            uint32_t sz = R - i;
            CopyIn(i, sz);
            Compute(i, sz);
            CopyOut(i, sz);
        }
    }
};

extern "C" __global__ __aicore__ void cbrt_custom(GM_ADDR input, GM_ADDR out, GM_ADDR workspace, GM_ADDR tiling)
{
    // Unpack the host-provided tiling parameters, then run the cube-root
    // kernel over this core's slice of the input tensor.
    GET_TILING_DATA(td, tiling);
    TPipe tpipe;
    CBRTKernalFast<float32_t> kernel;
    kernel.Init(input, out, td.size, td.length, &tpipe);
    kernel.Process();
}