#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;

// Round x up to the nearest multiple of mod (intended for x >= 0, mod > 0).
__aicore__ inline int32_t pad(int32_t x, int32_t mod){
    const int32_t groups = (x + mod - 1) / mod;
    return groups * mod;
}
// Round x down to a multiple of mod by discarding the remainder.
__aicore__ inline int32_t shorten(int32_t x, int32_t mod){
    const int32_t remainder = x % mod;
    return x - remainder;
}
// Clamp x into [low, high]; the lower bound is applied first, so when
// low > high the result is high (kept identical to the original ordering).
__aicore__ inline int32_t clip(int32_t x, int32_t low, int32_t high){
    const int32_t raised = x < low ? low : x;
    return raised > high ? high : raised;
}
// Clamp x from below to `low`.
__aicore__ inline int32_t clipmin(int32_t x, int32_t low){
    if (x < low) {
        return low;
    }
    return x;
}
// Clamp x from above to `high`.
__aicore__ inline int32_t clipmax(int32_t x, int32_t high){
    // Fixed: the original ended in a stray duplicate ';'.
    return x > high ? high : x;
}



// Kernel producing tril(x): for each height x width matrix in the batch,
// elements on/below the shifted diagonal are copied, the rest are zeroed.
class KernelTril {
public:
    __aicore__ inline KernelTril() {}
    // Bind GM buffers and allocate the double-buffered local queues.
    //   x, y          device addresses of the input / output tensors
    //   height, width matrix dimensions of one batch element
    //   diagonal      tril offset: 0 keeps the main diagonal, negative
    //                 values zero extra leading rows, positive values keep
    //                 extra super-diagonals
    //   batchSize     number of matrices to process
    //   batchStride   elements between consecutive matrices in GM
    //                 (assumed >= height*width — TODO confirm host tiling)
    //   bufferSize    elements per local working tile
    //   blockLen      DataCopy granularity, in elements
    //   totalLen      total element count of the GM tensors
    __aicore__ inline void Init(
        GM_ADDR x, GM_ADDR y, 
        int32_t height, int32_t width, int32_t diagonal,
        int32_t batchSize, int32_t batchStride,
        int32_t bufferSize, int32_t blockLen, int32_t totalLen
    )
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
        this->height = height;
        this->width = width;
        this->diagonal = diagonal;
        this->batchSize = batchSize;
        this->batchStride = batchStride;
        this->bufferSize = bufferSize;
        this->blockLen = blockLen;
        this->totalLen = totalLen;
        xGm.SetGlobalBuffer((__gm__ DTYPE_X *)x, this->totalLen);
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y *)y, this->totalLen);
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->bufferSize * sizeof(DTYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->bufferSize * sizeof(DTYPE_Y));
    }
    // Process every matrix in the batch sequentially.
    __aicore__ inline void Process()
    {
        int32_t pos = 0;
        for (int32_t i = 0; i < batchSize; i++) {
            ProcessBatch(pos);
            pos += batchStride;
        }
    }

private:
    // Emit one tril matrix whose data starts at GM offset batchPos.
    __aicore__ inline void ProcessBatch(int32_t batchPos)
    {
        int32_t pos = batchPos, targetPos = batchPos + batchStride;
        // Rows entirely above the shifted diagonal are fully zeroed.
        // Uses the file's clip() helper instead of a hand-rolled clamp.
        int32_t head_lines = clip(-this->diagonal, 0, this->height);
        if (head_lines > 0) {
            Line(pos, head_lines * this->width, 0);  // left_len = 0: all zeros
            pos += head_lines * this->width;
        }
        for (int32_t i = head_lines; i < this->height; i++) {
            // Number of elements kept (copied) at the left of row i.
            int32_t left_len = clip(1 + i + this->diagonal, 0, this->width);
            // Once a row is fully kept, every later row is too; the tail
            // copy below transfers the remaining [pos, targetPos) verbatim.
            if (left_len == this->width) break;
            Line(pos, this->width, left_len);
            pos += this->width;
        }
        if (pos < targetPos) {
            // Remaining fully-kept rows (and any inter-batch padding):
            // left_len == len means everything is copied, nothing zeroed.
            Line(pos, targetPos - pos, targetPos - pos);
        }
    }
    // Copy `len` elements starting at GM offset `pos`, keeping the first
    // `left_len` and zeroing the rest, tiled by bufferSize.
    __aicore__ inline void Line(int32_t pos, int32_t len, int32_t left_len)
    {
        while (len > this->bufferSize) {
            MiniLine(pos, this->bufferSize, left_len);
            len -= this->bufferSize;
            pos += this->bufferSize;
            left_len -= this->bufferSize;  // may go negative: fully-zero tiles
        }
        MiniLineTail(pos, len, clip(left_len, 0, this->bufferSize));
    }
    // One full CopyIn/Compute/CopyOut pipeline pass over a tile of exactly
    // `len` elements (callers keep len blockLen-aligned and <= bufferSize).
    __aicore__ inline void MiniLine(int32_t pos, int32_t len, int32_t left_len)
    {
        left_len = clip(left_len, 0, len);
        CopyIn(pos, len);
        Compute(len, left_len);
        CopyOut(pos, len);
    }
    // Handle a tail shorter than bufferSize: when `len` is not a multiple
    // of blockLen, issue one aligned blockLen tile at `pos` (which may
    // overwrite up to blockLen-z elements past the tail; the overlapping
    // region is rewritten consistently by the second, aligned pass).
    __aicore__ inline void MiniLineTail(int32_t pos, int32_t len, int32_t left_len)
    {
        int32_t z = len % this->blockLen;
        if (z) {
            MiniLine(pos, this->blockLen, left_len);
        }
        // Fixed: the original issued a zero-length MiniLine (and thus a
        // zero-length DataCopy) whenever len < blockLen.
        if (len - z > 0) {
            MiniLine(pos + z, len - z, left_len - z);
        }
    }

    // Stage a tile of `len` input elements into the VECIN queue.
    __aicore__ inline void CopyIn(int32_t pos, int32_t len)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();
        DataCopy(xLocal, xGm[pos], len);
        inQueueX.EnQue(xLocal);
    }
    // Build the output tile: first keepLen elements copied from x, the
    // remaining len-keepLen elements zero.
    __aicore__ inline void Compute(int32_t len, int32_t keepLen)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        LocalTensor<DTYPE_Y> yLocal = outQueueY.AllocTensor<DTYPE_Y>();
        // Fixed: zero the tile with Duplicate. The original Sub(y, y, y, len)
        // relied on uninitialized data; for float dtypes a lane holding
        // NaN/Inf gives NaN - NaN = NaN instead of 0.
        Duplicate(yLocal, static_cast<DTYPE_Y>(0), len);
        if (keepLen > 0) {
            // y[0..keepLen) = x + 0 — copies the kept (lower-tri) prefix.
            Add(yLocal, xLocal, yLocal, keepLen);
        }
        outQueueY.EnQue<DTYPE_Y>(yLocal);
        inQueueX.FreeTensor(xLocal);
    }
    // Write a finished tile back to GM.
    __aicore__ inline void CopyOut(int32_t pos, int32_t len)
    {
        LocalTensor<DTYPE_Y> yLocal = outQueueY.DeQue<DTYPE_Y>();
        DataCopy(yGm[pos], yLocal, len);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    //create queue for input, in this case depth is equal to buffer num
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    //create queue for output, in this case depth is equal to buffer num
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;
    // Tiling parameters, copied verbatim from the host in Init().
    int32_t height, width, diagonal, batchSize, batchStride, bufferSize, blockLen, totalLen;
};

// Kernel entry point: unpack the host-side tiling data and run KernelTril.
extern "C" __global__ __aicore__ void tril(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(t, tiling);
    KernelTril kernel;
    kernel.Init(x, y,
                t.height, t.width, t.diagonal,
                t.batchSize, t.batchStride,
                t.bufferSize, t.blockLen, t.totalLen);
    kernel.Process();
}
