#include "kernel_operator.h"
#include <cmath>

using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;      // Ascend double-buffering depth

// Scalar ReplicationPad2d kernel: every output element copies the nearest
// (edge-replicated) input element directly between global-memory buffers.
class KernelReplicationPad2d {
public:
    __aicore__ inline KernelReplicationPad2d() {}

    // Sets up global-memory views and normalizes the problem description to a
    // canonical 4-D (N, C, H, W) shape plus 4 padding values.
    //
    // x               : input tensor in global memory
    // paddings        : padding values in global memory,
    //                   (left, right) or (left, right, top, bottom)
    // y               : output tensor in global memory
    // size            : total element count reported by tiling
    // x_ndarray       : per-dimension element counts of x
    // x_dimensional   : rank of x
    // dim_dimensional : number of padding values (only 2 and 4 are handled)
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR paddings, GM_ADDR y,int32_t size, int32_t x_ndarray[], int32_t x_dimensional, int32_t dim_dimensional)
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->x_dimensional = x_dimensional;      // rank of x
        this->dim_dimensional = dim_dimensional;  // rank of paddings
        printf("x_dimensional is %d", this->x_dimensional);
        printf("dim_dimensional is %d", this->dim_dimensional);

        // Normalize the shape of x to exactly 4 dimensions: keep the trailing
        // 4 dims when rank >= 4, otherwise prepend size-1 dims.
        if(this->x_dimensional >= 4)
        {
            // BUGFIX: iterate exactly 4 times. The previous version iterated
            // x_dimensional times, which read x_ndarray[] out of bounds and
            // wrote past this->x_ndarray[3] whenever x_dimensional > 4.
            for(int i = 0; i < 4; i++)
            {
                this->x_ndarray[i] = x_ndarray[i + this->x_dimensional - 4];  // element count of each kept dim
                printf("this->x_ndarray %d is %d\n", i, this->x_ndarray[i]);
            }
        }
        else
        {
            // Fill missing leading dimensions with 1.
            for(int i = 0; i < 4 - this->x_dimensional; i++)
                this->x_ndarray[i] = 1;

            for(int i = 4 - this->x_dimensional; i < 4; i++)
            {
                this->x_ndarray[i] = x_ndarray[i - 4 + this->x_dimensional];  // element count of each real dim
                printf("this->x_ndarray %d is %d\n", i, this->x_ndarray[i]);
            }
        }

        this->size = size;  // total element count
        printf("size is %d\n", this->size);

        // Padding order: (padding_left, padding_right, padding_top, padding_bottom).
        if(this->dim_dimensional == 4)
        {
            pGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_PADDINGS *>(paddings), 4);
            for(int i = 0; i < 4; i++)
            {
                this->dim[i] = pGm.GetValue(i);
                printf("this->dim %d is %d\n", i, this->dim[i]);
            }
        }
        else if(this->dim_dimensional == 2)
        {
            // 2-value form pads the width only; height padding defaults to 0.
            pGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_PADDINGS *>(paddings), 2);
            for(int i = 0; i < 2; i++)
            {
                this->dim[i] = pGm.GetValue(i);
                printf("this->dim %d is %d\n", i, this->dim[i]);
            }
            this->dim[2] = 0;
            this->dim[3] = 0;
        }
        else
        {
            // Unsupported padding rank: fall back to zero padding rather than
            // letting Process() read an uninitialized dim[] array.
            for(int i = 0; i < 4; i++)
                this->dim[i] = 0;
        }

        // NOTE(review): both buffers are registered with `size` elements, yet
        // the padded output holds MORE elements than the input whenever any
        // padding is non-zero — confirm `size` is the OUTPUT element count.
        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X *>(x), this->size);
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), this->size);
    }

    // Writes the padded output. Replication padding is equivalent to clamping
    // the source coordinates into the valid input range, which collapses the
    // former 9-way interior/edge/corner branch chain into two clamps.
    __aicore__ inline void Process()
    {
        // Unpack padding values and the normalized (N, C, H, W) shape.
        int pad_left = this->dim[0];
        int pad_right = this->dim[1];
        int pad_top = this->dim[2];
        int pad_bottom = this->dim[3];
        int batch_size = this->x_ndarray[0];
        int channels = this->x_ndarray[1];
        int input_height = this->x_ndarray[2];
        int input_width = this->x_ndarray[3];
        printf("pad_top is %d",pad_top);
        printf("pad_bottom is %d",pad_bottom);
        printf("pad_left is %d",pad_left);
        printf("pad_right is %d",pad_right);
        printf("batch_size is %d",batch_size);
        printf("channels is %d",channels);
        printf("input_height is %d",input_height);
        printf("input_width is %d",input_width);

        // Output spatial extents.
        int output_height = input_height + pad_top + pad_bottom;
        int output_width = input_width + pad_left + pad_right;

        // Iterate every (batch, channel) plane.
        for (int b = 0; b < batch_size; b++) {
            for (int c = 0; c < channels; c++) {
                // Flat offsets of this plane in the input and output tensors
                // (hoisted out of the inner loops).
                int in_base = (b * channels + c) * input_height * input_width;
                int out_base = (b * channels + c) * output_height * output_width;

                for (int i = 0; i < output_height; i++) {
                    // Clamp the source row to [0, input_height - 1]; this
                    // covers the interior plus the replicated top/bottom rows.
                    int src_i = i - pad_top;
                    if (src_i < 0) src_i = 0;
                    if (src_i > input_height - 1) src_i = input_height - 1;

                    for (int j = 0; j < output_width; j++) {
                        // Clamp the source column to [0, input_width - 1].
                        int src_j = j - pad_left;
                        if (src_j < 0) src_j = 0;
                        if (src_j > input_width - 1) src_j = input_width - 1;

                        yGm.SetValue(out_base + i * output_width + j,
                                     xGm.GetValue(in_base + src_i * input_width + src_j));
                    }
                }
            }
        }
    }

private:
    // Pipe/queue members are currently unused by the scalar path above but are
    // kept for a future vectorized (double-buffered) pipeline.
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> QueueBuff;

    GlobalTensor<DTYPE_X> xGm;         // input tensor view
    GlobalTensor<DTYPE_PADDINGS> pGm;  // paddings view
    GlobalTensor<DTYPE_Y> yGm;         // output tensor view

    int32_t size;            // total element count from tiling
    int32_t x_ndarray[20];   // normalized shape; first 4 slots hold (N, C, H, W)
    int32_t x_dimensional;   // original rank of x
    int32_t dim[4];          // (left, right, top, bottom) padding
    int32_t dim_dimensional; // number of padding values supplied
};

// Kernel entry point: unpacks tiling data, then runs init + the padding pass.
extern "C" __global__ __aicore__ void replication_pad2d(GM_ADDR x, GM_ADDR paddings, GM_ADDR y,  GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    KernelReplicationPad2d padKernel;
    padKernel.Init(x, paddings, y, tiling_data.size, tiling_data.x_ndarray,
                   tiling_data.x_dimensional, tiling_data.dim_dimensional);
    padKernel.Process();
}

