/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 *
 * 对于一个输入的二维张量（例如大小为C×H×W，其中C是通道数，H是高度，W是宽度），ReplicationPad2d会在其边界按照指定的填充量进行重复边缘值的填充。
 */

#include "kernel_operator.h"

/* Tiling data structures are defined externally and fetched via GET_TILING_DATA below. */
// Buffers per queue: 1 = single buffering, so the CopyIn/Compute/CopyOut
// stages of one batch do not overlap with the next batch's stages.
constexpr int32_t BUFFER_NUM = 1;

// Vectorized ReplicationPad2d kernel.
// For a CxHxW input, replicates edge values into the padded border.
// Strategy: precompute once, in Init(), a per-output-element table of source
// byte offsets (input row/col clamped to the valid region — the clamp IS the
// edge replication), then replay it per batch plane with a three-stage
// CopyIn -> Gather -> CopyOut pipeline.
// NOTE(fix): the template parameters were previously declared but ignored —
// the body used the global DTYPE_* macros directly. The class now uses its
// template parameters consistently; the entry point instantiates it with the
// same macros, so generated code is unchanged.
template <typename T_DTYPE_X, typename T_DTYPE_PADDINGS, typename T_DTYPE_Y>
class KernelReplicationPad2d {
public:
    __aicore__ inline KernelReplicationPad2d() {}

    // Bind GM buffers, read the four padding amounts, set up UB queues and
    // build the gather byte-offset table for one batch plane.
    //   inputNum / outputNum    - total element counts over all batches
    //   input_stride            - elements per input batch plane (H * W)
    //   output_stride           - elements per output batch plane
    //   batch_number            - number of batch planes (e.g. N * C)
    //   last_dim                - input width W
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR paddings, GM_ADDR y,
                    int32_t inputNum, int32_t outputNum,  int32_t input_stride, 
                    int32_t output_stride, int32_t batch_number, int32_t last_dim
    )
    {
        this->inputNum = inputNum;
        this->outputNum = outputNum;
        this->input_stride = input_stride;
        this->output_stride = output_stride;
        this->batch_number = batch_number;
        this->last_dim = last_dim;

        // Number of input rows (H) per batch plane.
        int32_t total_lines = input_stride / last_dim;

        // Round both strides up to a 32-byte block boundary — the minimum
        // DataCopy granularity on the vector unit.
        constexpr int32_t elemsPerBlock = static_cast<int32_t>(32 / sizeof(T_DTYPE_X));
        this->pad_input_stride = (input_stride + elemsPerBlock - 1) / elemsPerBlock * elemsPerBlock;
        this->pad_output_stride = (output_stride + elemsPerBlock - 1) / elemsPerBlock * elemsPerBlock;

        // Register GM ranges with the alignment slack included, so the
        // block-aligned DataCopy of the final batch stays inside the buffer.
        xGm.SetGlobalBuffer((__gm__ T_DTYPE_X*)x, inputNum + this->pad_input_stride - this->input_stride);
        paddingsGm.SetGlobalBuffer((__gm__ T_DTYPE_PADDINGS*)paddings, 4);
        yGm.SetGlobalBuffer((__gm__ T_DTYPE_Y*)y, outputNum + this->pad_output_stride - this->output_stride);

        // paddings layout: [left, right, up, down]
        this->pad_l = paddingsGm.GetValue(0);
        this->pad_r = paddingsGm.GetValue(1);
        this->pad_u = paddingsGm.GetValue(2);
        this->pad_d = paddingsGm.GetValue(3);

        // Debug trace of the tiling / padding configuration.
        AscendC::printf("inputNum %d, outputNum %d , input_stride %d , output_stride %d, batch_number %d, pad_input_stride %d, pad_output_stride %d, l %d, r %d, u %d, d %d\n",
        inputNum, outputNum, input_stride, output_stride, batch_number, this->pad_input_stride, this->pad_output_stride, this->pad_l, this->pad_r, this->pad_u, this->pad_d);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->pad_input_stride * sizeof(T_DTYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->pad_output_stride * sizeof(T_DTYPE_Y));
        pipe.InitBuffer(indexBuf, this->pad_output_stride * sizeof(uint32_t));

        this->index = indexBuf.Get<uint32_t>();

        // Zero-fill first: alignment-tail entries beyond output_stride then
        // gather element 0, which always exists (harmless filler).
        AscendC::Duplicate(this->index, uint32_t(0), this->pad_output_stride);

        // For each flat output index, record the byte offset of the input
        // element whose value it replicates.
        int32_t out_width = last_dim + this->pad_l + this->pad_r;
        for (int32_t i = 0; i < output_stride; ++i) {
            // Map the flat output index back to input coordinates, then
            // clamp to the valid region — this clamp performs the
            // edge-value replication.
            int32_t row_number = i / out_width - this->pad_u;
            int32_t col_number = i % out_width - this->pad_l;

            if (row_number < 0) row_number = 0;
            if (row_number > total_lines - 1) row_number = total_lines - 1;

            if (col_number < 0) col_number = 0;
            if (col_number > last_dim - 1) col_number = last_dim - 1;

            uint32_t result = static_cast<uint32_t>(row_number * last_dim + col_number);
            this->index.SetValue(i, static_cast<uint32_t>(result * sizeof(T_DTYPE_X)));
        }
    }

    // Run the CopyIn -> Compute -> CopyOut pipeline once per batch plane.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->batch_number;
        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Stage 1: copy one block-aligned input plane from GM into UB.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<T_DTYPE_X> xLocal = inQueueX.AllocTensor<T_DTYPE_X>();

        AscendC::DataCopy(xLocal, xGm[progress * this->input_stride], this->pad_input_stride);

        inQueueX.EnQue(xLocal);
    }

    // Stage 2: gather input elements through the precomputed offset table,
    // producing one padded output plane.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<T_DTYPE_X> xLocal = inQueueX.DeQue<T_DTYPE_X>();
        AscendC::LocalTensor<T_DTYPE_Y> yLocal = outQueueY.AllocTensor<T_DTYPE_Y>();

        // index holds byte offsets into xLocal; base address is 0.
        Gather(yLocal, xLocal, this->index, 0, this->pad_output_stride);

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Stage 3: copy the padded plane back to GM. Successive batches overlap
    // by the alignment tail; because batches are written in order, the next
    // batch overwrites the tail with correct data, and the registered GM
    // range carries extra slack for the final batch.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<T_DTYPE_Y> yLocal = outQueueY.DeQue<T_DTYPE_Y>();

        AscendC::DataCopy(yGm[progress * this->output_stride], yLocal, this->pad_output_stride);

        outQueueY.FreeTensor(yLocal);
    }

private:
    // Pipe memory manager (owns all UB allocations below).
    AscendC::TPipe pipe;

    // Input queue (VECIN = vector-input position).
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;

    // Storage for the gather offset table.
    AscendC::TBuf<AscendC::QuePosition::VECCALC> indexBuf;

    // Per-output-element source byte offsets (built once in Init).
    AscendC::LocalTensor<uint32_t> index;

    // Output queue (VECOUT = vector-output position).
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY;

    // Global-memory tensors.
    AscendC::GlobalTensor<T_DTYPE_X> xGm;               // input data
    AscendC::GlobalTensor<T_DTYPE_PADDINGS> paddingsGm; // padding amounts [l, r, u, d]
    AscendC::GlobalTensor<T_DTYPE_Y> yGm;               // output data

    // Shape / tiling parameters.
    int32_t inputNum, outputNum, input_stride, output_stride, batch_number, last_dim;
    int32_t pad_input_stride, pad_output_stride;  // 32-byte-aligned strides
    int32_t pad_l, pad_r, pad_u, pad_d;           // padding: left / right / up / down

};



// Scalar fallback ReplicationPad2d kernel ("trivial" path).
// Performs the whole padding element-by-element with scalar GM reads and
// writes inside Init(); there is no pipelined Process() stage. Used when the
// tiling selects mode != 0 (presumably when the plane does not fit the
// vectorized path's UB budget — decided on the host side).
// NOTE(fix): previously the body mixed the global DTYPE_* macros with a
// single stray use of the template parameter T_DTYPE_X; the class now uses
// its template parameters consistently. The entry point instantiates it with
// the same macros, so generated code is unchanged.
template <typename T_DTYPE_X, typename T_DTYPE_PADDINGS, typename T_DTYPE_Y>
class KernelReplicationPad2d_trival {
public:
    __aicore__ inline KernelReplicationPad2d_trival() {}

    // Bind GM buffers, read the four padding amounts, then compute the whole
    // padded output directly. Parameters mirror KernelReplicationPad2d::Init:
    //   inputNum / outputNum    - total element counts over all batches
    //   input_stride            - elements per input batch plane (H * W)
    //   output_stride           - elements per output batch plane
    //   batch_number            - number of batch planes (e.g. N * C)
    //   last_dim                - input width W
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR paddings, GM_ADDR y,
                    int32_t inputNum, int32_t outputNum,  int32_t input_stride, 
                    int32_t output_stride, int32_t batch_number, int32_t last_dim
    )
    {
        this->inputNum = inputNum;
        this->outputNum = outputNum;
        this->input_stride = input_stride;
        this->output_stride = output_stride;
        this->batch_number = batch_number;
        this->last_dim = last_dim;

        // Number of input rows (H) per batch plane.
        int32_t total_lines = input_stride / last_dim;

        // 32-byte-aligned strides, kept only for buffer registration and the
        // debug trace (the scalar path itself needs no alignment).
        constexpr int32_t elemsPerBlock = static_cast<int32_t>(32 / sizeof(T_DTYPE_X));
        this->pad_input_stride = (input_stride + elemsPerBlock - 1) / elemsPerBlock * elemsPerBlock;
        this->pad_output_stride = (output_stride + elemsPerBlock - 1) / elemsPerBlock * elemsPerBlock;

        xGm.SetGlobalBuffer((__gm__ T_DTYPE_X*)x, inputNum + this->pad_input_stride - this->input_stride);
        paddingsGm.SetGlobalBuffer((__gm__ T_DTYPE_PADDINGS*)paddings, 4);
        yGm.SetGlobalBuffer((__gm__ T_DTYPE_Y*)y, outputNum + this->pad_output_stride - this->output_stride);

        // paddings layout: [left, right, up, down]
        this->pad_l = paddingsGm.GetValue(0);
        this->pad_r = paddingsGm.GetValue(1);
        this->pad_u = paddingsGm.GetValue(2);
        this->pad_d = paddingsGm.GetValue(3);

        // Debug trace of the tiling / padding configuration.
        AscendC::printf("inputNum %d, outputNum %d , input_stride %d , output_stride %d, batch_number %d, pad_input_stride %d, pad_output_stride %d, l %d, r %d, u %d, d %d\n",
        inputNum, outputNum, input_stride, output_stride, batch_number, this->pad_input_stride, this->pad_output_stride, this->pad_l, this->pad_r, this->pad_u, this->pad_d);

        // Width of one output row (loop-invariant, hoisted out of the loops).
        int32_t out_width = last_dim + this->pad_l + this->pad_r;

        for (int32_t batch = 0; batch < batch_number; batch++)
        {
            for (int32_t i = 0; i < output_stride; ++i)
            {
                // Map the flat output index back to input coordinates, then
                // clamp to the valid region — the clamp performs the
                // edge-value replication.
                int32_t row_number = i / out_width - this->pad_u;
                int32_t col_number = i % out_width - this->pad_l;

                if (row_number < 0) row_number = 0;
                if (row_number > total_lines - 1) row_number = total_lines - 1;

                if (col_number < 0) col_number = 0;
                if (col_number > last_dim - 1) col_number = last_dim - 1;

                int32_t result = row_number * last_dim + col_number;

                // Direct GM scalar copy: slow, but works for any shape.
                int32_t index_input = batch * input_stride + result;
                int32_t output_index = batch * output_stride + i;

                T_DTYPE_X input_value = xGm.GetValue(index_input);
                yGm.SetValue(output_index, input_value);
            }
        }

    }

private:

    // Global-memory tensors.
    AscendC::GlobalTensor<T_DTYPE_X> xGm;               // input data
    AscendC::GlobalTensor<T_DTYPE_PADDINGS> paddingsGm; // padding amounts [l, r, u, d]
    AscendC::GlobalTensor<T_DTYPE_Y> yGm;               // output data

    // Shape / tiling parameters.
    int32_t inputNum, outputNum, input_stride, output_stride, batch_number, last_dim;
    int32_t pad_input_stride, pad_output_stride;  // 32-byte-aligned strides
    int32_t pad_l, pad_r, pad_u, pad_d;           // padding: left / right / up / down

};

// Operator entry point: reads the tiling data and dispatches to either the
// vectorized gather-based kernel (mode == 0) or the scalar fallback.
extern "C" __global__ __aicore__ void replication_pad2d(
    GM_ADDR x,          // input tensor
    GM_ADDR paddings,   // padding amounts [left, right, up, down]
    GM_ADDR y,          // output tensor
    GM_ADDR workspace,  // scratch workspace (unused here)
    GM_ADDR tiling      // tiling parameters prepared on the host
) {
    // Fetch host-side tiling parameters.
    GET_TILING_DATA(tiling_data, tiling);

    if (tiling_data.mode == 0) {
        // Vectorized path: three-stage CopyIn -> Gather -> CopyOut pipeline.
        KernelReplicationPad2d<DTYPE_X, DTYPE_PADDINGS, DTYPE_Y> op;
        op.Init(x, paddings, y,
                tiling_data.inputNum, tiling_data.outputNum,
                tiling_data.input_stride, tiling_data.output_stride,
                tiling_data.batch_number, tiling_data.last_dim);
        op.Process();
    } else {
        // Scalar fallback: the entire computation runs inside Init(),
        // so there is no Process() call on this path.
        KernelReplicationPad2d_trival<DTYPE_X, DTYPE_PADDINGS, DTYPE_Y> op;
        op.Init(x, paddings, y,
                tiling_data.inputNum, tiling_data.outputNum,
                tiling_data.input_stride, tiling_data.output_stride,
                tiling_data.batch_number, tiling_data.last_dim);
    }
}