#include "kernel_operator.h"
// tensor num for each queue
constexpr int32_t BUFFER_NUM = 2;

// Constant-pad kernel: pads a (N, C, H, W) input tensor with a constant value
// on all four H/W borders, producing an (N, C, outputH, outputW) tensor.
// Work is distributed across cores by "vectors", where one vector is one
// output row of outputW elements (rows are the flattened (n, c, h) index).
// Uses a double-buffered VECIN/VECOUT queue pipeline: CopyIn -> Compute -> CopyOut.
template<typename TYPE_X, typename TYPE_Z> class KernelConstantPad {
    using T = TYPE_X;
public:
    __aicore__ inline KernelConstantPad() {}
    // Rounds the element count `a` up so the total byte size is a multiple of
    // 32 bytes, given an element byte size `b`: returns the smallest multiple
    // of (32 / b) elements that is >= a. Assumes b divides 32 (true for all
    // sizeof() values used here); b == 0 returns a unchanged as a guard.
    __aicore__ inline uint32_t AlignUp(uint32_t a, uint32_t b) 
    {
        if (b == 0)
            return a;
        return ((a + 32 / b - 1) / (32 / b)) * (32 / b);
    }

    // Binds global-memory buffers and local queues for this core.
    // x: input tensor base address; z: output tensor base address;
    // tiling_data: per-core work split and pad geometry computed on host.
    // Note: zGm is offset to this core's first output row, so all output
    // copies below use core-local row offsets; xGm keeps the global base.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, const ConstantPadCustomTilingData &tiling_data)
    {
        this->tiling = tiling_data;

        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        this->core_id = AscendC::GetBlockIdx();
        // If this core was assigned no rows, skip buffer setup entirely.
        if (tiling.core_vector_count[this->core_id] <= 0) {
            return ;
        }

        // Offset (in elements) of this core's first output row.
        uint32_t vector_offset = tiling.core_vector_start[this->core_id];
        uint32_t element_offset = vector_offset * tiling.outputW;

        // 32-byte-aligned per-row element counts for local buffers.
        // NOTE(review): align_input_vector_size is computed but never read;
        // the input queue is deliberately sized by the padded (output) width
        // because CopyInValid pads each row to full output width on the fly.
        this->align_input_vector_size = AlignUp(tiling.inputW, sizeof(TYPE_X));
        this->align_output_vector_size = AlignUp(tiling.outputW, sizeof(TYPE_Z));

        xGm.SetGlobalBuffer((__gm__ TYPE_X *)x, (uint32_t)tiling.total_input_elements);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z *)z + element_offset, (uint32_t)tiling.core_vector_count[this->core_id] * tiling.outputW);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->align_output_vector_size * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->align_output_vector_size * sizeof(TYPE_Z));
    }

    // Main loop: processes this core's rows in tiles of tile_vector_num rows,
    // with a possibly-smaller tail tile on the final iteration.
    __aicore__ inline void Process()
    {
        int32_t loopCount = tiling.core_loop_times_vector[this->core_id];
        
        // Iterate over full tiles assigned to this core.
        for (int32_t i = 0; i < loopCount; i++) 
        {
            // The last iteration handles the tail (remainder) tile.
            if (i == loopCount - 1) {
                // Final loop: process the tail row count.
                this->tileNum = tiling.core_tail_vector[this->core_id];
            } else {
                // Regular loop: process a full tile of rows.
                this->tileNum = tiling.tile_vector_num;
            }

            ComputePadRow(i);
        }
    }

    // Decomposes a flat output-row index into (n, c, h) coordinates in the
    // OUTPUT tensor; w spans the full row (dimW) and is handled elsewhere.
    __aicore__ inline void LinearToNCH(int32_t linear_index, int32_t& n, int32_t& c, int32_t& h)
    {
        int32_t rows_per_image = tiling.outputC * tiling.outputH;  // logical rows per image
        int32_t rows_per_channel = tiling.outputH;             // logical rows per channel
        
        // Step 1: batch index n.
        n = linear_index / rows_per_image;

        // Step 2: position within the image.
        int32_t remaining = linear_index % rows_per_image;

        // Step 3: channel index c.
        c = remaining / rows_per_channel;

        // Step 4: height index h.
        h = remaining % rows_per_channel;
    }

    // Recomposes (n, c, h) into a flat row index in the INPUT tensor.
    __aicore__ inline int32_t NCHToLinear(int32_t n, int32_t c, int32_t h)
    {
        return n * (tiling.inputC * tiling.inputH) + c * tiling.inputH + h;
    }

private:
    // Processes one tile: for each output row, either fills it entirely with
    // pad_value (rows inside the top/bottom padding) or loads the matching
    // input row with left/right padding applied, then writes it out.
    // progress: index of the current tile within this core's loop.
    __aicore__ inline void ComputePadRow(int32_t progress) 
    {
        for (int32_t row = 0; row < this->tileNum; row++) {
            // Global output-row index = core start + tiles done + row in tile.
            int32_t row_offest_out = tiling.core_vector_start[this->core_id] + progress * tiling.tile_vector_num + row;
            int32_t n, c, h;

            LinearToNCH(row_offest_out, n, c, h);

            // Rows above pad_top or below pad_top + inputH are pure padding.
            if (h < tiling.pad_top || h >= tiling.pad_top + tiling.inputH) {
                ComputeInValid(row);
            }
            else {
                // Map the output row back to its source row in the input.
                int32_t h_input = h - tiling.pad_top;
                int32_t row_offest_in = NCHToLinear(n, c, h_input);

                CopyInValid(row_offest_in);
                ComputeValid(row);
            }
            
            // Fix: pass the full core-local offset (accounts for progress and row),
            // since zGm's base already points at this core's first output row.
            int32_t local_row_offset = progress * tiling.tile_vector_num + row;
            CopyOut(local_row_offset);
        }
    }

    // Loads one input row (row_offest: flat GLOBAL input-row index) into the
    // VECIN queue, padding pad_left elements on the left and pad_right plus
    // the 32-byte-alignment slack on the right with pad_value, so the local
    // tensor holds exactly align_output_vector_size elements.
    __aicore__ inline void CopyInValid(int32_t row_offest)
    {   
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        
        // One burst of inputW elements (blockLen is in bytes).
        AscendC::DataCopyExtParams copyParams{
            1,
            static_cast<uint32_t>(tiling.inputW * sizeof(TYPE_X)),
            0, 0, 0
        };
        
        uint32_t padLeft = tiling.pad_left;
        uint32_t padRight = tiling.pad_right + this->align_output_vector_size - tiling.outputW;

        // NOTE(review): DataCopyPadExtParams pad counts are uint8_t; this
        // assumes padLeft/padRight <= 255 — confirm host tiling enforces it.
        AscendC::DataCopyPadExtParams<TYPE_X> padParams{
            true, 
            static_cast<uint8_t>(padLeft),
            static_cast<uint8_t>(padRight),
            (TYPE_X)tiling.pad_value
        };
        
        uint32_t row_element_offset = row_offest * tiling.inputW;

        // xGm's base is the global input start, so this offset is global too.
        AscendC::DataCopyPad(xLocal[0], xGm[row_element_offset], copyParams, padParams);
        
        inQueueX.EnQue(xLocal);
    }

    // Moves one fully padded row from the input queue to the output queue.
    // `row` is currently unused; kept for symmetry with ComputeInValid.
    __aicore__ inline void ComputeValid(int32_t row) 
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        // NOTE(review): element-wise copy; assumes TYPE_X and TYPE_Z are the
        // same width (no dtype conversion happens here) — confirm op spec.
        AscendC::DataCopy(zLocal, xLocal, this->align_output_vector_size);

        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Produces a pure-padding row: fills a whole local row with pad_value.
    // `row` is currently unused.
    __aicore__ inline void ComputeInValid(int32_t row) 
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        AscendC::Duplicate(zLocal, (TYPE_Z)tiling.pad_value, this->align_output_vector_size);

        outQueueZ.EnQue<TYPE_Z>(zLocal);
    }

    // Writes one finished row to global memory at the given CORE-LOCAL row
    // offset (zGm's base was shifted to this core's region in Init). Only the
    // real outputW elements are written; the alignment slack stays local.
    __aicore__ inline void CopyOut(int32_t local_row_offset)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();

        // One burst of outputW elements (blockLen is in bytes).
        AscendC::DataCopyExtParams copyParams{
            1,
            static_cast<uint32_t>(tiling.outputW * sizeof(TYPE_Z)),
            0,
            0,
            0
        };
        
        uint32_t local_offset = local_row_offset * tiling.outputW;
        AscendC::DataCopyPad(zGm[local_offset], 
                            zLocal, 
                            copyParams);

        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;                                          // buffer/pipe manager
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;   // input row queue
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ; // output row queue

    AscendC::GlobalTensor<TYPE_X> xGm;   // input in GM (global base)
    AscendC::GlobalTensor<TYPE_Z> zGm;   // output in GM (base shifted to this core's region)

    int32_t core_id;                     // this core's block index
    int32_t align_input_vector_size;     // aligned input row size (unused — see Init)
    int32_t align_output_vector_size;    // aligned output row size, in elements
    uint32_t tileNum;                    // rows processed in the current tile

    ConstantPadCustomTilingData tiling;  // host-computed work split and pad geometry

};
// Kernel entry point: deserializes the host-side tiling data from global
// memory, then constructs and runs the constant-pad kernel on this core.
extern "C" __global__ __aicore__ void constant_pad_custom(GM_ADDR x, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tilingData, tiling);
    KernelConstantPad<DTYPE_X, DTYPE_Z> kernel;
    kernel.Init(x, z, tilingData);
    kernel.Process();
}


