#include "constantpad_custom_tiling.h"
#include "kernel_operator.h"

constexpr int32_t BUFFER_NUM = 2; // tensors per queue (double buffering for copy/compute overlap)

// Constant-pad kernel: pads a (N, C, H, W) float tensor with pad_value on the
// left/right/top/bottom of the spatial dims. Each AI core handles one N-slice;
// rows are streamed one at a time through a double-buffered VECIN/VECOUT pipe.
// NOTE(review): the per-iteration offsets below treat `progress` as a single
// output row index, which assumes TILE_ROWS_NUM == 1 — confirm against tiling.
class KernelConstantPad {
public:
    __aicore__ inline KernelConstantPad() {}

    // Round `a` elements (of size `b` bytes each) up to a 32-byte UB block
    // boundary, returned in elements. `b` of 0 or > 32 would make the
    // elements-per-block divisor zero, so such inputs pass through unchanged.
    __aicore__ inline uint32_t AlignUp(uint32_t a, uint32_t b)
    {
        if (b == 0 || b > 32)
            return a;
        uint32_t elemsPerBlock = 32 / b;
        return ((a + elemsPerBlock - 1) / elemsPerBlock) * elemsPerBlock;
    }

    // Bind global buffers for this core's slice, derive the input shape from
    // the output shape minus padding, and size the UB queues.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, ConstantPadCustomTilingData tiling)
    {
        this->pad_left = tiling.pad_left;
        this->pad_right = tiling.pad_right;
        this->pad_bottom = tiling.pad_bottom;
        this->pad_top = tiling.pad_top;
        this->pad_value = tiling.pad_value;

        this->outputN = tiling.outputN;
        this->outputC = tiling.outputC;
        this->outputH = tiling.outputH;
        this->outputW = tiling.outputW;

        this->totalRows = tiling.totalRows;
        this->tileNum = tiling.tileNum;

        // Input shape = output shape minus the padding on each spatial dim.
        this->inputN = this->outputN;
        this->inputC = this->outputC;
        this->inputH = this->outputH - this->pad_top - this->pad_bottom;
        this->inputW = this->outputW - this->pad_left - this->pad_right;

        // Input: each core owns one contiguous C*H*W slice.
        int64_t input_slice_size = this->inputC * this->inputH * this->inputW;
        xGm.SetGlobalBuffer((__gm__ float *)x + input_slice_size * AscendC::GetBlockIdx(), input_slice_size);

        // Output: each core owns the matching padded C*H*W slice.
        int64_t output_slice_size = this->outputC * this->outputH * this->outputW;
        zGm.SetGlobalBuffer((__gm__ float *)z + output_slice_size * AscendC::GetBlockIdx(), output_slice_size);

        // UB tile lengths, rounded up to 32-byte block alignment.
        this->input_tile_length = this->inputW;
        this->align_input_tile_length = AlignUp((uint32_t)this->input_tile_length, sizeof(float));

        this->BLOCK_ROWS_NUM = this->totalRows / AscendC::GetBlockNum();
        this->TILE_ROWS_NUM = this->BLOCK_ROWS_NUM / this->tileNum / BUFFER_NUM;

        this->output_tile_length = this->TILE_ROWS_NUM * this->outputW;
        this->align_output_tile_length = AlignUp((uint32_t)this->output_tile_length, sizeof(float));

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->align_input_tile_length * sizeof(float));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->align_output_tile_length * sizeof(float));
    }

    // Drive the CopyIn -> Compute -> CopyOut pipeline, one tile per iteration.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum * BUFFER_NUM;
        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Load one input row into UB, with DataCopyPad filling left/right padding.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        AscendC::DataCopyExtParams copyParams{
            1,                                                    // blockCount
            static_cast<uint32_t>(this->inputW * sizeof(float)),  // blockLen: one input row
            0, 0, 0
        };

        // NOTE(review): leftPadding/rightPadding are uint8_t — pad widths
        // over 255 would silently truncate; validate in the tiling function.
        AscendC::DataCopyPadExtParams<float> padParams{
            true,
            static_cast<uint8_t>(this->pad_left),
            static_cast<uint8_t>(this->pad_right),
            this->pad_value
        };

        // Map the output row index to an input row. For top/bottom padding
        // rows this would fall outside [0, inputH), producing an out-of-bounds
        // GM read; clamp to a valid row — Compute discards the data for
        // padding rows and fills pad_value instead.
        int32_t rows_offset = progress - this->pad_top;
        if (rows_offset < 0) {
            rows_offset = 0;
        } else if (rows_offset >= this->inputH) {
            rows_offset = this->inputH - 1;
        }
        AscendC::DataCopyPad(xLocal, xGm[rows_offset * this->inputW], copyParams, padParams);

        inQueueX.EnQue(xLocal);
    }

    // Produce one output row: pad_value for top/bottom padding rows,
    // otherwise the (already left/right-padded) input row from CopyIn.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        AscendC::LocalTensor<float> zLocal = outQueueZ.AllocTensor<float>();

        int32_t valid_start_idx = this->pad_top;
        int32_t valid_end_idx = this->outputH - this->pad_bottom - 1;

        if (progress < valid_start_idx || progress > valid_end_idx) {
            // Entirely a padding row: fill with the constant.
            AscendC::Duplicate(zLocal, this->pad_value, this->align_output_tile_length);
        } else {
            // Interior row: DataCopyPad in CopyIn already inserted the
            // left/right padding, so a straight UB->UB copy suffices.
            AscendC::DataCopy(zLocal, xLocal, this->align_output_tile_length);
        }

        outQueueZ.EnQue<float>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Store one output row back to GM; DataCopyPad writes only the valid
    // outputW elements, ignoring the UB alignment tail.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<float> zLocal = outQueueZ.DeQue<float>();

        AscendC::DataCopyExtParams copyParams{
            1,                                                     // blockCount
            static_cast<uint32_t>(this->outputW * sizeof(float)),  // blockLen: valid data only
            0,                                                     // srcStride
            0,                                                     // dstStride
            0                                                      // rsv
        };

        AscendC::DataCopyPad(zGm[progress * this->outputW],
                            zLocal,
                            copyParams);
        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueZ;

    AscendC::GlobalTensor<float> xGm;
    AscendC::GlobalTensor<float> zGm;

    uint32_t totalRows;   // total output rows across all cores
    uint32_t tileNum;     // tiles per core (per buffer)

    // Padded (output) tensor shape.
    int32_t outputN;
    int32_t outputC;
    int32_t outputH;
    int32_t outputW;

    // Pad widths per side and the constant fill value.
    int32_t pad_left;
    int32_t pad_right;
    int32_t pad_top;
    int32_t pad_bottom;
    float pad_value;

    // Unpadded (input) tensor shape, derived in Init.
    int32_t inputN;
    int32_t inputC;
    int32_t inputH;
    int32_t inputW;

    int32_t input_tile_length;          // elements per input row
    int32_t output_tile_length;         // elements per output tile
    uint32_t align_input_tile_length;   // input row, 32-byte aligned
    uint32_t align_output_tile_length;  // output tile, 32-byte aligned

    int32_t TILE_ROWS_NUM;   // rows per tile (expected 1; see class note)
    int32_t BLOCK_ROWS_NUM;  // rows per core

};

// Kernel entry point: construct the pad operator, bind the global buffers
// and tiling data, then run the full copy/compute/copy pipeline.
extern "C" __global__ __aicore__ void constantpad_custom(GM_ADDR x, GM_ADDR z, ConstantPadCustomTilingData tiling)
{
    KernelConstantPad kernel;
    kernel.Init(x, z, tiling);
    kernel.Process();
}
