#include "kernel_operator.h"
using namespace AscendC;

constexpr int32_t BUFFER_NUM = 2;                         // double buffering for the VECIN/VECOUT queues
constexpr int32_t TOTAL_NC = 8 * 2;                       // total number of N*C rows to pad (hard-coded shape)
constexpr int32_t USE_CORE_NUM = 8;                       // number of AI cores the rows are split across
constexpr int32_t NC_PER_CORE = TOTAL_NC / USE_CORE_NUM;  // rows handled by each core
constexpr int32_t W_SIZE = 1024;                          // input width (W) of every row, in elements

template <typename T>
class ReflectionPad1d {
public:
    __aicore__ inline ReflectionPad1d() {}

    /// Read the (left, right) padding amounts from GM, compute this core's
    /// N*C row range, bind the input/output GM buffers and init the UB queues.
    /// @param x        GM address of the input tensor, laid out as [TOTAL_NC, W_SIZE]
    /// @param paddings GM address of two int32 values: {left, right}
    /// @param y        GM address of the output tensor, [TOTAL_NC, W_SIZE + left + right]
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR paddings, GM_ADDR y) {
        paddingsGm.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t*>(paddings), 2);
        left = paddingsGm.GetValue(0);
        right = paddingsGm.GetValue(1);
        outW = W_SIZE + left + right;

        // Compute this core's [startNC, endNC) task range over the N*C rows.
        startNC = GetBlockIdx() * NC_PER_CORE;
        endNC = (startNC + NC_PER_CORE) > static_cast<int32_t>(TOTAL_NC) ? static_cast<int32_t>(TOTAL_NC) : (startNC + NC_PER_CORE);

        // Bind GM buffers, offset to this core's slice of rows.
        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ T*>(x) + startNC * W_SIZE, NC_PER_CORE * W_SIZE);
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ T*>(y) + startNC * outW, NC_PER_CORE * outW);

        // Double-buffered UB queues: one input row in, one padded row out.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, W_SIZE * sizeof(T));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, outW * sizeof(T));
    }

    /// Pad every row assigned to this core: load from GM, reflect-pad in UB, store to GM.
    __aicore__ inline void Process() {
        for (int32_t nc = startNC; nc < endNC; ++nc) {
            int32_t localNcIdx = nc - startNC;
            int32_t inGmOffset = localNcIdx * W_SIZE;
            int32_t outGmOffset = localNcIdx * outW;

            CopyIn(inGmOffset);
            Compute();
            CopyOut(outGmOffset);
        }
    }

private:
    // Stage one full input row (W_SIZE elements) from GM into the input queue.
    __aicore__ inline void CopyIn(int32_t inGmOffset) {
        LocalTensor<T> xLocal = inQueueX.AllocTensor<T>();
        DataCopy(xLocal, xGm[inGmOffset], W_SIZE);
        inQueueX.EnQue(xLocal);
    }

    // Build one padded output row from one input row, element by element.
    __aicore__ inline void Compute() {
        LocalTensor<T> xLocal = inQueueX.DeQue<T>();
        LocalTensor<T> yLocal = outQueueY.AllocTensor<T>();

        // Left padding: y[i] = x[left - i] — mirror around x[0], edge element
        // itself not repeated (reflection pad, not replication pad).
        for (int32_t i = 0; i < left; ++i) {
            int32_t refIdx = left - i;
            // Clamp into [0, W_SIZE - 1]: the original only guarded the (dead)
            // negative side; an oversized `left` (>= W_SIZE, invalid for
            // reflection pad) would have read past the input row in UB.
            refIdx = refIdx < 0 ? 0 : refIdx;
            refIdx = refIdx > (W_SIZE - 1) ? (W_SIZE - 1) : refIdx;
            T val = xLocal.GetValue(static_cast<uint32_t>(refIdx));
            yLocal.SetValue(static_cast<uint32_t>(i), val);
        }

        // Middle: copy the original row unchanged to y[left .. left + W_SIZE).
        for (int32_t i = 0; i < W_SIZE; ++i) {
            T val = xLocal.GetValue(static_cast<uint32_t>(i));
            yLocal.SetValue(static_cast<uint32_t>(left + i), val);
        }

        // Right padding: y[left + W_SIZE + i] = x[W_SIZE - 2 - i] — mirror
        // around the last element, which is not repeated.
        for (int32_t i = 0; i < right; ++i) {
            int32_t refIdx = W_SIZE - 2 - i;
            refIdx = refIdx < 0 ? 0 : refIdx;  // guard against right > W_SIZE - 2
            T val = xLocal.GetValue(static_cast<uint32_t>(refIdx));
            yLocal.SetValue(static_cast<uint32_t>(left + W_SIZE + i), val);
        }

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Write one padded row (outW elements, possibly not 32B-aligned) back to GM.
    __aicore__ inline void CopyOut(int32_t outGmOffset) {
        LocalTensor<T> yLocal = outQueueY.DeQue<T>();
        // blockLen is in BYTES and DataCopyExtParams::blockLen is uint32_t.
        // The original cast it to uint16_t, which silently truncates any
        // padded row wider than 64 KiB (left/right are runtime values).
        DataCopyExtParams copyParams{1, static_cast<uint32_t>(this->outW * sizeof(T)), 0, 0, 0};
        DataCopyPad(yGm[outGmOffset], yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;    // UB queue for input rows
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;  // UB queue for padded rows
    GlobalTensor<T> xGm;                // this core's slice of the input
    GlobalTensor<T> yGm;                // this core's slice of the output
    GlobalTensor<int32_t> paddingsGm;   // {left, right} padding amounts

    int32_t left = 0;      // left padding amount
    int32_t right = 0;     // right padding amount
    int32_t outW = 0;      // output width: W_SIZE + left + right
    int32_t startNC = 0;   // first N*C row index handled by this core
    int32_t endNC = 0;     // one-past-last N*C row index for this core
};

// Kernel entry point: each AI core constructs a float operator instance,
// binds it to the GM addresses, and processes its share of the rows.
extern "C" __global__ __aicore__ void reflection_pad1d(GM_ADDR x, GM_ADDR paddings, GM_ADDR y) {
    ReflectionPad1d<float> padOp;
    padOp.Init(x, paddings, y);
    padOp.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher: dispatches the kernel across `blockDim` cores on the
// given runtime stream. Compiled only in the NPU build — the CPU-debug build
// excludes the <<<>>> launch syntax, which only the device compiler accepts.
void reflection_pad1d_do(uint32_t blockDim, void* stream, uint8_t* x, uint8_t* paddings, uint8_t* y) {
    reflection_pad1d<<<blockDim, nullptr, stream>>>(x, paddings, y);
}
#endif