#include "kernel_operator.h"

// Input tensor dimensions: [N, C, H, W].
constexpr int32_t N = 1;
constexpr int32_t C = 1;
constexpr int32_t H = 9;
constexpr int32_t W = 257;

// Sliding-window (im2col) parameters: kernel size, stride, zero padding.
constexpr int32_t kH = 2;
constexpr int32_t kW = 2;
constexpr int32_t stride = 1;
constexpr int32_t padding = 0;

// Output spatial extent of the sliding window.
constexpr int32_t out_H = (H + 2 * padding - kH) / stride + 1;
constexpr int32_t out_W = (W + 2 * padding - kW) / stride + 1;
constexpr int32_t L = out_H * out_W;              // window positions per image
constexpr int32_t output_channels = C * kH * kW;  // rows of the im2col matrix (1 * 2 * 2)

constexpr int32_t INPUT_LENGTH = N * C * H * W;
constexpr int32_t OUTPUT_LENGTH = N * output_channels * L;
constexpr int32_t USE_CORE_NUM = 8;                              // AI cores sharing the output
constexpr int32_t BLOCK_LENGTH = OUTPUT_LENGTH / USE_CORE_NUM;   // elements per core
constexpr int32_t TILE_NUM = 8;
constexpr int32_t BUFFER_NUM = 2;                                // double buffering
constexpr int32_t TILE_LENGTH = BLOCK_LENGTH / TILE_NUM / BUFFER_NUM;

// The integer divisions above truncate silently; if the shape or tiling
// constants are ever changed so the output no longer splits evenly, the
// kernel would drop trailing elements.  Fail the build instead.
static_assert(OUTPUT_LENGTH % USE_CORE_NUM == 0,
              "output length must divide evenly across cores");
static_assert(BLOCK_LENGTH % (TILE_NUM * BUFFER_NUM) == 0,
              "per-core block must divide evenly into tiles");

class KernelIm2Col {
public:
    __aicore__ inline KernelIm2Col() {}

    // Bind GM buffers and derive the runtime im2col geometry.
    //   x: input tensor [N, C, H, W] in global memory.
    //   z: output im2col matrix [N, C*kernel_h*kernel_w, L] in global memory.
    // Each core owns a contiguous BLOCK_LENGTH slice of z (selected by its
    // block index); the full input stays visible because one output tile may
    // gather from anywhere in x.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, 
                               int32_t kernel_h, int32_t kernel_w, 
                               int32_t stride_val, int32_t padding_val)
    {
        this->kernel_h = kernel_h;
        this->kernel_w = kernel_w;
        this->stride_val = stride_val;
        this->padding_val = padding_val;
        
        // Output spatial extent for the *runtime* parameters.  The output
        // width is kept as a member so that CalculateInputIndex uses the
        // same geometry; previously it read the file-level constexpr out_W,
        // which is only correct when the runtime kernel/stride/padding
        // happen to equal the compile-time defaults.
        int32_t oh = (H + 2 * this->padding_val - this->kernel_h) / this->stride_val + 1;
        int32_t ow = (W + 2 * this->padding_val - this->kernel_w) / this->stride_val + 1;
        this->out_w = ow;
        this->L = oh * ow;
        this->output_channels = C * this->kernel_h * this->kernel_w;

        // Input covers the whole tensor; output is offset to this core's slice.
        xGm.SetGlobalBuffer((__gm__ float *)x, INPUT_LENGTH);
        zGm.SetGlobalBuffer((__gm__ float *)z + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
        
        // Only an output queue is needed: input elements are gathered
        // directly from GM with scalar reads in Compute().
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_LENGTH * sizeof(float));
    }
    
    // Iterate over this core's tiles: build each tile in local memory,
    // then copy it out to GM.
    __aicore__ inline void Process()
    {
        int32_t loopCount = TILE_NUM * BUFFER_NUM;
        for (int32_t i = 0; i < loopCount; i++) {
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Fill one TILE_LENGTH output tile by gathering the corresponding
    // input elements (or zero for padding / out-of-range positions).
    // NOTE: per-element GetValue/SetValue is a scalar gather — simple but
    // slow compared to block DataCopy; acceptable for this shape.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<float> zLocal = outQueueZ.AllocTensor<float>();

        // Absolute start of this tile in the flattened output matrix.
        int32_t outputStart = AscendC::GetBlockIdx() * BLOCK_LENGTH + progress * TILE_LENGTH;
        
        for (int32_t i = 0; i < TILE_LENGTH; i++) {
            int32_t outputIdx = outputStart + i;
            
            // Past the end of the output: zero-fill the remainder.
            if (outputIdx >= OUTPUT_LENGTH) {
                zLocal.SetValue(i, 0.0f);
                continue;
            }
            
            // Map the output index to its source element in the input.
            int32_t inputIdx = CalculateInputIndex(outputIdx);
            
            if (inputIdx >= 0 && inputIdx < INPUT_LENGTH) {
                // In-bounds: read straight from GM.
                float value = xGm.GetValue(inputIdx);
                zLocal.SetValue(i, value);
            } else {
                // Padding region maps to zero.
                zLocal.SetValue(i, 0.0f);
            }
        }
        
        outQueueZ.EnQue<float>(zLocal);
    }

    // Move one finished tile from local memory to this core's GM slice.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<float> zLocal = outQueueZ.DeQue<float>();
        
        int32_t outputOffset = progress * TILE_LENGTH;
        int32_t copyLength = TILE_LENGTH;
        
        // Clamp the final (possibly partial) tile to the block boundary.
        int32_t remaining = BLOCK_LENGTH - outputOffset;
        if (copyLength > remaining) {
            copyLength = remaining;
        }
        
        if (copyLength > 0) {
            AscendC::DataCopy(zGm[outputOffset], zLocal, copyLength);
        }
        
        outQueueZ.FreeTensor(zLocal);
    }

    // Map a flattened output index [N, output_channels, L] back to the
    // flattened input index [N, C, H, W], or -1 for a padded position.
    __aicore__ inline int32_t CalculateInputIndex(int32_t outputIdx)
    {
        // Decompose into (n, channel_block, position).
        int32_t n_idx = outputIdx / (output_channels * L);
        int32_t remaining = outputIdx % (output_channels * L);
        int32_t channel_block = remaining / L;  // 0 .. C*kernel_h*kernel_w - 1
        int32_t pos_idx = remaining % L;        // 0 .. L-1
        
        // Split the channel block into (input channel, kernel row, kernel col).
        int32_t c_idx = channel_block / (kernel_h * kernel_w);
        int32_t kernel_idx = channel_block % (kernel_h * kernel_w);
        int32_t kh_idx = kernel_idx / kernel_w;
        int32_t kw_idx = kernel_idx % kernel_w;
        
        // Split the position into output (row, col) using the runtime
        // output width computed in Init (bug fix: was the constexpr out_W).
        int32_t out_h = pos_idx / out_w;
        int32_t out_col = pos_idx % out_w;
        
        // Corresponding input coordinates.
        int32_t in_h = out_h * stride_val + kh_idx - padding_val;
        int32_t in_w = out_col * stride_val + kw_idx - padding_val;
        
        // Outside the input: this element lies in the zero-padding.
        if (in_h < 0 || in_h >= H || in_w < 0 || in_w >= W) {
            return -1;
        }
        
        // Flattened input index.
        return n_idx * (C * H * W) + c_idx * (H * W) + in_h * W + in_w;
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::GlobalTensor<float> xGm;
    AscendC::GlobalTensor<float> zGm;

    int32_t kernel_h;
    int32_t kernel_w;
    int32_t stride_val;
    int32_t padding_val;
    int32_t out_w;            // runtime output width (columns of the window grid)
    int32_t L;                // runtime number of window positions
    int32_t output_channels;  // runtime C * kernel_h * kernel_w
};

// Device entry point.  Every AI core executes the same kernel object; the
// per-core output slice is selected inside Init() via the block index.
extern "C" __global__ __aicore__ void im_2_col_custom(GM_ADDR x, GM_ADDR z, 
                                                   int32_t kernel_h, int32_t kernel_w,
                                                   int32_t stride_val, int32_t padding_val)
{
    KernelIm2Col kernel;
    kernel.Init(x, z, kernel_h, kernel_w, stride_val, padding_val);
    kernel.Process();
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher: dispatches the im2col kernel on the given stream.
// NOTE(review): the kernel tiles its output with the compile-time
// USE_CORE_NUM (8); callers presumably must pass blockDim == 8 or part of
// the output is never written — confirm against the call sites.
void im_2_col_custom_do(uint32_t blockDim, void *stream, 
                      uint8_t *x, uint8_t *z,
                      int32_t kernel_h, int32_t kernel_w,
                      int32_t stride_val, int32_t padding_val)
{
    im_2_col_custom<<<blockDim, nullptr, stream>>>(x, z, kernel_h, kernel_w, stride_val, padding_val);
}
#endif