#include "kernel_operator.h"

constexpr int32_t BUFFER_NUM = 2; // tensor num for each queue

template<typename TYPE_X, typename TYPE_Z> 
class KernelFlip {
    using T = TYPE_X;
public:
    __aicore__ inline KernelFlip() {}
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, const FlipCustomTilingData &tiling_data)
    {
        this->tiling = tiling_data;

        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        this->core_id = AscendC::GetBlockIdx();
        // 如果当前核没有分配到向量，直接返回
        if (tiling.core_vector_count[this->core_id] <= 0) {
            return ;
        }

        // 计算数据偏移
        uint32_t vector_offset = tiling.core_vector_start[this->core_id];
        uint32_t element_offset = vector_offset * tiling.computeSize;
        uint32_t xBuf = tiling.tile_vector_num * tiling.aligned_compute_size * sizeof(TYPE_X);
        uint32_t zBuf = tiling.tile_vector_num * tiling.aligned_compute_size * sizeof(TYPE_Z);
        uint32_t workBuf = tiling.tile_vector_num * tiling.aligned_compute_size * sizeof(TYPE_X);

        xGm.SetGlobalBuffer((__gm__ TYPE_X *)x + element_offset, (uint32_t)tiling.core_vector_count[this->core_id] * tiling.computeSize);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z *)z, (uint32_t)tiling.total_elements);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, xBuf);
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, zBuf);
        pipe.InitBuffer(workBuffer, workBuf);

    }

    __aicore__ inline void Process()
    {
        int32_t loopCount = tiling.core_loop_times_vector[this->core_id];
        
        // 处理完整的循环
        for (int32_t i = 0; i < loopCount; i++) 
        {
            // 判断是否是最后一次循环
            if (i == loopCount - 1) {
                // 最后一次循环，处理tail
                this->tileNum = tiling.core_tail_vector[this->core_id];
                this->processDataNum = this->tileNum * tiling.aligned_compute_size;
            } else {
                // 正常循环
                this->tileNum = tiling.tile_vector_num;
                this->processDataNum = this->tileNum * tiling.aligned_compute_size;
            }
            
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

    // 每个逻辑行的全局坐标是(n, c, h), w = dimW
    __aicore__ inline void LinearToNCH(int32_t linear_index, int32_t& n, int32_t& c, int32_t& h)
    {
        int32_t rows_per_image = tiling.C * tiling.H;  // 每张图片的逻辑行数
        int32_t rows_per_channel = tiling.H;             // 每个通道的逻辑行数
        
        // 第一步：确定批次索引n
        n = linear_index / rows_per_image;

        // 第二步：确定在图片内的位置
        int32_t remaining = linear_index % rows_per_image;

        // 第三步：确定通道索引c 
        c = remaining / rows_per_channel;

        // 第四步：确定高度索引h
        h = remaining % rows_per_channel;
    }

    __aicore__ inline int32_t NCHToLinear(int32_t n, int32_t c, int32_t h)
    {
        return n * (tiling.C * tiling.H) + c * tiling.H + h;
    }

private:
    __aicore__ inline void CopyIn(int32_t progress)
    {
        int32_t current_tile_num = this->tileNum;
        
        // 计算相对于当前core起始位置的偏移（不是全局偏移）
        uint32_t relative_offset = progress * tiling.tile_vector_num * tiling.computeSize;
        
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        
        AscendC::DataCopyExtParams copyParams{
            static_cast<uint16_t>(current_tile_num),
            static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_X)),
            0, 0, 0
        };
        
        AscendC::DataCopyPadExtParams<TYPE_X> padParams{
            true, 0,
            static_cast<uint8_t>(tiling.aligned_compute_size - tiling.computeSize),
            0
        };
        
        // 使用相对偏移，因为xGm的base已经是当前core的起始位置
        AscendC::DataCopyPad(xLocal[0], xGm[relative_offset], copyParams, padParams);
        
        inQueueX.EnQue(xLocal);
    }

    __aicore__ inline void Compute(int32_t progress)
    {
        switch(tiling.dim) {
            case 0:
                ComputeDim0(progress);
                break;
            case 1:
                ComputeDim1(progress);
                break;
            case 2:
                ComputeDim2(progress);
                break;
            case 3:
                ComputeDim3(progress);
                break;
            default:
                return ;
                break;
        
        }    
    }

    __aicore__ inline void ComputeDim0(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        // 计算当前循环需要copy的数据量
        int32_t current_tile_num = this->tileNum;
        int32_t elements_num = current_tile_num * tiling.aligned_compute_size;

        AscendC::DataCopy(zLocal, xLocal, elements_num);
        
        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    __aicore__ inline void ComputeDim1(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        // 计算当前循环需要copy的数据量
        int32_t current_tile_num = this->tileNum;
        int32_t elements_num = current_tile_num * tiling.aligned_compute_size;

        AscendC::DataCopy(zLocal, xLocal, elements_num);
        
        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    __aicore__ inline void ComputeDim2(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        // 计算当前循环需要copy的数据量
        int32_t current_tile_num = this->tileNum;
        int32_t elements_num = current_tile_num * tiling.aligned_compute_size;

        AscendC::DataCopy(zLocal, xLocal, elements_num);
        
        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }
        
    // 逐块处理（存在一定的性能缺陷，保留下来为后续优化做铺底）
    // __aicore__ inline void ComputeDim3(int32_t progress)
    // {
    //     AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
    //     AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
    //     AscendC::LocalTensor<uint32_t> offsetLocal = workBuffer.Get<uint32_t>();
        
    //     // 关键参数
    //     int32_t current_tile_num = this->tileNum;
    //     int32_t valid_per_row = tiling.computeSize;           // 每行有效元素数
    //     int32_t aligned_per_row = tiling.aligned_compute_size; // 对齐后每行元素数
    //     int32_t total_rows = current_tile_num;                // 总行数
    //     int32_t elements_per_block = (int32_t)256 / sizeof(TYPE_X);                      // 每个块处理64个元素
        
    //     // 计算每行需要分成多少块
    //     int32_t blocks_per_row = (valid_per_row + elements_per_block - 1) / elements_per_block;
        
    //     // 为每个块预计算偏移量
    //     for (int block = 0; block < blocks_per_row; block++) {
    //         // 计算当前块在行内的范围
    //         int32_t block_start = block * elements_per_block;
    //         int32_t block_end = min(block_start + elements_per_block, valid_per_row);
    //         int32_t block_size = block_end - block_start;
            
    //         // 为当前块的所有行计算偏移量
    //         for (int row = 0; row < total_rows; row++) {
    //             for (int i = 0; i < block_size; i++) {
    //                 // 当前元素在块内的位置
    //                 int local_idx = i;
    //                 // 当前元素在行内的原始列位置
    //                 int original_col = block_start + i;
    //                 // W翻转后的列位置
    //                 int flipped_col = valid_per_row - 1 - original_col;
                    
    //                 // 计算线性索引
    //                 int dst_index = row * aligned_per_row + original_col;
    //                 int src_index = row * aligned_per_row + flipped_col;
                    
    //                 // 设置偏移量
    //                 offsetLocal.SetValue(dst_index, src_index * sizeof(TYPE_X));
    //             }
    //         }
            
    //         // 执行当前块的Gather操作
    //         uint32_t block_elements = total_rows * block_size;
    //         uint8_t repeat_times = block_elements / 64;
    //         uint64_t mask = (block_size == elements_per_block) ? ( (uint64_t)256 / sizeof(TYPE_X) ) : block_size;
            
    //         AscendC::Gather(
    //             zLocal[block_start],           // 目标：从当前块起始位置开始
    //             xLocal,                        // 源：整个输入数据
    //             offsetLocal[block_start], // 偏移量：从对应位置开始
    //             0,
    //             mask,
    //             repeat_times,
    //             8
    //         );
    //     }
        
    //     // 处理填充区域（保持不变）
    //     ProcessPaddingArea(zLocal, xLocal, total_rows, valid_per_row, aligned_per_row);
        
    //     workBuffer.FreeTensor(offsetLocal);
    //     outQueueZ.EnQue<TYPE_Z>(zLocal);
    //     inQueueX.FreeTensor(xLocal);
    // }

    // 暂时使用相对简单高效的逐元素翻转
    __aicore__ inline void ComputeDim3(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
        
        int32_t current_tile_num = this->tileNum;
        int32_t valid_per_row = tiling.computeSize;
        int32_t aligned_per_row = tiling.aligned_compute_size;
        
        // 简单的行内翻转
        for (int32_t row = 0; row < current_tile_num; row++) {
            for (int32_t col = 0; col < valid_per_row; col++) {
                int32_t src_idx = row * aligned_per_row + col;
                int32_t dst_col = valid_per_row - 1 - col;
                int32_t dst_idx = row * aligned_per_row + dst_col;
                
                TYPE_X value = xLocal.GetValue(src_idx);
                zLocal.SetValue(dst_idx, static_cast<TYPE_Z>(value));
            }
        }
        
        // 处理填充区域
        ProcessPaddingArea(zLocal, xLocal, current_tile_num, valid_per_row, aligned_per_row);
        
        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    __aicore__ inline void ProcessPaddingArea(
                                AscendC::LocalTensor<TYPE_Z>& dstLocal,
                                const AscendC::LocalTensor<TYPE_X>& srcLocal,
                                int32_t total_rows,
                                int32_t valid_per_row,
                                int32_t aligned_per_row)
    {
        // 计算填充区域的大小
        int32_t padding_size = aligned_per_row - valid_per_row;
        
        if (padding_size <= 0) {
            return;  // 没有填充区域，直接返回
        }
        
        // 逐行处理填充区域
        for (int row = 0; row < total_rows; row++) {
            int row_start = row * aligned_per_row;
            
            // 将填充区域从源张量复制到目标张量
            for (int col = valid_per_row; col < aligned_per_row; col++) {
                int element_index = row_start + col;
                TYPE_X padding_value = srcLocal.GetValue(element_index);
                dstLocal.SetValue(element_index, padding_value);
            }
        }
    }

    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();
        
        // 计算当前tile的起始逻辑行索引
        uint32_t vector_start_index = tiling.core_vector_start[this->core_id] + progress * tiling.tile_vector_num;
        
        // 当前tile的行数
        int32_t current_tile_num = this->tileNum;
        
        switch (tiling.dim) {
            case 0: // N维度翻转
                CopyOutDim0(zLocal, vector_start_index, current_tile_num);
                break;
            case 1: // C维度翻转
                CopyOutDim1(zLocal, vector_start_index, current_tile_num);
                break;
            case 2: // H维度翻转
                CopyOutDim2(zLocal, vector_start_index, current_tile_num);
                break;
            case 3: // W维度翻转
                CopyOutDim3(zLocal, vector_start_index, current_tile_num);
                break;
            default:
                // 默认不翻转，直接拷贝
                {
                    uint32_t element_offset = vector_start_index * tiling.computeSize;
                    AscendC::DataCopy(zGm[element_offset], zLocal, this->processDataNum);
                }
                break;
        }
        
        outQueueZ.FreeTensor(zLocal);
    }

    // N维度翻转：逐行计算翻转后的输出位置
    __aicore__ inline void CopyOutDim0(AscendC::LocalTensor<TYPE_Z>& zLocal, 
                                    uint32_t vector_start_index, 
                                    int32_t current_tile_num)
    {
        for (int32_t row = 0; row < current_tile_num; row++) {
            int32_t current_vector_index = vector_start_index + row;
            
            int32_t n, c, h;
            LinearToNCH(current_vector_index, n, c, h);
            
            int32_t flipped_n = tiling.N - 1 - n;
            int32_t flipped_vector_index = NCHToLinear(flipped_n, c, h);
            
            uint32_t flipped_element_offset = flipped_vector_index * tiling.computeSize;
            uint32_t local_offset = row * tiling.aligned_compute_size;
            
            AscendC::DataCopyExtParams copyParams{
                1,
                static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_Z)),
                0, 
                0, 
                0
            };
            
            AscendC::DataCopyPad(zGm[flipped_element_offset], 
                            zLocal[local_offset], 
                            copyParams);
        }
    }

    // C维度翻转：逐行计算翻转后的输出位置
    __aicore__ inline void CopyOutDim1(AscendC::LocalTensor<TYPE_Z>& zLocal, 
                                    uint32_t vector_start_index, 
                                    int32_t current_tile_num)
    {
        for (int32_t row = 0; row < current_tile_num; row++) {
            int32_t current_vector_index = vector_start_index + row;
            
            int32_t n, c, h;
            LinearToNCH(current_vector_index, n, c, h);
            
            int32_t flipped_c = tiling.C - 1 - c;
            int32_t flipped_vector_index = NCHToLinear(n, flipped_c, h);
            
            uint32_t flipped_element_offset = flipped_vector_index * tiling.computeSize;
            uint32_t local_offset = row * tiling.aligned_compute_size;
            
            AscendC::DataCopyExtParams copyParams{
                1,
                static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_Z)),
                0, 
                0, 
                0
            };
            
            AscendC::DataCopyPad(zGm[flipped_element_offset], 
                            zLocal[local_offset], 
                            copyParams);
        }
    }

    // H维度翻转：逐行计算翻转后的输出位置
    __aicore__ inline void CopyOutDim2(AscendC::LocalTensor<TYPE_Z>& zLocal, 
                                    uint32_t vector_start_index, 
                                    int32_t current_tile_num)
    {
        for (int32_t row = 0; row < current_tile_num; row++) {
            int32_t current_vector_index = vector_start_index + row;
            
            int32_t n, c, h;
            LinearToNCH(current_vector_index, n, c, h);
            
            int32_t flipped_h = tiling.H - 1 - h;
            int32_t flipped_vector_index = NCHToLinear(n, c, flipped_h);
            
            uint32_t flipped_element_offset = flipped_vector_index * tiling.computeSize;
            uint32_t local_offset = row * tiling.aligned_compute_size;
            
            // 使用DataCopyPad支持非对齐搬运
            AscendC::DataCopyExtParams copyParams{
                1,                                                      // blockCount
                static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_Z)), // blockLen (字节数)
                0,                                                      // srcStride
                0,                                                      // dstStride
                0                                                       // rsv
            };
            
            AscendC::DataCopyPad(zGm[flipped_element_offset], 
                            zLocal[local_offset], 
                            copyParams);
        }
    }

    // W维度翻转：数据已在Compute阶段完成翻转，直接顺序拷出
    __aicore__ inline void CopyOutDim3(AscendC::LocalTensor<TYPE_Z>& zLocal, 
                                    uint32_t vector_start_index, 
                                    int32_t current_tile_num)
    {
        for (int32_t row = 0; row < current_tile_num; row++) {
            int32_t current_vector_index = vector_start_index + row;
            uint32_t element_offset = current_vector_index * tiling.computeSize;
            uint32_t local_offset = row * tiling.aligned_compute_size;
            
            AscendC::DataCopyExtParams copyParams{
                1,
                static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_Z)),
                0, 
                0, 
                0
            };
            
            AscendC::DataCopyPad(zGm[element_offset], 
                            zLocal[local_offset], 
                            copyParams);
        }
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> workBuffer;
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Z> zGm;

    int32_t core_id;
    int32_t tileNum;
    int32_t processDataNum;

    FlipCustomTilingData tiling;
};

extern "C" __global__ __aicore__ void flip_custom(GM_ADDR x, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling)
{
    // Materialize the tiling struct from global memory, then run the kernel.
    GET_TILING_DATA(tiling_data, tiling);
    KernelFlip<DTYPE_X, DTYPE_Z> kernel;
    kernel.Init(x, z, tiling_data);
    kernel.Process();
}