#include "im2_col_custom_tiling.h"
#include "register/op_def_registry.h"
#include "graph/utils/type_utils.h"
#include "tiling/platform/platform_ascendc.h"
#include <vector>
#include <cstdint>
#include <cmath>
#include <random>
#include <iostream>
#include <algorithm>

namespace optiling {
    // Round `a` up to the nearest multiple of `b`; `b == 0` leaves `a` unchanged.
    static inline uint32_t AlignUp(uint32_t a, uint32_t b)
    {
        return (b == 0) ? a : ((a + b - 1) / b) * b;
    }
    
    const uint32_t BLOCK_SIZE = 32;         // base alignment block size (bytes)
    const uint32_t CORE_NUM = 8;            // number of AI cores used for the split
    const uint32_t MIN_TILE_SIZE = 16;      // minimum tile size (elements)
    const uint32_t BUFFER_NUM = 2;          // double-buffering factor
    const uint32_t VEC_LEN = 8;             // vectorization length (elements)

    // Tiling for Im2ColCustom: derives the im2col output geometry, splits the
    // flat output-element range across CORE_NUM AI cores (big/small-core
    // scheme: the first (total % CORE_NUM) cores take one extra element), then
    // grows the per-iteration tile by doubling until ~90% of UB would be used.
    // Returns GRAPH_FAILED on invalid context/shape/attributes/dtype.
    static ge::graphStatus TilingFunc(gert::TilingContext* context) {
        if (!context) {
            std::cout << "ERROR: TilingContext is null!" << std::endl;
            return ge::GRAPH_FAILED;
        }

        Im2ColCustomTilingData tiling;
        auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());

        // All cores participate.
        context->SetBlockDim(CORE_NUM);

        // Input shape: layout assumed (..., H, W), channels at dim 1 when 4D.
        const auto *inputTensor = context->GetInputTensor(0);
        if (!inputTensor) {
            return ge::GRAPH_FAILED;
        }
        const auto inputShape = inputTensor->GetOriginShape();
        const auto dimNum = inputShape.GetDimNum();
        if (dimNum < 2) {
            return ge::GRAPH_FAILED;
        }

        // Fold leading dims into batch N.
        int32_t N = 1;
        if (dimNum == 4) {
            N = static_cast<int32_t>(inputShape.GetDim(0));
        } else if (dimNum >= 3) {
            // NOTE(review): for rank > 4 this folds dim 1 into N while dim 1
            // is also read as C below — confirm the intended layout for rank > 4.
            for (size_t i = 0; i + 2 < dimNum; ++i) {
                N *= static_cast<int32_t>(inputShape.GetDim(i));
            }
        }

        const int32_t H = static_cast<int32_t>(inputShape.GetDim(dimNum - 2));
        const int32_t W = static_cast<int32_t>(inputShape.GetDim(dimNum - 1));
        const int32_t C = (dimNum >= 4) ? static_cast<int32_t>(inputShape.GetDim(1)) : 1;

        // Convolution attributes; defaults match the op definition
        // (kernel 2x2, stride 1, padding 0).
        int32_t kernel_h = 2;
        int32_t kernel_w = 2;
        int32_t stride_val = 1;
        int32_t padding_val = 0;

        auto attrPtr = context->GetAttrs();
        if (attrPtr) {
            // Attribute order: kernel_h, kernel_w, stride_val, padding_val.
            if (const auto *p = attrPtr->GetInt(0)) {
                kernel_h = static_cast<int32_t>(*p);
            }
            if (const auto *p = attrPtr->GetInt(1)) {
                kernel_w = static_cast<int32_t>(*p);
            }
            if (const auto *p = attrPtr->GetInt(2)) {
                stride_val = static_cast<int32_t>(*p);
            }
            if (const auto *p = attrPtr->GetInt(3)) {
                padding_val = static_cast<int32_t>(*p);
            }
        }

        // Reject parameters that would divide by zero or yield a degenerate
        // kernel (the original code divided by stride_val unchecked).
        if (kernel_h <= 0 || kernel_w <= 0 || stride_val <= 0 || padding_val < 0) {
            return ge::GRAPH_FAILED;
        }

        // Available Unified Buffer size in bytes.
        uint64_t ubSize = 0;
        ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubSize);

        // ======= Im2Col output geometry =======
        const int32_t out_H = (H + 2 * padding_val - kernel_h) / stride_val + 1;
        const int32_t out_W = (W + 2 * padding_val - kernel_w) / stride_val + 1;
        if (out_H <= 0 || out_W <= 0) {
            // Kernel larger than the padded input: no valid output positions.
            // (Previously a negative L wrapped output_elements to a huge uint64.)
            return ge::GRAPH_FAILED;
        }
        const int32_t L = out_H * out_W;
        const int32_t output_channels = C * kernel_h * kernel_w;

        // Total element counts.
        const uint64_t input_elements = static_cast<uint64_t>(N) * C * H * W;
        const uint64_t output_elements = static_cast<uint64_t>(N) * output_channels * L;

        // The per-core tiling fields are 32-bit: guard against truncation as
        // well as an empty output.
        if (output_elements == 0 || output_elements > UINT32_MAX) {
            return ge::GRAPH_FAILED;
        }
        const uint32_t total_output_elements = static_cast<uint32_t>(output_elements);

        // ======= Inter-core split (by output element) =======
        const int32_t base_elements_per_core = static_cast<int32_t>(total_output_elements / CORE_NUM);
        const int32_t big_core_num = static_cast<int32_t>(total_output_elements % CORE_NUM); // cores with +1 element
        const int32_t big_core_elements = base_elements_per_core + 1;
        const int32_t small_core_elements = base_elements_per_core;

        // RAII storage (was raw new[]/delete[] with manual cleanup lambdas):
        // no leak on any early-return path.
        std::vector<int32_t> core_element_start(CORE_NUM);
        std::vector<int32_t> core_element_end(CORE_NUM);
        std::vector<int32_t> core_element_count(CORE_NUM);

        int32_t current_element = 0;
        for (uint32_t core_id = 0; core_id < CORE_NUM; core_id++) {
            core_element_count[core_id] =
                (static_cast<int32_t>(core_id) < big_core_num) ? big_core_elements : small_core_elements;
            core_element_start[core_id] = current_element;
            if (core_element_count[core_id] > 0) {
                core_element_end[core_id] = current_element + core_element_count[core_id] - 1;
                current_element += core_element_count[core_id];
            } else {
                // Idle core: end < start marks a zero-length range.
                core_element_end[core_id] = current_element - 1;
            }
        }

        // ====== Intra-core split ======
        int32_t tile_element_num = 1; // elements processed per loop iteration

        // Map data type to element size in bytes.
        const auto inputDataType = inputTensor->GetDataType();
        uint32_t typeSize = 0;
        switch (inputDataType) {
            case ge::DT_FLOAT:
                typeSize = 4;
                break;
            case ge::DT_FLOAT16:
                typeSize = 2;
                break;
            case ge::DT_INT8:
                typeSize = 1;
                break;
            default:
                return ge::GRAPH_FAILED;
        }

        // Rough UB footprint (bytes) for a tile of elem_num output elements.
        auto EstimateUBUsage = [&](int32_t elem_num) -> uint32_t {
            // Input staging: assume up to 4x the output tile may be cached
            // (overlapping input regions are re-read).
            const uint32_t inputBytes = AlignUp(elem_num * 4 * typeSize, BLOCK_SIZE);
            // Output tile.
            const uint32_t outputBytes = AlignUp(elem_num * typeSize, BLOCK_SIZE);
            // Scratch: index computation plus one vector of work space.
            const uint32_t workBytes = AlignUp(elem_num * sizeof(int32_t) + VEC_LEN * typeSize, BLOCK_SIZE);
            return BUFFER_NUM * (inputBytes + outputBytes) + workBytes;
        };

        // Double the tile while it still fits within 90% of UB (head-room margin)
        // and does not exceed the largest per-core workload.
        const int32_t max_elements_per_core = base_elements_per_core + (big_core_num > 0 ? 1 : 0);
        while (tile_element_num * 2 <= max_elements_per_core &&
               EstimateUBUsage(tile_element_num * 2) <= ubSize * 90 / 100) {
            tile_element_num *= 2;
        }

        // Enforce a minimum tile, clamped to the per-core workload; the
        // std::max keeps the tile >= 1 so the divisions below are safe.
        if (tile_element_num < static_cast<int32_t>(MIN_TILE_SIZE)) {
            const uint32_t cap = static_cast<uint32_t>(std::max(max_elements_per_core, 1));
            tile_element_num = static_cast<int32_t>(std::min(MIN_TILE_SIZE, cap));
        }

        // Per-core loop count and tail size (tail == full tile when the count
        // divides evenly).
        std::vector<int32_t> core_loop_times(CORE_NUM);
        std::vector<int32_t> core_tail_elements(CORE_NUM);
        for (uint32_t core_id = 0; core_id < CORE_NUM; core_id++) {
            if (core_element_count[core_id] > 0) {
                core_loop_times[core_id] =
                    (core_element_count[core_id] + tile_element_num - 1) / tile_element_num;
                core_tail_elements[core_id] = core_element_count[core_id] % tile_element_num;
                if (core_tail_elements[core_id] == 0) {
                    core_tail_elements[core_id] = tile_element_num;
                }
            } else {
                core_loop_times[core_id] = 0;
                core_tail_elements[core_id] = 0;
            }
        }

        // Tile size rounded up to a 32-byte boundary, in elements.
        const int32_t elems_per_block = 32 / static_cast<int32_t>(typeSize);
        const int32_t aligned_element_size =
            (tile_element_num + elems_per_block - 1) / elems_per_block * elems_per_block;

        // ---- Populate tiling data ----
        // Base geometry.
        tiling.set_N(N);
        tiling.set_C(C);
        tiling.set_H(H);
        tiling.set_W(W);
        tiling.set_kernel_h(kernel_h);
        tiling.set_kernel_w(kernel_w);
        tiling.set_stride_val(stride_val);
        tiling.set_padding_val(padding_val);
        tiling.set_out_H(out_H);
        tiling.set_out_W(out_W);
        tiling.set_L(L);
        tiling.set_output_channels(output_channels);

        // Inter-core split.
        tiling.set_input_elements(input_elements);
        tiling.set_output_elements(output_elements);
        tiling.set_total_output_elements(total_output_elements);
        tiling.set_base_elements_per_core(base_elements_per_core);
        tiling.set_big_core_num(big_core_num);
        tiling.set_core_element_start(core_element_start.data());
        tiling.set_core_element_end(core_element_end.data());
        tiling.set_core_element_count(core_element_count.data());

        // Intra-core split.
        tiling.set_tile_element_num(tile_element_num);
        tiling.set_core_loop_times(core_loop_times.data());
        tiling.set_core_tail_elements(core_tail_elements.data());
        tiling.set_aligned_element_size(aligned_element_size);

        // No extra workspace needed.
        size_t *currentWorkspace = context->GetWorkspaceSizes(1);
        if (!currentWorkspace) {
            return ge::GRAPH_FAILED;
        }
        currentWorkspace[0] = 0;

        // Serialize tiling data for the kernel.
        auto tilingData = context->GetRawTilingData();
        if (!tilingData) {
            return ge::GRAPH_FAILED;
        }
        tiling.SaveToBuffer(tilingData->GetData(), tilingData->GetCapacity());
        tilingData->SetDataSize(tiling.GetDataSize());

        return ge::GRAPH_SUCCESS;
    }
}

namespace ge {
// Infer the output shape [batch, C*kernel_h*kernel_w, out_H*out_W] from the
// input shape and the convolution attributes. Fails on null context/shapes,
// rank < 2, non-positive kernel/stride, or a degenerate output window.
static graphStatus InferShape(gert::InferShapeContext *context)
{
    if (!context) {
        return ge::GRAPH_FAILED;
    }

    const gert::Shape *InputShape = context->GetInputShape(0);
    gert::Shape *OutputShape = context->GetOutputShape(0);
    if (!InputShape || !OutputShape) {
        return ge::GRAPH_FAILED;
    }

    // Convolution attributes; defaults match the op definition
    // (kernel 2x2, stride 1, padding 0).
    int32_t kernel_h = 2;
    int32_t kernel_w = 2;
    int32_t stride_val = 1;
    int32_t padding_val = 0;

    auto attrPtr = context->GetAttrs();
    if (attrPtr) {
        // Attribute order: kernel_h, kernel_w, stride_val, padding_val.
        if (const auto *p = attrPtr->GetInt(0)) {
            kernel_h = static_cast<int32_t>(*p);
        }
        if (const auto *p = attrPtr->GetInt(1)) {
            kernel_w = static_cast<int32_t>(*p);
        }
        if (const auto *p = attrPtr->GetInt(2)) {
            stride_val = static_cast<int32_t>(*p);
        }
        if (const auto *p = attrPtr->GetInt(3)) {
            padding_val = static_cast<int32_t>(*p);
        }
    }

    // Reject parameters that would divide by zero below (the original code
    // divided by stride_val unchecked).
    if (kernel_h <= 0 || kernel_w <= 0 || stride_val <= 0 || padding_val < 0) {
        return ge::GRAPH_FAILED;
    }

    // Parse input shape: layout assumed (..., H, W), channels at dim 1 when 4D.
    auto dimNum = InputShape->GetDimNum();
    if (dimNum < 2) {
        return ge::GRAPH_FAILED;
    }

    int32_t H = static_cast<int32_t>(InputShape->GetDim(dimNum - 2));
    int32_t W = static_cast<int32_t>(InputShape->GetDim(dimNum - 1));
    int32_t C = (dimNum >= 4) ? static_cast<int32_t>(InputShape->GetDim(1)) : 1;

    // Output geometry: [batch, C*kH*kW, L] with L = out_H * out_W.
    int32_t out_H = (H + 2 * padding_val - kernel_h) / stride_val + 1;
    int32_t out_W = (W + 2 * padding_val - kernel_w) / stride_val + 1;
    if (out_H <= 0 || out_W <= 0) {
        // Kernel larger than the padded input: no valid output positions.
        return ge::GRAPH_FAILED;
    }
    int32_t L = out_H * out_W;
    int32_t output_channels = C * kernel_h * kernel_w;

    // Inputs of rank >= 3 keep their leading batch dim; 2D inputs get batch 1.
    // (The original separate dimNum==3 and dimNum==4 branches were identical
    // and are merged here.)
    const int64_t batch = (dimNum >= 3) ? InputShape->GetDim(0) : 1;
    const std::vector<int64_t> outputDims = {batch, output_channels, L};

    OutputShape->SetDimNum(outputDims.size());
    for (size_t i = 0; i < outputDims.size(); ++i) {
        OutputShape->SetDim(i, outputDims[i]);
    }

    return ge::GRAPH_SUCCESS;
}

// The output tensor inherits the input tensor's data type unchanged.
static graphStatus InferDataType(gert::InferDataTypeContext *context)
{
    if (context == nullptr) {
        return ge::GRAPH_FAILED;
    }
    context->SetOutputDataType(0, context->GetInputDataType(0));
    return ge::GRAPH_SUCCESS;
}

} // namespace ge

namespace ops {
// Operator registration for Im2ColCustom: one required ND input "x", one
// required ND output "z" (float/float16), four optional integer attributes,
// shape/dtype inference hooks, and the supported AI Core SoC configs.
class Im2ColCustom : public OpDef {
public:
    explicit Im2ColCustom(const char *name) : OpDef(name)
    {
        this->Input("x")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        this->Output("z")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        
        // Convolution parameter attributes; defaults: 2x2 kernel, stride 1,
        // no padding. TilingFunc/InferShape read them in this declaration order.
        this->Attr("kernel_h")
            .AttrType(OPTIONAL)
            .Int(2);
        this->Attr("kernel_w")
            .AttrType(OPTIONAL)
            .Int(2);
        this->Attr("stride_val")
            .AttrType(OPTIONAL)
            .Int(1);
        this->Attr("padding_val")
            .AttrType(OPTIONAL)
            .Int(0);

        this->SetInferShape(ge::InferShape).SetInferDataType(ge::InferDataType);
        this->AICore()
            .SetTiling(optiling::TilingFunc)
            .AddConfig("ascend910")
            .AddConfig("ascend310p")
            .AddConfig("ascend310b")
            .AddConfig("ascend910b");
    }
};
OP_ADD(Im2ColCustom);
} // namespace ops