#include "constant_pad_custom_tiling.h"
#include "register/op_def_registry.h"
#include "graph/utils/type_utils.h"
#include "tiling/platform/platform_ascendc.h"
#include <vector>
#include <cstdint>
#include <cmath>
#include <random>
#include <iostream>
#include <algorithm>

namespace optiling {

    const uint32_t BLOCK_SIZE = 32;         // Base alignment granularity in bytes (UB access unit)
    const uint32_t MAX_CORE_NUM = 64;       // Upper bound on the number of AI Cores to use
    const uint32_t MIN_TILE_SIZE = 16;      // Minimum tile split size
    const uint32_t BUFFER_NUM = 2;          // Double-buffering factor (ping-pong pipeline)
    const uint32_t VEC_LEN = 8;             // Vectorization length (elements per vector op)

    // Round `value` up to the nearest multiple of `align`.
    // Returns `value` unchanged when `align` is zero, guarding the division.
    static inline uint32_t AlignUp(uint32_t value, uint32_t align)
    {
        if (align != 0) {
            const uint32_t blocks = (value + align - 1) / align;
            return blocks * align;
        }
        return value;
    }

    // UB内存需求估算函数
    // Estimate the Unified Buffer (UB) footprint in bytes for processing
    // `vec_num` vectors of `vec_len` elements, each element `typeSize` bytes.
    // Accounts for BUFFER_NUM-way double buffering of the input and output
    // staging areas plus a small aligned scratch buffer.
    //
    // Fix: the original computed `vec_num * vec_len * typeSize` in 32 bits,
    // which can wrap for large tile candidates and make oversized tiles
    // falsely appear to fit in UB. Intermediate math is now 64-bit and the
    // result saturates at UINT32_MAX.
    static inline uint32_t EstimateUBUsage(int32_t vec_num, int32_t vec_len, uint32_t typeSize) 
    {
        const uint64_t elems = static_cast<uint64_t>(vec_num) * static_cast<uint64_t>(vec_len);
        const uint64_t rawBytes = elems * typeSize;
        // Align each staged buffer (input and output are the same size) to the
        // BLOCK_SIZE UB access granularity.
        const uint64_t ioBytes = (rawBytes + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
        // Scratch/work area for temporary vector results.
        const uint64_t workBytes = AlignUp(VEC_LEN * typeSize, BLOCK_SIZE);

        const uint64_t total = BUFFER_NUM * (ioBytes + ioBytes) + workBytes;

        return total > UINT32_MAX ? UINT32_MAX : static_cast<uint32_t>(total);
    }

    // 混合策略快速迭代出合理的tile大小
    // Hybrid search for the number of vectors (rows) to process per tile.
    //   Phase 1: exponential growth — double the tile count while estimated UB
    //            usage stays within 50% of capacity.
    //   Phase 2: linear refinement — add a shrinking step (starting at 1/4 of
    //            the phase-1 result) while usage stays within 95% of capacity.
    // Always returns at least 1; never exceeds max_vectors_per_core when that
    // bound is positive.
    //
    // Fix: phase 2 previously called EstimateUBUsage on `tile_num + step`
    // BEFORE checking it against max_vectors_per_core, estimating usage for
    // out-of-range candidates; the bound is now checked first.
    static inline int32_t FindOptimalTileNumHybrid(int32_t tile_compute_size, int32_t max_vectors_per_core, uint64_t ubSize, uint32_t typeSize) 
    {
        int32_t tile_num = 1;

        // Phase 1: fast exponential growth (x2) under the 50% threshold.
        while (tile_num * 2 <= max_vectors_per_core &&
               EstimateUBUsage(tile_num * 2, tile_compute_size, typeSize) <= ubSize * 50 / 100) {
            tile_num *= 2;
        }

        // Phase 2: fine-grained linear search with a halving step.
        int32_t step = std::max(1, tile_num / 4);
        while (step >= 1) {
            const int32_t candidate = tile_num + step;
            // Bound check first so we never estimate an out-of-range candidate.
            if (candidate <= max_vectors_per_core &&
                EstimateUBUsage(candidate, tile_compute_size, typeSize) <= ubSize * 95 / 100) {
                tile_num = candidate;
            } else {
                step /= 2;  // shrink the step and keep probing
            }
        }

        return tile_num;
    }

    // Tiling entry point for ConstantPadCustom.
    //
    // Treats each padded output row (there are N*C*outputH of them, each of
    // length outputW) as one "vector", splits the vectors across the AI Cores
    // ("big" cores take one extra vector when the count is not evenly
    // divisible), then sizes the per-core tile so the double-buffered staging
    // buffers fit in UB. All results are serialized into the tiling data blob
    // consumed by the kernel.
    //
    // Returns ge::GRAPH_SUCCESS on success, ge::GRAPH_FAILED on invalid input
    // (null context/tensor, rank < 2, empty tensor, unsupported dtype, ...).
    //
    // Fix: the original allocated the per-core bookkeeping arrays with raw
    // new[] and leaked them on the unsupported-dtype early return (its
    // cleanup lambda was never invoked on that path). std::vector now owns
    // the storage (RAII), so every return path is leak-free. Also added
    // guards against CORE_NUM == 0, a null input tensor, and outputW <= 0
    // (possible with negative pads), all of which previously caused UB.
    static ge::graphStatus TilingFunc(gert::TilingContext* context) {
        if (!context) {
            std::cout << "ERROR: TilingContext is null!" << std::endl;
            return ge::GRAPH_FAILED;
        }

        ConstantPadCustomTilingData tiling;
        auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());

        // --- Multi-core setup: clamp the platform core count to MAX_CORE_NUM ---
        auto CORE_NUM = ascendcPlatform.GetCoreNum();
        std::cout << "=== 平台核心信息 ===" << std::endl;
        std::cout << "原始核心数: " << CORE_NUM << std::endl;
        if (CORE_NUM > MAX_CORE_NUM) {
            CORE_NUM = MAX_CORE_NUM;
        }
        if (CORE_NUM == 0) {
            // A zero core count would divide by zero in the split below.
            return ge::GRAPH_FAILED;
        }
        std::cout << "实际使用核心数: " << CORE_NUM << std::endl;
        context->SetBlockDim(CORE_NUM);
        const int32_t coreNum = static_cast<int32_t>(CORE_NUM);

        // --- Input shape ---
        const auto *inputTensor = context->GetInputTensor(0);
        if (!inputTensor) {
            return ge::GRAPH_FAILED;
        }
        const auto InputShape = inputTensor->GetOriginShape();
        auto dimNum = InputShape.GetDimNum();

        std::cout << "=== 输入张量信息 ===" << std::endl;
        std::cout << "输入维度数: " << dimNum << std::endl;

        if (dimNum < 2) {
            return ge::GRAPH_FAILED;
        }

        // Fold leading dimensions into batch (N); for rank 4, dim 1 is C.
        int32_t inputN = 1;
        if (dimNum == 4) {
            inputN = InputShape.GetDim(0);
        } else if (dimNum >= 3) {
            inputN = 1;
            for (size_t i = 0; i + 2 < dimNum; ++i) {
                inputN *= InputShape.GetDim(i);
            }
        }

        int32_t inputH = InputShape.GetDim(dimNum - 2);
        int32_t inputW = InputShape.GetDim(dimNum - 1);
        int32_t inputC = 1;  // default channel count for rank < 4
        if (dimNum >= 4) {
            inputC = InputShape.GetDim(1);
        }

        std::cout << "输入形状 - N: " << inputN << ", C: " << inputC 
                  << ", H: " << inputH << ", W: " << inputW << std::endl;

        // Padding attributes; defaults must match the OpDef registration.
        int32_t pad_left = 1;
        int32_t pad_right = 2;
        int32_t pad_top = 3;
        int32_t pad_bottom = 4;
        float pad_value = 0.0;

        auto attrPtr = context->GetAttrs();
        if (attrPtr) {
            // Attribute order: pad_left, pad_right, pad_top, pad_bottom, pad_value.
            if (attrPtr->GetInt(0)) {
                pad_left = static_cast<int32_t>(*(attrPtr->GetInt(0)));
            }
            if (attrPtr->GetInt(1)) {
                pad_right = static_cast<int32_t>(*(attrPtr->GetInt(1)));
            }
            if (attrPtr->GetInt(2)) {
                pad_top = static_cast<int32_t>(*(attrPtr->GetInt(2)));
            }
            if (attrPtr->GetInt(3)) {
                pad_bottom = static_cast<int32_t>(*(attrPtr->GetInt(3)));
            }
            if (attrPtr->GetFloat(4)) {
                pad_value = *(attrPtr->GetFloat(4));
            }
        }

        std::cout << "=== 填充参数 ===" << std::endl;
        std::cout << "左填充: " << pad_left << ", 右填充: " << pad_right << std::endl;
        std::cout << "上填充: " << pad_top << ", 下填充: " << pad_bottom << std::endl;
        std::cout << "填充值: " << pad_value << std::endl;

        // --- Output shape: pad W horizontally, H vertically ---
        int32_t outputN = inputN;
        int32_t outputC = inputC;
        int32_t outputW = inputW + pad_left + pad_right; 
        int32_t outputH = inputH + pad_top + pad_bottom;

        std::cout << "=== 输出张量信息 ===" << std::endl;
        std::cout << "输出形状 - N: " << outputN << ", C: " << outputC 
                  << ", H: " << outputH << ", W: " << outputW << std::endl;

        // UB capacity in bytes, queried from the platform.
        uint64_t ubSize = 0;
        ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubSize);
        std::cout << "UB大小: " << ubSize << " 字节" << std::endl;

        // ======= Inter-core split =======
        uint64_t total_input_elements = static_cast<uint64_t>(inputN) * inputC * inputH * inputW;
        uint64_t total_output_elements = static_cast<uint64_t>(outputN) * outputC * outputH * outputW;

        std::cout << "=== 数据量统计 ===" << std::endl;
        std::cout << "总输入元素数: " << total_input_elements << std::endl;
        std::cout << "总输出元素数: " << total_output_elements << std::endl;

        if (total_input_elements == 0) {
            return ge::GRAPH_FAILED;
        }

        uint32_t total_vectors = outputN * outputC * outputH;  // one vector per output row
        std::cout << "总向量数: " << total_vectors << std::endl;

        if (total_vectors == 0) {
            return ge::GRAPH_FAILED;
        }

        // "Big" cores handle one extra vector to absorb the remainder.
        int32_t base_vectors_per_core = total_vectors / CORE_NUM;
        int32_t big_core_num = total_vectors % CORE_NUM;
        int32_t big_core_vectors = base_vectors_per_core + 1;
        int32_t small_core_num = coreNum - big_core_num;
        int32_t small_core_vectors = base_vectors_per_core;

        std::cout << "=== 核间划分策略 ===" << std::endl;
        std::cout << "基础每核向量数: " << base_vectors_per_core << std::endl;
        std::cout << "大核数量: " << big_core_num << ", 每核向量数: " << big_core_vectors << std::endl;
        std::cout << "小核数量: " << small_core_num << ", 每核向量数: " << small_core_vectors << std::endl;

        // Per-core split bookkeeping. std::vector (RAII) replaces the original
        // raw new[]/delete[] arrays, which leaked on early-return paths such
        // as the unsupported-dtype branch below.
        std::vector<int32_t> core_vector_start(coreNum);
        std::vector<int32_t> core_vector_end(coreNum);
        std::vector<int32_t> core_vector_count(coreNum);

        // Assign contiguous [start, end] vector ranges to each core.
        int32_t current_vector = 0;
        for (int32_t core_id = 0; core_id < coreNum; core_id++) {
            core_vector_count[core_id] =
                (core_id < big_core_num) ? big_core_vectors : small_core_vectors;
            core_vector_start[core_id] = current_vector;

            if (core_vector_count[core_id] > 0) {
                core_vector_end[core_id] = current_vector + core_vector_count[core_id] - 1;
                current_vector += core_vector_count[core_id];
            } else {
                core_vector_end[core_id] = current_vector - 1;  // idle core: empty range
            }
        }

        std::cout << "=== 各核分配详情 ===" << std::endl;
        for (int32_t core_id = 0; core_id < coreNum; core_id++) {
            std::cout << "核 " << core_id << ": 起始=" << core_vector_start[core_id] 
                      << ", 结束=" << core_vector_end[core_id] 
                      << ", 数量=" << core_vector_count[core_id] << std::endl;
        }

        // ====== Intra-core split ======
        int32_t tile_vector_num = 1;        // vectors processed per tile
        int32_t computeSize = outputW;      // each vector is one padded output row
        if (computeSize <= 0) {
            // Negative pads can shrink outputW to <= 0, which would divide by
            // zero when computing loop counts below.
            return ge::GRAPH_FAILED;
        }
        int32_t tile_compute_size = computeSize;  // elements processed per tile pass

        // Map the element dtype to its byte size.
        const auto inputDataType = inputTensor->GetDataType();
        uint32_t typeSize = 0;
        switch (inputDataType) {
            case ge::DT_FLOAT:
                typeSize = 4;
                std::cout << "数据类型: FLOAT32" << std::endl;
                break;
            case ge::DT_FLOAT16:
                typeSize = 2;
                std::cout << "数据类型: FLOAT16" << std::endl;
                break;
            case ge::DT_INT8:
                typeSize = 1;
                std::cout << "数据类型: INT8" << std::endl;
                break;
            default:
                // No manual cleanup needed here: vectors free themselves.
                std::cout << "不支持的数据类型" << std::endl;
                return ge::GRAPH_FAILED;
        }

        std::cout << "类型大小: " << typeSize << " 字节" << std::endl;

        // The largest per-core workload bounds the tile size search.
        int32_t max_vectors_per_core = base_vectors_per_core + (big_core_num > 0 ? 1 : 0);

        std::cout << "=== 核内划分参数 ===" << std::endl;
        std::cout << "计算大小: " << computeSize << std::endl;
        std::cout << "每核最大向量数: " << max_vectors_per_core << std::endl;

        // Rows staged into UB per pipeline iteration.
        tile_vector_num = FindOptimalTileNumHybrid(tile_compute_size, max_vectors_per_core, ubSize, typeSize);
        std::cout << "优化后的tile_vector_num: " << tile_vector_num << std::endl;

        // Per-core loop counts and tail sizes.
        std::vector<int32_t> core_loop_times_vector(coreNum);
        std::vector<int32_t> core_tail_vector(coreNum);
        int32_t loop_times_compute = (computeSize + tile_compute_size - 1) / tile_compute_size;
        int32_t tail_compute = computeSize % tile_compute_size;
        if (tail_compute == 0) {
            tail_compute = tile_compute_size;  // evenly divisible: full-size tail
        }

        std::cout << "计算循环次数: " << loop_times_compute << ", 尾部大小: " << tail_compute << std::endl;

        for (int32_t core_id = 0; core_id < coreNum; core_id++) {
            if (core_vector_count[core_id] > 0) {
                core_loop_times_vector[core_id] = (core_vector_count[core_id] + tile_vector_num - 1) / tile_vector_num;
                core_tail_vector[core_id] = core_vector_count[core_id] % tile_vector_num;
                if (core_tail_vector[core_id] == 0) {
                    core_tail_vector[core_id] = tile_vector_num;
                }
            } else {
                core_loop_times_vector[core_id] = 0;
                core_tail_vector[core_id] = 0;
            }
            std::cout << "核 " << core_id << ": 循环次数=" << core_loop_times_vector[core_id] 
                      << ", 尾部大小=" << core_tail_vector[core_id] << std::endl;
        }

        // Row length rounded up to a whole number of 32-byte blocks' worth of
        // elements (32 / typeSize elements per block).
        int32_t aligned_compute_size = ((computeSize + 32 / (int32_t)typeSize - 1) / (32 / (int32_t)typeSize)) * (32 / (int32_t)typeSize);
        std::cout << "对齐后计算大小: " << aligned_compute_size << std::endl;

        // --- Serialize tiling data ---
        // Input shape.
        tiling.set_inputN(static_cast<int32_t>(inputN));
        tiling.set_inputC(static_cast<int32_t>(inputC));
        tiling.set_inputH(static_cast<int32_t>(inputH));
        tiling.set_inputW(static_cast<int32_t>(inputW));

        // Output shape.
        tiling.set_outputN(static_cast<int32_t>(outputN));
        tiling.set_outputC(static_cast<int32_t>(outputC));
        tiling.set_outputH(static_cast<int32_t>(outputH));
        tiling.set_outputW(static_cast<int32_t>(outputW));

        // Padding parameters.
        tiling.set_pad_left(pad_left);
        tiling.set_pad_right(pad_right);
        tiling.set_pad_top(pad_top);
        tiling.set_pad_bottom(pad_bottom);
        tiling.set_pad_value(pad_value);

        // Inter-core split.
        tiling.set_total_vectors(total_vectors);
        tiling.set_big_core_num(big_core_num);
        tiling.set_core_vector_start(core_vector_start.data());
        tiling.set_core_vector_end(core_vector_end.data());
        tiling.set_core_vector_count(core_vector_count.data());

        // Intra-core split.
        tiling.set_tile_vector_num(tile_vector_num);
        tiling.set_tile_compute_size(tile_compute_size);
        tiling.set_loop_times_compute(loop_times_compute);
        tiling.set_tail_compute(tail_compute);
        tiling.set_core_loop_times_vector(core_loop_times_vector.data());
        tiling.set_core_tail_vector(core_tail_vector.data());
        tiling.set_aligned_compute_size(aligned_compute_size);

        // Totals used by the kernel's copy/flip helpers.
        tiling.set_total_input_elements(total_input_elements);
        tiling.set_total_output_elements(total_output_elements);

        // Reserve the system workspace the AscendC library APIs require.
        size_t systemWorkspaceSize = ascendcPlatform.GetLibApiWorkSpaceSize();
        size_t *currentWorkspace = context->GetWorkspaceSizes(1);
        if (!currentWorkspace) { 
            return ge::GRAPH_FAILED;
        }
        currentWorkspace[0] = systemWorkspaceSize;

        // Persist the tiling blob into the context buffer.
        auto tilingData = context->GetRawTilingData();
        if (!tilingData) {
            return ge::GRAPH_FAILED;
        }

        tiling.SaveToBuffer(tilingData->GetData(), tilingData->GetCapacity());
        tilingData->SetDataSize(tiling.GetDataSize());

        std::cout << "=== Tiling完成 ===" << std::endl;
        std::cout << "Tiling数据大小: " << tiling.GetDataSize() << " 字节" << std::endl;

        return ge::GRAPH_SUCCESS;
    }
}

namespace ge {
// Shape inference for ConstantPadCustom: the output keeps the input rank,
// with the last two dimensions grown by (pad_top + pad_bottom) and
// (pad_left + pad_right) respectively. Fails on null context/shapes or
// rank < 2.
static graphStatus InferShape(gert::InferShapeContext *context)
{
    if (context == nullptr) {
        return ge::GRAPH_FAILED;
    }

    const gert::Shape *inShape = context->GetInputShape(0);
    gert::Shape *outShape = context->GetOutputShape(0);
    if (inShape == nullptr || outShape == nullptr) {
        return ge::GRAPH_FAILED;
    }

    // Padding defaults mirror the attribute defaults in the OpDef registration.
    int32_t padL = 1;
    int32_t padR = 2;
    int32_t padT = 3;
    int32_t padB = 4;

    // Attribute order: pad_left, pad_right, pad_top, pad_bottom.
    if (auto attrs = context->GetAttrs()) {
        if (const auto *v = attrs->GetInt(0)) {
            padL = static_cast<int32_t>(*v);
        }
        if (const auto *v = attrs->GetInt(1)) {
            padR = static_cast<int32_t>(*v);
        }
        if (const auto *v = attrs->GetInt(2)) {
            padT = static_cast<int32_t>(*v);
        }
        if (const auto *v = attrs->GetInt(3)) {
            padB = static_cast<int32_t>(*v);
        }
    }

    std::cout << "=== 形状推理 ===" << std::endl;
    std::cout << "填充参数 - 左:" << padL << " 右:" << padR 
              << " 上:" << padT << " 下:" << padB << std::endl;

    const auto rank = inShape->GetDimNum();
    if (rank < 2) {
        return ge::GRAPH_FAILED;
    }

    // Fold leading dimensions into batch (N); for rank 4, dim 1 is C.
    int32_t n = 1;
    if (rank == 4) {
        n = inShape->GetDim(0);
    } else if (rank >= 3) {
        n = 1;
        for (size_t i = 0; i + 2 < rank; ++i) {
            n *= inShape->GetDim(i);
        }
    }

    const int32_t h = inShape->GetDim(rank - 2);
    const int32_t w = inShape->GetDim(rank - 1);
    const int32_t c = (rank >= 4) ? static_cast<int32_t>(inShape->GetDim(1)) : 1;

    std::cout << "输入形状 - N:" << n << " C:" << c 
              << " H:" << h << " W:" << w << std::endl;

    const int32_t outH = h + padT + padB;
    const int32_t outW = w + padL + padR;

    std::cout << "输出形状 - N:" << n << " C:" << c 
              << " H:" << outH << " W:" << outW << std::endl;

    // Build the output dim list according to the input rank.
    std::vector<int64_t> dims;
    switch (rank) {
        case 4:
            dims = {n, c, outH, outW};
            break;
        case 3:
            dims = {inShape->GetDim(0), outH, outW};
            break;
        default:  // rank == 2 (and any other rank falls back to 2-D)
            dims = {outH, outW};
            break;
    }

    outShape->SetDimNum(dims.size());
    for (size_t i = 0; i < dims.size(); ++i) {
        outShape->SetDim(i, dims[i]);
    }

    std::cout << "最终输出维度: [";
    for (size_t i = 0; i < dims.size(); ++i) {
        if (i > 0) {
            std::cout << ", ";
        }
        std::cout << dims[i];
    }
    std::cout << "]" << std::endl;

    return ge::GRAPH_SUCCESS;
}

// Data-type inference: the output tensor carries the input tensor's dtype
// unchanged (constant padding never changes element type).
static graphStatus InferDataType(gert::InferDataTypeContext *context)
{
    if (context == nullptr) {
        return ge::GRAPH_FAILED;
    }

    const auto dtype = context->GetInputDataType(0);
    context->SetOutputDataType(0, dtype);

    std::cout << "=== 数据类型推理 ===" << std::endl;
    std::cout << "输入数据类型: " << dtype << std::endl;
    std::cout << "输出数据类型: " << dtype << std::endl;

    return ge::GRAPH_SUCCESS;
}

} // namespace ge

namespace ops {
// Operator registration for ConstantPadCustom.
// Declares the input/output tensors, the padding attributes (whose defaults
// must stay in sync with the fallback values used in TilingFunc/InferShape),
// and wires up the infer-shape / infer-dtype / tiling callbacks.
class ConstantPadCustom : public OpDef {
public:
    explicit ConstantPadCustom(const char *name) : OpDef(name)
    {
        // Input tensor "x": fp32 or fp16, ND layout.
        this->Input("x")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Output tensor "z": same dtype/format candidates as the input.
        this->Output("z")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Optional padding attributes with per-side defaults.
        this->Attr("pad_left")
            .AttrType(OPTIONAL)
            .Int(1);
        this->Attr("pad_right")
            .AttrType(OPTIONAL)
            .Int(2);
        this->Attr("pad_top")
            .AttrType(OPTIONAL)
            .Int(3);
        this->Attr("pad_bottom")
            .AttrType(OPTIONAL)
            .Int(4);
        // Fill value written into the padded region.
        this->Attr("pad_value")
            .AttrType(OPTIONAL)
            .Float(0.0f);    

        this->SetInferShape(ge::InferShape).SetInferDataType(ge::InferDataType);
        this->AICore()
            .SetTiling(optiling::TilingFunc)
            .AddConfig("ascend910")
            .AddConfig("ascend310p")
            .AddConfig("ascend310b")
            .AddConfig("ascend910b");

        // NOTE(review): logging from a registration-time constructor prints on
        // every process start-up; consider removing for production builds.
        std::cout << "=== 算子注册完成 ===" << std::endl;
        std::cout << "算子名称: ConstantPadCustom" << std::endl;
    }
};
OP_ADD(ConstantPadCustom);
} // namespace ops