#include "flip_custom_tiling.h"
#include "register/op_def_registry.h"
#include "graph/utils/type_utils.h"
#include "tiling/platform/platform_ascendc.h"
#include <vector>
#include <cstdint>
#include <cmath>
#include <random>
#include <iostream>
#include <algorithm>

namespace optiling {
    // Round `value` up to the nearest multiple of `align`.
    // An alignment of 0 is treated as "no alignment": value is returned unchanged.
    static inline uint32_t AlignUp(uint32_t value, uint32_t align)
    {
        return (align == 0) ? value : ((value + align - 1) / align) * align;
    }
    
    // Tiling constants. `constexpr` makes them true compile-time constants
    // (the original `const` only guaranteed runtime immutability).
    constexpr uint32_t BLOCK_SIZE = 32;         // base alignment block size (bytes)
    constexpr uint32_t CORE_NUM = 8;            // number of AI cores used for this op
    constexpr uint32_t MIN_TILE_SIZE = 16;      // minimum tile size
    constexpr uint32_t BUFFER_NUM = 2;          // double-buffering factor
    constexpr uint32_t VEC_LEN = 8;             // vectorization length

    // Compute the tiling plan for the FlipCustom operator.
    //
    // Partitions the flattened rows (N*C*H "vectors" of W elements each) across
    // CORE_NUM cores, sizes the per-loop tile against the Unified Buffer budget,
    // and serializes everything into the context's raw tiling buffer.
    //
    // Returns GRAPH_FAILED on: null context / input tensor / workspace / tiling
    // buffer, rank < 2, empty input, or an unsupported data type.
    //
    // Fix vs. previous version: the per-core bookkeeping arrays were raw new[]
    // allocations that leaked on the unsupported-dtype early return; they are now
    // std::vector (RAII), which also removes the manual cleanup lambdas.
    static ge::graphStatus TilingFunc(gert::TilingContext* context) {
        if (context == nullptr) {
            std::cout << "ERROR: TilingContext is null!" << std::endl;
            return ge::GRAPH_FAILED;
        }

        FlipCustomTilingData tiling;
        auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());

        // All cores participate.
        context->SetBlockDim(CORE_NUM);

        // Input tensor and shape (guard the tensor pointer before dereference).
        const auto *inputTensor = context->GetInputTensor(0);
        if (inputTensor == nullptr) {
            return ge::GRAPH_FAILED;
        }
        const auto inputShape = inputTensor->GetOriginShape();
        const size_t dimNum = inputShape.GetDimNum();
        if (dimNum < 2) {
            return ge::GRAPH_FAILED;
        }

        // Fold leading dimensions into a batch count N; the last two dims are H x W.
        int32_t N = 1;
        if (dimNum == 4) {
            N = inputShape.GetDim(0);
        } else if (dimNum >= 3) {
            for (size_t i = 0; i < dimNum - 2; ++i) {
                N *= inputShape.GetDim(i);
            }
        }

        int32_t H = inputShape.GetDim(dimNum - 2);
        int32_t W = inputShape.GetDim(dimNum - 1);
        int32_t C = 1;  // channels; only distinct from 1 for rank >= 4 (NCHW-style)
        if (dimNum >= 4) {
            C = inputShape.GetDim(1);
        }

        // Optional "dim" attribute selects the flip axis; defaults to 2 (H).
        int64_t dim = 2;
        if (auto *attrs = context->GetAttrs()) {
            if (const int64_t *dimAttr = attrs->GetInt(0)) {
                dim = *dimAttr;
            }
        }

        // Resolve the element size *before* building any per-core bookkeeping,
        // so the unsupported-dtype early return allocates nothing.
        uint32_t typeSize = 0;
        switch (inputTensor->GetDataType()) {
            case ge::DT_FLOAT:
                typeSize = 4;
                break;
            case ge::DT_FLOAT16:
                typeSize = 2;
                break;
            case ge::DT_INT8:
                typeSize = 1;
                break;
            default:
                return ge::GRAPH_FAILED;
        }

        // Unified Buffer capacity of a single core.
        uint64_t ubSize = 0;
        ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubSize);

        // ======= Inter-core partitioning =======
        const uint64_t elemsPerBatch = static_cast<uint64_t>(C) * H * W;   // elements per batch
        const uint64_t totalElements = static_cast<uint64_t>(N) * elemsPerBatch;
        if (totalElements == 0) {
            return ge::GRAPH_FAILED;
        }

        // A "vector" is one contiguous row of W elements.
        const uint32_t totalVectors = static_cast<uint32_t>(N) * C * H;
        const uint32_t perBatchVectors = static_cast<uint32_t>(C) * H;
        const uint32_t perChannelVectors = static_cast<uint32_t>(H);
        if (totalVectors == 0) {
            return ge::GRAPH_FAILED;
        }

        const int32_t baseVectorsPerCore = totalVectors / CORE_NUM;
        const int32_t bigCoreNum = totalVectors % CORE_NUM;        // cores carrying one extra row
        const int32_t bigCoreVectors = baseVectorsPerCore + 1;
        const int32_t smallCoreVectors = baseVectorsPerCore;

        // RAII containers: every early return below is leak-free.
        std::vector<int32_t> coreVectorStart(CORE_NUM);
        std::vector<int32_t> coreVectorEnd(CORE_NUM);
        std::vector<int32_t> coreVectorCount(CORE_NUM);

        // Assign each core a contiguous [start, end] range of row indices.
        int32_t currentVector = 0;
        for (uint32_t coreId = 0; coreId < CORE_NUM; ++coreId) {
            coreVectorCount[coreId] =
                (static_cast<int32_t>(coreId) < bigCoreNum) ? bigCoreVectors : smallCoreVectors;
            coreVectorStart[coreId] = currentVector;
            if (coreVectorCount[coreId] > 0) {
                coreVectorEnd[coreId] = currentVector + coreVectorCount[coreId] - 1;
                currentVector += coreVectorCount[coreId];
            } else {
                coreVectorEnd[coreId] = currentVector - 1;  // empty core: end < start
            }
        }

        // ====== Intra-core tiling ======
        const int32_t computeSize = W;                 // elements per row
        int32_t tileVectorNum = 1;                     // rows processed per inner loop
        const int32_t tileComputeSize = computeSize;   // a full row per pass

        // Estimate the UB footprint of one tile: double-buffered input + output
        // plus a small work buffer, each rounded up to BLOCK_SIZE bytes.
        auto estimateUbUsage = [&](int32_t vecNum, int32_t vecLen) -> uint32_t {
            const uint32_t ioBytes =
                AlignUp(static_cast<uint32_t>(vecNum) * static_cast<uint32_t>(vecLen) * typeSize,
                        BLOCK_SIZE);
            const uint32_t workBytes = AlignUp(VEC_LEN * typeSize, BLOCK_SIZE);
            return BUFFER_NUM * (ioBytes + ioBytes) + workBytes;
        };

        // Double the per-loop row count while it fits the largest per-core
        // workload and stays within 95% of the UB budget.
        const int32_t maxVectorsPerCore = baseVectorsPerCore + (bigCoreNum > 0 ? 1 : 0);
        while (tileVectorNum * 2 <= maxVectorsPerCore &&
               estimateUbUsage(tileVectorNum * 2, tileComputeSize) <= ubSize * 95 / 100) {
            tileVectorNum *= 2;
        }

        // Per-core loop counts over rows and the (non-zero) tail tile size.
        std::vector<int32_t> coreLoopTimesVector(CORE_NUM);
        std::vector<int32_t> coreTailVector(CORE_NUM);
        const int32_t loopTimesCompute = (computeSize + tileComputeSize - 1) / tileComputeSize;
        int32_t tailCompute = computeSize % tileComputeSize;
        if (tailCompute == 0) {
            tailCompute = tileComputeSize;
        }

        for (uint32_t coreId = 0; coreId < CORE_NUM; ++coreId) {
            if (coreVectorCount[coreId] > 0) {
                coreLoopTimesVector[coreId] =
                    (coreVectorCount[coreId] + tileVectorNum - 1) / tileVectorNum;
                coreTailVector[coreId] = coreVectorCount[coreId] % tileVectorNum;
                if (coreTailVector[coreId] == 0) {
                    coreTailVector[coreId] = tileVectorNum;
                }
            } else {
                coreLoopTimesVector[coreId] = 0;
                coreTailVector[coreId] = 0;
            }
        }

        // Row length rounded up to a whole number of 32-byte blocks.
        const int32_t elemsPerBlock = static_cast<int32_t>(BLOCK_SIZE / typeSize);
        const int32_t alignedComputeSize =
            (computeSize + elemsPerBlock - 1) / elemsPerBlock * elemsPerBlock;

        // ====== Populate the tiling struct ======
        tiling.set_N(N);
        tiling.set_C(C);
        tiling.set_H(H);
        tiling.set_W(W);
        tiling.set_computeSize(computeSize);
        tiling.set_dim(dim);

        // Inter-core layout
        tiling.set_total_vectors(totalVectors);
        tiling.set_per_batch_vectors(perBatchVectors);
        tiling.set_per_channel_vectors(perChannelVectors);
        tiling.set_base_vectors_per_core(baseVectorsPerCore);
        tiling.set_big_core_num(bigCoreNum);
        tiling.set_core_vector_start(coreVectorStart.data());
        tiling.set_core_vector_end(coreVectorEnd.data());
        tiling.set_core_vector_count(coreVectorCount.data());

        // Intra-core layout
        tiling.set_tile_vector_num(tileVectorNum);
        tiling.set_tile_compute_size(tileComputeSize);
        tiling.set_loop_times_compute(loopTimesCompute);
        tiling.set_tail_compute(tailCompute);
        tiling.set_core_loop_times_vector(coreLoopTimesVector.data());
        tiling.set_core_tail_vector(coreTailVector.data());
        tiling.set_aligned_compute_size(alignedComputeSize);

        // Flip helper quantities
        tiling.set_total_elements(totalElements);
        tiling.set_elems_per_batch(elemsPerBatch);

        // No extra device workspace is required by this op.
        size_t *workspaceSizes = context->GetWorkspaceSizes(1);
        if (workspaceSizes == nullptr) {
            return ge::GRAPH_FAILED;
        }
        workspaceSizes[0] = 0;

        // Serialize the tiling struct into the framework-owned buffer.
        auto *rawTilingData = context->GetRawTilingData();
        if (rawTilingData == nullptr) {
            return ge::GRAPH_FAILED;
        }
        tiling.SaveToBuffer(rawTilingData->GetData(), rawTilingData->GetCapacity());
        rawTilingData->SetDataSize(tiling.GetDataSize());

        return ge::GRAPH_SUCCESS;
    }
}

namespace ge {
// Shape inference for FlipCustom: a flip rearranges elements but never changes
// the tensor's shape, so the output shape is a direct copy of the input shape.
static graphStatus InferShape(gert::InferShapeContext *context)
{
    if (context == nullptr) {
        return ge::GRAPH_FAILED;
    }

    const gert::Shape *inShape = context->GetInputShape(0);
    gert::Shape *outShape = context->GetOutputShape(0);
    if (inShape == nullptr || outShape == nullptr) {
        return ge::GRAPH_FAILED;
    }

    // Output shape == input shape.
    *outShape = *inShape;
    return ge::GRAPH_SUCCESS;
}

// Data-type inference for FlipCustom: the output dtype mirrors the input dtype.
static graphStatus InferDataType(gert::InferDataTypeContext *context)
{
    if (context == nullptr) {
        return ge::GRAPH_FAILED;
    }

    context->SetOutputDataType(0, context->GetInputDataType(0));
    return ge::GRAPH_SUCCESS;
}

} // namespace ge

namespace ops {
// Operator definition and registration for FlipCustom.
// Declares the op's I/O contract, its optional "dim" attribute, the shape/dtype
// inference callbacks, and the AI Core tiling function plus supported SoC targets.
class FlipCustom : public OpDef {
public:
    explicit FlipCustom(const char *name) : OpDef(name)
    {
        // Single required input "x"; ND format for both static and dynamic shapes.
        // NOTE(review): only FLOAT/FLOAT16 are registered here, while the tiling
        // function also handles DT_INT8 — confirm whether INT8 should be listed.
        this->Input("x")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Single required output "z" with the same dtype/format lists as the input.
        this->Output("z")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Optional attribute: the axis to flip along; defaults to 2.
        this->Attr("dim")
            .AttrType(OPTIONAL)
            .Int(2);

        this->SetInferShape(ge::InferShape).SetInferDataType(ge::InferDataType);
        // Bind the tiling function and enable the op on the listed SoC variants.
        this->AICore()
            .SetTiling(optiling::TilingFunc)
            .AddConfig("ascend910")
            .AddConfig("ascend310p")
            .AddConfig("ascend310b")
            .AddConfig("ascend910b");
    }
};
OP_ADD(FlipCustom);
} // namespace ops