
#include "addcmul_tiling.h"
#include "register/op_def_registry.h"
#include "tiling/platform/platform_ascendc.h"
#include <algorithm>

namespace optiling {
const uint32_t BLOCK_SIZE = 32;
static ge::graphStatus TilingFunc(gert::TilingContext* context) {
    TilingData tiling;
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    uint64_t ub_size; 
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ub_size);
    // 表示计算用到的核
    auto aivNum = ascendcPlatform.GetCoreNum();

    uint32_t inputSize = context->GetInputShape(0)->GetStorageShape().GetShapeSize();
    uint32_t x1Size = context->GetInputShape(1)->GetStorageShape().GetShapeSize();
    uint32_t x2Size = context->GetInputShape(2)->GetStorageShape().GetShapeSize();
    uint32_t ySize = context->GetOutputShape(0)->GetStorageShape().GetShapeSize();

    uint32_t y_ndarray[20], y_sumndarray[20];
    uint32_t y_dimensional;

    auto shape_y = context->GetOutputShape(0)->GetOriginShape();
    y_dimensional = shape_y.GetDimNum();
    tiling.set_y_dimensional(y_dimensional);
    for (int i = 0; i < y_dimensional; i++)
    {
        if (i < y_dimensional)
            y_ndarray[y_dimensional - i - 1] = shape_y.GetDim(i);
        else
            y_ndarray[i] = 1;
    }
    tiling.set_y_ndarray(y_ndarray);

    y_sumndarray[0] = 1;
    for (int i = 1; i <= y_dimensional; i++)
    {
        y_sumndarray[i] = y_sumndarray[i - 1] * y_ndarray[i - 1];
    }
    tiling.set_y_sumndarray(y_sumndarray);

    for(uint32_t i = 0; i <= y_dimensional; i++) {
        printf("y_sumndarray[%u]: %u\n", i,  y_sumndarray[i]);
    }
    
    bool input_broad = false;
    if(inputSize != ySize) {
        // 对inputSize进行传播
        // 存储广播后shape（逆序）, 以及xy的维度
        uint32_t x_ndarray[20];
        uint32_t x_dimensional;

        // 获取原始shape
        auto shape_x = context->GetInputTensor(0)->GetOriginShape();

        x_dimensional = shape_x.GetDimNum();

        // 将维度较小的张量的高位填充为1, 如原shape = [2, 4], 则填充后为 [4, 2, 1], 逆序的
        for (int i = 0; i < y_dimensional; i++)
        {
            if (i < x_dimensional)
                x_ndarray[x_dimensional - i - 1] = shape_x.GetDim(i);
            else
                x_ndarray[i] = 1;
        }
        tiling.set_input_ndarray(x_ndarray);

        // 每个张量的累积形状数组，如 y_ndarray 为 [4, 2, 2]，则 y_sumndarray 为 [1, 4, 8, 16]
        uint32_t x_sumndarray[20];
        x_sumndarray[0] = 1;
        for (int i = 1; i <= y_dimensional; i++)
        {
            x_sumndarray[i] = x_sumndarray[i - 1] * x_ndarray[i - 1];
        }
        tiling.set_input_sumndarray(x_sumndarray);
        input_broad = true;
        
    } 
    tiling.set_input_broad(input_broad);


    bool x1_broad = false;
    if(x1Size != ySize) {
        // 对inputSize进行传播
        // 存储广播后shape（逆序）, 以及xy的维度
        uint32_t x_ndarray[20];
        uint32_t x_dimensional;

        // 获取原始shape
        auto shape_x = context->GetInputTensor(1)->GetOriginShape();

        x_dimensional = shape_x.GetDimNum();

        // 将维度较小的张量的高位填充为1, 如原shape = [2, 4], 则填充后为 [4, 2, 1], 逆序的
        for (int i = 0; i < y_dimensional; i++)
        {
            if (i < x_dimensional)
                x_ndarray[x_dimensional - i - 1] = shape_x.GetDim(i);
            else
                x_ndarray[i] = 1;
        }
        tiling.set_x1_ndarray(x_ndarray);

        // 每个张量的累积形状数组，如 y_ndarray 为 [4, 2, 2]，则 y_sumndarray 为 [1, 4, 8, 16]
        uint32_t x_sumndarray[20];
        x_sumndarray[0] = 1;
        for (int i = 1; i <= y_dimensional; i++)
        {
            x_sumndarray[i] = x_sumndarray[i - 1] * x_ndarray[i - 1];
        }
        tiling.set_x1_sumndarray(x_sumndarray);
        x1_broad = true;
    }
    tiling.set_x1_broad(x1_broad);


    bool x2_broad = false;
    if(x2Size != ySize) {
        // 对inputSize进行传播
        // 存储广播后shape（逆序）, 以及xy的维度
        uint32_t x_ndarray[20];
        uint32_t x_dimensional;

        // 获取原始shape
        auto shape_x = context->GetInputTensor(2)->GetOriginShape();

        x_dimensional = shape_x.GetDimNum();

        // 将维度较小的张量的高位填充为1, 如原shape = [2, 4], 则填充后为 [4, 2, 1], 逆序的
        for (int i = 0; i < y_dimensional; i++)
        {
            if (i < x_dimensional)
                x_ndarray[x_dimensional - i - 1] = shape_x.GetDim(i);
            else
                x_ndarray[i] = 1;
        }
        tiling.set_x2_ndarray(x_ndarray);

        for(uint32_t i = 0; i < y_dimensional; i++) {
            printf("x2_ndarray[%u]: %u\n", i,  x_ndarray[i]);
        }

        // 每个张量的累积形状数组，如 y_ndarray 为 [4, 2, 2]，则 y_sumndarray 为 [1, 4, 8, 16]
        uint32_t x_sumndarray[20];
        x_sumndarray[0] = 1;
        for (int i = 1; i <= y_dimensional; i++)
        {
            x_sumndarray[i] = x_sumndarray[i - 1] * x_ndarray[i - 1];
        }

        for(uint32_t i = 0; i <= y_dimensional; i++) {
            printf("x_sumndarray[%u]: %u\n", i,  x_sumndarray[i]);
        }
        tiling.set_x2_sumndarray(x_sumndarray);
        x2_broad = true;
    }
    tiling.set_x2_broad(x2_broad);
    
    uint32_t total_length = ySize;

    // uint32_t total_length = 0, min_length = context->GetInputTensor(0)->GetShapeSize();
    // for (int i = 0; i < 3; ++i) {
    //     total_length = std::max<uint32_t>(total_length, context->GetInputTensor(i)->GetShapeSize());
    //     min_length = std::min<uint32_t>(min_length, context->GetInputTensor(i)->GetShapeSize());
    // }
    uint32_t input_data_length = context->GetInputTensor(0)->GetShapeSize();
    uint32_t x1_length = context->GetInputTensor(1)->GetShapeSize();
    uint32_t x2_length = context->GetInputTensor(2)->GetShapeSize();
    auto dt = context->GetInputTensor(0)->GetDataType();
    // NUM相当于计算使用到的临时存储空间，分为输入输出和TBuf
    int32_t NUM = 4;
    uint32_t sizeofdatatype;
    if (dt == ge::DT_INT8) {
        sizeofdatatype = 1;
        NUM = 6;
    }
    else if (dt == ge::DT_FLOAT16 || dt == ge::DT_BF16) {
        sizeofdatatype = 2;
    }
    else if(dt == ge::DT_INT32) {
        sizeofdatatype = 4;
        NUM = 6;
    } else {
        sizeofdatatype = 4;
    }

    uint32_t ALIGN_NUM = BLOCK_SIZE / sizeofdatatype;
    // tiling_size 表示一个tiling包含几个32B的block
    uint32_t tiling_size = ((ub_size) / BLOCK_SIZE / 2) / NUM;
    tiling_size = tiling_size <= 8 ? tiling_size : tiling_size / 8 * 8;

    // block_size
    uint32_t block_size = tiling_size * ALIGN_NUM;
    // if (total_length != min_length) {
    //     block_size = std::min(block_size, min_length);
    //     while (min_length % block_size || min_length % ALIGN_NUM) {
    //         block_size -= 1;
    //     }
    // }

    aivNum = (aivNum < total_length / block_size) ? aivNum : (total_length / block_size);
    aivNum = aivNum >= 1 ? aivNum : 1;
    // 310b和310p只用了一个核，此处这样设计方便测试
    aivNum = 1;

    // core_size为32B向下取整
    uint32_t core_size = (total_length / aivNum) / (ALIGN_NUM * 8) * (ALIGN_NUM * 8);
    uint32_t core_remain = total_length - aivNum * core_size;

    tiling.set_ALIGN_NUM(ALIGN_NUM);
    tiling.set_block_size(block_size);
    tiling.set_aivNum(aivNum);
    tiling.set_core_size(core_size);
    tiling.set_core_remain(core_remain);
    tiling.set_total_length(total_length);
    tiling.set_input_data_length(input_data_length);
    tiling.set_x1_length(x1_length);
    tiling.set_x2_length(x2_length);

    printf("core_remain: %u\n", core_remain);
    printf("total_length: %u\n", total_length);
    printf("tile_length: %u\n", block_size);
    printf("total_length: %u\n", total_length);
    printf("input_broad: %u\n", input_broad);
    printf("x1_broad: %u\n", x1_broad);
    printf("x2_broad: %u\n", x2_broad);
    printf("y_dimensional: %u\n", y_dimensional);
    


    if(input_broad || x1_broad || x2_broad) {
        printf("====需要BroadCast====\n");
    } else {
        printf("====不需要BroadCast====\n");
    }

    context->SetBlockDim(aivNum);

    tiling.SaveToBuffer(context->GetRawTilingData()->GetData(), context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
    size_t *currentWorkspace = context->GetWorkspaceSizes(1);
    currentWorkspace[0] = 0;
    return ge::GRAPH_SUCCESS;
}
}


namespace ge {
// Infers the output shape of Addcmul as the NumPy-style broadcast of the
// three tensor inputs (input_data, x1, x2): shapes are aligned at the
// trailing dimension and a dim of size 1 broadcasts against any size.
// The previous implementation copied input 0's shape verbatim, which is
// wrong whenever x1 or x2 is the larger tensor — the tiling function
// (optiling::TilingFunc) explicitly supports that case.
static ge::graphStatus InferShape(gert::InferShapeContext* context)
{
    const gert::Shape* shapes[3] = {
        context->GetInputShape(0),
        context->GetInputShape(1),
        context->GetInputShape(2)
    };
    gert::Shape* y_shape = context->GetOutputShape(0);
    if (y_shape == nullptr) {
        return GRAPH_FAILED;
    }

    // Output rank = max rank over the inputs.
    size_t out_dims = 0;
    for (const gert::Shape* s : shapes) {
        if (s == nullptr) {
            return GRAPH_FAILED;
        }
        out_dims = std::max(out_dims, s->GetDimNum());
    }

    y_shape->SetDimNum(out_dims);
    for (size_t i = 0; i < out_dims; ++i) {
        int64_t dim = 1;
        for (const gert::Shape* s : shapes) {
            const size_t rank = s->GetDimNum();
            if (i + rank < out_dims) {
                continue;  // missing leading dim: implicit size 1
            }
            const int64_t d = s->GetDim(i - (out_dims - rank));
            if (d != 1) {
                if (dim != 1 && dim != d) {
                    return GRAPH_FAILED;  // shapes are not broadcastable
                }
                dim = d;
            }
        }
        y_shape->SetDim(i, dim);
    }
    return GRAPH_SUCCESS;
}
}


namespace ops {
class Addcmul : public OpDef {
public:
    explicit Addcmul(const char* name) : OpDef(name)
    {
        this->Input("input_data")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16, ge::DT_INT32, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("x1")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16, ge::DT_INT32, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("x2")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16, ge::DT_INT32, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("value")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16, ge::DT_INT32, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        this->Output("y")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16, ge::DT_INT32, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});

        this->SetInferShape(ge::InferShape);

        this->AICore()
            .SetTiling(optiling::TilingFunc);
        this->AICore().AddConfig("ascend910");
        this->AICore().AddConfig("ascend310b");
    }
};

OP_ADD(Addcmul);
}
