
#include "mat_mul_sub_tiling.h"
#include "register/op_def_registry.h"
#include "tiling/platform/platform_ascendc.h"
#include "tiling/tiling_api.h"
using namespace matmul_tiling;

namespace optiling {
// Tiling entry point for the MatMulSub custom op.
//
// Computes a multi-core matmul tiling for C = A(M,K) x B(K,N) (bias disabled),
// chooses the kernel tiling key from the rank of the third input (1-D x3 is
// broadcast across rows by the kernel), and serializes the tiling data plus
// the required workspace size into the context.
//
// Returns ge::GRAPH_SUCCESS on success, ge::GRAPH_FAILED when a required
// context pointer is missing or the tiling library cannot produce a tiling.
static ge::graphStatus TilingFunc(gert::TilingContext* context)
{
    // Hardware platform handle (core counts, library workspace size, ...).
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());

    // Fetch runtime shapes/dtype; fail fast instead of dereferencing null.
    const gert::StorageShape* x1Storage = context->GetInputShape(0);
    const gert::StorageShape* x2Storage = context->GetInputShape(1);
    const gert::StorageShape* x3Storage = context->GetInputShape(2);
    const auto* x1Desc = context->GetInputDesc(0);
    if (x1Storage == nullptr || x2Storage == nullptr || x3Storage == nullptr || x1Desc == nullptr) {
        return ge::GRAPH_FAILED;
    }
    const auto x1Shape = x1Storage->GetStorageShape();
    const auto x2Shape = x2Storage->GetStorageShape();
    const auto x3Shape = x3Storage->GetStorageShape();
    const ge::DataType dt = x1Desc->GetDataType();  // dtype of input A drives both paths

    // GEMM dimensions. GetDim() returns int64_t; the previous uint16_t locals
    // silently truncated any dimension >= 65536.
    const int64_t M = x1Shape.GetDim(0);
    const int64_t K = x1Shape.GetDim(1);
    const int64_t N = x2Shape.GetDim(1);

    // Choose the number of cube cores: narrow N stays single-core, otherwise
    // split M into per-core chunks of 8 rows (fp32) or 16 rows (fp16).
    int32_t blockDim = 1;
    if (N > 64) {
        blockDim = (dt == ge::DT_FLOAT) ? static_cast<int32_t>((M + 7) / 8)
                                        : static_cast<int32_t>((M + 15) / 16);
    }
    // Cap at the physical cube (AIC) core count. Query the platform instead of
    // hard-coding 20 so other SoC variants stay correct; fall back to the
    // original constant if the query yields nothing usable.
    int32_t aicCoreNum = static_cast<int32_t>(ascendcPlatform.GetCoreNumAic());
    if (aicCoreNum <= 0) {
        aicCoreNum = 20;  // ascend910b cube-core count (original hard-coded cap)
    }
    if (blockDim > aicCoreNum) {
        blockDim = aicCoreNum;
    }

    MultiCoreMatmulTiling cubeTiling(ascendcPlatform);
    cubeTiling.SetDim(blockDim);  // number of cores participating in the matmul

    // A, B and C all live in GM with ND format; element type follows input A.
    const auto mmDtype = (dt == ge::DT_FLOAT) ? matmul_tiling::DataType::DT_FLOAT
                                              : matmul_tiling::DataType::DT_FLOAT16;
    cubeTiling.SetAType(TPosition::GM, CubeFormat::ND, mmDtype);
    cubeTiling.SetBType(TPosition::GM, CubeFormat::ND, mmDtype);
    cubeTiling.SetCType(TPosition::GM, CubeFormat::ND, mmDtype);

    cubeTiling.SetShape(static_cast<int32_t>(M), static_cast<int32_t>(N), static_cast<int32_t>(K));
    cubeTiling.SetOrgShape(static_cast<int32_t>(M), static_cast<int32_t>(N), static_cast<int32_t>(K));
    cubeTiling.SetBias(false);
    cubeTiling.SetBufferSpace(-1, -1, -1);  // let the library size L1/L0C/UB usage

    const size_t x3DimNum = x3Shape.GetDimNum();
    int8_t broadcast = 0;  // 1 => kernel broadcasts 1-D x3 across rows
    if ((x3DimNum != 1) && (N % 256 == 0) && (N > 1024)) {
        // Large, 256-aligned N with a full-rank x3: pin baseN to 128 and let
        // the library pick the rest; tiling key 1 selects this kernel branch.
        broadcast = 0;
        cubeTiling.SetFixSplit(-1, 128, -1);
        cubeTiling.SetSingleShape(-1, -1, -1);
        context->SetTilingKey(1);
    } else if (x3DimNum == 1) {
        // x3 is a 1-D vector: broadcast path, fully auto split.
        broadcast = 1;
        cubeTiling.SetFixSplit(-1, -1, -1);
        context->SetTilingKey(2);
    } else {
        // Full-rank x3 with small / unaligned N: elementwise path, auto split.
        broadcast = 0;
        cubeTiling.SetFixSplit(-1, -1, -1);
        context->SetTilingKey(2);
    }

    // Produce the tiling parameters; -1 signals the library found no solution.
    MatMulSubTilingData tiling;
    if (cubeTiling.GetTiling(tiling.cubeTilingData) == -1) {
        return ge::GRAPH_FAILED;
    }
    tiling.set_broadcast(broadcast);

    context->SetBlockDim(blockDim);

    auto* rawTilingData = context->GetRawTilingData();
    if (rawTilingData == nullptr) {
        return ge::GRAPH_FAILED;
    }
    tiling.SaveToBuffer(rawTilingData->GetData(), rawTilingData->GetCapacity());
    rawTilingData->SetDataSize(tiling.GetDataSize());

    // No user workspace needed; only the system (library API) workspace.
    size_t* currentWorkspace = context->GetWorkspaceSizes(1);
    if (currentWorkspace == nullptr) {
        return ge::GRAPH_FAILED;
    }
    currentWorkspace[0] = static_cast<size_t>(ascendcPlatform.GetLibApiWorkSpaceSize());

    return ge::GRAPH_SUCCESS;
}
}

namespace ops {
class MatMulSub : public OpDef {
public:
    explicit MatMulSub(const char* name) : OpDef(name)
    {
        this->Input("x1")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("x2")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND});
        this->Input("x3")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND});
        this->Output("y")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND});

        // this->SetInferShape(ge::InferShape);

        this->AICore()
            .SetTiling(optiling::TilingFunc);
        this->AICore().AddConfig("ascend910b");

    }
};

OP_ADD(MatMulSub);
}
