#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <map>
#include <vector>

#include "ge_api.h"
#include "onnx_parser.h"


// Read the ONNX model file in binary mode.
FILE *pFile = fopen("model.onnx", "rb");
if (pFile == NULL)
{
    fputs("File error", stderr);
    exit(1);
}

// Determine the file size by seeking to the end.
if (fseek(pFile, 0, SEEK_END) != 0)
{
    fputs("Seek error", stderr);
    fclose(pFile);
    exit(1);
}
long lSize = ftell(pFile);
if (lSize < 0)  // BUG FIX: ftell reports failure as -1; was used unchecked as a size
{
    fputs("Tell error", stderr);
    fclose(pFile);
    exit(1);
}
rewind(pFile);

// Allocate a buffer large enough to hold the whole file.
char *buffer = (char *)malloc((size_t)lSize);
if (buffer == NULL)
{
    fputs("Memory error", stderr);
    fclose(pFile);  // BUG FIX: file handle leaked on this error path
    exit(2);
}

// Copy the binary file contents into the memory buffer.
size_t result = fread(buffer, 1, (size_t)lSize, pFile);
fclose(pFile);  // BUG FIX: file was never closed; not needed once read into memory
if (result != (size_t)lSize)  // BUG FIX: signed/unsigned comparison warning
{
    fputs("Reading error", stderr);
    free(buffer);
    exit(3);
}

//将内存buffer里的数据解析为GE计算图对象
std::map<ge::AscendString, ge::AscendString> parser_params= {
            {ge::AscendString(ge::ir_option::INPUT_FP16_NODES), ge::AscendString("input1;input2")},
            {ge::AscendString(ge::ir_option::OUTPUT), ge::AscendString("newIssue")}};
ge::Graph compute_graph;
auto onnxStatus = ge::aclgrphParseONNXFromMem(buffer, result, parser_params, compute_graph);


//计算图编译和运行
std::map<AscendString, AscendString>config = {{"ge.exec.deviceId", "0"},  //可以通过config配置传入ge运行的初始化信息，配置参数ge.exec.deviceId和ge.graphRunMode，
                                              {"ge.graphRunMode", "1"}};  //分别用于指定GE实例运行设备，图执行模式（在线推理请配置为0，训练请配置为1）
                                                                         
Status ret = ge::GEInitialize(config);  //初始化
std::map <AscendString, AscendString> options;
ge::Session *session = new Session(options); //创建运行实例
if(session == nullptr) {
  std::cout << "Create session failed." << std::endl;
  ge::GEFinalize();  //释放资源
  return FAILED;
}
uint32_t graph_id = 0;
Status ret = session->AddGraph(graph_id, compute_graph); //运行实例添加计算图
if(ret != SUCCESS) {
  ge::GEFinalize(); //释放资源
  delete session;
  return FAILED;
}
std::vector<ge::Tensor> input; //定义输入tensor
std::vector<ge::Tensor> output; //定义输出tensor
ret = session->RunGraph(graph_id, input, output); //执行计算图推理，结果保存在输出tensor
if(ret != SUCCESS) {
  ge::GEFinalize(); //释放资源
  delete session;
  return FAILED;
}