#pragma once

#include <string>
#include <vector>

namespace infer {

/**
 * @brief Configuration options for inference.
 *
 * Aggregates device placement, parallelism, batching, and engine
 * compilation settings consumed by the inference runtime. All members
 * have usable defaults (single device 0, no tensor parallelism,
 * batch size 1, compilation disabled).
 */
struct InferConfig {
  std::vector<int> device_id{0};            // device id list; defaults to a single entry, device 0
  int tp_size = 1;                          // tensor parallel size; 1 means no tensor parallelism
  int batch_size = 1;                       // inference batch size
  std::vector<std::vector<int>> out_shape;  // output shape list — presumably one inner vector per model output; confirm with consumer
  bool enable_compile = false;              // whether to compile the engine before inference
  std::string compile_option = "-O2";       // option string passed to the compiler when enable_compile is true
  std::string engine_path;                  // path to the engine/model file; empty (default) means not set
};

} // namespace infer