#include "modelloader.h"

/// Constructs the loader thread; all real configuration happens later in init().
ModelLoader::ModelLoader(QObject *parent) : QThread(parent) {}

/// Stores the configuration used by run(); must be called before start().
/// @param inference1    shared inference engine that will load and execute the model
/// @param run_with_cuda whether the ONNX network should be loaded with CUDA enabled
/// @param path          filesystem path to the ONNX model file
/// @param iou_thres     NMS IoU threshold applied to the model after loading
/// @param conf_thres    score/confidence threshold applied to the model after loading
/// @param input_shape   network input size (width x height)
void ModelLoader::init(std::shared_ptr<Inference> inference1, bool run_with_cuda, const QString &path, float iou_thres, float conf_thres, const cv::Size &input_shape)
{
    // Plain member captures; no work is performed until the thread runs.
    this->inference = inference1;
    this->RunWithCuda = run_with_cuda;
    this->path = path;
    this->input_shape = input_shape;
    this->iou_thres = iou_thres;
    this->conf_thres = conf_thres;
}

/// Thread entry point: loads the ONNX model configured via init(), applies the
/// detection thresholds, performs one warm-up inference, and reports progress
/// through the workstarted()/workfinished() signals.
///
/// NOTE(review): executes in the worker thread — init() must have completed
/// before start() is called, and members must not be mutated while running.
void ModelLoader::run()
{
    emit workstarted();

    // Stream the QString directly: QDebug handles QString natively, which
    // avoids a needless QString -> std::string conversion (the std::string
    // overload is also not available on all Qt versions).
    qDebug() << "onnx path : " << path
             << " input_shape width: " << this->input_shape.width
             << " input_shape height: " << this->input_shape.height;

    inference->loadOnnxNetwork(this->path.toStdString(), this->input_shape, RunWithCuda);
    inference->modelNMSThreshold = this->iou_thres;
    inference->modelScoreThreshold = this->conf_thres;

    // Warm-up pass on a zero image so the first real prediction is not slowed
    // down by lazy backend initialization (kernel compilation, allocations).
    cv::Mat zero = cv::Mat::zeros(this->input_shape, CV_8UC3);
    inference->doPredict(zero);

    // Status code 1 = success; presumably consumed by the UI — TODO confirm.
    emit workfinished(1, this->path, "ok");
}
