﻿#include "emotion_StaticLib.h" 

// Platform-dependent path separator used below when composing model file paths.
#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif

/// @brief Runs emotion classification on a single image.
/// @param model An initialized PaddleClas classification model (CPU or GPU).
/// @param image Input image; passed by const value to match the declared interface.
/// @return The classification result rendered as a string via ClassifyResult::Str().
///         Note: on prediction failure the error is only logged and the (default)
///         result string is still returned — callers cannot distinguish failure here.
std::string emotion_Infer(fastdeploy::vision::classification::PaddleClasModel& model, const cv::Mat image) {
    // Predict() requires a mutable cv::Mat; cv::Mat's copy constructor is a
    // cheap shallow (reference-counted) copy, so this does not duplicate pixels.
    cv::Mat input = image;

    fastdeploy::vision::ClassifyResult result;
    if (!model.Predict(&input, &result)) {
        qDebug() << "Failed to predict.";
    }
    return result.Str();
}


/// @brief Constructs a PaddleClas emotion classification model configured for CPU inference.
/// @param model_dir Directory expected to contain "model.pdmodel",
///        "model.pdiparams" and "inference.yml".
/// @return The constructed model. Initialization failure is only logged, so
///         callers must check model.Initialized() before using the result.
fastdeploy::vision::classification::PaddleClasModel emotion_Cpu_init(const std::string& model_dir) {
    const auto model_file = model_dir + sep + "model.pdmodel";
    const auto params_file = model_dir + sep + "model.pdiparams";
    const auto config_file = model_dir + sep + "inference.yml";

    auto option = fastdeploy::RuntimeOption();
    option.UseCpu();
    auto model = fastdeploy::vision::classification::PaddleClasModel(
        model_file, params_file, config_file, option);
    if (!model.Initialized()) {
        // Fixed copy-paste error message: this is an init failure, not a predict failure.
        qDebug() << "Failed to initialize.";
    }
    return model;
}


/// @brief Constructs a PaddleClas emotion classification model configured for GPU inference.
/// @param model_dir Directory expected to contain "model.pdmodel",
///        "model.pdiparams" and "inference.yml".
/// @return The constructed model. Initialization failure is only logged, so
///         callers must check model.Initialized() before using the result.
fastdeploy::vision::classification::PaddleClasModel emotion_Gpu_init(const std::string& model_dir) {
    const auto model_file = model_dir + sep + "model.pdmodel";
    const auto params_file = model_dir + sep + "model.pdiparams";
    const auto config_file = model_dir + sep + "inference.yml";

    auto option = fastdeploy::RuntimeOption();
    option.UseGpu();
    auto model = fastdeploy::vision::classification::PaddleClasModel(
        model_file, params_file, config_file, option);
    if (!model.Initialized()) {
        // Fixed copy-paste error message: this is an init failure, not a predict failure.
        qDebug() << "Failed to initialize.";
    }
    return model;
}


