#pragma once
#include <numeric>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <paddle_inference_api.h>
// NOTE(review): using-directives at header scope leak into every includer;
// kept for backward compatibility with existing .cpp files, but new code
// should prefer fully qualified names.
using namespace cv;
using namespace std;

class Paddle {
private:
	paddle_infer::Config config;

public:

	bool loadModel(string& model_dir, string& model_file, string& params_file, int threads);

	void softmax(const vector<float>& input, vector<float>& result);

	void preprocess(Mat& src, Mat& dst, float meanValue, float stdValue);

	void gpuInference(Mat& srcImage, int srcWidth, int srcHeight, int matType, float meanValue, float stdValue, int& labelIndex, double& probability);

};