// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <chrono>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <numeric>
#include <ostream>
#include <string>
#include <vector>

#include "opencv2/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"

#include "paddle_api.h"
#include "paddle_inference_api.h"

#include <include/postprocess_op.h>
#include <include/preprocess_op.h>
#include <include/utility.h>

using namespace paddle_infer;

namespace PaddleVideo
{

    class VideoRecognizer
    {
    public:
        explicit VideoRecognizer(const std::string &model_dir, const std::string &inference_model_name, const bool &use_gpu, const int &num_seg,
                                 const int &rec_batch_num, const int &gpu_id,
                                 const int &gpu_mem, const int &cpu_math_library_num_threads,
                                 const bool &use_mkldnn, const std::string &label_path,
                                 const bool &use_tensorrt, const std::string &precision, const std::vector<float> &_mean = {0.406, 0.456, 0.485},
                                 const std::vector<float> &_scale = {0.225, 0.224, 0.229})
        {
            this->inference_model_name = inference_model_name;
            this->use_gpu_ = use_gpu;
            this->num_seg = num_seg;
            this->rec_batch_num = rec_batch_num;
            this->gpu_id_ = gpu_id;
            this->gpu_mem_ = gpu_mem;
            this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
            this->use_mkldnn_ = use_mkldnn;
            this->use_tensorrt_ = use_tensorrt;
            this->precision_ = precision;
            this->mean_ = _mean;
            this->scale_ = _scale;
            this->label_list_ = Utility::ReadDict(label_path);
            LoadModel(model_dir);
        }

        // Load Paddle inference model
        void LoadModel(const std::string &model_dir);

        void Run(const std::vector<string> &frames_batch_path, const std::vector<std::vector<cv::Mat> > &frames_batch, std::vector<double> *times);

    private:
        std::string inference_model_name;
        std::shared_ptr<Predictor> predictor_;

        bool use_gpu_ = false;
        int gpu_id_ = 0;

        int rec_batch_num = 1;
        int gpu_mem_ = 4000;
        int cpu_math_library_num_threads_ = 4;
        bool use_mkldnn_ = false;
        int num_seg = 8;
        std::vector<std::string> label_list_;
        std::vector<float> mean_ = {0.406, 0.456, 0.485};
        std::vector<float> scale_ = {0.225, 0.224, 0.229};
        bool is_scale_ = true;
        bool use_tensorrt_ = false;
        std::string precision_ = "fp32";

        // Instantiate pre-process operation object(s)
        Scale scale_op_;

        CenterCrop centercrop_op_;
        TenCrop tencrop_op_;

        Normalize normalize_op_;
        Permute permute_op_;

        // Instantiate post-process operation object(s)
        Softmax softmax_op_;

    }; // class VideoRecognizer

} // namespace PaddleVideo
