//
// Created by syshen on 2021/6/28.
//


#include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <vector>

#include "OCR.h"

// Build the OCR pipeline: text detector + text recognizer + angle classifier.
// det_method / recog_method select concrete implementations from the registries;
// the third registry argument (false) is the TensorRT-serialization flag.
OCR::OCR(int device_id, std::string det_method, std::string recog_method){
    detector = TextDetRegistry::CreateDetection(det_method, device_id, false);
    recognizer = CrnnRegistry::CreateRecognizer(recog_method, device_id, false);
    // make_shared: single allocation, exception-safe (no raw `new`).
    // NOTE(review): the classifier hardcodes device 0 and `true`, unlike the
    // detector/recognizer which use `device_id` and `false` — looks intentional
    // but should be confirmed; kept as-is to preserve behavior.
    classifier = std::make_shared<AgnleNet>(0, true);
}

// Load model weights for all three pipeline stages. The three init calls are
// independent of each other; each component receives only its own model path.
void OCR::init(std::string det_model, std::string cls_model, std::string recog_model){
    detector->init(det_model);      // text-region detection model
    classifier->init(cls_model);    // text-angle classification model
    recognizer->init(recog_model);  // text recognition (CRNN) model
}

// Perspective-crop a detected quadrilateral out of `img` into an axis-aligned
// patch. `points` holds 4 corners ordered top-left, top-right, bottom-right,
// bottom-left (implied by the width/height pairings below). Very tall results
// (height/width >= 5) are rotated to horizontal for the recognizer.
cv::Mat OCR::get_rotate_crop_image(const cv::Mat &img, std::vector<cv::Point2f> &points){
    // Euclidean distance between two quad corners.
    auto distance = [](const cv::Point2f &p1, const cv::Point2f &p2) -> float {
        float diff_x = p1.x - p2.x;
        float diff_y = p1.y - p2.y;
        return sqrtf(diff_x * diff_x + diff_y * diff_y);
    };
    // Target size: the longer of the two opposite edges in each direction.
    int img_crop_width = static_cast<int>(std::max(distance(points[0], points[1]), distance(points[2], points[3])));
    int img_crop_height = static_cast<int>(std::max(distance(points[0], points[3]), distance(points[1], points[2])));

    cv::Point2f pts_std[] = {
            cv::Point2f(0.0f, 0.0f),
            cv::Point2f(static_cast<float>(img_crop_width), 0.0f),
            cv::Point2f(static_cast<float>(img_crop_width), static_cast<float>(img_crop_height)),
            cv::Point2f(0.0f, static_cast<float>(img_crop_height)) };
    cv::Point2f src_points[] = {
            points[0], points[1], points[2], points[3]
    };
    cv::Mat M = cv::getPerspectiveTransform(src_points, pts_std), dst_img;
    // BUGFIX: warpPerspective's signature is (src, dst, M, dsize, flags,
    // borderMode). The original passed BORDER_REPLICATE as `flags` and
    // INTER_CUBIC as `borderMode`, which silently meant INTER_LINEAR
    // interpolation with BORDER_REFLECT. Pass them in the correct slots.
    cv::warpPerspective(
            img, dst_img,
            M, cv::Size(img_crop_width, img_crop_height),
            cv::INTER_CUBIC,
            cv::BORDER_REPLICATE);
    // Rotate near-vertical text strips 90° so the recognizer sees them
    // horizontally (transpose + vertical flip == rotate CCW).
    if ((float(dst_img.rows) / float(dst_img.cols)) >= 5){
        cv::transpose(dst_img, dst_img);
        cv::flip(dst_img, dst_img, 0);
    }

    return dst_img;
}

// Run the full pipeline on one image: detect text quads, rectify each crop,
// fix upside-down crops via the angle classifier, then recognize the text.
// Returns one `data` entry (text lines + source quad) per detection.
std::vector<data> OCR::recognize(const cv::Mat &img){
    std::vector<data> results;
    std::vector<std::vector<cv::Point2f>> dets = detector->extract(img);
    results.reserve(dets.size());
    // Iterate by reference: avoids copying each 4-point quad per iteration
    // (get_rotate_crop_image also takes a non-const lvalue reference).
    for (auto &det : dets) {
        cv::Mat text_im = get_rotate_crop_image(img, det);
        std::vector<float> feats = classifier->extract(text_im);
        // feats appears to be {score_0deg, score_180deg} — rotate only when
        // the classifier is confident the crop is upside-down. The size guard
        // avoids UB if the classifier ever returns fewer scores.
        if (feats.size() >= 2 && feats[1] > feats[0] && feats[1] > 0.9f) {
            cv::rotate(text_im, text_im, cv::ROTATE_180);
        }
        data dd;
        dd.text = recognizer->extract(text_im);
        dd.points = det;
        results.push_back(std::move(dd));
    }

    return results;
}

// Destructor. The explicit reset() calls in the original were redundant:
// detector/recognizer/classifier are shared_ptr members, and their own
// destructors release the managed objects automatically (Rule of Zero).
OCR::~OCR() = default;
