﻿#include "lah/facecheck.h"

const std::string
FaceAntiSpoofing_operating_Infer(seeta::FaceDetector* pFD,
                                             seeta::FaceLandmarker* pFLm, seeta::FaceAntiSpoofing* pFAS,cv::Mat& image) {
    // Runs video-based liveness (anti-spoofing) detection on one frame and
    // annotates `image` in place with a status label and a face rectangle.
    //
    // Params:  pFD  - face detector
    //          pFLm - landmark detector (pFLm->number() points)
    //          pFAS - anti-spoofing predictor (stateful across frames)
    //          image - frame, drawn on in place
    // Returns: "spoof" / "real" / "fuzzy" / "detecting" when exactly one face
    //          is detected; "more" otherwise (zero or multiple faces, which
    //          also resets the predictor's video state).
    seeta::cv::ImageData images = image;
    cv::Scalar color;
    color = CV_RGB(0, 255, 0);  // default label/box color (green)
    std::vector<SeetaPointF> points(pFLm->number());
    float clarity = 0.f;
    float reality = 0.f;
    std::string stateOfTheFace = "";
    auto faces = pFD->detect(images);

    if (faces.size == 1) {
        auto& face = faces.data[0].pos;
        pFLm->mark(images, face, points.data());

        auto status = pFAS->PredictVideo(images, face, points.data());
        switch (status)
        {
        case seeta::FaceAntiSpoofing::SPOOF:
            stateOfTheFace = "spoof";
            color = CV_RGB(255, 0, 0);
            break;
        case seeta::FaceAntiSpoofing::REAL:
            stateOfTheFace = "real";
            color = CV_RGB(255, 255, 255);
            break;
        case seeta::FaceAntiSpoofing::FUZZY:
            stateOfTheFace = "fuzzy";
            break;
        case seeta::FaceAntiSpoofing::DETECTING:
            stateOfTheFace = "detecting";
            break;
        }

        // Scores of the frame just analyzed; kept for debugging (see the
        // commented-out log line below).
        pFAS->GetPreFrameScore(&clarity, &reality);

        //std::cout << "Clarity = " << clarity << ", Reality = " << reality << std::endl;

        // BUG FIX: the original passed `1` in the color position (nearly
        // black Scalar(1)) and cv::LINE_4 in the thickness position, so the
        // status-dependent `color` was never used for the label. Draw the
        // label with `color`, thickness 1, 4-connected line type.
        cv::putText(image, stateOfTheFace, cv::Point(face.x, face.y - 10),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv::LINE_4);

        rectangle(image, cv::Rect(face.x, face.y, face.width, face.height), color, 1, 8, 0);


    }else
    {
        // Zero or multiple faces: outline every detection and restart the
        // anti-spoofing video sequence.
        for (int i = 0; i < faces.size; i++)
        {
            auto face = faces.data[i].pos;
            rectangle(image, cv::Rect(face.x, face.y, face.width, face.height), cv::Scalar(255, 255, 255), 1, 8, 0); // draw face detection box
        }
        pFAS->ResetVideo();

        stateOfTheFace = "more";
    }
    return stateOfTheFace;
}

//FAS INIT fas_first.csta fas_second.csta
seeta::FaceAntiSpoofing* getFaceAntiSpoofing() {
    // Builds a two-stage anti-spoofing engine from fas_first.csta and
    // fas_second.csta under <current working dir>/models/.
    // Caller owns the returned pointer.
    const std::string modelDir = (QDir::currentPath() + "/models/").toStdString();
    seeta::ModelSetting setting;
    setting.append(modelDir + "fas_first.csta");
    setting.append(modelDir + "fas_second.csta");
    return new seeta::FaceAntiSpoofing(setting);
}

//FR INIT face_recognizer.csta
seeta::FaceRecognizer* getFaceRecognizer(const seeta::ModelSetting& FR) {
    // Thin factory: heap-allocates a FaceRecognizer from the given model
    // setting. Caller owns the returned pointer.
    seeta::FaceRecognizer* recognizer = new seeta::FaceRecognizer(FR);
    return recognizer;
}

//提取特征一步到位（extract the face feature vector in one step）
std::shared_ptr<float> extract(
    seeta::FaceRecognizer* fr,
    const SeetaImageData& image,
    const std::vector<SeetaPointF>& points) {
    // Extracts the face feature vector for `image` at the given landmarks.
    // The buffer holds fr->GetExtractFeatureSize() floats; it is shared via
    // a shared_ptr carrying an array deleter so callers can pass it around
    // without tracking ownership.
    const int featureSize = fr->GetExtractFeatureSize();
    std::shared_ptr<float> features(new float[featureSize],
                                    std::default_delete<float[]>());
    fr->Extract(image, points.data(), features.get());
    return features;
}


//对比相似度
float compare(seeta::FaceRecognizer* fr,
    const std::shared_ptr<float>& feat1,
    const std::shared_ptr<float>& feat2) {
    // Similarity score between two feature vectors as reported by the
    // recognizer (higher means more alike).
    const float similarity = fr->CalculateSimilarity(feat1.get(), feat2.get());
    return similarity;
}


std::shared_ptr<float> FaceRecognizer_infer(seeta::FaceRecognizer* FR, seeta::FaceLandmarker* FLM5, seeta::FaceDetector* FD,cv::Mat image) {
    // Detects faces and returns the feature vector of the FIRST detection,
    // or nullptr when no face is found. (The original loop always returned
    // on its first iteration, so only face 0 was ever processed.)
    seeta::cv::ImageData seetaImage = image;
    SeetaFaceInfoArray detected = FD->detect(seetaImage);
    if (detected.size <= 0)
        return nullptr;

    std::vector<SeetaPointF> landmarks = FLM5->mark(seetaImage, detected.data[0].pos);
    return extract(FR, seetaImage, landmarks);
}

cv::Mat FaceScreenshot_infer( seeta::FaceDetector* FD, cv::Mat image) {
    // Crops the frame to the single detected face. When zero or multiple
    // faces are found, or the detected rectangle does not fit inside the
    // image bounds, the input is returned unchanged.
    seeta::cv::ImageData seetaImage = image;
    SeetaFaceInfoArray detected = FD->detect(seetaImage);
    if (detected.size == 1) {
        const auto& roi = detected.data[0].pos;
        const bool insideImage =
            roi.x >= 0 && roi.y >= 0 &&
            roi.width >= 0 && roi.height >= 0 &&
            roi.x + roi.width <= image.cols &&
            roi.y + roi.height <= image.rows;
        if (insideImage) {
            image = image(cv::Rect(roi.x, roi.y, roi.width, roi.height));
        }
    }
    return image;
}

//质量评估器初始化：亮度、清晰度、姿态、完整度、分辨率（quality assessor: brightness, clarity, pose, integrity, resolution）
seeta::QualityAssessor* getQualityAssessor() {
    // Creates a quality assessor configured with the five rules queried by
    // QualityAssessor_infer: brightness, clarity, pose, integrity,
    // resolution. Caller owns the returned pointer.
    auto* assessor = new seeta::QualityAssessor;

    const decltype(seeta::BRIGHTNESS) rules[] = {
        seeta::BRIGHTNESS,
        seeta::CLARITY,
        seeta::POSE,
        seeta::INTEGRITY,
        seeta::RESOLUTION,
    };
    for (const auto rule : rules)
        assessor->add_rule(rule);

    return assessor;
}

seeta::QualityResult* QualityAssessor_infer(seeta::QualityAssessor* QA, seeta::FaceLandmarker* FLM, seeta::FaceDetector* FD, cv::Mat& image) {
    // Assesses the quality of the FIRST detected face.
    //
    // Returns a heap-allocated array of 5 results in the order:
    //   [0] brightness, [1] clarity, [2] pose, [3] integrity, [4] resolution
    // or nullptr when no face is detected.
    // NOTE(review): the caller owns the returned array and must release it
    // with delete[]; a std::array/std::vector return would remove that leak
    // risk but would change the interface.
    seeta::cv::ImageData seetaImage = image;
    auto detected = FD->detect(seetaImage);
    if (detected.size <= 0)
        return nullptr;

    SeetaRect& face = detected.data[0].pos;
    SeetaPointF landmarks[5];
    FLM->mark(seetaImage, face, landmarks);
    QA->feed(seetaImage, face, landmarks, 5);

    const decltype(seeta::BRIGHTNESS) attrs[5] = {
        seeta::BRIGHTNESS, seeta::CLARITY, seeta::POSE,
        seeta::INTEGRITY, seeta::RESOLUTION,
    };
    seeta::QualityResult* results = new seeta::QualityResult[5];
    for (int i = 0; i < 5; ++i)
        results[i] = QA->query(attrs[i]);

    //cv::rectangle(image, cv::Rect(face.x, face.y, face.width, face.height), CV_RGB(128, 255, 128), 2);
    return results;
}

void faceRecAndDraw(seeta::FaceRecognizer* FR, seeta::FaceLandmarker* FLM5, seeta::FaceDetector* FD,cv::Mat &image,QMap<QVector<QString>, std::shared_ptr<float>> temp) {

    /***
    **** Recognize faces and draw, on the image passed by reference, a box
    **** around each detected face plus the matched person's info.
    ****
    **** Params: 1) face recognizer, 2) 5-point landmark detector,
    ****         3) face detector, 4) image reference,
    ****         5) QMap<QVector<face info>, face feature>
    ****            face info layout: [0] name, [1] sex, [2] nation
    ****
    **** Returns: nothing (annotations are drawn into `image`).
    ***/

    // Minimum similarity for a gallery entry to count as a match.
    const float kMatchThreshold = 0.62f;

    // Draws one white label line at (x, y) and advances the y cursor.
    const auto drawLabel = [&image](const std::string& text, int x, int& y) {
        cv::putText(image, text, cv::Point(x, y), cv::FONT_HERSHEY_COMPLEX,
                    0.5, cv::Scalar(255, 255, 255), 1, cv::LINE_4);
        y += 25;
    };

    seeta::cv::ImageData Image = image;
    SeetaFaceInfoArray faces = FD->detect(Image);
    for (int i = 0; i < faces.size; i++) {
        const auto& pos = faces.data[i].pos;
        std::vector<SeetaPointF> points1 = FLM5->mark(Image, pos);
        auto features = extract(FR, Image, points1);
        cv::rectangle(image, cv::Rect(pos.x, pos.y, pos.width, pos.height), CV_RGB(255, 255, 255), 1, 8, 0);

        QMapIterator<QVector<QString>, std::shared_ptr<float>> iterator(temp);
        while (iterator.hasNext()) {
            iterator.next();

            float score = compare(FR, features, iterator.value());
            if (score <= kMatchThreshold)
                continue;  // not this person, try the next gallery entry

            // PERF FIX: convert to pinyin only for the matched entry — the
            // original converted name/sex/nation of EVERY gallery entry
            // before the similarity check, wasting work on non-matches.
            auto name = ConvertChineseUnicodeToPyt(iterator.key()[0]);
            auto sex = ConvertChineseUnicodeToPyt(iterator.key()[1]);
            auto nation = ConvertChineseUnicodeToPyt(iterator.key()[2]);

            //qDebug() << name << ' ' << sex << ' ' << nation;

            // Labels go to the right of the face box, stacked vertically.
            const int x = pos.x + pos.width + 10;
            int y = pos.y + 15;
            drawLabel(name.toStdString(), x, y);
            drawLabel(sex.toStdString(), x, y);
            drawLabel(nation.toStdString(), x, y);
            drawLabel(QString::number(score, 'f', 2).toStdString(), x, y);
            break;  // stop at the first match for this face
        }
    }

}
