﻿#include "alltools.h"

// --- static member definitions ---
// Process-wide camera handle shared by every consumer of getCap().
std::shared_ptr<cv::VideoCapture> allTools::camera = std::make_shared<cv::VideoCapture>();

// Lazily-created Python inference object for the hand model; created on first
// getHandPinfer() call, released by destroyHandPinfer().
PyObject* allTools::pInfer = nullptr;

// PP-Matting portrait-matting models, one per execution backend (GPU / CPU),
// both loaded from the same model directory.
fastdeploy::vision::matting::PPMatting allTools::GPU_LJH = GpuInit((QDir::currentPath() + "/models/ppmatting").toStdString());
fastdeploy::vision::matting::PPMatting allTools::CPU_LJH = CpuInit((QDir::currentPath() + "/models/ppmatting").toStdString());

// ID-card OCR pipeline: detection model, recognition model, character
// dictionary — CPU backend.
IdCardOcr allTools::ocr((QDir::currentPath() + "/models/ch_ppocr_mobile_v2.0_det_prune_infer").toStdString(),
                        (QDir::currentPath() + "/models/ch_PP-OCRv3_rec_infer").toStdString(),
                        (QDir::currentPath() + "/models/ppocr_keys_v1.txt").toStdString(),
                        "CPU");

// Emotion-classification (ResNet50) models, one per backend.
// FIX: the initializers were swapped — GPU_LAH was built with
// emotion_Cpu_init and CPU_LAH with emotion_Gpu_init, so every "GPU"
// request actually ran on CPU and vice versa (compare the correct
// GPU_LJH/CPU_LJH pairing above). Each member now uses its matching init.
fastdeploy::vision::classification::PaddleClasModel allTools::GPU_LAH = emotion_Gpu_init(
            (QDir::currentPath() + "/models/resnet50").toStdString());
fastdeploy::vision::classification::PaddleClasModel allTools::CPU_LAH = emotion_Cpu_init(
            (QDir::currentPath() + "/models/resnet50").toStdString());

// Shared database handle for the people_t / user_t / log_t tables.
std::shared_ptr<DB> allTools::Face_DB = make_shared<DB>();

// SeetaFace engines, each built from its .csta model file:
// head-pose quality estimator.
std::shared_ptr<seeta::QualityOfPoseEx> allTools::QP(FaceAction::getQualityOfPoseEx(
                                                         SettingModel("pose_estimation.csta").GetModel()));
// face detector.
std::shared_ptr<seeta::FaceDetector> allTools::FD(FaceAction::getFaceDetector(
                                                      SettingModel("face_detector.csta").GetModel()));
// 68-point landmarker (used by getPoint68) ...
std::shared_ptr<seeta::FaceLandmarker> allTools::flm68(FaceAction::getFaceLandmarker68(
                                                           SettingModel("face_landmarker_pts68.csta").GetModel()));
// ... and 5-point landmarker (used by recognition / anti-spoofing / quality).
std::shared_ptr<seeta::FaceLandmarker> allTools::flm5(FaceAction::getFaceLandmarker5(
                                                          SettingModel("face_landmarker_pts5.csta").GetModel()));

// face feature extractor / recogniser.
std::shared_ptr<seeta::FaceRecognizer> allTools::FR(getFaceRecognizer(
                                                        SettingModel("face_recognizer.csta").GetModel()));

// liveness (anti-spoofing) detector.
std::shared_ptr<seeta::FaceAntiSpoofing> allTools::FAS(getFaceAntiSpoofing());

// face image quality assessor.
std::shared_ptr<seeta::QualityAssessor> allTools::QAS(getQualityAssessor());

// Facade combining the engines above for action / landmark inference.
// NOTE: depends on flm68/flm5/FD/QP being constructed first — all are defined
// earlier in this translation unit, so in-file order guarantees that.
FaceAction allTools::funcSeetaface(flm68.get(), flm5.get(), nullptr, FD.get(), QP.get());

// Minimum similarity score for two faces to count as the same person.
const float allTools::FACE_CMP_THRESHOLD = 0.62;

// --- member function implementations ---
// Accessor for the process-wide shared camera handle.
std::shared_ptr<cv::VideoCapture> allTools::getCap()
{
    return allTools::camera;
}

// Lazily create the Python hand-inference object: on first use, start the
// Python interpreter and cache the model handle in pInfer; afterwards return
// the cached handle.
PyObject *allTools::getHandPinfer()
{
    qDebug("54321");
    if (pInfer != nullptr)
        return pInfer;
    Py_Initialize();
    pInfer = Init_Hand_Model();
    qDebug("12345");
    return pInfer;
}

/*
 * Run hand inference on `img`, writing the class result through `res` and
 * returning the annotated frame.
 * FIX: route the model handle through getHandPinfer() so the Python model is
 * lazily initialised — the original passed the raw pInfer, which is nullptr
 * until getHandPinfer() has been called at least once.
 */
Mat allTools::handInfer(Mat &img, int *res)
{
    return Hand_Infer(img, getHandPinfer(), res);
}

/*
 * Release the Python hand-inference object and shut down the interpreter.
 * FIXES: the original called Py_DECREF unconditionally, which crashes when
 * pInfer is still nullptr; and it never reset pInfer, so a later
 * getHandPinfer() would hand out a dangling pointer instead of re-initialising.
 */
void allTools::destroyHandPinfer()
{
    if (pInfer != nullptr)
    {
        Py_DECREF(pInfer);
        pInfer = nullptr;   // allow getHandPinfer() to re-create cleanly
    }
    Py_Finalize();
    qDebug() << "close handPinfer!";
}

// Portrait matting entry point: dispatch to the CPU or GPU PP-Matting model
// according to `device` ("CPU"/"cpu" or "GPU"/"gpu"); any other value yields
// an empty Mat.
Mat allTools::PPMatting(const string &device, cv::Mat &img)
{
    const bool wantsCpu = (device == "CPU" || device == "cpu");
    const bool wantsGpu = (device == "GPU" || device == "gpu");
    if (wantsCpu)
        return MattingInfer(CPU_LJH, img, "");
    if (wantsGpu)
        return MattingInfer(GPU_LJH, img, "");
    return cv::Mat();
}

// Run ID-card OCR over `img` and return the recognised text.
const string allTools::OCR(cv::Mat &img)
{
    return allTools::ocr.infer(img);
}

/*
 * Classify the facial expression found in `img`.
 *
 * device: "CPU"/"cpu" or "GPU"/"gpu" — selects the inference backend.
 * Returns the class id parsed from the raw result string, or -1 when the
 * device is unrecognised or the result string is too short to parse.
 *
 * Class-id meaning (from the mapping previously kept here as commented-out
 * code): 0 angry(愤怒), 1 disgust(讨厌), 2 fear(害怕), 3 happy(开心),
 * 4 neutral(平静), 5 sad(伤心), 6 surprise(惊喜).
 */
int allTools::emotion(const string &device, const cv::Mat img)
{
    std::string res_s;
    if (device == "CPU" || device == "cpu")
    {
        // Crop the face region first, then classify the crop.
        cv::Mat face = FaceScreenshot_infer(FD.get(), img);
        res_s = emotion_Infer(CPU_LAH, face);
    }
    else if (device == "GPU" || device == "gpu")
    {
        cv::Mat face = FaceScreenshot_infer(FD.get(), img);
        res_s = emotion_Infer(GPU_LAH, face);
    }

    // The class digit sits at a fixed offset (index 27) of the result string
    // — assumed format of emotion_Infer's output, TODO confirm.
    // FIX: guard the access; the original `res_s[27]` was undefined behaviour
    // when the device was unrecognised (res_s empty) or the result was short.
    if (res_s.size() > 27)
        return res_s[27] - '0';
    return -1;
}

// Accessor for the shared face-database handle.
std::shared_ptr<DB> allTools::getDB()
{
    return allTools::Face_DB;
}

// Detect the current eye-rotation action id in `img`.
const int allTools::getEyeRotateId(cv::Mat& img)
{
    return allTools::funcSeetaface.inferEyeRotate(img);
}

// Detect the current face action id (liveness gesture) in `img`.
const int allTools::getFaceActionId(Mat& img)
{
    return allTools::funcSeetaface.inferAction(img);
}

// Return the raw anti-spoofing verdict string for `img`
// (e.g. "spoof" — see isFaceAntiSpoofingStatus).
const string allTools::getFaceAntiSpoofingStatus(cv::Mat& img)
{
    const string verdict = FaceAntiSpoofing_operating_Infer(FD.get(), flm5.get(), FAS.get(), img);
    return verdict;
}

// True when the face in `img` is a spoof (presentation attack / fake body).
bool allTools::isFaceAntiSpoofingStatus(cv::Mat img)
{
    const string verdict = FaceAntiSpoofing_operating_Infer(FD.get(), flm5.get(), FAS.get(), img);
    return verdict == "spoof";
}

// Look up a people_t row by user id.
QMap<QString, QString> allTools::getDbPeopleInfo(const QString& userId)
{
    return allTools::Face_DB->queryById(userId);
}

// Look up a user_t row by user id.
QMap<QString, QString> allTools::getDbUserInfo(const QString& userId)
{
    return allTools::Face_DB->queryUserById(userId);
}

// Fetch login-log rows (log_t) between startTime and endTime for userId.
QMap<QString, QMap<QString, QString> > allTools::getLogInfo(const QString &startTime, const QString &endTime, const QString &userId)
{
    return allTools::Face_DB->querLoginlogByTimeAndId(startTime, endTime, userId);
}

// Register (update) a user_t record: associate userId with its face image path.
void allTools::updateUser_t(const QString &userId, const QString& imgPath)
{
    allTools::Face_DB->updateRegist(userId, imgPath);
}

// Update the stored action id for userId.
void allTools::updateAction(const QString &userId, const QString &action)
{
    allTools::Face_DB->updateAction(userId, action);
}

// Append a new login-log entry (time, user id, result, source IP).
void allTools::insertLogInfo(const QString &time, const QString &id, const QString &result, const QString &IP)
{
    allTools::Face_DB->insertLoginlog(time, id, result, IP);
}

// Load all registered faces from user_t: user-info fields mapped to each
// stored face feature vector.
QMap<QVector<QString>, std::shared_ptr<float> > allTools::getAllFaceInfo()
{
    return allTools::Face_DB->getAllFace(FR.get(), flm5.get(), FD.get());
}

// Record (update) the stored emotion for userId.
void allTools::updateEmotion(const QString &userId, const QString &emo)
{
    allTools::Face_DB->updateEmotion(userId, emo);
}

// Draw the 68 facial landmark points onto `img` and return the result.
Mat allTools::getPoint68(Mat &img)
{
    return allTools::funcSeetaface.drawPoints(img);
}

// Extract the face feature vector from `img`.
std::shared_ptr<float> allTools::getFaceFeatures(cv::Mat &img)
{
    std::shared_ptr<float> features = FaceRecognizer_infer(FR.get(), flm5.get(), FD.get(), img);
    return features;
}

// Similarity score between the faces in `img` and `_img`
// (compare against FACE_CMP_THRESHOLD to decide a match).
float allTools::getFaceCmp(Mat &img, Mat &_img)
{
    const auto featuresA = getFaceFeatures(img);
    const auto featuresB = getFaceFeatures(_img);
    return compare(FR.get(), featuresA, featuresB);
}

// Crop and return the detected face region of `img`.
Mat allTools::getFacePic(Mat img)
{
    Mat faceCrop = FaceScreenshot_infer(FD.get(), img);
    return faceCrop;
}

/*
 * Run the face quality assessor on `img` and report each metric as a
 * human-readable grade.
 *
 * Returned map keys (metric order produced by QualityAssessor_infer):
 *   "bri"  brightness(亮度), "qxd" clarity(清晰度), "zt" pose(姿态),
 *   "face" facial integrity(面部完整性), "fps" resolution(分辨率).
 * Values: "差" (LOW, level 0), "良" (MEDIUM, 1), "优" (HIGH, 2).
 * Returns an empty map when inference yields no result.
 *
 * FIX: the original dereferenced res_p inside the loop and only null-checked
 * it afterwards (before delete[]) — a null result crashed before the check.
 */
QMap<QString, QString> allTools::getQualityAssessorInfo(Mat &img)
{
    QMap<QString, QString> Info;
    seeta::QualityResult* res_p = QualityAssessor_infer(QAS.get(), flm5.get(), FD.get(), img);
    if (res_p == nullptr)
        return Info;

    // Index-to-key table replaces the original if/else-if chain.
    static const char* const kMetricKeys[5] = { "bri", "qxd", "zt", "face", "fps" };
    for (int i = 0; i < 5; i++)
    {
        const QString now = kMetricKeys[i];
        if (res_p[i].level == 0)      // LOW
            Info[now] = "差";
        else if (res_p[i].level == 1) // MEDIUM
            Info[now] = "良";
        else if (res_p[i].level == 2) // HIGH
            Info[now] = "优";
        // other levels: entry left absent, matching the original behaviour
    }
    delete[] res_p;
    return Info;
}

/*
 * Quality gate: true only when every one of the five quality metrics
 * reaches HIGH (level 2); any LOW/MEDIUM metric, or a null assessor
 * result, fails the check.
 *
 * FIX: the original returned without freeing the QualityResult array
 * (delete[]'d only in getQualityAssessorInfo), leaking it on every call.
 */
bool allTools::isFaceOk(Mat img)
{
    seeta::QualityResult* res_p = QualityAssessor_infer(QAS.get(), flm5.get(), FD.get(), img);
    if (res_p == nullptr)
        return false;

    bool allHigh = true;
    for (int i = 0; i < 5; i++)
    {
        if (res_p[i].level <= 1)   // LOW or MEDIUM on any metric fails
        {
            allHigh = false;
            break;
        }
    }
    delete[] res_p;
    return allHigh;
}

// Recognise faces in `img` against the registered set `t`
// (user-info fields -> feature vector, as produced by getAllFaceInfo())
// and draw the results onto `img` in place.
void allTools::drawFaceInfo(cv::Mat& img, QMap<QVector<QString>,std::shared_ptr<float>> t)
{
    faceRecAndDraw(FR.get(), flm5.get(), FD.get(), img, t);
}

// Instances carry no state of their own — everything in allTools is static.
allTools::allTools() = default;

// Nothing to release per-instance; static resources outlive all instances.
allTools::~allTools() = default;

