#include "genderage.h"
#include "ui_genderage.h"
#include <QDebug>
#include "mtcnn.h"

using namespace cv;
using namespace std;

GenderAge::GenderAge(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::GenderAge)
{
    ui->setupUi(this);
    // Forward the UI back-button click as this widget's back() signal,
    // so the parent window can switch pages.
    connect(ui->back, &QPushButton::clicked, [this]() {
        emit this->back();
    });
}

GenderAge::~GenderAge()
{
    delete ui;  // release the generated UI object owned by this widget
}

/**********       图像处理函数部分       ***********/

// Extract a width x height ROI anchored at (top_left_x, top_left_y).
// If the requested rectangle extends past the image borders, the
// out-of-bounds area is filled with paddingColor so the output always
// has the requested size.
Mat getPaddedROI(const Mat &input, int top_left_x, int top_left_y, int width, int height, Scalar paddingColor) {
    // Remember the caller's requested size before we shrink the ROI.
    const int requested_width = width;
    const int requested_height = height;

    int bottom_right_x = top_left_x + width;
    int bottom_right_y = top_left_y + height;

    Mat output;

    // Padding is needed when any edge of the ROI leaves the image.
    if (top_left_x < 0 || top_left_y < 0 || bottom_right_x > input.cols || bottom_right_y > input.rows) {
        int border_left = 0, border_right = 0, border_top = 0, border_bottom = 0;

        if (top_left_x < 0) {
            width = width + top_left_x;
            border_left = -1 * top_left_x;
            top_left_x = 0;
        }
        if (top_left_y < 0) {
            height = height + top_left_y;
            border_top = -1 * top_left_y;
            top_left_y = 0;
        }
        if (bottom_right_x > input.cols) {
            width = width - (bottom_right_x - input.cols);
            border_right = bottom_right_x - input.cols;
        }
        if (bottom_right_y > input.rows) {
            height = height - (bottom_right_y - input.rows);
            border_bottom = bottom_right_y - input.rows;
        }

        // Bug fix: when the ROI lies completely outside the image the
        // clipped width/height become <= 0 and Rect/copyMakeBorder
        // would throw. Return a frame of the requested size filled
        // entirely with the padding colour instead.
        if (width <= 0 || height <= 0) {
            output = Mat(requested_height, requested_width, input.type(), paddingColor);
            return output;
        }

        Rect R(top_left_x, top_left_y, width, height);

        // Copy the in-bounds part and pad the rest with a constant colour.
        copyMakeBorder(input(R), output, border_top, border_bottom, border_left, border_right, BORDER_CONSTANT, paddingColor);
    }

    else {
        // Fully inside the image: plain ROI view, no padding needed.
        Rect R(top_left_x, top_left_y, width, height);
        output = input(R);
    }
    return output;
}

// Grow a rectangle into a square whose side is the longer of the two
// dimensions, keeping the square centred on the original rectangle.
Rect rect2square(const Rect& rect) {
    const int w = rect.width;
    const int h = rect.height;
    const int side = (w > h) ? w : h;
    // (w - side) and (h - side) are <= 0, so the origin shifts left/up
    // by half the difference; the int cast truncates toward zero,
    // matching the original rounding.
    const int x = rect.x + static_cast<int>((w - side) * 0.5);
    const int y = rect.y + static_cast<int>((h - side) * 0.5);
    return Rect(x, y, side, side);
}

// Find the index and value of the largest probability in a network
// output blob. classId receives the column of the maximum, classProb
// its value.
void getMaxClass(const cv::Mat &probBlob, int *classId, double *classProb)
{
    // Flatten to a single row so the maximum's x-coordinate is the class id.
    const cv::Mat flat = probBlob.reshape(1, 1);
    cv::Point maxLoc;
    cv::minMaxLoc(flat, nullptr, classProb, nullptr, &maxLoc);
    *classId = maxLoc.x;
}




/**********       调用模型部分       ***********/

void GenderAge::on_begin_clicked()
{
    ui->photo->setEnabled(false);

    Mat img;
    VideoCapture video;
    video.open(0);
    if (!video.isOpened()) {
        qDebug() <<"摄像头开启失败！";
    }

    stop = false;

    //使用三个人脸检测模型，初始化多任务卷积神经网络检测器
    ProposalNetwork::Config pConfig;
    pConfig.caffeModel = CAFFE_PATH_1;
    pConfig.protoText = CAFFE_PATH_2;
    pConfig.threshold = 0.6f;

    RefineNetwork::Config rConfig;
    rConfig.caffeModel = CAFFE_PATH_3;
    rConfig.protoText = CAFFE_PATH_4;
    rConfig.threshold = 0.7f;

    OutputNetwork::Config oConfig;
    oConfig.caffeModel = CAFFE_PATH_5;
    oConfig.protoText = CAFFE_PATH_6;
    oConfig.threshold = 0.7f;

    MTCNNDetector detector(pConfig, rConfig, oConfig);

    //设置人脸参数
    const float minFaceSize = 40.f;
    const float scaleFactor = 0.709f;

    //初始化年龄和性别分类器
    const std::string inBlobName = "input_1";
    const std::string outBlobName = "softmax/Softmax";

    //加载卷积神经网络模型
    cv::dnn::Net net;
    const std::string modelFile = CNN_PATH;
    net = cv::dnn::readNetFromTensorflow(modelFile);
    if (net.empty())
    {
        qDebug() << "卷积神经网络模型加载失败！";
        exit(-1);
    }


    while (stop != true) {
        video >>img;
        //复制一个用于输出的图像
        Mat out = img.clone();

        //检测人脸
        vector<Face> faces;
        faces = detector.detect(img, minFaceSize, scaleFactor);

        for (const auto &face : faces)
        {
            //获取人脸矩形
            Rect r = face.bbox.getRect();

            //转换为正方形
            Rect square = rect2square(r);

            //裁剪或补充人脸图像
            Mat img_face = getPaddedROI(img, square.x, square.y, square.width, square.height, cv::BORDER_REPLICATE);

            //设置大小位64×64
            cv::resize(img_face, img_face, cv::Size(64, 64), 0, 0);

            //将Mat类型转化为图像批次
            Mat inputBlob = cv::dnn::blobFromImage(img_face, 0.0078431372549019607843137254902, cv::Size(64, 64), cv::Scalar(127.5, 127.5, 127.5), true, false, CV_32F);

            //设置神经网络的输入
            net.setInput(inputBlob, inBlobName);

            //使用神经网络计算并输出
            Mat result = net.forward(outBlobName);

            int classId;
            double classProb;

            //获得最优匹配项
            getMaxClass(result, &classId, &classProb);

            //获取性别
            std::string gender = (classId <= 25) ? "female" : "male";

            //获取年龄
            int age = (classId % 26)*3;

            //生成矩形框出人脸
            rectangle(out, r, Scalar(0, 0, 255), 2, 1, 0);

            //生成性别和文字信息并显示
            string label = format("%d-%d,%s", age - 1, age + 1, gender.c_str());
            putText(out, label, Point(r.x - 10, r.y + 20), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 255, 0), 2);
        }

        //输出视频
        Mat temp;
        cvtColor(out,temp,COLOR_BGR2RGB);//BGR转化为RGB
        QImage Qtemp = QImage((const unsigned char*)(temp.data),
                                temp.cols, temp.rows, temp.step,
                                QImage::Format_RGB888);

        ui->display->setPixmap(QPixmap::fromImage(Qtemp));

        waitKey(10);
        QApplication::processEvents();
    }
}

void GenderAge::on_photo_clicked()
{
    Mat img;
    VideoCapture video;
    video.open(0);
    if (!video.isOpened()) {
        qDebug() <<"摄像头开启失败！";
    }

    stop = false;

    while (stop != true) {
        video >> img;
        stop = true;
    }

    //使用三个人脸检测模型，初始化多任务卷积神经网络检测器
    ProposalNetwork::Config pConfig;
    pConfig.caffeModel = CAFFE_PATH_1;
    pConfig.protoText = CAFFE_PATH_2;
    pConfig.threshold = 0.6f;

    RefineNetwork::Config rConfig;
    rConfig.caffeModel = CAFFE_PATH_3;
    rConfig.protoText = CAFFE_PATH_4;
    rConfig.threshold = 0.7f;

    OutputNetwork::Config oConfig;
    oConfig.caffeModel = CAFFE_PATH_5;
    oConfig.protoText = CAFFE_PATH_6;
    oConfig.threshold = 0.7f;

    MTCNNDetector detector(pConfig, rConfig, oConfig);

    //设置人脸参数
    const float minFaceSize = 40.f;
    const float scaleFactor = 0.709f;

    //初始化年龄和性别分类器
    const std::string inBlobName = "input_1";
    const std::string outBlobName = "softmax/Softmax";

    //加载卷积神经网络模型
    cv::dnn::Net net;
    const std::string modelFile = CNN_PATH;
    net = cv::dnn::readNetFromTensorflow(modelFile);
    if (net.empty())
    {
        qDebug() << "卷积神经网络模型加载失败！";
        exit(-1);
    }

    //复制一个用于输出的图像
    Mat out = img.clone();

    //检测人脸
    vector<Face> faces;
    faces = detector.detect(img, minFaceSize, scaleFactor);

    for (const auto &face : faces)
    {
        //获取人脸矩形
        Rect r = face.bbox.getRect();

        //转换为正方形
        Rect square = rect2square(r);

        //裁剪或补充人脸图像
        Mat img_face = getPaddedROI(img, square.x, square.y, square.width, square.height, cv::BORDER_REPLICATE);

        //设置大小位64×64
        cv::resize(img_face, img_face, cv::Size(64, 64), 0, 0);

        //将Mat类型转化为图像批次
        Mat inputBlob = cv::dnn::blobFromImage(img_face, 0.0078431372549019607843137254902, cv::Size(64, 64), cv::Scalar(127.5, 127.5, 127.5), true, false, CV_32F);

        //设置神经网络的输入
        net.setInput(inputBlob, inBlobName);

        //使用神经网络计算并输出
        Mat result = net.forward(outBlobName);

        int classId;
        double classProb;

        //获得最优匹配项
        getMaxClass(result, &classId, &classProb);

        //获取性别
        std::string gender = (classId <= 25) ? "female" : "male";

        //获取年龄
        int age = (classId % 26)*3;

        //生成矩形框出人脸
        rectangle(out, r, Scalar(0, 0, 255), 2, 1, 0);

        //生成性别和文字信息并显示
        string label = format("%d-%d,%s", age - 1, age + 1, gender.c_str());
        putText(out, label, Point(r.x - 10, r.y + 20), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 255, 0), 2);
    }

        //输出图片
        Mat temp;
        cvtColor(out,temp,COLOR_BGR2RGB);//BGR转化为RGB
        QImage Qtemp = QImage((const unsigned char*)(temp.data),
                                temp.cols, temp.rows, temp.step,
                                QImage::Format_RGB888);

        ui->display->setPixmap(QPixmap::fromImage(Qtemp));

}

void GenderAge::on_stop_clicked()
{
    // Raise the flag polled by the capture loop in on_begin_clicked();
    // the loop exits on its next iteration.
    stop = true;
    ui->photo->setEnabled(true);

}

void GenderAge::on_back_clicked()
{
    // Clear the video/photo display when leaving this page.
    ui->display->clear();
}
