#include "image_processing.h"
#include"twocut_funcs.h"
#include"change_bk.h"
#include"qt_cv_change.h"
#include <QDebug>
#include <QFileDialog>
#include <opencv2/dnn.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace cv::dnn;
// Default constructor. Image members (srcImage/dstImage) are left empty and
// are expected to be assigned by the caller before any processing method runs.
imageprocess::imageprocess()
{

}
// Produce the initial working image: a grayscale copy of the loaded source.
// The result is stored in dstImage, which the other filters operate on.
Mat imageprocess::working()
{
    Mat grey;
    cvtColor(srcImage, grey, COLOR_BGR2GRAY);
    dstImage = grey;
    return dstImage;
}


// Convert the working image to grayscale in place.
// BUGFIX: cvtColor(COLOR_BGR2GRAY) asserts on single-channel input, so a
// second call (or a call after working()) used to throw. Skip the conversion
// when dstImage is already grayscale.
Mat imageprocess::grey_all(){
    if (dstImage.channels() >= 3) {
        cvtColor(dstImage,dstImage,COLOR_BGR2GRAY,0);
    }
    return dstImage;
}
// Soften the working image with a 5x5 normalized box filter (anchor at the
// kernel centre, default border handling).
Mat imageprocess::soft_all(){
    const Size kernelSize(5, 5);
    Mat smoothed;
    blur(dstImage, smoothed, kernelSize, Point(-1, -1), BORDER_DEFAULT);
    dstImage = smoothed;
    return dstImage;
}
// Sharpen the working image: an edge-preserving bilateral filter first, then
// a Laplacian-style 3x3 sharpening kernel applied via filter2D.
Mat imageprocess::hard_all(){
    Mat denoised;
    bilateralFilter(dstImage, denoised, 5, 10, 10); // bilateral filter (edge-preserving)
    const Mat sharpenKernel = (Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);
    Mat sharpened;
    filter2D(denoised, sharpened, CV_8UC3, sharpenKernel);
    dstImage = sharpened;
    return dstImage;
}
// Replace the photo background with the solid colour held in members B/G/R.
// Steps: assemble per-pixel samples -> K-Means segmentation -> background
// removal -> mask generation -> Gaussian feathering -> alpha-blended output.
// NOTE(review): the original header comment mentioned watershed segmentation,
// but the implementation is K-Means based.
Mat imageprocess::change_phbg(){
    Mat img1 = dstImage;
    Mat img2 = img1.clone();
    Mat points = HandleImgData(img1);

    // K-Means clustering of the pixel samples.
    int numCluster = 4;
    Mat labels;
    Mat centers;
    TermCriteria termCriteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
    kmeans(points, numCluster, labels, termCriteria, 3, KMEANS_PP_CENTERS, centers);

    int height = img1.rows;
    int width = img1.cols;

    // The cluster containing the pixel near the top-left corner (row 2, col 2)
    // is assumed to be the background cluster.
    // BUGFIX: the linear index of pixel (2, 2) is 2 * width + 2; the original
    // used img1.rows * 2 + 2, which is only correct for square images.
    int index = width * 2 + 2;
    int cindex = labels.at<int>(index, 0); // background cluster label

    // Foreground mask: 255 = keep the original pixel, 0 = background.
    Mat mask = Mat::zeros(img1.size(), CV_8UC1);
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            index = row * width + col;
            int label = labels.at<int>(index, 0);
            if (label == cindex)
            {
                img2.at<Vec3b>(row, col)[0] = 0;
                img2.at<Vec3b>(row, col)[1] = 0;
                img2.at<Vec3b>(row, col)[2] = 0;
                mask.at<uchar>(row, col) = 0;
            }
            else
            {
                mask.at<uchar>(row, col) = 255;
            }
        }
    }

    // Erode to shave stray background pixels off the silhouette edge.
    Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
    erode(mask, mask, k);

    // Gaussian blur feathers the mask edge so foreground/background blend.
    GaussianBlur(mask, mask, Size(3, 3), 0, 0);

    // Target background colour from members, BGR order.
    // (Removed: unused RNG rng(12345) left over from a random-colour test.)
    Vec3b color;
    color[0] = B;
    color[1] = G;
    color[2] = R;
    qDebug()<<B; // debug trace of the configured blue channel

    Mat result(img1.size(), img1.type());
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            int m = mask.at<uchar>(row, col);
            if (m == 255)
            {
                result.at<Vec3b>(row, col) = img1.at<Vec3b>(row, col);//foreground
            }
            else if (m == 0)
            {
                result.at<Vec3b>(row, col) = color;//background
            }
            else
            {
                // Feathered edge: blend source and background by mask weight.
                double d1 = m / 255.0;
                int b1 = img1.at<Vec3b>(row, col)[0];
                int g1 = img1.at<Vec3b>(row, col)[1];
                int r1 = img1.at<Vec3b>(row, col)[2];

                int b2 = color[0];
                int g2 = color[1];
                int r2 = color[2];

                int b = b1 * d1 + b2 * (1.0 - d1);
                int g = g1 * d1 + g2 * (1.0 - d1);
                int r = r1 * d1 + r2 * (1.0 - d1);

                result.at<Vec3b>(row, col)[0] = b;
                result.at<Vec3b>(row, col)[1] = g;
                result.at<Vec3b>(row, col)[2] = r;
            }
        }
    }
    maskImage = convertTo3Channels(mask);
    return result;
}
// Assemble the K-Means sample matrix: one row per pixel, one CV_32F column
// per channel (BGR order for a 3-channel input), row-major pixel order.
// BUGFIX: the original allocated `img.channels()` columns but unconditionally
// wrote 3 floats per row, producing out-of-bounds writes for images with
// fewer than 3 channels. reshape + convertTo handles any channel count and is
// value-identical to the hand-rolled loop for BGR input.
Mat imageprocess::HandleImgData(Mat& img)
{
    // reshape() requires contiguous data; clone when img is a view/ROI.
    Mat contiguous = img.isContinuous() ? img : img.clone();
    // (rows*cols) x channels, still CV_8U: one interleaved sample per pixel.
    Mat samples = contiguous.reshape(1, contiguous.rows * contiguous.cols);
    Mat points;
    samples.convertTo(points, CV_32F);
    return points;
}
// Face detection + gender classification with pre-trained DNN models.
// Draws a red rectangle and a "Male"/"Female" label on each detected face.
// NOTE(review): the model paths are hard-coded absolute paths and should be
// made configurable.
Mat imageprocess::rgfc(){
    Mat img=dstImage;
    String model_bin = "/Users/wangjiayi/Desktop/opencv_face_detector_uint8.pb";
    String config_text = "/Users/wangjiayi/Desktop/opencv_face_detector.pbtxt";
    Net faceNet = readNet(model_bin, config_text);

    String genderProto = "/Users/wangjiayi/Desktop/gender_deploy.prototxt";
    String genderModel = "/Users/wangjiayi/Desktop/gender_net.caffemodel";
    String genderList[] = { "Male", "Female" };
    Net genderNet = readNet(genderModel, genderProto);

    // SSD face detector: 300x300 input; output reshaped to an Nx7 matrix
    // (rows = detections, cols 3..6 = normalised box corners).
    Mat blobImage = blobFromImage(img, 1.0, Size(300, 300), Scalar(), false, false);
    faceNet.setInput(blobImage, "data");
    Mat detect = faceNet.forward("detection_out");
    Mat detectionMat(detect.size[2], detect.size[3], CV_32F, detect.ptr<float>());

    int exBoundray = 25;               // margin (px) added around each face crop
    float confidenceThreshold = 0.5;
    for (int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);
        if (confidence > confidenceThreshold)
        {
            // Box coordinates are normalised to [0,1]; scale to pixels.
            int topLx = detectionMat.at<float>(i, 3) * img.cols;
            int topLy = detectionMat.at<float>(i, 4) * img.rows;
            int bottomRx = detectionMat.at<float>(i, 5) * img.cols;
            int bottomRy = detectionMat.at<float>(i, 6) * img.rows;
            Rect faceRect(topLx, topLy, bottomRx - topLx, bottomRy - topLy);

            // BUGFIX: grow the box by the margin on every side, then clip it
            // to the image bounds via rect intersection. The original clamped
            // width/height against img.cols-1/img.rows-1 without accounting
            // for x/y, so the ROI could extend past the image and make
            // img(faceTextRect) throw for faces near the right/bottom edge.
            Rect expanded(faceRect.x - exBoundray,
                          faceRect.y - exBoundray,
                          faceRect.width + 2 * exBoundray,
                          faceRect.height + 2 * exBoundray);
            Rect faceTextRect = expanded & Rect(0, 0, img.cols, img.rows);
            if (faceTextRect.width <= 0 || faceTextRect.height <= 0)
                continue; // detection entirely outside the image

            Mat face = img(faceTextRect);
            // Gender net expects a 227x227 crop; output row 0 holds
            // [score(Male), score(Female)].
            Mat faceblob = blobFromImage(face, 1.0, Size(227, 227), Scalar(), false, false);
            genderNet.setInput(faceblob);
            Mat genderPreds = genderNet.forward();
            float male = genderPreds.at<float>(0, 0);
            float female = genderPreds.at<float>(0, 1);
            int classID = male > female ? 0 : 1;
            String gender = genderList[classID];
            rectangle(img, faceRect, Scalar(0, 0, 255), 2, 8, 0);
            putText(img, gender.c_str(), faceRect.tl(), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8);
        }
    }
    return img;
}

// Cartoon-style effect: a Canny edge mask combined with repeated bilateral
// filtering, composited so the smoothed colour image shows everywhere except
// along the extracted edges (which stay black).
// Cleanup: removed an unused getTickCount() timing variable and corrected a
// stale comment that claimed 2 filter iterations (the loop runs 10 ping-pong
// passes, i.e. 20 bilateralFilter calls).
Mat imageprocess::ai(){
    Mat frame=dstImage;
    Mat gray;
      //1. Grayscale.
    cvtColor(frame, gray, COLOR_BGR2GRAY);
      //2. Median blur to suppress noise before edge extraction.
    medianBlur(gray, gray, 5);
      //3. Canny edge extraction (a Laplacian operator was also tried; Canny
      //   gave cleaner results on the test images).
      //Laplacian(gray, gray, CV_8U, 3);
    Canny(gray, gray, 120, 240);
      //4. Inverted binary threshold: edge pixels become 0 in the mask so they
      //   stay black in the composite.
    Mat mask(frame.size(), CV_8U);
    threshold(gray, mask, 120, 255, THRESH_BINARY_INV);
      //5. Bilateral filtering of the colour image.
      //5.1 Work at half resolution to speed up the (expensive) filter.
    Size tmpdst(frame.cols / 2, frame.rows / 2);
    Mat srctmp = Mat(tmpdst, frame.type());
    resize(frame, srctmp, tmpdst, 0, 0, INTER_LINEAR);
    Mat tmp = Mat(tmpdst, CV_8UC3);
      //5.2 Ping-pong bilateral filtering (bilateralFilter cannot run in
      //    place); each loop pass applies the filter twice.
    int dsize = 5;       // pixel neighbourhood diameter
    double sColor = 30;  // sigma in colour space
    double sSpace = 10;  // sigma in coordinate space
    int iterator = 10;   // loop passes (2 filter applications per pass)
    for (int i = 0; i < iterator; i++) {
      bilateralFilter(srctmp, tmp, dsize, sColor, sSpace);
      bilateralFilter(tmp, srctmp, dsize, sColor, sSpace);
    }
      //6. Scale back to full resolution, then composite through the edge mask.
    Mat srcbak;
    resize(srctmp, srcbak, frame.size(), 0, 0, INTER_LINEAR);
    Mat dst = Mat(frame.size(), frame.type(), Scalar::all(0)); // start black
    srcbak.copyTo(dst, mask);
    return dst;
}
// Run the three-stage cut pipeline (step_one/step_two/step_three from
// twocut_funcs.h) on the current working image and return the final result.
Mat imageprocess::two_cut(){
    Mat input = dstImage;
    Mat stageOne = step_one(input);
    Mat stageTwo = step_two(stageOne);
    Mat stageThree = step_three(stageTwo, input);
    return stageThree;
}

