#include "dialog.h"


#define PosSamNO 2400  //number of positive training samples
#define NegSamNO 12000 //number of negative training samples

#define TRAIN true //whether to run training — NOTE(review): the ctor hard-codes if(0) instead of consulting this macro; confirm before relying on it
#define CENTRAL_CROP true //crop the central 64*128 body region out of the 96*160 INRIA positive images
#define HardExampleNO 0//number of hard-negative samples; set to 0 when unused

// Thin subclass of the legacy CvSVM whose only purpose is to expose the
// otherwise-protected pieces of the trained decision function that are
// needed to assemble a HOG detector vector: the alpha coefficients and
// the rho offset.
class MySVM : public CvSVM
{
public:
    // Offset (rho) term of the trained SVM decision function.
    float get_rho()
    {
        return this->decision_func->rho;
    }

    // Pointer to the alpha coefficient array of the decision function.
    // One entry per support vector; not owned by the caller.
    double * get_alpha_vector()
    {
        return this->decision_func->alpha;
    }
};

 MySVM svm; // shared SVM model; loaded (or trained) in the Dialog constructor
 HOGDescriptor myHOG; // shared HOG detector; its SVM weight vector is installed in the Dialog constructor
cv::Mat src; // latest frame grabbed by readFrame() and handed to xinrenjiancecamera()
// Dialog constructor.
// 1. Builds the entire UI (labels, buttons, line edits) and arranges it in a grid layout.
// 2. Wires every button/timer signal to its slot.
// 3. Loads a pre-trained pedestrian SVM model from disk (SVM_HOG_2400.xml).
// 4. Converts that model into a single detector vector (-alpha * SV matrix, plus rho)
//    and installs it into the global HOGDescriptor `myHOG`.
//
// NOTE(review): the offline-training branch below is hard-disabled with `if(0)`
// even though a TRAIN macro exists — presumably training was run once and then
// switched off; confirm before re-enabling (TRAIN is defined true).
Dialog::Dialog(QWidget *parent)
    : QDialog(parent)
{    setWindowState(Qt::WindowMaximized);

    setWindowTitle(tr("行人检测软件"));//set the window title
    pictureLabel = new QLabel(tr("原图像显示"));
    processLabel = new QLabel(tr("处理后图像显示"));
    ageLabel =new QLabel(tr("年龄："));
    genderLabel = new QLabel(tr("性别:"));
    expressionLabel = new QLabel(tr("表情："));
    beautyLabel =new QLabel(tr("颜值："));
    pictureBtn= new QPushButton(tr("打开检测图像"));
    processBtn= new QPushButton(tr("进行行人检测"));
    renlianjianceBtn = new QPushButton(tr("人脸信息检测"));
    nameLineEdit =new QLineEdit;
    beautyEdit  =new QLineEdit;
    ageLineEdit = new QLineEdit;
     mp4LineEdit =new QLineEdit;
    expressoionLineEdit =new QLineEdit;
    genderLineEdit = new QLineEdit;
     open =new QPushButton(tr("目标跟踪"));
     close =new QPushButton(tr("关闭摄像头"));
     cameraprocessBtn=new QPushButton(tr("行人检测"));
      mp4Button  =new QPushButton(tr("打开视频"));

     // `timer` drives video-file playback (readFrame); `timer1` drives the
     // motion-tracking loop (fllow).
     timer =new QTimer(this);
     timer1 =new QTimer(this);
     imag =new QImage();
     connect(timer,SIGNAL(timeout()),this,SLOT(readFrame()));
     connect(timer1,SIGNAL(timeout()),this,SLOT(fllow()));
     connect(open,SIGNAL(clicked()),this,SLOT(opencamera()));
     connect(close,SIGNAL(clicked()),this,SLOT(closeCamera()));
     connect(cameraprocessBtn,SIGNAL(clicked()),this,SLOT(cameraprocess()));
    mainLayout = new QGridLayout(this);//layout setup: grid layout
    mainLayout -> addWidget(pictureLabel,0,0);
    mainLayout -> addWidget(processLabel,0,1,1,2);
    mainLayout -> addWidget(pictureBtn,1,0);
    mainLayout -> addWidget(nameLineEdit,2,0);//shows the image file path
    mainLayout -> addWidget(processBtn,3,0);
    mainLayout -> addWidget(renlianjianceBtn,4,0);
    mainLayout -> addWidget(ageLabel,1,1);
    mainLayout -> addWidget(ageLineEdit,1,2);
    mainLayout -> addWidget(genderLabel,2,1);
    mainLayout -> addWidget(genderLineEdit,2,2);
    mainLayout -> addWidget(beautyLabel,3,1);
    mainLayout -> addWidget(beautyEdit,3,2);
    mainLayout -> addWidget(expressionLabel,4,1);
    mainLayout -> addWidget(expressoionLineEdit,4,2);
    mainLayout -> addWidget(open,5,0);
    mainLayout -> addWidget(close,5,1,1,2);
    mainLayout -> addWidget(cameraprocessBtn,6,0);
    mainLayout -> addWidget(mp4Button,6,1);
    mainLayout -> addWidget(mp4LineEdit,6,2);

     requestUrl = "https://aip.baidubce.com/rest/2.0/face/v3/detect";
     //Replace with your own Access Token; each Access Token is valid for 30 days
     accessToken = "24.a613c05dc4df230da481be2c15b8c16c.2592000.1544706406.282335-14426125";
     manager = new QNetworkAccessManager(this);
    connect(mp4Button,SIGNAL(clicked()),this,SLOT(opencvmp4()));
    connect(manager, SIGNAL(finished(QNetworkReply*)), this, SLOT(replyFinished(QNetworkReply*)));
    connect(processBtn,SIGNAL(clicked()),this,SLOT(xingrenjiance()));
    connect(pictureBtn,SIGNAL(clicked()),this,SLOT(openpicture()));
    connect(renlianjianceBtn,SIGNAL(clicked()),this,SLOT(on_btnStartIdentify_clicked()));
     svm.load("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\SVM_HOG_2400.xml");//load the pre-trained SVM model from an XML file
     cout<<"8"<<endl;
      HOGDescriptor hog(Size(64,128),Size(16,16),Size(8,8),Size(8,8),9);
      int DescriptorDim;//dimension of the HOG descriptor

      //NOTE(review): training branch permanently disabled (`if(0)`); the TRAIN macro is not used here
      if(0)
          {
              string ImgName;//image name (absolute path)
              ifstream finPos("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\Train\\pos.lst");//file-name list of positive sample images
              //ifstream finPos("PersonFromVOC2012List.txt");//file-name list of positive sample images
              ifstream finNeg("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\Train\\neg.lst");//file-name list of negative sample images

              Mat sampleFeatureMat;//feature matrix of all training samples: rows = sample count, cols = HOG descriptor dimension
              Mat sampleLabelMat;//class-label vector of the training samples: rows = sample count, 1 column; 1 = person, -1 = no person


              //Read the positive sample images one by one and compute their HOG descriptors
              for(int num=0; num<PosSamNO && getline(finPos,ImgName); num++)
              {
                 // cout<<"processing: "<<ImgName<<endl;
                  //ImgName = "D:\\DataSet\\PersonFromVOC2012\\" + ImgName;//prepend the positive-sample directory
                  ImgName = "D:\\Users\\Administrator\\Desktop\\INRIAPerson\\" + ImgName;//prepend the positive-sample directory
                  cout<<ImgName<<endl;
                  Mat src = imread(ImgName);//read the image
                  if(CENTRAL_CROP)
                      src = src(Rect(16,16,64,128));//crop the 96*160 INRIA positive down to 64*128 (16 px off each side)
                  //resize(src,src,Size(64,128));

                  vector<float> descriptors;//HOG descriptor vector
                  hog.compute(src,descriptors,Size(8,8));//compute the HOG descriptor; window stride (8,8)
                  //cout<<"descriptor dimension: "<<descriptors.size()<<endl;

                  //Initialize the feature and label matrices on the first sample,
                  //because the descriptor dimension is only known after computing one
                  if( 0 == num )
                  {
                      DescriptorDim = descriptors.size();//dimension of the HOG descriptor
                      //feature matrix of all training samples: rows = sample count, cols = descriptor dimension
                      sampleFeatureMat = Mat::zeros(PosSamNO+NegSamNO+HardExampleNO, DescriptorDim, CV_32FC1);
                      //label vector: rows = sample count, 1 column; 1 = person, 0 = no person
                      sampleLabelMat = Mat::zeros(PosSamNO+NegSamNO+HardExampleNO, 1, CV_32FC1);
                  }

                  //Copy the computed HOG descriptor into row `num` of sampleFeatureMat
                  for(int i=0; i<DescriptorDim; i++)
                   sampleFeatureMat.at<float>(num,i) = descriptors[i];//element i of sample num's feature vector
                  sampleLabelMat.at<float>(num,0) = 1;//positive class label: 1 (person)
              }

              //Read the negative sample images one by one and compute their HOG descriptors
              for(int num=0; num<NegSamNO && getline(finNeg,ImgName); num++)
              {
                  //cout<<"processing: "<<ImgName<<endl;
                  ImgName = "D:\\Users\\Administrator\\Desktop\\INRIAPerson\\" + ImgName;//prepend the negative-sample directory
                  cout<<ImgName<<endl;
                  Mat src = imread(ImgName);//read the image
                  //resize(src,img,Size(64,128));

                  vector<float> descriptors;//HOG descriptor vector
                  hog.compute(src,descriptors,Size(8,8));//compute the HOG descriptor; window stride (8,8)
                  //cout<<"descriptor dimension: "<<descriptors.size()<<endl;

                  //Copy the computed HOG descriptor into row PosSamNO+num of sampleFeatureMat
                  for(int i=0; i<DescriptorDim; i++)
                      sampleFeatureMat.at<float>(num+PosSamNO,i) = descriptors[i];//element i of sample PosSamNO+num's feature vector
                  sampleLabelMat.at<float>(num+PosSamNO,0) = 0;//negative label written as 0 here (the original comment claimed -1 — the code uses 0)
              }

              //Train the SVM classifier
                      //Termination criteria: stop after 1000 iterations or when the error drops below FLT_EPSILON
                      CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
                      //SVM parameters: type C_SVC, linear kernel, slack factor C=0.01
                      CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 1, 0, 0.01, 0, 0, 0, criteria);
                      cout<<"begin"<<endl;
                      svm.train(sampleFeatureMat, sampleLabelMat, Mat(), Mat(), param);//train the classifier
                      cout<<"finished"<<endl;
                      svm.save("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\SVM_HOG.xml");//save the trained SVM model as an XML file

      }
      else //training disabled: the classifier was already loaded from XML above
          {

          }
          DescriptorDim = svm.get_var_count();//feature-vector dimension, i.e. the HOG descriptor dimension
          int supportVectorNum = svm.get_support_vector_count();//number of support vectors
          cout<<supportVectorNum<<endl;

          Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);//alpha vector; length = number of support vectors
          Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1);//support-vector matrix
          Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1);//result of alpha vector times the support-vector matrix

          //Copy the support-vector data into supportVectorMat
          for(int i=0; i<supportVectorNum; i++)
          {
              const float *pSVData = svm.get_support_vector(i);//pointer to the data of support vector i
              for(int j=0; j<DescriptorDim; j++)
              {
                  //cout<<pData[j]<<" ";
                  supportVectorMat.at<float>(i,j) = pSVData[j];
              }
          }

          //Copy the alpha coefficients into alphaMat
          double * pAlphaData = svm.get_alpha_vector();//alpha vector of the SVM decision function
          for(int i=0; i<supportVectorNum; i++)
          {
              alphaMat.at<float>(0,i) = pAlphaData[i];
          }

          //Compute -(alphaMat * supportVectorMat) into resultMat.
          //NOTE(review): the sign flip presumably matches OpenCV's decision-function
          //convention expected by setSVMDetector — confirm against the OpenCV docs.
          //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//original author's note: unsure why the minus sign is needed
          resultMat = -1 * alphaMat * supportVectorMat;

          //Build the detector usable by setSVMDetector(const vector<float>& detector)
          vector<float> myDetector;
          //Copy the data from resultMat into myDetector
          for(int i=0; i<DescriptorDim; i++)
          {
              myDetector.push_back(resultMat.at<float>(0,i));
          }
          //Finally append the rho offset to complete the detector
          myDetector.push_back(svm.get_rho());
          cout<<"检测子维数："<<myDetector.size()<<endl;
          //Install the detector into the HOGDescriptor
          myHOG.setSVMDetector(myDetector);
          //myHOG.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

          //Dump the detector coefficients to a file
          ofstream fout("HOGDetectorForOpenCV.txt");
          for(int i=0; i<myDetector.size(); i++)
          {
              fout<<myDetector[i]<<endl;
          }


}

// Destructor: stop both timers first so no slot can fire mid-teardown,
// then release the capture device. (Qt parents delete the child widgets.)
Dialog::~Dialog()
{
    timer->stop();
    timer1->stop();
    capture.release();

}

void Dialog::openpicture()
{
    QString fileName = QFileDialog::getOpenFileName(
                  this, tr("open image file"),
                  "./", tr("Image files(*.bmp *.jpg *.pbm *.pgm *.png *.ppm *.xbm *.xpm);;All files (*.*)"));
    if(fileName.isEmpty())
     {
         QMessageBox mesg;
         mesg.warning(this,"警告","打开图片失败!");
         return;
     }

    nameLineEdit->setText(fileName);

    Mat img;
    char *path;
    QByteArray ba = fileName.toLatin1();
    path = ba.data();


    img=imread(path);
    //cout<<img;
    Mat store1;
    cv::resize(img,store1,Size(640,480));
    imwrite("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\pictureLabel.png",store1);


    QPixmap pix;
    pix.load("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\pictureLabel.png");
    pictureLabel->setPixmap(pix);


  }




/**************Load the selected image and run multi-scale HOG pedestrian detection.
 Detected boxes are de-nested, shrunk slightly (HOG boxes over-shoot the body),
 drawn in green, and the 640x480 result is shown in processLabel.******************/
void Dialog::xingrenjiance()
{
        cout<<"start"<<endl;
        QString TestText =nameLineEdit->text();
        // FIX: toLatin1() corrupts non-ASCII path characters; use the local
        // 8-bit codec so paths containing Chinese characters load correctly.
        QByteArray da = TestText.toLocal8Bit();
        Mat src = imread(da.constData());
        // FIX: bail out on a missing/unreadable image instead of crashing
        // inside detectMultiScale.
        if(src.empty())
        {
            cout<<"read image failure"<<endl;
            return;
        }

        vector<Rect> found, found_filtered;//detection rectangles
        myHOG.detectMultiScale(src, found, 0, Size(8,8), Size(32,32), 1.05, 2);//multi-scale pedestrian detection
        cout<<found.size()<<endl;

        //Keep only rectangles not nested inside another detection; for nested
        //ones keep the outermost. (size_t indices: found.size() is unsigned.)
        for(size_t i=0; i < found.size(); i++)
        {
            Rect r = found[i];
            size_t j=0;
            for(; j < found.size(); j++)
                if(j != i && (r & found[j]) == r)
                    break;
            if( j == found.size())
                found_filtered.push_back(r);
        }

        //HOG boxes are slightly larger than the actual person, so shrink
        //each one before drawing.
        for(size_t i=0; i<found_filtered.size(); i++)
        {
            Rect r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.07);
            r.height = cvRound(r.height*0.8);
            rectangle(src, r.tl(), r.br(), Scalar(0,255,0), 3);
        }

        //Show a 640x480 copy of the annotated image (round-tripped through
        //a PNG on disk, matching the original approach).
        Mat store;
        cv::resize(src,store,Size(640,480));
        imwrite("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\result.png",store);
        QPixmap pix1;
        pix1.load("D:\\Users\\Administrator\\Desktop\\INRIAPerson\\result.png");

        processLabel->setPixmap(pix1);
}
// Runs multi-scale HOG pedestrian detection on a single frame, draws the
// (shrunken) boxes, and shows the result in processLabel.
// The Mat is taken by value: the header is copied but the pixel data is
// shared, so drawing here also marks the caller's frame.
void Dialog::xinrenjiancecamera(cv::Mat src)
{
    // FIX: guard against empty frames (camera hiccup / end of stream) —
    // cvtColor/resize on an empty Mat would abort the program.
    if(src.empty())
        return;

    vector<Rect> found, found_filtered;//detection rectangles
    myHOG.detectMultiScale(src, found, 0, Size(8,8), Size(32,32), 1.05, 2);//multi-scale pedestrian detection
    cout<<found.size()<<endl;

    //Keep only rectangles not nested inside another detection; for nested
    //ones keep the outermost. (size_t indices: found.size() is unsigned.)
    for(size_t i=0; i < found.size(); i++)
    {
        Rect r = found[i];
        size_t j=0;
        for(; j < found.size(); j++)
            if(j != i && (r & found[j]) == r)
                break;
        if( j == found.size())
            found_filtered.push_back(r);
    }

    //HOG boxes are slightly larger than the actual person, so shrink each
    //one before drawing.
    for(size_t i=0; i<found_filtered.size(); i++)
    {
        Rect r = found_filtered[i];
        r.x += cvRound(r.width*0.1);
        r.width = cvRound(r.width*0.8);
        r.y += cvRound(r.height*0.07);
        r.height = cvRound(r.height*0.8);
        rectangle(src, r.tl(), r.br(), Scalar(0,255,0), 3);
    }

    //Convert BGR->RGB for Qt, scale to the label size, and display.
    //(After cv::resize the Mat is continuous, so cols*channels() is a
    //valid bytes-per-line for QImage.)
    cv::cvtColor(src, src, COLOR_BGR2RGB,0 );
    cv::resize(src,src,Size(640,480));
    QImage image((const unsigned char*)(src.data),
                  src.cols,src.rows,
                  src.cols*src.channels(),
                  QImage::Format_RGB888);
    processLabel->setPixmap(QPixmap::fromImage(image));
}

void Dialog::on_btnStartIdentify_clicked()
{
        QString imgPath=nameLineEdit->text();
        cout<<"begin"<<endl;
        //设置请求地址
        QUrl url(requestUrl + "?access_token=" + accessToken);
        QNetworkRequest request(url);

        //设置数据提交格式，这个不能自己随便写，每个平台的格式可能不一样，百度AI要求的格式为application/json
        request.setHeader(QNetworkRequest::ContentTypeHeader, QVariant("application/json"));

        //将要检测的图片进行BASE64编码
        QImage image(imgPath);





        QByteArray ba;

        QBuffer buffer(&ba);
       // buffer.open(QIODevice::WriteOnly);
        //以png格式将图片数据写入ba
        image.save(&buffer, "png");
        //将图片进行BASE64编码
        QString imgData =QString(ba.toBase64());

        if(imgData==NULL)
        {
            cout<<"NULL"<<endl;
        }
        char *path;
        QByteArray da = imgData.toLatin1();
        path = da.data();
        cout<<path<<endl;
        buffer.close();

        //打包请求参数
        QJsonObject post_data;
        QJsonDocument document;
        post_data.insert("image", imgData);
        post_data.insert("image_type", "BASE64");
        post_data.insert("face_field", "age,beauty,gender,expression");
        document.setObject(post_data);
        QByteArray post_param = document.toJson(QJsonDocument::Compact);
       cout<<"finishe"<<endl;
        //发送请求
       manager->post(request, post_param);
}

// Slot: handles the HTTP response from the Baidu face-detection API.
// Validates the status code, parses the JSON payload, and fills the
// gender/age/beauty/expression line edits for every face in "face_list".
// The reply object is always released via deleteLater().
void Dialog::replyFinished(QNetworkReply *reply)
{
    cout<<"begin"<<endl;
    int statusCode = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
    if(200 != statusCode)
    {
        qDebug() << "Status code is error:" << statusCode;
        // FIX: the early return used to skip deleteLater(), leaking the reply.
        reply->deleteLater();
        return;
    }
    replyData = reply->readAll();
    qDebug()<<"reply data is:"<<QString(replyData);

    QJsonParseError json_error;
    QJsonDocument document = QJsonDocument::fromJson(replyData, &json_error);
    //Only proceed for a well-formed JSON object
    if(json_error.error == QJsonParseError::NoError && document.isObject())
    {
        QJsonObject obj = document.object();
        //Unpack the face-attribute results
        if(obj.contains("result"))
        {
            QJsonObject resultObj = obj.take("result").toObject();
            if(resultObj.contains("face_list"))
            {
                QJsonArray faceArray = resultObj.take("face_list").toArray();

                for(int i = 0; i < faceArray.size(); i++)
                {
                    QJsonObject faceObj = faceArray.at(i).toObject();
                    //gender: object of the form {"type": "male"|"female", ...}
                    if(faceObj.contains("gender"))
                    {
                        QJsonObject genderObj = faceObj.take("gender").toObject();
                        if(genderObj.contains("type"))
                        {
                            QString type = genderObj.take("type").toString();
                            if(type == "male")
                            {
                                genderLineEdit->setText("男");
                                qDebug()<<"男";
                            }
                            else
                            {
                                genderLineEdit->setText("女");
                                qDebug()<<"女";
                            }
                        }
                    }
                    //age: numeric in JSON, truncated to int for display
                    if(faceObj.contains("age"))
                    {
                        int age = faceObj.take("age").toDouble();
                        qDebug()<<"查询到了年龄："<<age;
                        ageLineEdit->setText(QString::number(age));
                    }
                    //beauty score: numeric in JSON, truncated to int for display
                    if(faceObj.contains("beauty"))
                    {
                        int beauty = faceObj.take("beauty").toDouble();
                        qDebug()<<"查询到了颜值："<<beauty;
                        beautyEdit->setText(QString::number(beauty));
                    }
                    //expression: object of the form {"type": "smile"|"laugh"|..., ...}
                    if(faceObj.contains("expression"))
                    {
                        QJsonObject expressionObj = faceObj.take("expression").toObject();
                        if(expressionObj.contains("type"))
                        {
                            QString type = expressionObj.take("type").toString();
                            if(type == "smile")
                            {
                                expressoionLineEdit->setText("微笑");
                                qDebug()<<"微笑";
                            }
                            else if(type == "laugh")
                            {
                                expressoionLineEdit->setText("大笑");
                                qDebug()<<"大笑";
                            }
                            else
                            {
                                expressoionLineEdit->setText("不笑");
                                qDebug()<<"不笑";
                            }
                        }
                    }
                }
            }
        }
    }
    reply->deleteLater();
}



// Slot ("目标跟踪" button): opens camera 0 and starts timer1, whose timeout
// drives the motion-tracking slot fllow().
// NOTE(review): fllow() opens its own VideoCapture(0); the `capture` opened
// here does not appear to be read by that loop — confirm whether this open
// is needed (two opens of the same device can conflict).
void Dialog::opencamera()
{
   capture.open(0);
    timer1->start(33);

}
void Dialog::readFrame()
{

    capture>>src;
    xinrenjiancecamera(src);
}

// Slot: tears down live processing — closes any OpenCV windows, stops both
// timers (so no slot fires on a dead capture), releases the capture device,
// and clears both display labels.
void Dialog::closeCamera()
{  destroyAllWindows();
    timer1->stop();
    timer->stop();
   capture.release();//
    pictureLabel->clear();
    processLabel->clear();
}


// Slot ("行人检测" button): opens the video file whose path is in
// mp4LineEdit and starts the playback timer that drives readFrame().
void Dialog::cameraprocess()
{
    QString mp4Path=mp4LineEdit->text();
    string  videoPath= mp4Path.toStdString();
    capture.open(videoPath);

    if (!capture.isOpened())
    {
        std::cout << "read video failure" << std::endl;
        // FIX: removed the `capture.open(0);` that followed this return —
        // it was unreachable dead code.
        return ;
    }
    // NOTE(review): interval 0 fires one frame per event-loop pass (as fast
    // as possible) — confirm this is the intended playback rate.
    timer->start(0);
}

void Dialog::opencvmp4()
{
    QString fileName1 = QFileDialog::getOpenFileName(this, tr("Open Video"), ".", tr("Video File(*.avi *.mp4 *.h264 *.MOV)"));
    if(fileName1.isEmpty())
     {
         QMessageBox mesg;
         mesg.warning(this,"警告","打开图片失败!");
         return;
     }

         mp4LineEdit->setText(fileName1);

}
// Slot ("目标跟踪"): motion tracking on camera 0 via frame differencing —
// grayscale diff against a reference frame, threshold, dilate/erode, then
// bounding boxes around contours larger than 15000 px². Press 'q' in the
// OpenCV window to exit.
//
// NOTE(review): this slot opens its own VideoCapture(0) on every timer1
// tick and then enters a blocking while(true) loop, which starves the Qt
// event loop until 'q' is pressed — confirm this is intended.
// NOTE(review): `previousframe` is only ever set from frame #1 (the refresh
// at the bottom is commented out), so every later frame is diffed against
// the first frame rather than its predecessor.
void Dialog::fllow()
{   int exit=0;
    VideoCapture capture1(0);
    Mat tempframe, currentframe, previousframe;
    Mat frame;
    int framenum = 0;
    vector<vector<Point> >contours;
        vector<Vec4i>hierarchy;
        //process one frame per loop iteration
        cout<<"begin"<<endl;
    while (true)
    {
        if(!capture1.isOpened())
        {
            cout << "read video failure" << endl;

        }



        capture1 >> frame;
        Mat result;
        result= frame.clone();
        tempframe = frame;
        framenum++;
        if (framenum == 1)
        {
            cvtColor(tempframe, previousframe, CV_BGR2GRAY);
        }
        if (framenum >= 2)
        {

            cvtColor(tempframe, currentframe, CV_BGR2GRAY);//convert to single-channel grayscale; currentframe now holds tempframe's content
            absdiff(currentframe,previousframe,currentframe);//absolute per-pixel difference
            threshold(currentframe, currentframe, 20, 255.0, CV_THRESH_BINARY);
            dilate(currentframe, currentframe,Mat());//dilate
            erode(currentframe, currentframe,Mat());//erode
            findContours(currentframe,contours,hierarchy,RETR_EXTERNAL,CHAIN_APPROX_SIMPLE);
            //drawContours(frame,contours, -1, Scalar(0, 0, 255), 2);
            vector<Rect> boundRect(contours.size());
                for (int i = 0; i < contours.size(); i++)
                {

                    std::string text = "有人";
                        //int font_face = cv::FONT_HERSHEY_COMPLEX;
                        //double font_scale = 2;
                        //int thickness = 2;
                        //int baseline;
                      //  cv::Size text_size = cv::getTextSize(text, font_face, font_scale, thickness, &baseline);


                     if (contourArea(contours[i]) < 15000)//ignore small motion regions
                        continue;
                       boundRect[i] = boundingRect(contours[i]);
                    rectangle(frame, boundRect[i], Scalar(0, 255, 0), 2);//draw the bounding rectangle on the frame
                    //cv::Point origin;
                        //origin.x = frame.cols / 2 - text_size.width / 2;
                      //  origin.y = frame.rows / 2 + text_size.height / 2;

                    //cv::putText(frame, text, origin, font_face, font_scale, cv::Scalar(0, 255, 255), thickness, 8, 0);


                }

            //show the image

               // imshow("camera", tempframe);
               // imshow("moving area", currentframe);
                imshow("result", frame);
                while(char(waitKey(33))=='q')
                 {destroyAllWindows();
                   // break;
                    exit=1;
                    capture1.release();//
               cout<<exit<<endl;
                  //exit(0);
                }
        }
        //save the current frame as the previous frame for the next iteration
        //cvtColor(tempframe, previousframe, CV_BGR2GRAY);
        if(exit==1)
        {   timer1->stop();
            break;
         }
       // cout<<exit<<endl;
}


}
