#include "opencvthread.h"

using namespace std;
using namespace cv;
/**
 * Construct the worker thread in its idle state.
 *
 * @param parent  optional QObject parent for Qt ownership
 */
OpenCvThread::OpenCvThread(QObject *parent) : QThread(parent)
{
    isFirstDetectedR = true;
    isFirstDetectedG = true;
    runFlog = false;
    // Initialize the counters so getRedCount()/getGreenCount() return a
    // defined value before run() produces its first result (previously they
    // were read uninitialized).
    redCount = 0;
    greenCount = 0;
    // Last-frame track boxes start empty; they are allocated lazily on the
    // first detection in processImgR/processImgG.
    lastTrackBoxR = NULL;
    lastTrackBoxG = NULL;
    lastTrackNumR = 0;
    lastTrackNumG = 0;
}

// NOTE(review): lastTrackBoxR/lastTrackBoxG are allocated with new[] in
// processImgR/processImgG and never released here -- a leak at shutdown.
// Freeing them in this destructor is only safe once the pointers are
// reliably null-initialized in the constructor; confirm before adding
// delete[] calls.
OpenCvThread::~OpenCvThread()
{
}
int OpenCvThread::getGreenCount() const
{
    return greenCount;
}

int OpenCvThread::getRedCount() const
{
    return redCount;
}

/**
 * Detect green regions in the binary mask `src`, keep only those that
 * intersect a box tracked in the previous frame, draw the large survivors on
 * `frame`, and return the total surviving area.
 *
 * @param src   single-channel binary mask (non-zero = candidate green pixel);
 *              copied internally because findContours modifies its input
 * @param frame BGR image the green tracking rectangles are drawn onto
 * @return      sum of the areas of boxes that also intersected a box from the
 *              previous frame (0 on the first detection frame or no contours)
 */
int OpenCvThread::processImgG(Mat src, Mat &frame)
{
    Mat tmp;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    vector<Point> hull;

    // findContours clobbers its input, so work on a copy.
    src.copyTo(tmp);
    findContours(tmp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    if (contours.empty())
    {
        // Nothing visible: restart tracking on the next detection.
        isFirstDetectedG = true;
        return 0;
    }

    // Bounding box of each contour's convex hull. This replaces the legacy
    // CvMemStorage/CvSeq/cvBoundingRect code, which also dropped the last
    // hull point because of an off-by-one (j < hullcount - 1) in its copy
    // loop, and avoids the raw new[]/delete[] locals.
    vector<Rect> trackBox(contours.size());
    for (size_t i = 0; i < contours.size(); i++)
    {
        convexHull(Mat(contours[i]), hull, true);
        trackBox[i] = boundingRect(Mat(hull));
    }

    vector<Rect> result;
    if (isFirstDetectedG)
    {
        // First frame with detections: just remember the boxes.
        lastTrackBoxG = new Rect[contours.size()];
        for (size_t i = 0; i < contours.size(); i++)
            lastTrackBoxG[i] = trackBox[i];
        lastTrackNumG = contours.size();
        isFirstDetectedG = false;
    }
    else
    {
        // Keep only boxes that overlap some box from the previous frame.
        // (The original incremented the result count for every contour, so
        // non-intersecting slots held uninitialized rectangles that were
        // later drawn and summed.)
        for (size_t i = 0; i < contours.size(); i++)
        {
            for (int j = 0; j < lastTrackNumG; j++)
            {
                if (isIntersected(trackBox[i], lastTrackBoxG[j]))
                {
                    result.push_back(trackBox[i]);
                    break;
                }
            }
        }
        // Replace last frame's boxes with this frame's.
        delete[] lastTrackBoxG;
        lastTrackBoxG = new Rect[contours.size()];
        for (size_t i = 0; i < contours.size(); i++)
            lastTrackBoxG[i] = trackBox[i];
        lastTrackNumG = contours.size();
    }

    // Find the largest surviving box ...
    int maxArea = 0;
    for (size_t i = 0; i < result.size(); i++)
    {
        if (result[i].area() > maxArea)
            maxArea = result[i].area();
    }

    // ... draw only boxes at least half that area, but (as before) sum the
    // area of every surviving box.
    int area = 0;
    for (size_t i = 0; i < result.size(); i++)
    {
        if (result[i].area() >= maxArea / 2)
            rectangle(frame, result[i], Scalar(0, 255, 0), 2);
        area += result[i].area();
    }
    return area;
}

// File-scope overlap test: true when r1 and r2 share a non-empty area.
// (A member version with the same logic exists on OpenCvThread.)
bool isIntersected(Rect r1, Rect r2)
{
    const int left   = max(r1.x, r2.x);
    const int top    = max(r1.y, r2.y);
    const int right  = min(r1.x + r1.width,  r2.x + r2.width);
    const int bottom = min(r1.y + r1.height, r2.y + r2.height);
    return left < right && top < bottom;
}
/**
 * Detect red regions in the binary mask `src`, keep only those that intersect
 * a box tracked in the previous frame, draw the large survivors on `frame`,
 * and return the total surviving area.
 *
 * @param src   single-channel binary mask (non-zero = candidate red pixel);
 *              copied internally because findContours modifies its input
 * @param frame BGR image the red tracking rectangles are drawn onto
 * @return      sum of the areas of boxes that also intersected a box from the
 *              previous frame (0 on the first detection frame or no contours)
 */
int OpenCvThread::processImgR(Mat src, Mat &frame)
{
    Mat tmp;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    vector<Point> hull;

    // findContours clobbers its input, so work on a copy.
    src.copyTo(tmp);
    findContours(tmp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    if (contours.empty())
    {
        // Nothing visible: restart tracking on the next detection.
        isFirstDetectedR = true;
        return 0;
    }

    // Bounding box of each contour's convex hull. This replaces the legacy
    // CvMemStorage/CvSeq/cvBoundingRect code, which also dropped the last
    // hull point because of an off-by-one (j < hullcount - 1) in its copy
    // loop, and avoids the raw new[]/delete[] locals.
    vector<Rect> trackBox(contours.size());
    for (size_t i = 0; i < contours.size(); i++)
    {
        convexHull(Mat(contours[i]), hull, true);
        trackBox[i] = boundingRect(Mat(hull));
    }

    vector<Rect> result;
    if (isFirstDetectedR)
    {
        // First frame with detections: just remember the boxes.
        lastTrackBoxR = new Rect[contours.size()];
        for (size_t i = 0; i < contours.size(); i++)
            lastTrackBoxR[i] = trackBox[i];
        lastTrackNumR = contours.size();
        isFirstDetectedR = false;
    }
    else
    {
        // Keep only boxes that overlap some box from the previous frame.
        // (The original incremented the result count for every contour, so
        // non-intersecting slots held uninitialized rectangles that were
        // later drawn and summed.)
        for (size_t i = 0; i < contours.size(); i++)
        {
            for (int j = 0; j < lastTrackNumR; j++)
            {
                if (isIntersected(trackBox[i], lastTrackBoxR[j]))
                {
                    result.push_back(trackBox[i]);
                    break;
                }
            }
        }
        // Replace last frame's boxes with this frame's.
        delete[] lastTrackBoxR;
        lastTrackBoxR = new Rect[contours.size()];
        for (size_t i = 0; i < contours.size(); i++)
            lastTrackBoxR[i] = trackBox[i];
        lastTrackNumR = contours.size();
    }

    // Find the largest surviving box ...
    int maxArea = 0;
    for (size_t i = 0; i < result.size(); i++)
    {
        if (result[i].area() > maxArea)
            maxArea = result[i].area();
    }

    // ... draw only boxes at least half that area, but (as before) sum the
    // area of every surviving box.
    int area = 0;
    for (size_t i = 0; i < result.size(); i++)
    {
        if (result[i].area() >= maxArea / 2)
            rectangle(frame, result[i], Scalar(0, 0, 255), 2);
        area += result[i].area();
    }
    return area;
}
/**
 * Worker loop: waits for an encoded frame published by newDataSlot(),
 * segments red and green regions using the Cr plane of YCrCb, tracks and
 * draws them, re-encodes the annotated frame into newAiData and emits
 * newAiDataSignal() for the GUI thread.
 */
void OpenCvThread::run()
{
    redCount = 0;
    greenCount = 0;
    Mat frame;
    Mat img;
    Mat imgYCrCb;
    Mat imgRed;
    Mat imgGreen;
    // Linear remap img = a*frame + b; b is chosen so mid-gray (125) stays
    // roughly in place while contrast is reduced.
    double a = 0.3;
    double b = (1 - a) * 125;

#if 0
    VideoCapture capture(0);
    if (!capture.isOpened()) {
        cout << "Start device failed!\n" << endl;
        return ;
    }
#endif
    while (true)
    {
        // Idle until newDataSlot() publishes a new encoded frame.
        if (runFlog == false)
        {
            QThread::msleep(100);
            if (cvWaitKey(10) == 'q')
                break;
            continue;
        }

        // Decode the image bytes received from the network side.
        std::vector<char> buffer(oldDaya.begin(), oldDaya.end());
        frame = cv::imdecode(buffer, cv::IMREAD_COLOR);
#if 0
        capture >> frame;
#endif
        if (frame.empty())
        {
            // Corrupt or truncated buffer: skip it rather than crash in
            // cvtColor below, and drop the flag so we wait for fresh data
            // instead of busy-looping on the same bad buffer.
            runFlog = false;
            continue;
        }
        frame.convertTo(img, img.type(), a, b);
        cvtColor(img, imgYCrCb, CV_BGR2YCrCb);
        imgRed.create(imgYCrCb.rows, imgYCrCb.cols, CV_8UC1);
        imgGreen.create(imgYCrCb.rows, imgYCrCb.cols, CV_8UC1);

        // Threshold the Cr plane into binary red/green masks.
        vector<Mat> planes;
        split(imgYCrCb, planes);
        MatIterator_<uchar> it_Cr = planes[1].begin<uchar>(), it_Cr_end = planes[1].end<uchar>();
        MatIterator_<uchar> it_Red = imgRed.begin<uchar>();
        MatIterator_<uchar> it_Green = imgGreen.begin<uchar>();
        for (; it_Cr != it_Cr_end; ++it_Cr, ++it_Red, ++it_Green)
        {
            // NOTE(review): *it_Cr is uchar (max 255), so "< 470" is always
            // true; the effective red test is just "> 145". Confirm the
            // intended upper bound before changing it.
            if (*it_Cr > 145 && *it_Cr < 470)
                *it_Red = 255;
            else
                *it_Red = 0;
            if (*it_Cr > UNDER_THE_GREEN_CHANNEL && *it_Cr < ON_THE_GREEN_CHANNEL)
                *it_Green = 255;
            else
                *it_Green = 0;
        }

        // Close gaps in the masks before contour extraction.
        // NOTE(review): Mat(15, 15, CV_8UC1) creates an *uninitialized*
        // kernel; getStructuringElement(MORPH_RECT, Size(...)) would be the
        // well-defined choice -- confirm intent before switching.
        dilate(imgRed, imgRed, Mat(15, 15, CV_8UC1), Point(-1, -1));
        erode(imgRed, imgRed, Mat(1, 1, CV_8UC1), Point(-1, -1));
        dilate(imgGreen, imgGreen, Mat(20, 20, CV_8UC1), Point(-1, -1));
        erode(imgGreen, imgGreen, Mat(2, 2, CV_8UC1), Point(-1, -1));

        // Track the masks, draw boxes on `frame`, and record the areas.
        redCount = processImgR(imgRed, frame);
        greenCount = processImgG(imgGreen, frame);
        cout << "red:" << redCount << "; " << "green:" << greenCount << endl;
        //qDebug() << "red:"<< redCount << ";" << "green:" << greenCount;
#if 0
        imshow("Origin", frame);

        imshow("Red", imgRed);

        imshow("Green", imgGreen);

        imshow("Red", imgRed);
#endif
        if (cvWaitKey(50) == 'q')
            break;

        // Publish the annotated frame back to the GUI thread.
        vector<uchar> imgBuf;
        imencode(".bmp", frame, imgBuf);
        QByteArray baImg((char *)imgBuf.data(), static_cast<int>(imgBuf.size()));
        newAiData.loadFromData(baImg, "BMP");
        qDebug() << "4.file process okk";
        runFlog = false;
        emit newAiDataSignal();
    }

    return;
}

// Member overlap test (duplicate of the file-scope isIntersected; member
// lookup makes this the version used inside processImgR/processImgG).
// True when r1 and r2 share a non-empty area.
bool OpenCvThread::isIntersected(Rect r1, Rect r2)
{
    const Rect overlap = r1 & r2;   // OpenCV rectangle intersection
    return overlap.width > 0 && overlap.height > 0;
}
// Returns a copy of the most recently annotated frame produced by run().
// NOTE(review): typically called from the GUI thread while run() may be
// writing newAiData -- consider guarding the member with a mutex.
QPixmap OpenCvThread::getNewAiData() const
{
    return this->newAiData;
}

/**
 * Slot receiving a freshly encoded image from the network side.
 * Stores the bytes, then raises the flag that wakes the loop in run().
 *
 * @param Daya  encoded image bytes (consumed by cv::imdecode in run())
 */
void OpenCvThread::newDataSlot(QByteArray Daya)
{
    // Publish the data BEFORE raising the flag: the original set runFlog
    // first, so the worker could start decoding while oldDaya was still
    // being assigned. (The redundant clear()-before-assign is also dropped;
    // QByteArray assignment replaces the old contents.)
    oldDaya = Daya;
    runFlog = true;
    qDebug() << "3.open recv okk";
}
