#include "armorDetect.h"

using namespace std;
using namespace cv;

// Shared video source; also seeked by the trackbar callback below.
VideoCapture capture;
// Current trackbar (frame-index) position.
int g_slider_position = 0;
// Playback state: <0 = free-run, 0 = paused, >0 = number of frames left to show.
int g_run = -1;
// Set to 1 before a programmatic trackbar update so the callback does not
// switch playback into single-step mode; the callback resets it to 0.
int g_dontset = 0;
/// Trackbar callback: seek the capture to the slider's frame position.
/// A user-initiated drag (g_dontset not armed by the main loop) switches
/// playback into single-step mode so exactly one frame is shown.
void onTrackbarSlide(int pos, void *)
{
    capture.set(CAP_PROP_POS_FRAMES, pos);
    if (g_dontset == 0)
    {
        g_run = 1; // user seek: display a single frame, then pause
    }
    g_dontset = 0; // re-arm for the next callback
}

/// Default configuration: red light bars, continuous playback, no recording.
armorDetect::armorDetect()
    : _is_recorded(false), _color(RED), _is_continuous(true)
{
}

/// Caller controls only the recording flag; color defaults to RED and
/// playback defaults to continuous.
armorDetect::armorDetect(bool is_recorded)
    : _is_recorded(is_recorded), _color(RED), _is_continuous(true)
{
}

/// Fully-parameterized constructor.
/// @param is_recorded   write the annotated output to "./代码效果.avi"
/// @param color         light-bar color to enhance (nonzero = red, 0 = blue)
/// @param is_continuous continuous playback (true) vs. frame-by-frame (false)
///
/// NOTE: the default arguments were removed from this definition. With
/// defaults here, calls such as armorDetect() or armorDetect(true) become
/// ambiguous against the zero- and one-argument constructors above — and
/// restating defaults already present in the class declaration is ill-formed.
armorDetect::armorDetect(bool is_recorded, int color, bool is_continuous)
{
    _is_recorded = is_recorded;
    _color = color;
    _is_continuous = is_continuous;
}

/**
 * @brief Open a video, detect elongated light bars frame by frame, mark the
 *        armor plate between the first detected pair, and optionally record
 *        the annotated output.
 *
 * Interaction: the "Position" trackbar seeks the video; in continuous mode
 * space pauses (Esc while paused exits); in frame-by-frame mode any key
 * advances (Esc exits); 's' switches to single-step, 'r' back to free-run.
 *
 * @param video_path path of the input video file
 * @return 0 on normal completion, -1 if the video cannot be opened
 */
int armorDetect::Imageprocess(std::string video_path)
{
    capture.open(video_path);
    if (!capture.isOpened())
    {
        cout << "video cannot be opened! Please check your video path." << endl;
        return -1;
    }
    namedWindow("效果视频", WINDOW_GUI_EXPANDED);

    int frames = (int)capture.get(CAP_PROP_FRAME_COUNT);
    createTrackbar("Position", "效果视频", &g_slider_position, frames, onTrackbarSlide);

    if (_is_recorded)
    {
        // Mirror the source fps/frame size so the recording plays in real time.
        double fps = capture.get(CAP_PROP_FPS);
        Size size(
            (int)capture.get(CAP_PROP_FRAME_WIDTH),
            (int)capture.get(CAP_PROP_FRAME_HEIGHT));
        writer.open("./代码效果.avi", VideoWriter::fourcc('M', 'P', 'G', '2'), fps, size);
    }

    Mat src;                      // current source frame
    Mat gray_img;                 // single-channel color-difference image
    Mat binBright_img;            // thresholded (binary) image
    vector<Mat> channels;         // BGR channels of the source frame
    vector<Point2f> target_saved; // armor-center history used by target_predict
    RotatedRect rrect;            // min-area bounding rect of the current contour
    vector<Point2f> light_center; // centers of accepted light bars
    vector<Point2f> light_point;  // extreme-y corner points of accepted bars (2 per bar)

    while (capture.read(src)) // loop while frames are available
    {
        if (g_run != 0) // g_run == 0 means playback is paused
        {
            int current_pos = (int)capture.get(CAP_PROP_POS_FRAMES); // current frame index
            g_dontset = 1; // the trackbar update below is programmatic: keep the
                           // callback from dropping into single-step mode
            split(src, channels);
            if (_color) // red light bars
            {
                // R minus B brightens red regions in the single-channel result.
                gray_img = channels.at(2) - channels.at(0);
            }
            else // blue light bars: B minus R
            {
                gray_img = channels.at(0) - channels.at(2);
            }

            threshold(gray_img, binBright_img, 100, 255, THRESH_BINARY);
            // imshow("二值化",binBright_img);

            vector<vector<Point>> lightContours; // one pixel list per outer contour
            findContours(binBright_img.clone(), lightContours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

            for (size_t n = 0; n < lightContours.size(); n++) // examine every contour
            {
                rrect = minAreaRect(lightContours[n]); // min-area bounding rect
                Point2f cpt = rrect.center;

                // Reject contours whose long/short side ratio is below 3 —
                // light bars are strongly elongated.
                if (rrect.size.width < rrect.size.height && ((rrect.size.height / rrect.size.width) < 3.0))
                {
                    continue;
                }
                if (rrect.size.width > rrect.size.height && ((rrect.size.width / rrect.size.height) < 3.0))
                {
                    continue;
                }
                // Reject bars whose longer side is too short to be a light bar.
                double bigger_line = (rrect.size.width < rrect.size.height)
                                         ? (double)rrect.size.height
                                         : (double)rrect.size.width;
                if (bigger_line < 20)
                {
                    continue;
                }

                // Scan the contour for its extreme-y points. Image y grows
                // downward, so max-y is the bottom end and min-y the top end.
                auto max_y = (double)lightContours[n][0].y;
                auto min_y = (double)lightContours[n][0].y;
                Point bottom_point = lightContours[n][0], top_point = lightContours[n][0];
                for (size_t i = 0; i < lightContours[n].size(); i++)
                {
                    double y = (double)lightContours[n][i].y;
                    if (y > max_y)
                    {
                        max_y = y;
                        bottom_point = lightContours[n][i];
                    }
                    if (y < min_y)
                    {
                        min_y = y;
                        top_point = lightContours[n][i];
                    }
                }
                light_point.push_back(bottom_point); // even index: max-y end of the bar
                light_point.push_back(top_point);    // odd index:  min-y end of the bar
                light_center.push_back(cpt);
            }

            /*
                Aspect-ratio check on the region between the first two accepted
                light bars: regions much wider than a bar is tall cannot be an
                armor plate and are skipped.
            */
            bool passLongArea = false; // true: skip annotating this frame
            // >= 4 corner points means at least two bars were accepted
            // (was "> 2": light_point[3] below would be out of bounds at size 3).
            if (light_point.size() >= 4)
            {
                auto x_length = abs(light_point[2].x - light_point[0].x); // gap between the bars
                auto y_length = abs(light_point[1].y - light_point[0].y); // height of the first bar
                if (x_length / y_length > 4.0)
                {
                    passLongArea = true; // region too wide — not an armor plate
                }
            }

            /* Mark the armor-plate region and its center */
            if (light_point.size() >= 4 && !passLongArea) // index [3] needs both bars
            {
                line(src, light_point[0], light_point[3], Scalar(0, 128, 0), 1, 8, 0);
                line(src, light_point[1], light_point[2], Scalar(0, 128, 0), 1, 8, 0);
            }
            if (light_center.size() > 1 && !passLongArea)
            {
                Point2f armor_center = (light_center[0] + light_center[1]) / 2; // midpoint of the two bars
                circle(src, armor_center, 5, Scalar(0, 0, 255), -1, 8, 0);
                putText(src, "target", armor_center, 2, 0.4, Scalar(255, 255, 255), 1, 8, 0);
                target_predict(src, armor_center, target_saved); // draw the predicted position
            }
            light_center.clear();
            light_point.clear();

            setTrackbarPos("Position", "效果视频", current_pos);
            imshow("效果视频", src);
            g_run -= 1; // in single-step mode this drops back to 0 (paused)

            if (_is_recorded)
            {
                writer << src;
            }
        }

        /* Continuous vs. frame-by-frame playback */
        char c = (char)waitKey(10);
        if (_is_continuous)
        {
            if (c == ' ') // space pauses
            {
                if (waitKey(0) == 27) // Esc while paused exits
                {
                    break;
                }
            }
        }
        else
        {
            if (waitKey(0) == 27) // wait for a key each frame; Esc exits
            {
                break;
            }
        }
        if (c == 's') // single step
        {
            g_run = 1;
            cout << "Single step, run = " << g_run << endl;
        }
        if (c == 'r') // run mode
        {
            g_run = -1;
            cout << "Run mode, run = " << g_run << endl;
        }

    } // frame loop

    return 0;
}

/// Maintain a 3-frame history of armor-plate centers and draw a circle at the
/// predicted next position on `src`.
/// @param src          frame to draw on (modified in place)
/// @param target       armor-plate center detected in the current frame
/// @param target_saved rolling history of the last 3 centers (caller-owned)
void armorDetect::target_predict(cv::Mat &src, cv::Point2f target, vector<cv::Point2f> &target_saved)
{
    // Accumulate centers until a full 3-frame history exists; no prediction yet.
    if (target_saved.size() < 3)
    {
        target_saved.push_back(target);
        return;
    }

    // Slide the window: drop the oldest center, append the newest one.
    target_saved.erase(target_saved.begin());
    target_saved.push_back(target);

    // Velocity = average of the two inter-frame displacements, amplified by
    // 1.8 because three frames span very little time.
    Point2f dtarget =
        ((target_saved[2] - target_saved[1]) + (target_saved[1] - target_saved[0])) / 2.0 * 1.8;

    // Advance the current center by the scaled velocity and mark the
    // predicted region with a circle.
    Point2f predicted = target + dtarget;
    circle(src, predicted, 20, Scalar(0, 255, 0), 1, 8, 0);
    // putText(src, "predict_point", predicted, 2, 0.4, Scalar(255,255,255), 1, 8, 0);
}

/// Release the recording sink and the shared capture device.
armorDetect::~armorDetect()
{
    writer.release();  // flush and close the output video, if open
    capture.release(); // close the (file-scope) video source
}