#include "lane_detection.h"

using namespace std;
using namespace cv;

int main(int argc, char** argv) {
	cv::Mat img_input, img_uch, img_gray, img_canny, img_and, img_view, img_hls;
	cv::Mat img_hulf, img_roi, img_roi_cont, img_input_roi, img_filter, img_connect;
	cv::Mat img_transf, img_gray_t, img_canny_t, img_uch_open, img_hls_uch, img_input_rect;
	int img_count = 0;

	img_input = cv::imread("./lane_gz/lane_05.jpg");
	//double time_start = static_cast<double>(getTickCount());
	LaneDetection lane_detec;
	vector<cv::Mat> img_inputs;
	lane_detec.read_imgs(img_inputs);
	
	while(true) {
		img_input = img_inputs[img_count];
		cv::Point roi_pts[4];
		double row_prec = 0.3;
		lane_detec.roi_points(img_input, roi_pts, row_prec);
		lane_detec.lane_roi(img_input, img_input_roi, roi_pts);
		//cv::imshow("img_input_roi", img_input_roi);
#if 0
		lane_detec.perspective_transform(img_input_roi, img_transf, roi_pts, row_prec);
		cv::cvtColor(img_transf, img_gray_t, COLOR_BGR2GRAY);
		cv::Mat img_uch_t(img_input.rows, img_input.cols, CV_8UC1, cv::Scalar::all(0));
		cv::threshold(img_gray_t, img_uch_t, 100, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);//CV_THRESH_OTSU
		cv::imshow("img_transf", img_transf);
		cv::imshow("img_uch_t", img_uch_t);

		cv::Mat element = getStructuringElement(cv::MORPH_ELLIPSE, Size(15, 15)); //open
		cv::morphologyEx(img_uch_t, img_uch_open, MORPH_OPEN, element);
		cv::imshow("img_uch_open", img_uch_open);
#endif
		cv::Rect img_rect = cv::Rect(0, row_prec*img_input.rows, img_input.cols, (1-row_prec)*img_input.rows);
		img_input(img_rect).copyTo(img_input_rect);
		lane_detec.rgb_gray(img_input_rect, img_gray);
		cv::imshow("img_gray", img_gray);

		//lane_detec.fill_mean(img_gray);
		//cv::threshold(img_gray, img_uch, 10, 255, CV_THRESH_OTSU+CV_THRESH_BINARY);//CV_THRESH_BINARY
		
		lane_detec.adaptive_threshold(img_gray, img_uch);
		int open_val = 15;
		cv::Mat element = getStructuringElement(cv::MORPH_ELLIPSE, Size(open_val, open_val)); //open
		cv::morphologyEx(img_uch, img_uch_open, MORPH_OPEN, element);
		//cv::imshow("img_uch", img_uch);
		//cv::imshow("img_uch_open", img_uch_open);

		cv::Mat img_uch_rgb, img_add;
		img_input(img_rect).copyTo(img_add);
		cv::cvtColor(img_uch_open, img_uch_rgb, COLOR_GRAY2BGR);
		cv::add(img_add, img_uch_rgb, img_view);
		//cv::imshow("img_view", img_view);

		img_input(img_rect).copyTo(img_hls);
		lane_detec.rgb_to_hls(img_hls, img_hls_uch);
		//bitwise_and(img_uch, img_hls_uch, img_and);
		//cv::imshow("img_and", img_and);
		//lane_detec.filter_img(img_uch_open, img_filter);
		//cv::imshow("img_filter", img_filter);

		cv::Mat img_contour = cv::Mat(img_gray.size(), img_gray.type(), Scalar::all(0));
		lane_detec.find_contour(img_uch_open, img_contour, open_val);
		//imshow("img_contour", img_contour);

		//lane_detec.connect_component(img_uch_open, img_connect);
		cv::Canny(img_gray, img_canny, 40, 100);
		bitwise_and(img_contour, img_canny, img_canny);
		cv::imshow("img_canny", img_canny);
		//double time = (static_cast<double>(getTickCount()) - time_start) / getTickFrequency();
		//std::cout<<"time: "<<time<<std::endl;

		cv::Mat img_hough = cv::Mat(img_canny.size(), img_canny.type(), Scalar::all(0));
		cv::Mat img_hough1, img_differ;
		img_hough.copyTo(img_hough1);
		img_hough.copyTo(img_differ);

		std::vector<cv::Vec4i> lines;
		lines = lane_detec.detect_reduce_lines(img_canny, img_hough1);
		//cv::imshow("img_hough1", img_hough1);

		vector<linePara> lines_para;
		lines = lane_detec.filter_lines(lines, img_hough, lines_para);
		lines = lane_detec.differ_lines(lines, img_differ, lines_para);
		lane_detec.line_mask(lines_para, img_hough);
		lane_detec.line_mask(lines, img_input_rect);
		//lane_detec.line_mask(lines, img_differ);
		cv::imshow("img_hough", img_hough);
		//cv::imshow("img_differ", img_differ);
		cv::imshow("img_input_rect", img_input_rect);
		cv::imread("./img_input_rect.");
#if 0
		std::vector<std::vector<cv::Vec4i>> left_right_lines;
		std::vector<cv::Point> lanes;
		left_right_lines = lane_detec.line_separa(lines, img_roi);
		lanes = lane_detec.regression(left_right_lines, img_roi);
		cv::Mat img_lane = cv::Mat::zeros(img_hough.size(), img_hough.type());
		lane_detec.lane_mask(left_right_lines[0], img_lane);
		lane_detec.lane_mask(left_right_lines[1], img_lane);
		//cv::imshow("img_lane", img_lane);
#endif
		int key = cv::waitKey(0);
		if(key == 'q' || key == 'Q')
			break;
		else if(key == 'n' || key == 'N') {
			img_count++;
			if(img_count > img_inputs.size()) break;
			continue;
		}
		else if(key == 'u' || "U") {
			img_count--;
			if(img_count < 0) break;
			continue;
		}
		printf("\n");
	}

	return 0;
}

void LaneDetection::read_imgs(std::vector<cv::Mat>& img_inputs_) {
	// Load every image matching img_path (recursive glob) into the output
	// vector, in the order cv::glob returns them.
	vector<cv::String> img_files;
	cv::glob(img_path, img_files, true);
	for (const auto& file : img_files) {
		img_inputs_.push_back(cv::imread(file));
	}
}

void LaneDetection::rgb_gray(cv::Mat& img_input_, cv::Mat& img_output_)
{
	// Smooth the color image in place, then convert BGR -> single-channel
	// gray. (CLAHE / histogram equalization were tried here and disabled.)
	cv::GaussianBlur(img_input_, img_input_, cv::Size(3, 3), 0, 0);
	cv::cvtColor(img_input_, img_output_, COLOR_BGR2GRAY);
}

void LaneDetection::lane_roi(cv::Mat& img_input_, cv::Mat& img_roi_, cv::Point* pts_)
{
	// Mask the input down to the quadrilateral given by pts_ (4 points):
	// build a filled white polygon and AND it with the image.
	cv::Mat mask = cv::Mat::zeros(img_input_.size(), img_input_.type());
	const cv::Scalar fill = (img_input_.channels() > 1)
	                            ? cv::Scalar(255, 255, 255)
	                            : cv::Scalar(255);
	cv::fillConvexPoly(mask, pts_, 4, fill);
	cv::bitwise_and(img_input_, mask, img_roi_);
}

void LaneDetection::roi_points(cv::Mat& img_input_, cv::Point* pts_, double row_prec_) {
	// Fill pts_ with a trapezoid: top edge at row_prec_*rows, horizontally
	// inset by row_prec_*cols on each side; bottom edge spans the full width.
	// NOTE(review): the top-edge x-inset reuses the *row* fraction — confirm
	// that is intentional rather than a separate column fraction.
	const int h = img_input_.rows;
	const int w = img_input_.cols;
	pts_[0] = cv::Point(row_prec_ * w, row_prec_ * h);        // top-left
	pts_[1] = cv::Point((1 - row_prec_) * w, row_prec_ * h);  // top-right
	pts_[2] = cv::Point(w, h);                                // bottom-right
	pts_[3] = cv::Point(0, h);                                // bottom-left
}

void LaneDetection::perspective_transform(cv::Mat& img_input_, cv::Mat& img_output_, cv::Point* pts_, double row_prec_) {
	// Warp the ROI trapezoid so its top edge maps to the full image width
	// (bird's-eye style view). The bottom two points stay fixed.
	// row_prec_ is currently unused; kept for interface compatibility.
	cv::Point2f src_points[4] = { pts_[0], pts_[1], pts_[2], pts_[3] };
	cv::Point2f dst_points[4] = {
		cv::Point(0, 0),
		cv::Point(img_input_.cols, 0),
		pts_[2],
		pts_[3]
	};
	cv::Mat warp = getPerspectiveTransform(src_points, dst_points);
	warpPerspective(img_input_, img_output_, warp,
	                cv::Size(img_input_.cols, img_input_.rows), cv::INTER_LINEAR);
}

void LaneDetection::fill_mean(cv::Mat& img_input_) {
	// Replace near-black pixels (value <= 1) with (mean of non-zero pixels - 1),
	// so dark holes do not dominate later thresholding.
	// Bug fix: guard countNonZero == 0 — an all-black image previously
	// divided by zero (undefined behavior).
	int nonzero = cv::countNonZero(img_input_);
	if (nonzero == 0)
		return;
	cv::Scalar total = cv::sum(img_input_);
	int mean_val = static_cast<int>(total[0]) / nonzero - 1;

	for (int i = 0; i < img_input_.rows; i++) {
		uchar* ptr = img_input_.ptr<uchar>(i);
		for (int j = 0; j < img_input_.cols; j++) {
			if (ptr[j] > 1)
				continue;
			ptr[j] = mean_val;
		}
	}
}

void LaneDetection::filter_img(cv::Mat& img_input_, cv::Mat& img_output_) {
	// Filter the binary image with a horizontal edge kernel [-1 0 1]
	// (based on the Mathworks Lane Departure Warning System demo).
	// Bug fix: the center tap was never written — cv::Mat(1, 3, CV_32F) does
	// not zero-initialize, so the convolution used uninitialized memory.
	cv::Point anchor = cv::Point(-1, -1);
	cv::Mat kernel = (cv::Mat_<float>(1, 3) << -1.f, 0.f, 1.f);

	cv::filter2D(img_input_, img_output_, -1, kernel, anchor, 0, cv::BORDER_DEFAULT);
}

void LaneDetection::connect_component(cv::Mat& img_input_, cv::Mat& img_output_) {
	// Label connected components of the binary input and read per-component
	// statistics.
	// NOTE(review): this is currently a stub — the stats are fetched but never
	// used, and img_output_ is never written. Kept (and unused by the active
	// pipeline) as a starting point for blob-based filtering.
	cv::Mat labmap, stats, centroids;
	int nccomps = connectedComponentsWithStats(img_input_, labmap, stats, centroids);
	int img_area = img_input_.rows * img_input_.cols;
	
	// Label 0 is the background, so iterate from 1.
	for(int i = 1; i < nccomps; i++) {
		int area = stats.at<int>(i, cv::CC_STAT_AREA);
		//int width = stats.at<int>(i, cv::CC_STAT_WIDTH);
		//int height = stats.at<int>(i, cv::CC_STAT_HEIGHT);
		//cout<<"area: "<<area<<", width: "<<width<<", height: "<<height<<endl;
	}
	//cv::imshow("img_output_", img_output_);
}

void LaneDetection::rgb_to_hls(cv::Mat& img_input_, cv::Mat& img_output_)
{
	// Produce a binary mask from the saturation channel of the HLS image:
	// equalize its histogram, then adaptively threshold it.
	// NOTE: converts img_input_ to HLS in place (the caller passes a copy).
	int rows_ =  img_input_.rows;
	int cols_ =  img_input_.cols;
	vector<Mat> hls_ch;
	cv::cvtColor(img_input_, img_input_, COLOR_BGR2HLS);
	cv::split(img_input_, hls_ch);

	// Mean of the non-zero saturation values.
	// Bug fix: guard countNonZero == 0 — an all-zero channel previously
	// divided by zero (undefined behavior).
	cv::Scalar ss = cv::sum(hls_ch[2]);
	int val_all = cv::countNonZero(hls_ch[2]);
	int mean_val = (val_all > 0) ? static_cast<int>(ss[0]) / val_all : 0;

	// Above-mean saturation mask. NOTE(review): this mask is not used by the
	// output below; kept as an alternative thresholding strategy.
	cv::Mat img_mask(rows_, cols_, hls_ch[2].type(), Scalar::all(0));
	for(int i = 0; i < rows_; i++) {
		uchar* ptr = hls_ch[2].ptr<uchar>(i);
		uchar* ptr_m = img_mask.ptr<uchar>(i);
		for(int j = 0; j < cols_; j++) {
			if(ptr[j] < mean_val)
				continue;
			//ptr_m[j] = ptr[j];
			ptr_m[j] = 255;
		}
	}

	// Actual output path: equalized saturation -> adaptive threshold.
	cv::Mat img_mask_ada;
	cv::equalizeHist(hls_ch[2], img_mask_ada);
	adaptive_threshold(img_mask_ada, img_mask_ada);
	img_mask_ada.copyTo(img_output_);
}

void LaneDetection::adaptive_threshold(cv::Mat& img_input_, cv::Mat& img_output_)
{
	// Gaussian-weighted adaptive threshold. Color inputs are blurred and
	// converted to gray first; gray inputs are thresholded directly.
	const int max_val = 255;
	const int block_size = 101;  // large window: keeps broad lane markings intact
	const double c = 0;

	cv::Mat gray;
	if (img_input_.channels() > 1) {
		cv::Mat blurred;
		cv::GaussianBlur(img_input_, blurred, cv::Size(3, 3), 0, 0);
		cv::cvtColor(blurred, gray, COLOR_BGR2GRAY);
	} else {
		gray = img_input_;
	}
	cv::adaptiveThreshold(gray, img_output_, max_val, ADAPTIVE_THRESH_GAUSSIAN_C,
	                      CV_THRESH_BINARY, block_size, c);
}

void LaneDetection::line_mask(std::vector<cv::Vec4i> lines_, cv::Mat& img_input_)
{
	// Draw every segment onto the image: red on color images, white on gray.
	const Scalar color = (img_input_.channels() > 1) ? cv::Scalar(0, 0, 255)
	                                                 : Scalar(255);
	for (const auto& seg : lines_) {
		cv::line(img_input_,
		         cv::Point(seg[0], seg[1]),
		         cv::Point(seg[2], seg[3]),
		         color, 2, CV_AA);
	}
}

void LaneDetection::line_mask(std::vector<linePara> line_paras_, cv::Mat& img_output_)
{
	RNG rng(215526);
    std::vector<Scalar> colors(line_paras_.size());
    for (int i = 0; i < line_paras_.size(); i++) {
        colors[i] = Scalar(rng.uniform(30, 255), rng.uniform(30, 255), rng.uniform(30, 255));
    }

	cv::Point point_ini, point_fini;
	for(auto line:line_paras_) {
		point_ini = cv::Point(line.points[0], line.points[1]);
		point_fini = cv::Point(line.points[2], line.points[3]);
		cv::line(img_output_, point_ini, point_fini, colors[line.label], 2, CV_AA);
	}
}

std::vector<std::vector<cv::Vec4i>> LaneDetection::line_separa(std::vector<cv::Vec4i> lines, cv::Mat& img_edges)
{
	// Split Hough segments into lane-side candidates:
	//   output[0] = right side (positive slope, both endpoints right of center)
	//   output[1] = left side  (negative slope, both endpoints left of center)
	// Segments that are too flat or too steep are discarded.
	// Cleanup: the original also recomputed per-segment slopes/intercepts into
	// local vectors that were never used; that dead code has been removed.
	std::vector<std::vector<cv::Vec4i>> output(2);
	cv::Point ini, fini;
	double slope_thresh_min = 0.2, slope_thresh_max = 5;
	double slope = 0;
	std::vector<cv::Vec4i> right_lines, left_lines;

	double img_center = img_edges.cols / 2;
	for (auto line : lines) {
		ini = cv::Point(line[0], line[1]);
		fini = cv::Point(line[2], line[3]);
		// +0.001 avoids a division by zero on vertical segments.
		slope = (fini.y - ini.y) * 1.0 / (fini.x - ini.x + 0.001);

		// Discard nearly-horizontal and nearly-vertical segments.
		if (std::abs(slope) < slope_thresh_min || std::abs(slope) > slope_thresh_max) {
			continue;
		}

		// Classify by slope sign and which half of the image it lives in.
		if (slope > 0 && fini.x > img_center && ini.x > img_center) {
			right_lines.push_back(line);
		}
		else if (slope < 0 && fini.x < img_center && ini.x < img_center) {
			left_lines.push_back(line);
		}
	}

	output[0] = right_lines;
	output[1] = left_lines;
	return output;
}

std::vector<cv::Point> LaneDetection::regression(vector<vector<cv::Vec4i>> left_right_lines, cv::Mat& img_input_)
{
	// Fit one straight line per side (y = m*x + b) through every segment
	// endpoint and return the four lane boundary points:
	// {right_bottom, right_top, left_bottom, left_top}.
	std::vector<cv::Point> output(4);
	cv::Point ini, fini;
	cv::Vec4d right_line, left_line;
	std::vector<cv::Point> right_pts, left_pts;
	// Bug fix: these were read uninitialized below whenever a side had no
	// segments (undefined behavior). Default to a unit slope through the
	// origin so the divisions stay defined.
	cv::Point right_b(0, 0), left_b(0, 0);  // a point on each fitted line
	double right_m = 1.0, left_m = 1.0;     // slopes: y = m*x + b

	// Collect both endpoints of every right-side segment.
	for (auto line:left_right_lines[0]) {
		ini = cv::Point(line[0], line[1]);
		fini = cv::Point(line[2], line[3]);
		right_pts.push_back(ini);
		right_pts.push_back(fini);
	}

	if (right_pts.size() > 0) {
		// Least-squares fit; fitLine output is (vx, vy, x0, y0).
		cv::fitLine(right_pts, right_line, CV_DIST_L2, 0, 0.01, 0.01);
		right_m = right_line[1] / right_line[0];
		right_b = cv::Point(right_line[2], right_line[3]);
	}

	// Collect both endpoints of every left-side segment.
	for (auto line:left_right_lines[1]) {
		ini = cv::Point(line[0], line[1]);
		fini = cv::Point(line[2], line[3]);
		left_pts.push_back(ini);
		left_pts.push_back(fini);
	}

	if (left_pts.size() > 0) {
		cv::fitLine(left_pts, left_line, CV_DIST_L2, 0, 0.01, 0.01);
		left_m = left_line[1] / left_line[0];
		left_b = cv::Point(left_line[2], left_line[3]);
	}

	// Evaluate x = (y - y0)/m + x0 at the bottom row and at the horizon row.
	int ini_y = img_input_.rows;
	int fin_y = 470;  // TODO: hard-coded horizon row; derive from image size
	double right_ini_x = ((ini_y - right_b.y) / right_m) + right_b.x;
	double right_fin_x = ((fin_y - right_b.y) / right_m) + right_b.x;
	double left_ini_x = ((ini_y - left_b.y) / left_m) + left_b.x;
	double left_fin_x = ((fin_y - left_b.y) / left_m) + left_b.x;

	output[0] = cv::Point(right_ini_x, ini_y);
	output[1] = cv::Point(right_fin_x, fin_y);
	output[2] = cv::Point(left_ini_x, ini_y);
	output[3] = cv::Point(left_fin_x, fin_y);
	return output;
}

int LaneDetection::find_contour(cv::Mat& img_input_, cv::Mat& img_output_, int val_)
{
	// Draw (filled, white) onto img_output_ only the contours of img_input_
	// that look like lane markings: long and large enough and — for mid-sized
	// blobs — elongated. val_ is the opening kernel size used upstream; it
	// scales every threshold here. Always returns 0.
	// Find external contours of the binary mask
	vector<vector<Point>> contours;
	findContours(img_input_, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

	double length, area, size_width, width_rate;
	for (int i = 0; i < contours.size(); i++) {
		// Compute perimeter and area; drop blobs that are too small on either.
		length = arcLength(contours[i], true);
		area = contourArea(contours[i]);
		if (length < 4 * val_ || area < 2 * val_ * val_)
			continue;

		// Axis-aligned bounding rectangle (unused)
		//Rect rect = boundingRect(contours[i]);		

		// Minimum-area (rotated) rectangle: the width/height ratio measures
		// elongation. Mid-sized, near-square blobs are rejected; larger blobs
		// pass unconditionally, smaller ones were already filtered above.
		RotatedRect rrt = minAreaRect(contours[i]);
		size_width = rrt.size.width*1.0 / rrt.size.height;
		if(area < 10 * val_ * val_) {
			width_rate = 3;
			if(size_width < width_rate && size_width > 1.0/width_rate)
				continue;
		}
		else if(area < 15 * val_ * val_) {
			width_rate = 2;
			if(size_width < width_rate && size_width > 1.0/width_rate)
				continue;
		}
#if 0
		if (contours[i].size() > 5 && area < 10 * val_ * val_) {
			// Ellipse fit: reject near-horizontal orientations
			RotatedRect errt = fitEllipse(contours[i]);
			if ((errt.angle < 5.0) || (errt.angle > 160.0)) {
				continue;
			}
		}
#endif
		drawContours(img_output_, contours, i, Scalar(255), -1, 8);
	}
	// Slightly thicken the surviving contours before the Canny AND step.
	cv::Mat element_con_ = getStructuringElement(cv::MORPH_ELLIPSE, Size(3, 3)); //open
	cv::dilate(img_output_, img_output_, element_con_);

	return 0;
}

std::vector<cv::Vec4i> LaneDetection::detect_reduce_lines(cv::Mat& img_input_, cv::Mat& img_output_)
{
    // Detect probabilistic Hough segments in the edge image, cluster nearly
    // collinear segments with cv::partition, and reduce each cluster to a
    // single least-squares-fitted segment. The raw (pre-reduction) segments
    // are drawn onto img_output_, one color per cluster, for debugging.
    // Returns the reduced segments (one per cluster).
    std::vector<cv::Vec4i> lines;
    // Apply Hough Transform
    HoughLinesP(img_input_, lines, 1, CV_PI / 180, 40, 20, 10);

    // partition via our partitioning function
    std::vector<int> labels;
    int equilavence_class_count = cv::partition(
        lines, labels, [this](const cv::Vec4i l1, const cv::Vec4i l2) {
            return this->extend_bound_rectangle_line_equivalence( l1, l2,
                0.1, //line extension length - as fraction of original line width
                2.0, //maximum allowed angle difference for lines to be considered in same equivalence class
                20   //thickness of bounding rectangle around each line
			);
        });

    // grab a random color for each equivalence class
    // (fixed seed keeps colors stable across frames)
    RNG rng(215526);
    std::vector<Scalar> colors(equilavence_class_count);
    for (int i = 0; i < equilavence_class_count; i++) {
        colors[i] = Scalar(rng.uniform(30, 255), rng.uniform(30, 255), rng.uniform(30, 255));
    }

    // draw original detected lines
    for (int i = 0; i < lines.size(); i++) {
        cv::Vec4i& detected_line = lines[i];
        line(img_output_, cv::Point(detected_line[0], detected_line[1]),
             cv::Point(detected_line[2], detected_line[3]), colors[labels[i]], 1);
    }

    // build point clouds out of each equivalence classes
    std::vector<std::vector<Point2i>> point_clouds(equilavence_class_count);
    for (int i = 0; i < lines.size(); i++) {
        cv::Vec4i& detected_line = lines[i];
        point_clouds[labels[i]].push_back(Point2i(detected_line[0], detected_line[1]));
        point_clouds[labels[i]].push_back(Point2i(detected_line[2], detected_line[3]));
    }

    // fit line to each equivalence class point cloud
    std::vector<cv::Vec4i> reduced_lines = std::accumulate(
        point_clouds.begin(), point_clouds.end(), std::vector<cv::Vec4i>{},
        [](std::vector<cv::Vec4i> target, const std::vector<Point2i>& point_cloud_) {
            std::vector<Point2i> point_cloud = point_cloud_;

            // line_params: [vx,vy, x0,y0]: (normalized vector, point on our contour)
            // (x,y) = (x0,y0) + t*(vx,vy), t -> (-inf; inf)
            Vec4f line_params;
            cv::fitLine(point_cloud, line_params, cv::DIST_L2, 0, 0.01, 0.01);

            // derive the bounding xs of point cloud
            decltype(point_cloud)::iterator min_xp, max_xp;
            std::tie(min_xp, max_xp) = std::minmax_element(point_cloud.begin(), point_cloud.end(),
                                    [](const Point2i& p1, const Point2i& p2) {
                                        return p1.x < p2.x;
                                    });

            // derive y coords of fitted line at the cloud's extreme xs
            float m = line_params[1] / line_params[0];
            int y1 = ((min_xp->x - line_params[2]) * m) + line_params[3];
            int y2 = ((max_xp->x - line_params[2]) * m) + line_params[3];

            target.push_back(cv::Vec4i(min_xp->x, y1, max_xp->x, y2));
            return target;
        });
    lines = reduced_lines;

    return lines;
}

bool LaneDetection::extend_bound_rectangle_line_equivalence(const cv::Vec4i& l1_, const cv::Vec4i& l2_, 
			float exten_length_fraction, float max_angle_diff, float bound_rectangle_thickness)
{
    // Equivalence predicate for cv::partition (see detect_reduce_lines):
    // two segments are "the same line" when their angles differ by less than
    // max_angle_diff degrees AND an endpoint of one lies strictly inside a
    // rectangle of thickness bound_rectangle_thickness built around the other.
    // Both segments are first extended by exten_length_fraction of their length.
    cv::Vec4i l1(l1_), l2(l2_);
    // extend lines by percentage of line width
    float len1 = sqrtf((l1[2]-l1[0]) * (l1[2]-l1[0]) + (l1[3]-l1[1]) * (l1[3]-l1[1]));
    float len2 = sqrtf((l2[2]-l2[0]) * (l2[2]-l2[0]) + (l2[3]-l2[1]) * (l2[3]-l2[1]));
    cv::Vec4i el1 = extend_line(l1, len1 * exten_length_fraction);
    cv::Vec4i el2 = extend_line(l2, len2 * exten_length_fraction);

    // reject the lines that have wide difference in angles
    // (angle from the fitted slope, compared in radians)
    float a1 = atan(linear_parameters(el1)[0]);
    float a2 = atan(linear_parameters(el2)[0]);
    if (fabs(a1 - a2) > max_angle_diff * M_PI / 180.0) {
        return false;
    }

    // calculate window around extended line
    // at least one point needs to inside extended bounding rectangle of other line,
    // (pointPolygonTest returns +1 inside, 0 on edge, -1 outside)
    vector<Point2i> lineBoundingContour = bound_rectangle_contour(el1, bound_rectangle_thickness/2);
    return pointPolygonTest(lineBoundingContour, cv::Point(el2[0], el2[1]), false) == 1 ||
           pointPolygonTest(lineBoundingContour, cv::Point(el2[2], el2[3]), false) == 1;
}


cv::Vec4i LaneDetection::extend_line(cv::Vec4i line, double len)
{
    // Lengthen the segment by `len` at each end, along its own direction.
    // Reorder the endpoints left-to-right first so the offsets have a
    // consistent sign.
    Vec4d ordered = (line[2] < line[0])
                        ? Vec4d(line[2], line[3], line[0], line[1])
                        : Vec4d(line[0], line[1], line[2], line[3]);
    double m = linear_parameters(ordered)[0];
    // From len^2 = xd^2 + yd^2 with yd = m * xd (Pythagorean theorem):
    double xd = sqrt(len * len / (m * m + 1));
    double yd = xd * m;
    return Vec4d(ordered[0] - xd, ordered[1] - yd, ordered[2] + xd, ordered[3] + yd);
}

cv::Vec2d LaneDetection::linear_parameters(cv::Vec4i line)
{
    // Slope/intercept of the line through the segment's endpoints:
    // solve [x1 1; x2 1] * [m; c] = [y1; y2] for (m, c).
    Mat coeffs = (Mat_<double>(2, 2) << line[0], 1, line[2], 1);
    Mat rhs = (Mat_<double>(2, 1) << line[1], line[3]);
    cv::Vec2d mc;
    cv::solve(coeffs, rhs, mc);
    return mc;
}

std::vector<Point2i> LaneDetection::bound_rectangle_contour(cv::Vec4i line, float d)
{
    // Rectangle of half-thickness d around the segment: offset each endpoint
    // perpendicular to the line, in both senses.
    // https://math.stackexchange.com/a/2043065/183923
    cv::Vec2f mc = linear_parameters(line);
    float m = mc[0];
    // x-step that moves a distance d along a line of slope -1/m.
    float factor = sqrtf((d * d) / (1 + (1 / (m * m))));

    float ax, ay, bx, by, cx, cy, ex, ey;
    if (m == 0) {
        // Horizontal segment: the perpendicular is vertical.
        ax = line[0];  ay = line[1] + d;
        bx = line[0];  by = line[1] - d;
        cx = line[2];  cy = line[3] + d;
        ex = line[2];  ey = line[3] - d;
    } else {
        float m_per = -1 / m;                      // slope of the perpendiculars
        // y = m_per * x + c_per through each endpoint
        float c_per1 = line[1] - m_per * line[0];
        float c_per2 = line[3] - m_per * line[2];

        ax = line[0] + factor;  ay = m_per * ax + c_per1;
        bx = line[0] - factor;  by = m_per * bx + c_per1;
        cx = line[2] + factor;  cy = m_per * cx + c_per2;
        ex = line[2] - factor;  ey = m_per * ex + c_per2;
    }
    // Order the corners so the polygon is non-self-intersecting:
    // (+) then (-) at endpoint 1, then (-) then (+) at endpoint 2.
    return std::vector<Point2i>{Point2i(ax, ay), Point2i(bx, by), Point2i(ex, ey), Point2i(cx, cy)};
}

vector<cv::Vec4i> LaneDetection::filter_lines(vector<cv::Vec4i> lines_, cv::Mat& img_input_, vector<linePara>& line_paras_)
{
	// Keep only segments whose slope, length and horizontal placement are
	// plausible for a lane boundary; append a linePara entry (slope,
	// intercept, angle, length, top-row crossing) for every kept segment.
	std::vector<cv::Vec4i> kept;
	const double slope_min = 0.2;
	const double slope_max = 5;
	const int length_min = 0.15 * img_input_.rows;
	const int col_left = img_input_.cols / 3;
	const int col_right = img_input_.cols * 2 / 3;
	const int col_center = img_input_.cols / 2;

	for (const auto& seg : lines_) {
		const double dy = seg[3] - seg[1];
		const double dx = seg[2] - seg[0];
		const double angle = atan2(dy, dx) * 180.0 / CV_PI;
		// +0.001 avoids a division by zero on vertical segments.
		const double slope = dy / (dx + 0.001);
		if (std::abs(slope) < slope_min || std::abs(slope) > slope_max)
			continue;

		// Drop short segments.
		const double seg_length = sqrt(dx * dx + dy * dy);
		if (seg_length < length_min)
			continue;

		// Drop right-leaning segments stuck in the left third and
		// left-leaning segments stuck in the right third (seg[0] < seg[2]).
		if (slope > 0 && seg[0] < col_left && seg[2] < col_center)
			continue;
		if (slope < 0 && seg[0] > col_center && seg[2] > col_right)
			continue;

		const double intercept = seg[1] - slope * seg[0];
		linePara para;
		para.points = seg;
		para.b = intercept;
		para.angle = angle;
		para.slope = slope;
		para.upcol_edge = (0 - intercept) / slope;  // x where the line crosses row 0
		para.length = seg_length;
		line_paras_.push_back(para);

		kept.push_back(seg);
	}

	return kept;
}


std::vector<cv::Vec4i> LaneDetection::differ_lines(vector<cv::Vec4i> lines, cv::Mat& img_input_, vector<linePara>& line_paras_)
{
	// Greedy single-pass clustering of the filtered segments: two segments
	// join the same cluster when their angle and top-row crossing point are
	// close to the cluster's running (length-weighted) averages. Clusters
	// whose total length is below lengths_min are discarded. Sets .label on
	// every entry of line_paras_, draws the surviving segments onto
	// img_input_ (one color per cluster), and returns those segments.
	vector<diffLinePara> diff_lens;       // one running aggregate per cluster
	vector<cv::Vec4i> lines_ret;
	cv::Point point_ini, point_fini;
	double angle_sub, upcol_edge_sub;
	int count = 0;                        // highest label assigned so far
	double angle_rate = 2.0;              // max angle difference (degrees)
	int lengths_min = img_input_.rows / 2;    // min total cluster length
	int upcol_edge_max = img_input_.cols / 8; // max top-row crossing difference
	int diff_num = 0;                     // number of surviving clusters

	for(auto& line_para:line_paras_) {
		// First segment seeds cluster 0.
		if(diff_lens.size() < 1) {
			line_para.label = 0;
			diff_lens.push_back({0, line_para.length, line_para.angle, line_para.upcol_edge});
			continue;
		}

		// Try to join an existing cluster.
		bool same = false;
		for(auto& diff_len:diff_lens) {
			angle_sub = abs(line_para.angle - diff_len.angle);
			upcol_edge_sub = abs(line_para.upcol_edge - diff_len.upcol_edge);
			//cout<<"line_para.upcol_edge: "<<line_para.upcol_edge<<", angle_sub: "<<angle_sub<<", upcol_edge_sub: "<<upcol_edge_sub<<endl;

			if(angle_sub < angle_rate && upcol_edge_sub < upcol_edge_max) {
				// Fold this segment into the cluster: accumulate the length,
				// then update angle/upcol_edge as length-weighted averages
				// (note: length is updated first, so the weight uses the NEW
				// total length).
				line_para.label = diff_len.label;
				diff_len.length += line_para.length;
				diff_len.angle = line_para.length/diff_len.length * line_para.angle +
									(1- line_para.length/diff_len.length) * diff_len.angle;
				diff_len.upcol_edge = line_para.length/diff_len.length * line_para.upcol_edge +
									(1- line_para.length/diff_len.length) * diff_len.upcol_edge;
				same = true;
				//cout<<"join diff"<<endl;
				break;
			}
		}

		// No match: start a new cluster with the next label.
		if(!same) {
			count++;
			line_para.label = count;
			diff_lens.push_back({count, line_para.length, line_para.angle, line_para.upcol_edge});
		}
	}

	// Fixed seed keeps cluster colors stable between calls.
	RNG rng(215526);
	vector<Scalar> color(diff_lens.size());
	for(int i = 0; i < diff_lens.size(); i++) {
		color[i] = Scalar(rng.uniform(40, 255), rng.uniform(40, 255), rng.uniform(40, 255));
	}

	// Emit and draw the segments of every cluster long enough to keep.
	for(int i = 0; i < diff_lens.size(); i++) {
		auto& diff_len = diff_lens[i];
		if(diff_len.length < lengths_min)
			continue;
		diff_num++;

		for(auto line_para:line_paras_) {
			if(line_para.label != diff_len.label)
				continue;
			lines_ret.push_back(line_para.points);

			point_ini = Point(line_para.points[0], line_para.points[1]);
			point_fini = Point(line_para.points[2], line_para.points[3]);
			line(img_input_, point_ini, point_fini, color[i], 1);
			cout<<"line_para label: "<<line_para.label<<", len: "<<line_para.length<<", angle: "<<line_para.angle
				<<", upcol_edge: "<<line_para.upcol_edge<<endl;
		}
		//cout<<"diff_len label: "<<diff_len.label<<", len: "<<diff_len.length<<", angle: "<<diff_len.angle<<endl;
	}
	cout<<"line_paras_: "<<line_paras_.size()<<", diff_num: "<<diff_num<<", lines_ret: "<<lines_ret.size()<<endl;

    // Extend each surviving segment to the image border.
    // NOTE(review): the write-back below is commented out, so this loop's
    // results are currently discarded — confirm whether that is intentional.
    for (size_t i = 0; i < lines_ret.size(); i++) {
        point_ini = Point(lines_ret[i][0], lines_ret[i][1]);
        point_fini = Point(lines_ret[i][2], lines_ret[i][3]);
        line_point_image_border(point_ini, point_fini, point_ini, point_fini, img_input_.rows, img_input_.cols);
        //lines_ret[i] = cv::Vec4i(point_ini.x, point_ini.y, point_fini.x, point_fini.y);
    }

	return lines_ret;
}

void LaneDetection::line_point_image_border(const cv::Point &p1_in, const cv::Point &p2_in, 
						cv::Point &p1_out, cv::Point &p2_out, int rows, int cols)
{
    // Extend the infinite line through p1_in/p2_in and return (via p1_out /
    // p2_out) the first two points where it crosses the image border.
    // epsilon keeps the slope finite for vertical input segments.
    double m = (p1_in.y - p2_in.y)*1.0 / (p1_in.x - p2_in.x + std::numeric_limits<double>::epsilon());
    double b = p1_in.y - (m * p1_in.x);

    std::vector<cv::Point> border_point;
    double x, y;
    // intersection with the top edge (y = 0)
    y = 0;
    x = (y - b) / m;
    if (x > 0 && x < cols) border_point.push_back(cv::Point(x, y));

    // intersection with the bottom edge (y = rows)
    y = rows;
    x = (y - b) / m;
    if (x > 0 && x < cols) border_point.push_back(cv::Point(x, y));

    // intersection with the left edge (x = 0)
    x = 0;
    y = m * x + b;
    if (y > 0 && y < rows) border_point.push_back(cv::Point(x, y));

    // intersection with the right edge (x = cols)
    x = cols;
    y = m * x + b;
    if (y > 0 && y < rows) border_point.push_back(cv::Point(x, y));

    // Bug fix: a line that misses the image interior (or grazes a corner)
    // collects fewer than two hits; previously this indexed out of bounds.
    // Leave the outputs unchanged in that case.
    if (border_point.size() < 2)
        return;
    p1_out = border_point[0];
    p2_out = border_point[1];
}
