import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the (already distortion-corrected) panel image and convert to grayscale.
correct_image = cv2.imread("./panel2.png")
# BUG FIX: imread returns None (no exception) when the file is missing or
# unreadable; fail loudly here instead of crashing inside cvtColor.
if correct_image is None:
    raise FileNotFoundError("could not read ./panel2.png")
correct_image = cv2.cvtColor(correct_image, cv2.COLOR_BGR2GRAY)

# 3x3 rectangular structuring element shared by every morphology pass.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

# The original dilate/erode pairs, expressed as their named operations:
# close (dilate then erode), open (erode then dilate), close again.
# Result is identical; only the final image is used downstream.
eroded_image = cv2.morphologyEx(correct_image, cv2.MORPH_CLOSE, kernel)
eroded_image = cv2.morphologyEx(eroded_image, cv2.MORPH_OPEN, kernel)
eroded_image = cv2.morphologyEx(eroded_image, cv2.MORPH_CLOSE, kernel)

# Column-wise intensity sum: lane pixels produce peaks along the x axis.
histogram = np.sum(eroded_image, axis=0)

# Split the histogram in half; the strongest column on each side becomes
# the starting x position for the left / right lane window search.
# (Was np.array(shape[0] / 2, dtype=np.int32) — a float divide wrapped in
# a 0-d array; integer floor division is the idiomatic equivalent.)
midpoint = histogram.shape[0] // 2
left_x_base = np.argmax(histogram[:midpoint])
right_x_base = np.argmax(histogram[midpoint:]) + midpoint

# 2. Sliding-window lane detection — search parameters.
# Number of stacked windows and the pixel height of each one.
m_windows = 9
window_height = eroded_image.shape[0] // m_windows

# Row / column coordinates of every non-zero pixel in the binary image.
non_zero_y, non_zero_x = (np.array(coords) for coords in eroded_image.nonzero())

# Current window centers, initialised from the histogram peaks.
left_x_current = left_x_base
right_x_current = right_x_base

# Half-width of each search window along x (hand-tuned).
margin = 100
# A window containing fewer than this many non-zero pixels does not
# re-center the next window.
minpix = 50

# Per-window indices (into non_zero_x / non_zero_y) of detected lane pixels.
left_lane_inds = []
right_lane_inds = []

# Walk the m_windows search windows from the bottom of the image upward.
for window in range(m_windows):
    # y range of this window (row 0 is the image top, so window 0 covers
    # the bottom rows).
    win_y_low = eroded_image.shape[0] - (window + 1) * window_height
    win_y_high = eroded_image.shape[0] - window * window_height
    # x range of the left-lane window
    win_x_left_low = left_x_current - margin
    win_x_left_high = left_x_current + margin
    # x range of the right-lane window
    win_x_right_low = right_x_current - margin
    win_x_right_high = right_x_current + margin

    # Indices (into non_zero_x / non_zero_y) of the non-zero pixels that
    # fall inside each window.  NOTE: ndarray.nonzero() returns a 1-tuple
    # of index arrays, so good_*_inds is a tuple, not a bare array; the
    # downstream np.concatenate(..., axis=1) relies on that.
    good_left_inds = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) &
                      (non_zero_x >= win_x_left_low) & (non_zero_x < win_x_left_high)).nonzero()

    good_right_inds = ((non_zero_y >= win_y_low) & (non_zero_y < win_y_high) &
                       (non_zero_x >= win_x_right_low) & (non_zero_x < win_x_right_high)).nonzero()

    left_lane_inds.append(good_left_inds)
    right_lane_inds.append(good_right_inds)

    # Re-center the next window on the mean x of the pixels found here,
    # but only when enough pixels were found.
    # BUG FIX: the original tested len(good_left_inds), which is the length
    # of the 1-tuple (always 1, never > minpix), so the windows never
    # re-centered and curved lanes were lost; test the index array's size.
    if good_left_inds[0].size > minpix:
        left_x_current = int(np.mean(non_zero_x[good_left_inds]))
    if good_right_inds[0].size > minpix:
        right_x_current = int(np.mean(non_zero_x[good_right_inds]))

# Convert the per-window index collections into single index arrays.
# NOTE: each list element is the 1-tuple returned by .nonzero(), which
# np.concatenate treats as a (1, k) array-like, so concatenating along
# axis=1 yields one (1, N) index array per lane.
left_lane_inds = np.concatenate(left_lane_inds,axis=1)
right_lane_inds = np.concatenate(right_lane_inds,axis=1)

# Pixel coordinates of the detected lane points; because the index arrays
# are shaped (1, N), each of these is also shaped (1, N).
left_x = non_zero_x[left_lane_inds]
left_y = non_zero_y[left_lane_inds]
right_x = non_zero_x[right_lane_inds]
right_y = non_zero_y[right_lane_inds]



# 3. Fit a second-order polynomial to each lane's points (the [0] drops
# the leading (1, N) axis).  polyfit returns the coefficients (a, b, c).
left_fit = np.polyfit(left_y[0],left_x[0],2)
right_fit = np.polyfit(right_y[0],right_x[0],2)
# left_fit = (a,b,c)  x = ay**2 + by + c
# left_fit = (a,b,c)  x = ay**2 + by + c


# Number of image rows; the fitted curves are sampled at every y.
y_max = eroded_image.shape[0]
# Stack the single channel into 3 so we can draw in colour.
# NOTE(review): eroded_image is 8-bit grayscale, so * 255 wraps around for
# any pixel value > 1 — this assumes a 0/1 binary mask; confirm upstream.
out_img = np.dstack((eroded_image, eroded_image, eroded_image)) * 255
# Sample each fitted polynomial x = a*y^2 + b*y + c to get (x, y) points.
left_points = [[left_fit[0] * y ** 2 + left_fit[1] * y + left_fit[2], y] for y in range(y_max)]
right_points = [[right_fit[0] * y ** 2 + right_fit[1] * y + right_fit[2], y] for y in range(y_max)]
# Merge the two lanes' points and draw them as filled circles.
line_points = np.vstack((right_points, left_points))
np.random.shuffle(line_points)
# cv2.fillPoly(out_img,np.int_([line_points]),(0,255,0))
for point in line_points.astype(np.int32):
    # BUG FIX: cv2.circle expects the center as a tuple of ints; passing a
    # numpy array row raises "can't parse 'center'" on recent OpenCV builds.
    cv2.circle(out_img, tuple(point), 10, color=(0, 255, 0), thickness=10)

# cv2.imshow("out_img",out_img)
# cv2.waitKey()

# Offsets that defined the bird's-eye rectangle when the view was created.
offset_x = 160
offset_y = 0
img_shape = correct_image.shape

# Source trapezoid in the original camera image (image is 720 x 1280).
camera_quad = np.float32([
    [img_shape[1] * 0.4, img_shape[0] * 0.7],
    [img_shape[1] * 0.6, img_shape[0] * 0.7],
    [img_shape[1] / 8, img_shape[0]],
    [img_shape[1] * 7 / 8, img_shape[0]],
])

# Corresponding rectangle in the bird's-eye view.
birdseye_rect = np.float32([
    [offset_x, offset_y],
    [img_shape[1] - offset_x, offset_y],
    [offset_x, img_shape[0] - offset_y],
    [img_shape[1] - offset_x, img_shape[0] - offset_y],
])

# Inverse perspective matrix (bird's-eye -> camera view): warp the drawn
# lane overlay back into the original image geometry.
pts = cv2.getPerspectiveTransform(birdseye_rect, camera_quad)
correct_image = cv2.warpPerspective(out_img, pts, (img_shape[1], img_shape[0]))

# Blend the warped lane overlay onto the original camera frame.
img = cv2.imread("./img/up01.png")
# BUG FIX: imread returns None on a missing file; fail with a clear error
# instead of crashing inside bitwise_or.
if img is None:
    raise FileNotFoundError("could not read ./img/up01.png")
# NOTE(review): bitwise_or requires both images to share shape and dtype —
# confirm up01.png matches the warped overlay's size.
union_img = cv2.bitwise_or(img, correct_image)

cv2.imshow("union_img", union_img)
cv2.waitKey()

