import base64
import json
import queue
import time
import matplotlib.pyplot as plt
import cv2
import numpy as np
import hqyj_mqtt
from pid import PID
from tools import *

class LaneCenterPlotter:
    """Live matplotlib plot comparing the detected lane center against the
    image center, frame by frame.

    Fixes vs. original: `update_plot` no longer redraws the canvas twice per
    frame once the scroll window is active, and the sample lists are trimmed
    so memory stays bounded in a forever-running loop.
    """

    def __init__(self, max_frames=200, image_height=480):
        # Interactive mode so the figure refreshes without blocking the loop.
        plt.ion()

        self.fig, self.ax = plt.subplots()
        self.line_lane_center, = self.ax.plot([], [], 'r-', label='Lane Center')
        self.line_image_center, = self.ax.plot([], [], 'b-', label='Image Center')

        # Chart title and axis labels.
        self.ax.set_title('Lane and Image Center')
        self.ax.set_xlabel('Frame')
        self.ax.set_ylabel('Pixel Coordinate')

        # Rolling sample buffers (trimmed to max_frames in update_plot).
        self.x_data = []
        self.y_data_lane_center = []
        self.y_data_image_center = []
        self.max_frames = max_frames
        self.image_height = image_height

        # Initialize axis limits and clear both series.
        self.init_plot()

    def init_plot(self):
        """Reset the axis limits and empty both line series."""
        self.ax.set_xlim(0, self.max_frames)
        self.ax.set_ylim(0, self.image_height)
        self.line_lane_center.set_data([], [])
        self.line_image_center.set_data([], [])

    def update_plot(self, frame, lane_center, image_center):
        """Append one sample per series and redraw; once more than
        `max_frames` samples exist, the x-axis scrolls to show the newest
        window and older samples are discarded."""
        self.x_data.append(frame)
        self.y_data_lane_center.append(lane_center)
        self.y_data_image_center.append(image_center)

        if len(self.x_data) > self.max_frames:
            # Bound memory: keep only the samples that can still be shown.
            del self.x_data[:-self.max_frames]
            del self.y_data_lane_center[:-self.max_frames]
            del self.y_data_image_center[:-self.max_frames]
            self.ax.set_xlim(self.x_data[0], self.x_data[-1])

        # Refresh both line series with the (possibly trimmed) buffers.
        self.line_lane_center.set_data(self.x_data, self.y_data_lane_center)
        self.line_image_center.set_data(self.x_data, self.y_data_image_center)

        # Fix: original drew the canvas twice per frame when scrolling.
        self.ax.figure.canvas.draw()
        self.ax.figure.canvas.flush_events()


def b64_to_np(image):
    """Decode the base64 JPEG/PNG payload in `image['image']` into a BGR
    OpenCV image (numpy array)."""
    raw_bytes = base64.b64decode(image['image'])
    byte_buffer = np.frombuffer(raw_bytes, dtype=np.uint8)
    return cv2.imdecode(byte_buffer, cv2.IMREAD_COLOR)


# 图像矫正
def perspective_transform(image):
    # cv2.line(image, (53, image.shape[0]), (image.shape[1] // 2 - 33, image.shape[0] // 2 - 20), (0, 0, 255), 1,
    #          cv2.LINE_AA)
    # cv2.line(image, (426, image.shape[0]), (image.shape[1] // 2 + 48, image.shape[0] // 2 - 20), (0, 0, 255), 1,
    #          cv2.LINE_AA)
    # src = np.float32([[53, image.shape[0]],
    #                   [426, image.shape[0]],
    #                   [image.shape[1] / 2 + 48, image.shape[0] // 2 - 20],
    #                   [image.shape[1] / 2 - 39, image.shape[0] // 2 - 20]])
    # src = np.float32([[43, image.shape[0]],
    #                   [436, image.shape[0]],
    #                   [image.shape[1] / 2 + 56, image.shape[0] // 2 - 20],
    #                   [image.shape[1] / 2 - 41, image.shape[0] // 2 - 20]])
    src = np.float32([[75, image.shape[0]],
                      [444, image.shape[0]],
                      [image.shape[1] / 2 + 64, image.shape[0] // 2],
                      [image.shape[1] / 2 - 54, image.shape[0] // 2]])
    # src = np.float32([[50, image.shape[0]],
    #                   [420, image.shape[0]],
    #                   [image.shape[1] / 2 + 54, image.shape[0] // 2],
    #                   [image.shape[1] / 2 - 62, image.shape[0] // 2]])
    dst = np.float32([[image.shape[1] / 4, image.shape[0]],
                      [image.shape[1] * 3 / 4, image.shape[0]],
                      [image.shape[1] * 3 / 4, 0],
                      [image.shape[1] / 4, 0]])
    M = cv2.getPerspectiveTransform(src, dst)

    # 获取逆变换矩阵
    minv = cv2.getPerspectiveTransform(dst, src)

    image_warp = cv2.warpPerspective(image, M, (image.shape[1], image.shape[0]), flags=cv2.INTER_LINEAR)

    return image_warp, minv


# 使用梯度去提取车道线
# Extract lane-line pixels from the bird's-eye view using x-gradients.
def extract_line_gradient(image_warp):
    """Binarize `image_warp` into a lane-pixel mask: Gaussian blur, grayscale,
    horizontal Sobel, fixed threshold, then dilate/erode cleanup."""
    blurred = cv2.GaussianBlur(image_warp, (5, 5), 1)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    # First-order derivative in x: vertical lane edges respond strongly.
    sobel_x = cv2.Sobel(gray, -1, 1, 0)
    _, binary = cv2.threshold(sobel_x, 60, 255, cv2.THRESH_BINARY)

    # Dilate then erode (closing) to bridge gaps in the detected edges.
    return dilate_erode(binary, 15)

def find_stop_line(image_arpPerspective):
    """Detect whether a white stop line has reached the bottom of the
    bird's-eye frame; draws the detected contour on the input image.

    Returns True once the stop line's lower edge is within 20 px of the
    image bottom, otherwise False.
    """
    hsv = cv2.cvtColor(image_arpPerspective, cv2.COLOR_BGR2HSV)
    # White in HSV: any hue, low saturation, high value.
    mask = cv2.inRange(hsv, np.array([0, 0, 200]), np.array([180, 30, 255]))
    # Morphological closing to merge fragmented white regions.
    closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, np.ones((15, 15), np.uint8))
    found, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not found:
        return False

    biggest = max(found, key=cv2.contourArea)
    region_area = cv2.contourArea(biggest)
    if region_area <= 1000:
        return False

    print('发现车道线,车道线面积:', region_area)
    x, y, w, h = cv2.boundingRect(biggest)
    cv2.drawContours(image_arpPerspective, [biggest], -1, (0, 0, 255), 2)
    # Tolerance (px) between the car and the stop line.
    offset = 20
    if y + h > image_arpPerspective.shape[0] - offset:
        print('停车线已到')
        return True
    return False

# 先膨胀后腐蚀
def dilate_erode(image, kernel_size):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    image_dilate = cv2.dilate(image, kernel, iterations=1)
    image_result = cv2.erode(image_dilate, kernel, iterations=1)
    return image_result


# 提取白色车道线:HLS模型
def hlsSelect(img, thresh=(220, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    l_channel = hls[:, :, 1]
    l_channel = l_channel / np.max(l_channel) * 255
    binary_output = np.zeros_like(l_channel)
    binary_output[(thresh[0] < l_channel) & (l_channel < thresh[1])] = 1
    return binary_output


# 提取黄色车道线:lab模型
def labSelect(img, thresh=(198, 220)):
    img[:, 240:, :] = (0, 0, 0)
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    lab_b = lab[:, :, 2]
    if np.max(lab_b) > 100:
        lab_b = (lab_b / np.max(lab_b)) * 255
    binary_output = np.zeros_like(lab_b)
    binary_output[(lab_b > thresh[0]) & (lab_b < thresh[1])] = 1
    return binary_output


# 用颜色提取车道线
def extract_line_color(image):
    hlsL_binary = hlsSelect(image)
    cv2.imshow('hlsL_binary', hlsL_binary)

    labB_binary = labSelect(image)
    cv2.imshow('labB_binary', labB_binary)
    image_binary = cv2.bitwise_or(hlsL_binary, labB_binary)
    dilate_erode_image = dilate_erode(image_binary, 15)
    cv2.imshow('image_binary', dilate_erode_image)
    return dilate_erode_image


def finding_line(dilate_erode_image):
    """Locate the left/right lane lines in a binary bird's-eye image with a
    sliding-window search, then fit each with a 2nd-order polynomial.

    Args:
        dilate_erode_image: single-channel binary image; lane pixels nonzero.

    Returns:
        (left_fitx, right_fitx, middle_fitx, ploty) — per-row fitted x of
        the left/right lanes, their midpoint, and the row (y) coordinates.

    NOTE(review): np.polyfit raises if a side collects no pixels at all;
    the caller's try/except in the main loop absorbs that case.
    """
    # Column histogram over the lower half, columns 0..439 only
    # (assumes a frame at least ~440 px wide — TODO confirm).
    histogram = np.sum(dilate_erode_image[dilate_erode_image.shape[0] // 2:, :440], axis=0)
    # Three-channel copy used to visualize the sliding-window search.
    out_img = np.dstack([dilate_erode_image, dilate_erode_image, dilate_erode_image])

    # Histogram midpoint; the peak on each side gives each lane's base x.
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Row (y) and column (x) indices of every nonzero pixel in the image.
    nonzero = dilate_erode_image.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Sliding-window parameters.
    # Number of stacked windows per lane.
    nwindows = 10
    # Height of each window.
    window_height = dilate_erode_image.shape[0] // nwindows
    # Half-width of each window.
    margin = 50
    # Minimum nonzero-pixel count required to re-center a window.
    minpix = 40
    # Current window centers; updated as the search climbs the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_pre = leftx_base
    right_pre = rightx_base
    # Accumulators for the pixel indices belonging to each lane.
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # y coordinate of the window's top edge (smaller y = higher up).
        win_y_high = dilate_erode_image.shape[0] - (window + 1) * window_height
        # y coordinate of the window's bottom edge.
        win_y_low = dilate_erode_image.shape[0] - window * window_height
        # x bounds of the left-lane window.
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        # x bounds of the right-lane window.
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw both windows onto the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_high), (win_xleft_high, win_y_low), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_high), (win_xright_high, win_y_low), (0, 255, 0), 2)
        # cv2.imshow('window',out_img)

        # Indices (into nonzerox/nonzeroy) of the nonzero pixels that fall
        # inside each window.
        good_left_inds = ((nonzeroy >= win_y_high) & (nonzeroy < win_y_low) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_high) & (nonzeroy < win_y_low) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Collect the hits for later concatenation.
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # Re-center each window on the mean x of its hits; when one side has
        # too few pixels, shift it by the offset the OTHER side just moved so
        # the two windows track in parallel through gaps (e.g. dashed lines).
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        else:
            if len(good_right_inds) > minpix:
                offset = int(np.mean(nonzerox[good_right_inds])) - right_pre
                leftx_current = leftx_current + offset
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
        else:
            if len(good_left_inds) > minpix:
                offset = int(np.mean(nonzerox[good_left_inds])) - left_pre
                rightx_current = rightx_current + offset

        left_pre = leftx_current
        right_pre = rightx_current
    # Flatten the per-window index lists so the pixel coordinates can be
    # extracted and the lane lines fitted.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    cv2.imshow('out_img',out_img)
    # Pixel coordinates of each lane: left_lane_inds indexes into
    # nonzerox/nonzeroy, yielding the x and y of every left-lane pixel.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    # Likewise for the right lane.
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit x = a*y^2 + b*y + c per lane (x as a function of y, since the
    # lanes are near-vertical in the bird's-eye view).
    # np.polyfit(x, y, deg) returns the coefficients highest power first,
    # so left_fit holds [a, b, c].
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # One y sample per image row, for evaluating/drawing the fitted curves.
    ploty = np.linspace(0, dilate_erode_image.shape[0] - 1, dilate_erode_image.shape[0])

    # Evaluate both polynomials at every row.
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]

    # Lane-center x per row (floor division keeps whole-pixel values).
    middle_fitx = (left_fitx + right_fitx) // 2

    # Color the detected pixels: left lane blue, right lane red (BGR).
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    # cv2.imshow('out_img',out_img)

    return left_fitx, right_fitx, middle_fitx, ploty


# 显示原始图像,显示透视变换后的结果,显示车道线在原图的结果,显示单纯车道线
def show_line(image, image_warp, dilate_erode_image, minv, left_fitx, right_fitx, middle_fitx, ploty):
    # 创建一个空白图像，用于在上面绘制检测到的车道线
    warp_zero = np.zeros_like(dilate_erode_image).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # print(left_fitx.shape)
    # print(right_fitx.shape)
    # print(ploty.shape)
    pts_left = np.transpose(np.vstack([left_fitx, ploty]))
    pts_right = np.transpose(np.vstack([right_fitx, ploty]))
    pts_middle = np.transpose(np.vstack([middle_fitx, ploty]))

    # 绘制车道线
    cv2.polylines(color_warp, np.int32([pts_left]), isClosed=False, color=(202, 124, 0), thickness=15,
                  lineType=cv2.LINE_AA)
    cv2.polylines(color_warp, np.int32([pts_right]), isClosed=False, color=(202, 124, 0), thickness=15,
                  lineType=cv2.LINE_AA)
    cv2.polylines(color_warp, np.int32([pts_middle]), isClosed=False, color=(202, 124, 0), thickness=15,
                  lineType=cv2.LINE_AA)
    # 将得到的车道线的像素点根据逆透视变换矩阵映射到原始图像中
    newwarp = cv2.warpPerspective(color_warp, minv, (image.shape[1], image.shape[0]))
    # 将逆透视变换后的结果和原始图像进行融合
    result1 = cv2.addWeighted(image, 1, newwarp, 1, 0)
    # 创建一个灰度图
    background_zero = np.zeros_like(image).astype(np.uint8) + 127
    #
    result = cv2.addWeighted(newwarp, 1, background_zero, 1, 0)
    # cv2.imshow('newwarp',newwarp)
    # cv2.imshow('result',result)
    # cv2.imshow('image_warp', image_warp)
    # cv2.imshow('image', image)
    # # cv2.imshow('image_result', image_result)
    cv2.line(image_warp,(240,0),(240,270),(0,0,255),1,cv2.LINE_AA)
    cv2.polylines(image_warp, np.int32([pts_middle]), isClosed=False, color=(202, 124, 0), thickness=1,
                  lineType=cv2.LINE_AA)
    add1 = np.hstack((image, image_warp))
    add2 = np.hstack((result, result1))
    last_result = np.vstack((add1, add2))
    cv2.imshow('result', last_result)
    cv2.waitKey(1)
    return pts_middle


def auto_run(image, mqtt_client, pts_middle, pid, carspeed=10):
    """Run one control step: publish the car speed, feed the measured lane
    center through the PID controller, and publish the steering angle.

    Returns:
        (lane_center, image_center) for plotting/diagnostics.
    """
    # NOTE(review): pts_middle rows are (x, y) pairs and this mean covers
    # BOTH columns of rows 240 onward, so y-coordinates are folded into
    # lane_center. The PID setpoint (240) appears tuned around that value —
    # confirm before "fixing" this to pts_middle[240:, 0].mean().
    lane_center = pts_middle[240:, :].mean()

    # Horizontal midpoint of the camera frame.
    image_center = image.shape[1] // 2

    speed_payload = json.dumps({'carSpeed': carspeed})
    mqtt_client.send_mqtt(speed_payload)
    print(lane_center)
    steering_angle = -pid(lane_center)

    # Publish the steering command.
    direction_payload = json.dumps({'carDirection': steering_angle})
    mqtt_client.send_mqtt(direction_payload)
    print('steering_angle:', steering_angle)

    return lane_center, image_center


if __name__ == '__main__':
    # Bounded queue for frames delivered by the MQTT subscriber.
    q_mqtt_data = queue.Queue(5)

    # plotter = LaneCenterPlotter()
    # frame = 0
    # Build the MQTT client and connect to the local broker so we can talk
    # to the 3D simulation scene.
    mqtt_client = hqyj_mqtt.MQTTClient('127.0.0.1', 21883, 'xiaoyibb', 'xiaoyiaa', q_mqtt_data)
    # Processed-frame counter (used by the disabled FPS readout below).
    frame_count = 0
    # Start time of the run.
    start_time = time.time()
    # pid = PID(Kp=1.010, Ki=0.00, Kd=0.0, setpoint=240)
    # Steering PID; setpoint 240 is the target lane-center value.
    pid = PID(Kp=0.285, Ki=0.004, Kd=0.13, setpoint=240)
    # pid = PID(Kp=0.5, Ki=0.0, Kd=0.0, setpoint=240)
    pid.sample_time = 0.12531
    pid.output_limits = (-13, 13)
    # try:
    while True:
        try:
            # Blocks until the MQTT callback enqueues the next message.
            images = q_mqtt_data.get()
            if 'image' in images:
                # frame_count += 1
                # current_time = time.time()
                # elapsed_time = current_time - start_time
                # fps = frame_count / elapsed_time if elapsed_time > 0 else 0
                # print(f'fps:::::::{fps:.2f}')
                # image = cv2.imread('images/s5.png')
                image = b64_to_np(images)
                image_warp, minv = perspective_transform(image)
                image_warp_copy = image_warp.copy()
                # Extract lane-line pixels using gradients.
                dilate_erode_image = extract_line_gradient(image_warp)
                # Stop-line / traffic-light / pedestrian handling (disabled).
                # stop_line_flag = find_stop_line(image_warp_copy)
                # light_color,person_Warning = yolo_detect(image)
                # print('person_Warning',person_Warning)
                # print('stop_line_flag',stop_line_flag)
                # if person_Warning == True:
                #     print('前方检测到行人,需要刹车！！！')
                #     carSpeed = 0
                #     time.sleep(1)
                #     continue
                # if stop_line_flag == True:
                #     carSpeed = 0
                #     print('识别到的颜色为:',light_color)
                #     if light_color != None:
                #         if light_color == 0 and light_color == 1:
                #             print('停止车辆')
                #             carSpeed = 0
                #             # cv2.waitKey(1)
                #             continue
                #         elif light_color == 2:
                #             print('绿灯,启动车辆')
                #             carSpeed = 10
                #             time.sleep(2)
                # Color-based lane extraction (alternative, disabled).
                # dilate_erode_image = extract_line_color(image_warp_copy)
                # Fit the lane lines.
                left_fitx, right_fitx, middle_fitx, ploty = finding_line(dilate_erode_image)
                # Draw the lane lines.
                pts_middle = show_line(image, image_warp, dilate_erode_image, minv, left_fitx, right_fitx, middle_fitx,
                                       ploty)
                # Autonomous-driving control step (speed + steering over MQTT).
                lane_center, image_center = auto_run(image, mqtt_client, pts_middle, pid)

                # # Live error plot (disabled).
                # plotter.update_plot(frame, lane_center, image_center)
                # frame += 1
        except Exception as e:
            # NOTE(review): broad catch keeps the loop alive on per-frame
            # failures (e.g. polyfit with no lane pixels) but hides tracebacks.
            print(e)
    # finally:
    #     # cv2.destroyAllWindows()
    #     # plt.close('all')
    #     # mqtt_client.close()  # 确保断开MQTT
    #     print("所有资源已释放。")
