# Reference: https://www.learnopencv.com/deep-learning-based-object-detection-using-yolov3-with-opencv-python-c/

import argparse
from cv2 import cv2
import numpy as np
import pandas as pd
import time
import CarModule
import map_matching.IAMM as mapmatching
import CameraModule as CameraModule
from settings import *


class YOLO:
    """Thin wrapper around an OpenCV-DNN YOLOv3 network (CPU backend)."""

    # Constant parameters
    conf_threshod = 0.25  # confidence threshold for a box to contain an object
    nms_threshod = 0.4  # non-maximum-suppression overlap threshold
    input_width = 416  # network input image width
    input_height = 416  # network input image height
    car_class = [2, 5]  # COCO class ids kept as vehicles (car and truck/bus)

    def __init__(self, classes_path, cfg_path, weights_path):
        """Load the YOLOv3 network from a Darknet config and weights file.

        `classes_path` is accepted for interface compatibility but is not
        used here — the class filter relies on the numeric ids in
        `YOLO.car_class` instead.
        """
        net = cv2.dnn.readNetFromDarknet(cfg_path, weights_path)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        self.net = net

    def getOutputsNames(self):
        """Return the names of the network's unconnected (output) layers."""
        layersNames = self.net.getLayerNames()
        # getUnconnectedOutLayers() returns [[i], ...] on OpenCV < 4.5.4 and a
        # flat [i, ...] array on >= 4.5.4; flatten() handles both layouts.
        return [layersNames[i - 1] for i in np.array(self.net.getUnconnectedOutLayers()).flatten()]


class FrameClass:
    """One video frame plus its lane geometry, detected cars, and annotations."""

    frame_width = 1920  # frame width in pixels
    frame_height = 1088  # frame height in pixels
    # Propagate the frame size to CarClass once, at class-definition time.
    CarModule.CarClass.frame_size = [frame_width, frame_height]

    def __init__(self, fname0, frame0, lane_param0, vanishing_point0, camera0):
        """Store the frame, its lane-line coefficients, and camera geometry.

        `lane_param0` holds one row [b1, b0] per lane line, for the line
        equation x = b1*y + b0. `vanishing_point0` is (x, y) in pixels.
        """
        self.fname = fname0
        self.frame = frame0
        self.lane_param = lane_param0
        self.vanishing_point = vanishing_point0
        self.indices = None  # NMS-surviving box indices, set by post_process()
        self.boxes = None  # candidate boxes [left, top, width, height], set by post_process()
        self.car_list = None  # CarClass instances for this frame, set by car_processing()
        self.camera = camera0
        # Camera pitch (phi) and yaw (omega) recovered from the vanishing point.
        self.phi = np.arctan((self.vanishing_point[1] - self.camera.y0) / self.camera.fy)
        self.omega = np.arctan((self.camera.x0 - self.vanishing_point[0]) * np.cos(self.phi) / self.camera.fx)

    def get_lane_num(self, car):
        """Return the 1-based lane index containing the car's bottom point.

        Lane lines follow x = b1*y + b0 (coefficients in self.lane_param).
        Returns -1 when fewer than two lane lines are available.
        """
        bottom_y = car.top + car.height
        n = self.lane_param.shape[0]
        if n < 2:
            return -1
        # x-coordinate of every lane line at the car's bottom edge, sorted
        # left-to-right so consecutive entries bound one lane each.
        lanes = np.sort([self.lane_function(bottom_y, i) for i in range(n)])
        for i in range(n - 1):
            if car.x_center < lanes[i + 1]:
                return i + 1  # lanes numbered from 1, leftmost first
        # Right of all interior boundaries -> rightmost lane.
        return n

    def car_processing(self, count_car, last_frame_obj):
        """Build this frame's car list, match it against the previous frame,
        assign car numbers, and draw annotations onto self.frame.

        Returns the updated running car counter.
        """
        # matching_scores[i][j]: similarity of this frame's car i against the
        # previous frame's car j.
        matching_scores = []
        self.car_list = []
        # np.array(...).flatten() copes with both NMSBoxes index layouts:
        # [[i], ...] on OpenCV < 4.5.4 and flat [i, ...] on >= 4.5.4.
        for i_box in np.array(self.indices).flatten():
            box = self.boxes[int(i_box)]
            left = box[0]
            top = box[1]
            width = box[2]
            height = box[3]
            # Discard boxes that extend beyond the frame borders.
            if left < 0 or top < 0 or left + width > FrameClass.frame_width or top + height > FrameClass.frame_height:
                continue
            car = CarModule.CarClass(self.frame[top:top + height, left:left + width], top, left, width, height)
            car.get_dist(self.phi, self.omega, self.camera)
            car.lane_num = self.get_lane_num(car)

            self.car_list.append(car)
            if last_frame_obj is not None and len(last_frame_obj.car_list) != 0:
                # Score this car against every car of the previous frame.
                # TODO(review): besides histograms, also weigh position and
                # size, handle one-to-many matches, and drop very small cars
                # (their detection/position is unreliable).
                matching_s = [car.cos_matching(previous_car) for previous_car in last_frame_obj.car_list]
                matching_scores.append(matching_s)
            else:
                # No previous frame: every car is new — number and draw it now.
                count_car += 1
                car.set_car_num(count_car)
                car.draw_pred_car(self.frame)
        if last_frame_obj is None or len(last_frame_obj.car_list) == 0 or len(self.car_list) == 0:
            return count_car

        # Start matching. imax[j] is the index of this frame's best-scoring
        # car for previous-frame car j (column-wise argmax).
        imax = np.argmax(matching_scores, axis=0)

        # Walk previous-frame cars and carry their ids over to the best match.
        for j in range(len(last_frame_obj.car_list)):
            for i in range(len(self.car_list)):
                if i == imax[j] and matching_scores[i][j] > 0.8:  # accept only matches scoring above 0.8
                    car_num = last_frame_obj.car_list[j].car_num
                    self.car_list[i].set_car_num(car_num)
                    self.car_list[i].set_delta_distance(last_frame_obj.car_list[j].dy)

        # Cars with no match in the previous frame get fresh numbers; draw all.
        for car in self.car_list:
            if car.car_num == -1:
                count_car += 1
                car.set_car_num(count_car)
            car.draw_pred_car(self.frame)
        return count_car

    def draw_lane_line(self):
        """Draw every known lane line onto self.frame as a red segment."""
        n = self.lane_param.shape[0]
        y1, y2 = 400, 900  # y-coordinates of the two segment endpoints
        if n < 2:
            return
        for i in range(n):
            cv2.line(self.frame, (self.lane_function(y1, i), y1),
                     (self.lane_function(y2, i), y2), (0, 0, 255), thickness=3)

    def lane_function(self, y, i):
        """Evaluate lane line i at row y: x = b1*y + b0, as an int pixel."""
        x = self.lane_param[i, 0] * y + self.lane_param[i, 1]
        return int(x)

    def post_process(self, outs):
        """Filter raw network outputs: keep confident vehicle boxes, then
        run non-maximum suppression. Fills self.boxes and self.indices.
        """
        confidences = []
        self.boxes = []

        for out in outs:
            for detection in out:

                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                # Keep vehicle classes only.
                if class_id not in YOLO.car_class:
                    continue

                # Drop low-confidence detections; this 0.5 cutoff is separate
                # from the NMS confidence threshold (YOLO.conf_threshod).
                if confidence > 0.5:
                    # Detections are normalized; convert to pixel coordinates.
                    center_x = int(detection[0] * self.frame_width)
                    center_y = int(detection[1] * self.frame_height)
                    width = int(detection[2] * self.frame_width)
                    height = int(detection[3] * self.frame_height)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    confidences.append(float(confidence))
                    self.boxes.append([left, top, width, height])

        # Non-maximum suppression removes overlapping low-confidence boxes.
        self.indices = cv2.dnn.NMSBoxes(self.boxes, confidences, YOLO.conf_threshod, YOLO.nms_threshod)


def main():
    """Run detection, lane assignment, car tracking, and map matching over
    every image listed in the trajectory CSV, writing annotated frames.
    """
    # Map-matching input/output directories.
    data_index = 0
    img_input_dir = img_roots[data_index]
    img_output_dir = r"D:\BaiduNetdiskDownload\20190429\pic\output_SURF"

    # Trajectory: one row per image (file name, lon/lat, heading).
    trajectory_df = pd.read_csv(traj_roots[data_index] + trajectory_data_list[data_index])

    start = time.time()

    # Running vehicle counter (used as the car id).
    count_car = 0
    # Initial lane-line coefficients and vanishing point, used until the
    # first successful lane detection (values taken from a test run).
    last_lane_param = np.array([[-7.91408430e-01, 1.37122453e+03], [2.66310987e+00, -4.89425724e+02]])
    last_vanishing_point = np.array([944, 600])

    camera = CameraModule.Camera()

    map_matcher = mapmatching.IAMM_for_yolo(conn, datatable, svc_img, "", sigma_list[data_index])

    yolo_obj = YOLO(classes_path, cfg_path, weights_path)

    last_frame_obj = None

    # Create the preview window once, not per frame.
    window_name = "Yolov3 detection"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_name, 900, 500)

    for i in range(trajectory_df.shape[0]):
        fname = trajectory_df["relation"].iloc[i]
        lon = trajectory_df["lon"].iloc[i]
        lat = trajectory_df["lat"].iloc[i]
        pang = trajectory_df["pang"].iloc[i]

        input_path = img_input_dir + "\\" + fname
        output_path = img_output_dir + "\\" + fname
        # NOTE(review): the parser is rebuilt per iteration because its
        # defaults are the per-image paths; CLI overrides would repeat the
        # same value for every image.
        parser = argparse.ArgumentParser()
        parser.add_argument("-input", "--input_path", help="input path", default=input_path)
        parser.add_argument("-output", "--output_path", help="output folder", default=output_path)
        args = parser.parse_args()

        # Read the image and release the capture immediately (avoids leaking
        # one open handle per image).
        cap = cv2.VideoCapture(args.input_path)
        ret, frame = cap.read()
        cap.release()
        if not ret:
            # Unreadable image: skip it instead of crashing on a None frame.
            print("Failed to read: ", args.input_path)
            continue
        print("Processing: ", args.output_path)

        # Lane detection; fall back to the last valid result on failure.
        vanishing_point, lane_param = CarModule.get_lane_param(fname)
        if vanishing_point[0] == -1:  # detection failed
            lane_param = last_lane_param
            vanishing_point = last_vanishing_point
        else:
            last_lane_param = lane_param  # remember the valid result
            last_vanishing_point = vanishing_point

        frame_obj = FrameClass(fname, frame, lane_param, vanishing_point, camera)

        blob = cv2.dnn.blobFromImage(frame, 1 / 255, (YOLO.input_width, YOLO.input_height), [0, 0, 0], 1, crop=False)
        yolo_obj.net.setInput(blob)
        outs = yolo_obj.net.forward(yolo_obj.getOutputsNames())

        # Vehicle detection and filtering.
        frame_obj.post_process(outs)

        # Road-network map matching; annotate the matched road id on the frame.
        lon_proj, lat_proj, azi, rid = map_matcher.map_matching_for_yolo(lon, lat, pang, input_path)
        cv2.putText(frame, f"{rid}", (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        # Per-car processing: distance, speed, lane; draws onto the frame.
        count_car = frame_obj.car_processing(count_car, last_frame_obj)
        last_frame_obj = frame_obj
        # NOTE(review): wrapping the counter at 100 will reissue ids already
        # used earlier in the run — confirm this is intended.
        if count_car > 100:
            count_car = 0

        # Draw the lane lines.
        frame_obj.draw_lane_line()

        cv2.imwrite(args.output_path, frame.astype(np.uint8))
        cv2.imshow(window_name, frame)
        cv2.waitKey(10)

    end = time.time() - start
    print("mean processing time: ", end / trajectory_df.shape[0])
    cv2.destroyAllWindows()


# Run the pipeline only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
