# encoding:utf-8
import imutils
import time
import cv2
import numpy as np
from kalman import *
import matplotlib.pyplot as plt

# Virtual counting line across the frame; endpoints sized for the camera
# image width (2560 px) — adjust to the input resolution.
line = [(0, 350), (2560, 350)]
# Total number of vehicles that crossed the line
counter = 0
# Vehicles counted in the "up" direction (forward lane)
counter_up = 0
# Vehicles counted in the "down" direction (opposite lane)
counter_down = 0

# SORT tracker instance (from the local kalman module) and a dict mapping
# track ID -> box from the previous frame, used for crossing detection.
tracker = Sort()
memory = {}


# 线与线的碰撞检测--二维叉乘的方法检测两个直线之间是否相交
# 计算叉乘符号
def ccw(A, B, C):
    """Return True if points A, B, C are oriented counter-clockwise.

    Uses the sign of the 2-D cross product of vectors AB and AC.
    """
    ax, ay = A[0], A[1]
    bx, by = B[0], B[1]
    cx, cy = C[0], C[1]
    return (cy - ay) * (bx - ax) > (by - ay) * (cx - ax)


def intersect(A, B, C, D):
    """Return True if segment AB crosses segment CD.

    AB crosses CD exactly when A and B lie on opposite sides of CD
    and C and D lie on opposite sides of AB (orientation test).
    """
    ab_straddles_cd = ccw(A, C, D) != ccw(B, C, D)
    cd_straddles_ab = ccw(A, B, C) != ccw(A, B, D)
    return ab_straddles_cd and cd_straddles_ab


# --- YOLOv3 model / video setup -----------------------------------------
# Load the set of detectable object classes (COCO label names).

# labelPath: path to the class-label file
labelPath = "./yolo-coco/coco.names"

# Read the labels; use a context manager so the file handle is closed
# (the original left the handle open).
with open(labelPath) as f:
    LABELS = f.read().strip().split("\n")

# One pseudo-random color per track ID for drawing boxes; seeded so the
# colors are stable across runs.
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(200, 3), dtype='uint8')

# Pre-trained Darknet model files.
# weightsPath: path to the model weights
weightsPath = "./yolo-coco/yolov3.weights"
# configPath: path to the network configuration
configPath = "./yolo-coco/yolov3.cfg"

net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# getLayerNames(): names of every layer in the network.
ln = net.getLayerNames()
# Keep only the output layers, e.g. [yolo-82, yolo-94, yolo-106].
# getUnconnectedOutLayers() returns 1-based indices, hence the "- 1".
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]

# Open the input video.
vs = cv2.VideoCapture('input/test_1.mp4')
# Frame dimensions; filled in from the first frame that is read.
(W, H) = (None, None)
writer = None

try:
    # BUG FIX: the original used cv2.cv.CV_CAP_PROP_Frame_COUNT (wrong
    # case), which raised AttributeError under OpenCV 2 and was silently
    # swallowed by a bare except.
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() else cv2.CAP_PROP_FRAME_COUNT
    # Total number of frames in the video (informational only).
    total = int(vs.get(prop))
    print("INFO:{} total Frame in video".format(total))
except Exception:
    # Frame count is best-effort; processing continues without it.
    print("[INFO] could not determine in video")
cnt = 1
# Process the video frame by frame
while True:
    # Read the next frame
    (grabed, frame) = vs.read()
    # No frame returned: end of video, stop processing
    if not grabed:
        break
    '''
    # 展示图像
    # cv2.imshow('Image', frame)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    '''
    # Cache the frame dimensions from the first frame
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    '''
    # 假设已经读取了图像并保存在变量image中
    image = frame;
    # 定义目标宽高
    target_width = 416
    target_height = 416

    # 计算调整比例
    scale_ratio = min(target_width / image.shape[1], target_height / image.shape[0])

    # 计算调整后的宽高
    resized_width = int(scale_ratio * image.shape[1])
    resized_height = int(scale_ratio * image.shape[0])

    # 调整图像大小
    resized_image = cv2.resize(image, (resized_width, resized_height))

    # 创建目标图像并进行填充
    # 创建目标图像并进行填充
    padded_image = np.ones((target_height, target_width, 3), dtype=np.uint8) * 255
    padded_image[:resized_height, :resized_width] = resized_image

    # 输出填充后的图像
    cv2.imshow("Padded Image", padded_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''

    # Build a blob from the frame (scale 1/255, resize to 416x416, BGR->RGB)
    # and run it through YOLO to get boxes and class probabilities.
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)

    '''
    # 将 Blob 转换为 NumPy 数组
    blob_array = blob

    # 取出第一张图像
    image = blob_array[0]
    image = image.transpose(1,2,0)
    # 根据需要进行通道顺序转换
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    image_rgb = image_rgb.astype(np.uint8)

    # 显示图像
    cv2.imshow('Blob Image', image_rgb)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''


    # Feed the blob into the network
    net.setInput(blob)
    start = time.time()
    # Forward pass: returns boxes and their class scores for each output layer
    layerOutouts = net.forward(ln)
    end = time.time()

    # Detected bounding boxes
    boxes = []
    # Confidence score of each box
    confidences = []
    # Class ID of each box
    classIDs = []

    # Iterate over the output layers (three for YOLOv3)
    for output in layerOutouts:
        # Iterate over each detection in the layer
        for detection in output:
            # detection is a 1x85 vector: [0:4] box geometry, [5:] per-class
            # scores; take the best-scoring class and its confidence
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # Keep only detections above the confidence threshold
            if confidence > 0.3:
                # Scale the box back to the original frame size; YOLO emits
                # center (x, y) plus width/height, normalized to [0, 1]
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # Convert center coordinates to the top-left corner
                x = int(centerX - width / 2)
                y = int(centerY - height / 2)
                # Record the box, its confidence, and its class
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # Non-maximum suppression to drop weak and overlapping boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
    # Detections kept for tracking: [x1, y1, x2, y2, score]
    dets = []

    # Make sure at least one box survived NMS
    if len(idxs) > 0:
        # Iterate over the surviving boxes
        for i in idxs.flatten():
            # Keep only boxes classified as "car"
            if LABELS[classIDs[i]] == "car":
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                dets.append([x, y, x + w, y + h, confidences[i]])
    # Float print formatting (3 decimal places)
    np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
    dets = np.asarray(dets)

    # SORT tracking
    # NOTE(review): frames with no car detections are skipped entirely —
    # they are never written to the output video and the tracker is not
    # updated for them; confirm this is intended.
    if np.size(dets) == 0:
        continue
    else:
        tracks = tracker.update(dets)
    # Tracked boxes for this frame
    boxes = []
    # Track IDs for this frame
    indexIDs = []
    # Tracking results from the previous frame (ID -> box)
    previous = memory.copy()
    memory = {}
    for track in tracks:
        # track[0:4] appears to be [x1, y1, x2, y2], track[4] the track ID
        # (from the Sort implementation in kalman.py — verify there)
        boxes.append([track[0], track[1], track[2], track[3]])
        indexIDs.append(int(track[4]))
        memory[indexIDs[-1]] = boxes[-1]

    # Line-crossing detection on the SORT track boxes
    if len(boxes) > 0:
        i = int(0)
        # Iterate over the tracked boxes
        for box in boxes:
            (x, y) = (int(box[0]), int(box[1]))  # top-left corner
            # NOTE: despite the names, (w, h) is the bottom-right corner
            # (x2, y2) of the track box — the rectangle call below uses it
            # as a point, and the center math below treats it as a corner.
            (w, h) = (int(box[2]), int(box[3]))
            color = [int(c) for c in COLORS[indexIDs[i] % len(COLORS)]]
            cv2.rectangle(frame, (x, y), (w, h), color, 2)

            # Count a vehicle when the segment between its previous-frame
            # and current-frame box centers crosses the virtual line
            if indexIDs[i] in previous:
                previous_box = previous[indexIDs[i]]
                # Previous frame: top-left corner
                (x2, y2) = (int(previous_box[0]), int(previous_box[1]))
                # Previous frame: bottom-right corner (same naming caveat)
                (w2, h2) = (int(previous_box[2]), int(previous_box[3]))
                # Center of the previous-frame box
                p1 = (int(x2 + (w2 - x2) / 2), int(y2 + (h2 - y2) / 2))
                # Center of the current-frame box
                p0 = (int(x + (w - x) / 2), int(y + (h - y) / 2))

                # Segment p0-p1 vs the counting line
                if intersect(p0, p1, line[0], line[1]):
                    counter += 1
                    # Direction from the change in the top-edge y coordinate
                    if y2 > y:
                        counter_down += 1
                    else:
                        counter_up += 1
            i += 1

    # Draw the counting line and the three counters on the frame
    # print("将车辆计数的相关结果放在视频上")
    cv2.line(frame, line[0], line[1], (0, 255, 0), 3)
    cv2.putText(frame, str(counter), (30, 80), cv2.FONT_HERSHEY_DUPLEX, 3.0, (255, 0, 0), 3)
    cv2.putText(frame, str(counter_up), (130, 80), cv2.FONT_HERSHEY_DUPLEX, 3.0, (0, 255, 0), 3)
    cv2.putText(frame, str(counter_down), (230, 80), cv2.FONT_HERSHEY_DUPLEX, 3.0, (0, 0, 255), 3)

    # Save the annotated frame to the output video.
    # Lazily create the writer on the first written frame.
    if writer is None:
        # Codec for the output file
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        # Output video settings (30 fps, same size as the input frames)
        writer = cv2.VideoWriter("output.mp4",
                                 fourcc,
                                 30,
                                 (frame.shape[1], frame.shape[0]),
                                 True)
    # Write the processed frame to the video
    # print("将处理后的帧写入到视频中")
    writer.write(frame)
    print(f'Frame {cnt} finish!')
    cnt = cnt + 1
    # Optionally display the current frame
    # cv2.imshow("", frame)
    # # Press q to quit
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #      break

# Release resources.
# BUG FIX: writer stays None when no frame was ever written (empty or
# unreadable input video); the original crashed with AttributeError here.
if writer is not None:
    writer.release()
vs.release()
cv2.destroyAllWindows()