import cv2
import numpy as np, random
import time
import torch

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import non_max_suppression, scale_coords
from utils.plots import plot_one_box


def _preprocess(frame, device):
    """Convert one BGR frame to a normalized float CHW tensor on `device`.

    Letterboxes to the 640 network input size, flips BGR->RGB, moves channels
    first, and scales uint8 [0, 255] to float [0.0, 1.0].
    """
    # BUG FIX: the original called letterbox(frame, 640, 32), which passed 32
    # into the `color` parameter; `stride=32` is what was intended.
    img = letterbox(frame, 640, stride=32)[0]
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, H,W,C to C,H,W
    # Ensure the array is C-contiguous (row-major) before handing it to torch.
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    return img.float() / 255.0  # uint8 0-255 -> float 0.0-1.0


def main():
    """Run YOLOv5 detection over multiple RTSP streams in one shared window.

    Each loop iteration grabs one frame per stream, batches them through the
    model, maps the detections back onto the original frames, and shows all
    annotated frames stacked horizontally. Press ESC to quit.
    """
    # Fall back to CPU so the script still starts on machines without CUDA.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model = attempt_load('modelWeight/best.pt', map_location=device)
    names = model.module.names if hasattr(model, 'module') else model.names
    # One random BGR color per class for box drawing.
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Hikvision camera RTSP endpoints.
    rtsp_urls = ["rtsp://admin:jk123456@192.168.1.64:554/h264/ch1/main/av_stream",
                "rtsp://admin:jk123456@192.168.1.65:554/h264/ch1/main/av_stream"]

    cv2.namedWindow("Detect", 0)
    caps = [cv2.VideoCapture(rtsp_url) for rtsp_url in rtsp_urls]

    # Decode -> detect loop.
    while True:
        # Exit on ESC (key code 27).
        if cv2.waitKey(1) & 0xFF == 27:
            break

        batch = []        # preprocessed tensors, one per successfully-read stream
        frame_backs = []  # original frames, index-aligned with `batch`
        # Time the full per-iteration pipeline across all streams.
        start = time.perf_counter()
        for cap in caps:
            ret, frame = cap.read()
            if not ret:
                print("Failed to grab frame")
                # BUG FIX: the original `break` here left the batch and the
                # backup frames misaligned (or empty) and then crashed at the
                # model call; skip just this stream instead.
                continue
            frame_backs.append(frame.copy())
            batch.append(_preprocess(frame, device))

        if not batch:
            # No stream produced a frame this iteration; nothing to detect.
            break

        # BUG FIX: build the batch with a single stack over the list. The
        # original torch.stack((acc, frame)) accumulation only worked for
        # exactly two streams and fed the model a 3-D tensor for one stream.
        # NOTE(review): letterbox(auto=True) pads each frame to its own
        # stride-aligned shape, so streams with differing aspect ratios may
        # still produce mismatched shapes here — confirm camera resolutions.
        combined_frame = torch.stack(batch)

        # Inference without gradient tracking.
        with torch.no_grad():
            preds = model(combined_frame, augment=False)[0]

        preds = non_max_suppression(preds, 0.25, 0.45)  # conf_thres, iou_thres
        print(preds)
        for i, det in enumerate(preds):
            if det is not None and len(det):
                # Rescale boxes from the letterboxed input size back to the
                # original frame size of THIS stream (the original always used
                # index 0 here).
                det[:, :4] = scale_coords(combined_frame[i].shape[1:], det[:, :4], frame_backs[i].shape).round()
                for *xyxy, conf, cls in reversed(det):
                    label = f'{names[int(cls)]} {conf:.2f}'
                    plot_one_box(xyxy, frame_backs[i], label=label, color=colors[int(cls)], line_thickness=3)

        end = time.perf_counter()
        print("检测时间：", (end - start) * 1000, "毫秒")

        # Tile all annotated frames side by side for display.
        # NOTE(review): np.hstack requires equal frame heights across streams.
        frame_show = []
        for frame_back in frame_backs:
            frame_show = np.hstack((frame_show, frame_back)) if len(frame_show) else frame_back

        cv2.imshow("Detect", frame_show)

    for cap in caps:
        cap.release()
    cv2.destroyAllWindows()


# Script entry point: start the multi-stream detection loop.
if __name__ == "__main__":
    main()
