# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
import argparse
import os
import platform
import sys
import time
from pathlib import Path
import torch
from multiprocessing import  Process
import threading
from queue import Queue


# Locate the YOLOv5 project root and expose it on sys.path so the
# project-local `models`/`utils` imports further below can resolve.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
_root_str = str(ROOT)
if _root_str not in sys.path:
    sys.path.append(_root_str)
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # shorten to a cwd-relative path

# RTMP publish endpoints and output frame geometry (for re-streaming,
# e.g. via an ffmpeg subprocess).
rtmp = 'rtmp://localhost:1935/live/stream'
rtmp2 = 'rtmp://localhost:1935/live/stream2'

size = (640, 480)
sizeStr = f'{size[0]}x{size[1]}'  # '640x480', the ffmpeg -s format

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
import cv2
import subprocess

class run2(Process):
    """Detection worker process.

    Opens the stream given by ``url``, runs YOLOv5 inference on every frame
    inside ``run()``, and hands the raw model output to consumer threads
    (``test``) via queues. Each consumer applies NMS for one class filter,
    annotates the frame, and (for the ``classes == 0`` consumer) displays it
    in an OpenCV window.
    """

    def __init__(self, url):
        # The actual work happens in run(), invoked via Process.start().
        super(run2, self).__init__()
        self.url = url  # stream URL (rtmp/rtsp/http) or file path to process

    def run(self,
            weights=ROOT / 'yolov5s.pt',  # model path or triton URL
            source=ROOT / 'static/test2.mp4',  # file/dir/URL/glob/screen/0(webcam); superseded by self.url below
            data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
            imgsz=(640, 640),  # inference size (height, width); inputs are resized to fit
            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
            save_txt=False,  # save box coordinates to *.txt
            save_conf=False,  # save confidences in --save-txt labels
            nosave=False,  # do not save images/videos
            classes=None,  # filter by class: --class 0, or --class 0 2 3
            augment=False,  # augmented inference
            visualize=False,  # visualize features
            update=False,  # update all models
            project=ROOT / 'runs/detect',  # save results to project/name
            name='exp',  # save results to project/name
            exist_ok=False,  # existing project/name ok, do not increment
            half=True,  # use FP16 half-precision inference
            dnn=False,  # use OpenCV DNN for ONNX inference
            vid_stride=1,  # video frame-rate stride
    ):
        """Process entry point: load the model, build the dataloader for
        ``self.url`` and start the inference/dispatch loop.

        For live streams this never returns normally (the producer loop and
        the consumer threads run until the process is terminated).
        """
        self.weights = weights
        self.source = source
        self.data = data
        self.imgsz = imgsz
        self.device = device
        self.save_txt = save_txt
        self.save_conf = save_conf
        self.nosave = nosave
        self.classes = classes
        self.augment = augment
        self.visualize = visualize
        self.update = update
        self.project = project
        self.name = name
        self.exist_ok = exist_ok
        self.half = half
        self.dnn = dnn
        self.vid_stride = vid_stride
        self.pred = None

        # The URL passed to the constructor is the real input; it overrides
        # the `source` parameter default.
        self.source = str(self.url)
        self.save_img = not nosave and not self.source.endswith('.txt')  # save inference images
        # BUGFIX: classify the *actual* input (self.source). The original
        # inspected the unused `source` parameter, so a stream URL was judged
        # by the default '.mp4' path and misrouted to LoadImages/check_file
        # instead of LoadStreams.
        self.is_file = Path(self.source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        # Network stream?
        self.is_url = self.source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        # Local webcam index, stream-list file, or non-file URL?
        self.webcam = self.source.isnumeric() or self.source.endswith('.txt') or (self.is_url and not self.is_file)
        # Screen capture? (sources named 'screen...')
        screenshot = self.source.lower().startswith('screen')
        if self.is_url and self.is_file:
            self.source = check_file(self.source)  # download

        # Directories
        self.save_dir = increment_path(Path(self.project) / self.name, exist_ok=exist_ok)  # increment run
        (self.save_dir / 'labels' if self.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        self.device = select_device(self.device)
        self.model = DetectMultiBackend(weights, device=self.device, dnn=self.dnn, data=data, fp16=half)
        self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
        self.imgsz = check_img_size(self.imgsz, s=self.stride)  # check image size

        # Dataloader
        bs = 1  # batch_size
        if self.webcam:  # live stream(s) / webcam
            self.view_img = check_imshow(warn=True)
            dataset = LoadStreams(self.source, img_size=imgsz, stride=self.stride, auto=self.pt, vid_stride=vid_stride)
            bs = len(dataset)
        elif screenshot:  # screen capture
            dataset = LoadScreenshots(self.source, img_size=imgsz, stride=self.stride, auto=self.pt)
        else:  # image / video files
            dataset = LoadImages(self.source, img_size=imgsz, stride=self.stride, auto=self.pt, vid_stride=vid_stride)
        self.vid_path, self.vid_writer = [None] * bs, [None] * bs

        # Run inference (blocks for the lifetime of the stream)
        self.predict_process(dataset)

    def predict_process(self, dataset):
        """Inference producer.

        Starts one consumer thread per class filter, then runs the model on
        every frame from `dataset` and pushes the raw (pre-NMS) predictions
        to each consumer's queue.
        """
        # Consumer for class 0; bounded queue applies backpressure to the
        # producer if this consumer falls behind.
        queue1 = Queue(100)
        thread_1 = threading.Thread(target=self.test, args=(0, queue1))
        thread_1.start()

        # Consumer for class 67. NOTE(review): maxsize=-1 means unbounded —
        # if this consumer stalls, memory grows without limit; confirm the
        # asymmetry with queue1 is intentional.
        queue2 = Queue(maxsize=-1)
        thread_2 = threading.Thread(target=self.test, args=(67, queue2))
        thread_2.start()

        self.seen, self.windows, self.dt = 0, [], (Profile(), Profile(), Profile())
        for path, im, im0s, vid_cap, s in dataset:
            with self.dt[0]:
                im = torch.from_numpy(im).to(self.model.device)
                im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                if len(im.shape) == 3:
                    im = im[None]  # expand for batch dim

            # Inference
            with self.dt[1]:
                self.visualize = increment_path(self.save_dir / Path(path).stem, mkdir=True) if self.visualize else False
                pred = self.model(im, augment=self.augment, visualize=self.visualize)

                print(pred)
                # NOTE(review): these attributes are read concurrently by the
                # consumer threads without any lock, so a slow consumer may
                # pair a queued frame with a *later* frame's path/metadata.
                self.path = path
                self.im = im
                self.im0s = im0s
                self.vid_cap = vid_cap
                self.dataset = dataset
                self.pred = pred
                queue1.put([pred, dataset, im, im0s])
                queue2.put([pred, dataset, im, im0s])
                cv2.waitKey(1)

    def genertor(self):
        """Placeholder infinite generator yielding a constant string.

        (The misspelled name is kept to preserve the public interface.)
        """
        while True:
            yield 'aaaa'

    def test(
            self,
            classes,
            queue,
            conf_thres=0.25,
            iou_thres=0.45,
            agnostic_nms=False,
            max_det=1000,
            line_thickness=3,  # bounding box thickness (pixels)
            save_crop=True,  # save cropped prediction boxes
            view_img=True,  # show results in a window
            hide_labels=False,  # hide labels
            hide_conf=False,  # hide confidences
    ):
        """Consumer thread body.

        Pops raw predictions off `queue`, keeps only `classes`, applies NMS,
        annotates the frame, optionally saves per-detection crops, and (for
        the ``classes == 0`` consumer only) displays the annotated frame.
        """
        while True:
            # BUGFIX: the original called queue.get() twice per iteration —
            # once for the None test and once for the payload — so every
            # other frame was silently discarded. Fetch exactly once.
            data = queue.get()
            if data is None:
                print("continue")
                time.sleep(1)
                continue
            pred, dataset, im, im0s = data[0], data[1], data[2], data[3]

            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
            s = ''
            for i, det in enumerate(pred):  # per image
                if self.webcam:  # batch_size >= 1
                    p, im0, frame = self.path[i], im0s[i].copy(), dataset.count
                    s += f'{classes}: '
                else:
                    p, im0, frame = self.path, im0s.copy(), getattr(dataset, 'frame', 0)

                p = Path(p)  # to Path
                self.save_path = str(self.save_dir / p.name)  # img.jpg
                self.txt_path = str(self.save_dir / 'labels' / p.stem) + (
                    '' if self.dataset.mode == 'image' else f'_{frame}')  # im.txt
                s += '%gx%g ' % im.shape[2:]  # print string
                imc = im0.copy() if save_crop else im0  # crops are cut from a clean copy
                annotator = Annotator(im0, line_width=line_thickness, example=str(self.names))
                if len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                    # Per-class detection counts for the log string
                    for c in det[:, 5].unique():
                        n = (det[:, 5] == c).sum()  # detections per class
                        s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string

                    # Draw results
                    for *xyxy, conf, cls in reversed(det):
                        if self.save_img or save_crop or view_img:  # add bbox to image
                            c = int(cls)  # integer class
                            label = None if hide_labels else (
                                self.names[c] if hide_conf else f'{self.names[c]} {conf:.2f}')
                            annotator.box_label(xyxy, label, color=colors(c, True))
                        if save_crop:
                            # `c` is always bound here: a truthy save_crop
                            # also satisfies the condition above.
                            save_one_box(xyxy, imc, file=self.save_dir / 'crops' / self.names[c] / f'{p.stem}.jpg',
                                         BGR=True)

                # Stream results: only the classes==0 consumer owns the window.
                im0 = annotator.result()
                if classes == 0:
                    if platform.system() == 'Linux' and p not in self.windows:
                        self.windows.append(p)
                        cv2.namedWindow(str(p),
                                        cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
                        cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                    cv2.imshow(str(p), im0)
                    cv2.waitKey(1)  # 1 millisecond


