'''
Video-capture process class: reads frames from a video source and runs
YOLOv5 detection on them in a separate process.
'''
import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
import platform
from itertools import repeat
from multiprocessing import Process
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse

import numpy as np
import torch
from utils.plots import Annotator, colors, save_one_box
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from models.common import DetectMultiBackend
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
                                 cutout, letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
                           cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy,
                           xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
import sys
from concurrent.futures import ThreadPoolExecutor
# Resolve the repository root, make it importable, then re-express it relative
# to the current working directory.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
from utils.torch_utils import select_device
class videoCapture(Process):
    """Per-stream video-capture process.

    Opens a video source (local webcam index, file, or URL), probes its
    resolution/FPS/frame count, then in ``run()`` grabs frames at
    ``vid_stride`` intervals, runs YOLOv5 inference on each kept frame, and
    dispatches NMS + annotation work to a small thread pool.

    NOTE(review): when ``sources`` names a file listing several streams, the
    loop in ``__init__`` overwrites ``self.i`` / ``self.stream`` on every
    iteration, so ``run()`` only ever serves the *last* stream — confirm this
    single-stream behavior is intended.
    """

    def __init__(self, sources, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1,
                 weights=ROOT / 'yolov5s.pt',  # model path or triton URL
                 dnn=False,  # use OpenCV DNN for ONNX inference
                 data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
                 half=True,  # use FP16 half-precision inference
                 visualize=False,  # visualize features
                 device='0',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
                 project=ROOT / 'runs/detect',  # save results to project/name
                 name='exp',  # save results to project/name
                 exist_ok=False,  # existing project/name ok, do not increment
                 imgsz=(640, 640),  # inference size (height, width)
                 ):
        super(videoCapture, self).__init__()
        imgsz = check_img_size(imgsz, s=stride)  # ensure size is a multiple of stride
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.mode = 'stream'  # data mode
        self.img_size = imgsz  # validated inference size, used for letterboxing
        self.stride = stride
        self.vid_stride = vid_stride  # video frame-rate stride
        # `sources` may be a file listing one source per line, or a single source string
        sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources]
        n = len(sources)
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        for i, s in enumerate(sources):  # index, source
            st = f'{i + 1}/{n}: {s}... '
            if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # YouTube video
                check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            # SECURITY: int(), not eval(), to turn '0' into a webcam index — the
            # isnumeric() guard makes int() the exact safe equivalent.
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            if s == 0:
                assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
                assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
            # BUGFIX: was hard-coded cv2.VideoCapture(0), which ignored the
            # configured source `s` (the assert below already reports `s`).
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'{st}Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback

            _, self.imgs[i] = cap.read()  # guarantee first frame
            LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.i = i  # NOTE: overwritten each iteration; only the last stream is used by run()
            self.stream = s
        LOGGER.info('')  # newline

        # Check for common shapes: rect inference only when every stream
        # letterboxes to the same shape. Use the validated self.img_size so the
        # check matches what process() actually feeds the model.
        shapes = np.stack([letterbox(x, self.img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
        self.rect = np.unique(shapes, axis=0).shape[0] == 1  # rect inference if all shapes equal
        self.auto = auto and self.rect
        self.transforms = transforms  # optional per-image transform callable
        if not self.rect:
            LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
        self.save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run

    def run(self):
        """Process entry point: grab frames from the stream and run detection.

        The model and CUDA device are created here, inside the child process,
        rather than in __init__ (which runs in the parent).
        """
        i = self.i
        # BUGFIX: was hard-coded cv2.VideoCapture(0); use the configured stream.
        cap = cv2.VideoCapture(self.stream)
        device = select_device('0')
        model = DetectMultiBackend(ROOT / 'yolov5s.pt', device=device, dnn=False,
                                   data=ROOT / 'data/coco128.yaml', fp16=True)
        thread_pool = ThreadPoolExecutor(max_workers=2)
        n, f = 0, self.frames[i]  # frame number, total frame count
        while cap.isOpened() and n < f:
            n += 1
            cap.grab()  # .read() = .grab() followed by .retrieve()
            if n % self.vid_stride == 0:  # keep every vid_stride-th frame
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                    self.process(model, thread_pool)
                else:
                    LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
                    self.imgs[i] = np.zeros_like(self.imgs[i])
                    cap.open(self.stream)  # re-open stream if signal was lost
            time.sleep(0.0)  # yield to other threads

    def process(self,
                model,
                theard_pool,  # worker thread pool (param name kept for backward compatibility)
                name='exp',
                project=ROOT / 'runs/detect',  # save results to project/name
                exist_ok=False,  # existing project/name ok, do not increment
                visualize=False,  # visualize features
                augment=False,  # augmented inference
                ):
        """Pre-process the latest frames, run inference, and hand the raw
        predictions to worker threads for NMS + annotation.

        Returns (sources, im, im0, None, '') in the YOLOv5 loader tuple shape.
        """
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)

        im0 = self.imgs.copy()
        if self.transforms:
            im = np.stack([self.transforms(x) for x in im0])  # user-supplied transforms
        else:
            im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize
            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
            im = np.ascontiguousarray(im)  # contiguous

        im = torch.from_numpy(im).to(model.device)
        im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

        # Inference
        path = "0"
        visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
        pred = model(im, augment=augment, visualize=visualize)

        # Post-process the same predictions twice, each filtered to one class
        # (0 and 67). submit() with explicit args instead of a lambda so worker
        # exceptions are attached to a well-formed Future call.
        theard_pool.submit(self.process2, model, pred, im, im0, 0)
        theard_pool.submit(self.process2, model, pred, im, im0, 67)

        return self.sources, im, im0, None, ''

    def process2(self,
                 model,
                 pred,
                 im,
                 im0,
                 classes,  # class filter passed to NMS (e.g. 0 or 67)
                 conf_thres=0.25,
                 iou_thres=0.45,
                 agnostic_nms=False,
                 max_det=1000,
                 line_thickness=3,  # bounding box thickness (pixels)
                 save_crop=True,  # save cropped prediction boxes
                 view_img=True,  # show annotated results in a window
                 hide_labels=False,  # hide labels
                 hide_conf=False,  # hide confidences
                 ):
        """Run NMS on raw predictions, draw boxes, and optionally display."""
        stride, names, pt = model.stride, model.names, model.pt

        # NMS, restricted to `classes`
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)

        for i, det in enumerate(pred):  # per image
            # BUGFIX: was `im0 = im0[i].copy()`, which rebound the batch list to
            # a single image and broke `im0[i]` for every later iteration.
            img = im0[i].copy()
            gn = torch.tensor(img.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            annotator = Annotator(img, line_width=line_thickness)
            if len(det):
                # Rescale boxes from inference size to original image size
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)  # integer class
                    label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                    annotator.box_label(xyxy, label, color=colors(c, True))

            # Stream results
            img = annotator.result()
            if view_img:
                cv2.imshow("frame", img)
                cv2.waitKey(1)  # 1 millisecond
