# Ultralytics YOLO 🚀, AGPL-3.0 license

import glob
import math
import os
import time
from dataclasses import dataclass
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse

import cv2
import numpy as np
import requests
import torch
from PIL import Image

from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.utils import LOGGER, is_colab, is_kaggle, ops
from ultralytics.utils.checks import check_requirements


@dataclass
class SourceTypes:
    """Class to represent various types of input sources for predictions."""
    webcam: bool = False  # source is a webcam or live video stream
    screenshot: bool = False  # source is a screen capture
    from_img: bool = False  # source is an in-memory image (PIL image or numpy array)
    tensor: bool = False  # source is a torch.Tensor


class LoadStreams:
    """
    Stream Loader for various types of video streams.

    Suitable for use with `yolo predict source='rtsp://example.com/media.mp4'`, supports RTSP, RTMP, HTTP, and TCP streams.

    Attributes:
        sources (str): The source input paths or URLs for the video streams.
        imgsz (int): The image size for processing, defaults to 640.
        vid_stride (int): Video frame-rate stride, defaults to 1.
        buffer (bool): Whether to buffer input streams, defaults to False.
        running (bool): Flag to indicate if the streaming thread is running.
        mode (str): Set to 'stream' indicating real-time capture.
        imgs (list): List of image frames for each stream.
        fps (list): List of FPS for each stream.
        frames (list): List of total frames for each stream.
        threads (list): List of threads for each stream.
        shape (list): List of shapes for each stream.
        caps (list): List of cv2.VideoCapture objects for each stream.
        bs (int): Batch size for processing.

    Methods:
        __init__: Initialize the stream loader.
        update: Read stream frames in daemon thread.
        close: Close stream loader and release resources.
        __iter__: Returns an iterator object for the class.
        __next__: Returns source paths, transformed, and original images for processing.
        __len__: Return the length of the sources object.
    """

    def __init__(self, sources='file.streams', imgsz=640, vid_stride=1, buffer=False):
        """Initialize instance variables and check for consistent input stream shapes."""
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.buffer = buffer  # buffer input streams
        self.running = True  # running flag for Thread
        self.mode = 'stream'
        self.imgsz = imgsz
        self.vid_stride = vid_stride  # video frame-rate stride
        sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
        n = len(sources)
        self.sources = [ops.clean_str(x) for x in sources]  # clean source names for later
        # BUGFIX: `[[]] * n` creates n references to ONE shared list, so `self.imgs[i].append(...)`
        # would push frames from every stream into the same buffer. Build independent lists instead.
        self.imgs = [[] for _ in range(n)]  # per-stream frame buffers
        self.shape = [[] for _ in range(n)]  # per-stream frame shapes
        self.fps = [0] * n  # per-stream frames-per-second
        self.frames = [0] * n  # per-stream total frame counts
        self.threads = [None] * n  # per-stream reader threads
        self.caps = [None] * n  # video capture objects
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            st = f'{i + 1}/{n}: {s}... '
            if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
                s = get_best_youtube_url(s)
            # BUGFIX: int() instead of eval() — eval() would execute arbitrary code embedded in the source string
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            if s == 0 and (is_colab() or is_kaggle()):
                raise NotImplementedError("'source=0' webcam not supported in Colab and Kaggle notebooks. "
                                          "Try running 'source=0' in a local environment.")
            self.caps[i] = cv2.VideoCapture(s)  # store video capture object
            if not self.caps[i].isOpened():
                raise ConnectionError(f'{st}Failed to open {s}')
            w = int(self.caps[i].get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(self.caps[i].get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = self.caps[i].get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan
            self.frames[i] = max(int(self.caps[i].get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float(
                'inf')  # infinite stream fallback
            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback

            success, im = self.caps[i].read()  # guarantee first frame
            if not success or im is None:
                raise ConnectionError(f'{st}Failed to read images from {s}')
            self.imgs[i].append(im)
            self.shape[i] = im.shape
            # args as a proper tuple (the previous parenthesized list worked only by accident)
            self.threads[i] = Thread(target=self.update, args=(i, self.caps[i], s), daemon=True)
            LOGGER.info(f'{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)')
            self.threads[i].start()
        LOGGER.info('')  # newline

        # Check for common shapes
        self.bs = self.__len__()

    def update(self, i, cap, stream):
        """Read stream `i` frames in daemon thread."""
        n, f = 0, self.frames[i]  # frame number, frame array
        while self.running and cap.isOpened() and n < (f - 1):
            if len(self.imgs[i]) < 30:  # keep a <=30-image buffer
                n += 1
                cap.grab()  # .read() = .grab() followed by .retrieve()
                if n % self.vid_stride == 0:
                    success, im = cap.retrieve()
                    if not success:
                        im = np.zeros(self.shape[i], dtype=np.uint8)  # substitute a black frame
                        LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
                        cap.open(stream)  # re-open stream if signal was lost
                    if self.buffer:
                        self.imgs[i].append(im)  # buffered: keep every frame
                    else:
                        self.imgs[i] = [im]  # unbuffered: keep only the latest frame
            else:
                time.sleep(0.01)  # wait until the buffer is empty

    def close(self):
        """Close stream loader and release resources."""
        self.running = False  # stop flag for Thread
        for thread in self.threads:
            if thread.is_alive():
                thread.join(timeout=5)  # Add timeout
        for cap in self.caps:  # Iterate through the stored VideoCapture objects
            try:
                cap.release()  # release video capture
            except Exception as e:
                LOGGER.warning(f'WARNING ⚠️ Could not release VideoCapture object: {e}')
        cv2.destroyAllWindows()

    def __iter__(self):
        """Iterates through YOLO image feed and re-opens unresponsive streams."""
        self.count = -1
        return self

    def __next__(self):
        """Returns source paths, transformed and original images for processing."""
        self.count += 1

        images = []
        for i, x in enumerate(self.imgs):

            # Wait until a frame is available in each buffer
            while not x:
                if not self.threads[i].is_alive() or cv2.waitKey(1) == ord('q'):  # q to quit
                    self.close()
                    raise StopIteration
                time.sleep(1 / min(self.fps))
                x = self.imgs[i]
                if not x:
                    LOGGER.warning(f'WARNING ⚠️ Waiting for stream {i}')

            # Get and remove the first frame from imgs buffer
            if self.buffer:
                images.append(x.pop(0))

            # Get the last frame, and clear the rest from the imgs buffer
            else:
                images.append(x.pop(-1) if x else np.zeros(self.shape[i], dtype=np.uint8))
                x.clear()

        return self.sources, images, None, ''

    def __len__(self):
        """Return the length of the sources object."""
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years


class LoadScreenshots:
    """
    YOLOv8 screenshot dataloader.

    Provides screen-capture frames for YOLOv8 inference, e.g. `yolo predict source=screen`.

    Attributes:
        source (str): The source input indicating which screen to capture.
        imgsz (int): The image size for processing, defaults to 640.
        screen (int): The screen number to capture.
        left (int): The left coordinate for screen capture area.
        top (int): The top coordinate for screen capture area.
        width (int): The width of the screen capture area.
        height (int): The height of the screen capture area.
        mode (str): Set to 'stream' indicating real-time capture.
        frame (int): Counter for captured frames.
        sct (mss.mss): Screen capture object from `mss` library.
        bs (int): Batch size, set to 1.
        monitor (dict): Monitor configuration details.

    Methods:
        __iter__: Returns an iterator object.
        __next__: Captures the next screenshot and returns it.
    """

    def __init__(self, source, imgsz=640):
        """Source = [screen_number left top width height] (pixels)."""
        check_requirements('mss')
        import mss  # noqa

        source, *params = source.split()
        self.screen = 0  # default: full screen 0
        left = top = width = height = None
        n_params = len(params)
        if n_params == 1:  # only the screen number was given
            self.screen = int(params[0])
        elif n_params == 4:  # capture-region coordinates only
            left, top, width, height = map(int, params)
        elif n_params == 5:  # screen number plus capture-region coordinates
            self.screen, left, top, width, height = map(int, params)
        self.imgsz = imgsz
        self.mode = 'stream'
        self.frame = 0
        self.sct = mss.mss()
        self.bs = 1

        # Resolve the capture region against the selected monitor's geometry
        mon = self.sct.monitors[self.screen]
        self.top = mon['top'] if top is None else (mon['top'] + top)
        self.left = mon['left'] if left is None else (mon['left'] + left)
        self.width = width or mon['width']
        self.height = height or mon['height']
        self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}

    def __iter__(self):
        """Returns an iterator of the object."""
        return self

    def __next__(self):
        """mss screen capture: get raw pixels from the screen as np array."""
        raw = self.sct.grab(self.monitor)
        im0 = np.asarray(raw)[:, :, :3]  # drop alpha channel: BGRA to BGR
        s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '

        self.frame += 1
        return [str(self.screen)], [im0], None, s  # screen, img, vid_cap, string


class LoadImages:
    """
    YOLOv8 image/video dataloader.

    Manages loading and pre-processing of image and video data for YOLOv8. Supports loading from a variety of
    formats, including single image files, video files, and lists of image and video paths.

    Attributes:
        imgsz (int): Image size, defaults to 640.
        files (list): List of image and video file paths.
        nf (int): Total number of files (images and videos).
        video_flag (list): Flags indicating whether a file is a video (True) or an image (False).
        mode (str): Current mode, 'image' or 'video'.
        vid_stride (int): Video frame-rate stride, defaults to 1.
        bs (int): Batch size, set to 1 for this class.
        cap (cv2.VideoCapture): Video capture object for OpenCV.
        frame (int): Frame counter for video.
        frames (int): Total number of frames in the video.
        count (int): Counter for iteration, initialized at 0 during `__iter__()`.

    Methods:
        _new_video(path): Creates a new cv2.VideoCapture object for a given video path.
    """

    def __init__(self, path, imgsz=640, vid_stride=1):
        """Initialize the Dataloader and raise FileNotFoundError if file not found."""
        parent = None
        if isinstance(path, str) and Path(path).suffix == '.txt':  # *.txt file with img/vid/dir paths, one per line
            parent = Path(path).parent  # parent directory of the *.txt file
            path = Path(path).read_text().splitlines()  # list of sources
        files = []
        for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
            a = str(Path(p).absolute())  # intentionally not .resolve() to avoid potential symlink issues
            if '*' in a:  # wildcard pattern
                files.extend(sorted(glob.glob(a, recursive=True)))  # glob
            elif os.path.isdir(a):  # directory
                files.extend(sorted(glob.glob(os.path.join(a, '*.*'))))  # all files in dir
            elif os.path.isfile(a):  # plain file
                files.append(a)
            elif parent and (parent / p).is_file():  # file path relative to the *.txt file's parent
                files.append(str((parent / p).absolute()))
            else:
                raise FileNotFoundError(f'{p} does not exist')

        # Split into images and videos by file suffix
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)  # number of images and videos

        self.imgsz = imgsz
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # False for images, True for videos
        self.mode = 'image'
        self.vid_stride = vid_stride  # video frame-rate stride
        self.bs = 1
        if videos:  # idiomatic truthiness check (was `any(videos)`)
            self._new_video(videos[0])  # open the first video
        else:
            self.cap = None
        if self.nf == 0:
            # NOTE(review): `p` is the last path seen in the loop above and would be undefined if
            # `path` were an empty list — confirm callers never pass one.
            raise FileNotFoundError(f'No images or videos found in {p}. '
                                    f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}')

    def __iter__(self):
        """Returns an iterator object for VideoStream or ImageFolder."""
        self.count = 0
        return self

    def __next__(self):
        """Return the next image, its path and metadata from the dataset."""
        if self.count == self.nf:  # all files consumed
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:  # current file is a video
            # Read video
            self.mode = 'video'
            for _ in range(self.vid_stride):  # skip frames according to the stride
                self.cap.grab()  # grab without decoding
            success, im0 = self.cap.retrieve()  # decode the last grabbed frame
            while not success:  # decode failed: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video processed
                    raise StopIteration
                path = self.files[self.count]
                self._new_video(path)
                success, im0 = self.cap.read()

            self.frame += 1  # advance frame counter for the current video
            # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '

        else:  # current file is an image
            # Read image
            self.count += 1
            im0 = cv2.imread(path)  # BGR
            if im0 is None:
                raise FileNotFoundError(f'Image Not Found {path}')
            s = f'image {self.count}/{self.nf} {path}: '

        return [path], [im0], self.cap, s  # path, image, video capture object, metadata string

    def _new_video(self, path):
        """Create a new video capture object."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)

    def __len__(self):
        """Returns the number of files in the object."""
        return self.nf  # number of files


class LoadMultiImages(LoadImages):
    """
    Dual-source variant of LoadImages that loads paired '_t' / '_ll' inputs.

    For videos, a second cv2.VideoCapture is opened for the companion file (stem '_t' replaced by '_ll')
    and both captures are read in lockstep. For images, the path's counterpart (derived via string
    replacement of 'channel2'->'channel' and 't.jpg'->'ll.jpg') is read as grayscale and fused with the
    original into a 3-channel image.
    """

    def __init__(self, path, imgsz=640, vid_stride=1):
        """Initialize the dual-source dataloader (inherits from LoadImages)."""
        # BUGFIX: cap_2 must be initialized BEFORE super().__init__(), because the parent constructor
        # calls the overridden _new_video(), which assigns the real second capture object. The original
        # code set cap_2 = None afterwards, clobbering the opened capture.
        self.cap_2 = None
        super().__init__(path, imgsz, vid_stride)

    def _new_video(self, path):
        """Create capture objects for `path` and its companion '_ll' video."""
        # Derive the second path: '<stem>_t<suffix>' -> '<stem>_ll<suffix>'
        path_obj = Path(path)
        stem = path_obj.stem
        if stem.endswith('_t'):
            path_2 = path_obj.with_name(stem[:-2] + '_ll' + path_obj.suffix)
        else:
            raise ValueError(f"文件名 {stem} 不符合要求")  # filename must end with '_t'

        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.cap_2 = cv2.VideoCapture(str(path_2))  # companion video capture
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)

    def __next__(self):
        """Return the next image (or paired video frame), its path and metadata."""
        if self.count == self.nf:  # all files consumed
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:  # current file is a video
            # Read both video streams in lockstep
            self.mode = 'video'
            for _ in range(self.vid_stride):  # skip frames according to the stride
                self.cap.grab()
                self.cap_2.grab()
            success, im0_1 = self.cap.retrieve()  # im0_1 kept for potential fusion (see commented-out merge)
            success_2, im0_2 = self.cap_2.retrieve()
            # gray1 = cv2.cvtColor(im0_1, cv2.COLOR_BGR2GRAY)
            # gray2 = cv2.cvtColor(im0_2, cv2.COLOR_BGR2GRAY)
            # fused_gray = cv2.addWeighted(gray1, 0.5, gray2, 0.5, 0)
            # im0 = cv2.merge([gray2, gray1, fused_gray])
            # BUGFIX: the original called cv2.imread(im0_2), but imread() takes a file path, not a
            # decoded frame array — use the frame directly.
            im0 = im0_2
            while not (success and success_2):  # either decode failed: advance to the next file
                self.count += 1
                self.cap.release()
                self.cap_2.release()
                if self.count == self.nf:  # last video processed
                    raise StopIteration
                path = self.files[self.count]
                self._new_video(path)
                success, im0_1 = self.cap.read()
                success_2, im0_2 = self.cap_2.read()
                im0 = im0_2  # same fix as above (was cv2.imread(im0_2))

            self.frame += 1  # advance frame counter for the current video
            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '

        else:  # current file is an image
            # Read and fuse the paired grayscale images
            self.count += 1
            ft = path  # original '_t' image path
            fll = Path(str(ft).replace('channel2', 'channel').replace('t.jpg', 'll.jpg'))  # companion path
            img_st = cv2.imread(str(ft), cv2.IMREAD_GRAYSCALE)
            img_nd = cv2.imread(str(fll), cv2.IMREAD_GRAYSCALE)
            if img_st is None or img_nd is None:
                raise ValueError("Failed to read ft or fll image.")
            img_rd = cv2.addWeighted(img_st, 0.5, img_nd, 0.5, 0)  # 50/50 blend as the third channel
            im0 = cv2.merge((img_st, img_nd, img_rd))  # stack into a 3-channel image
            s = f'image {self.count}/{self.nf} {path}: '

        return [path], [im0], self.cap, s  # path, image, video capture object, metadata string

class LoadPilAndNumpy:
    """
    Load images from PIL and Numpy arrays for batch processing.

    Validates incoming PIL/numpy images and converts them to the numpy BGR layout expected by the rest
    of the pipeline, exposing the whole batch through a single-pass iterator.

    Attributes:
        paths (list): List of image paths or autogenerated filenames.
        im0 (list): List of images stored as Numpy arrays.
        imgsz (int): Image size, defaults to 640.
        mode (str): Type of data being processed, defaults to 'image'.
        bs (int): Batch size, equivalent to the length of `im0`.
        count (int): Counter for iteration, initialized at 0 during `__iter__()`.

    Methods:
        _single_check(im): Validate and format a single image to a Numpy array.
    """

    def __init__(self, im0, imgsz=640):
        """Initialize PIL and Numpy Dataloader."""
        im0 = im0 if isinstance(im0, list) else [im0]
        # Fall back to autogenerated names when an image carries no filename attribute
        self.paths = [getattr(img, 'filename', f'image{i}.jpg') for i, img in enumerate(im0)]
        self.im0 = [self._single_check(img) for img in im0]
        self.imgsz = imgsz
        self.mode = 'image'
        self.bs = len(self.im0)  # batch size equals the number of images

    @staticmethod
    def _single_check(im):
        """Validate and format an image to numpy array."""
        assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}'
        if not isinstance(im, Image.Image):
            return im  # already a numpy array
        if im.mode != 'RGB':
            im = im.convert('RGB')
        return np.ascontiguousarray(np.asarray(im)[:, :, ::-1])  # RGB -> BGR, contiguous

    def __len__(self):
        """Return the number of loaded images."""
        return len(self.im0)

    def __next__(self):
        """Return the whole batch once, then stop (single-pass batch inference)."""
        if self.count == 1:
            raise StopIteration
        self.count += 1
        return self.paths, self.im0, None, ''

    def __iter__(self):
        """Reset the iteration counter and return self."""
        self.count = 0
        return self


class LoadTensor:
    """
    Load images from torch.Tensor data.

    Wraps an already-decoded BCHW image tensor for single-pass batch inference.

    Attributes:
        im0 (torch.Tensor): The input tensor containing the image(s).
        bs (int): Batch size, inferred from the shape of `im0`.
        mode (str): Current mode, set to 'image'.
        paths (list): List of image paths or filenames.
        count (int): Counter for iteration, initialized at 0 during `__iter__()`.

    Methods:
        _single_check(im, stride): Validate and possibly modify the input tensor.
    """

    def __init__(self, im0) -> None:
        """Initialize Tensor Dataloader."""
        self.im0 = self._single_check(im0)
        self.bs = self.im0.shape[0]  # batch dimension
        self.mode = 'image'
        # Tensors carry no filenames, so placeholders are autogenerated
        self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)]

    @staticmethod
    def _single_check(im, stride=32):
        """Validate and format an image to torch.Tensor."""
        s = (f'WARNING ⚠️ torch.Tensor inputs should be BCHW i.e. shape(1, 3, 640, 640) '
             f'divisible by stride {stride}. Input shape{tuple(im.shape)} is incompatible.')
        ndim = len(im.shape)
        if ndim == 3:  # CHW: warn, then add the missing batch dimension
            LOGGER.warning(s)
            im = im.unsqueeze(0)
        elif ndim != 4:  # anything other than CHW/BCHW is rejected
            raise ValueError(s)
        if im.shape[2] % stride or im.shape[3] % stride:  # H and W must be stride-divisible
            raise ValueError(s)
        if im.max() > 1.0:  # auto-normalize 0-255 inputs
            LOGGER.warning(f'WARNING ⚠️ torch.Tensor inputs should be normalized 0.0-1.0 but max value is {im.max()}. '
                           f'Dividing input by 255.')
            im = im.float() / 255.0

        return im

    def __iter__(self):
        """Reset the iteration counter and return self."""
        self.count = 0
        return self

    def __next__(self):
        """Return the whole batch once, then stop."""
        if self.count == 1:
            raise StopIteration
        self.count += 1
        return self.paths, self.im0, None, ''

    def __len__(self):
        """Returns the batch size."""
        return self.bs


def autocast_list(source):
    """Merges a list of source of different types into a list of numpy arrays or PIL images."""
    merged = []
    for item in source:
        if isinstance(item, (str, Path)):  # filename or URI
            stream = requests.get(item, stream=True).raw if str(item).startswith('http') else item
            merged.append(Image.open(stream))
        elif isinstance(item, (Image.Image, np.ndarray)):  # already a PIL or numpy image
            merged.append(item)
        else:
            raise TypeError(f'type {type(item).__name__} is not a supported Ultralytics prediction source type. \n'
                            f'See https://docs.ultralytics.com/modes/predict for supported source types.')

    return merged


LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots)  # tuple of supported loader classes


def get_best_youtube_url(url, use_pafy=False):
    """
    Retrieves the URL of the best quality MP4 video stream from a given YouTube video.

    This function uses the pafy or yt_dlp library to extract the video info from YouTube. It then finds the highest
    quality MP4 format that has video codec but no audio codec, and returns the URL of this video stream.

    Args:
        url (str): The URL of the YouTube video.
        use_pafy (bool): Use the pafy package if True, otherwise use the yt_dlp package. Defaults to False.

    Returns:
        (str): The URL of the best quality MP4 video stream, or None if no suitable stream is found.
    """
    if use_pafy:
        check_requirements(('pafy', 'youtube_dl==2020.12.2'))
        import pafy  # noqa
        return pafy.new(url).getbestvideo(preftype='mp4').url
    else:
        check_requirements('yt-dlp')
        import yt_dlp
        with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
            info_dict = ydl.extract_info(url, download=False)  # extract info
        for f in reversed(info_dict.get('formats', [])):  # reversed because best is usually last
            # Find a format with video codec, no audio, *.mp4 extension at least 1920x1080 size
            # (width OR height threshold, so vertical videos also qualify)
            good_size = (f.get('width') or 0) >= 1920 or (f.get('height') or 0) >= 1080
            if good_size and f['vcodec'] != 'none' and f['acodec'] == 'none' and f['ext'] == 'mp4':
                return f.get('url')
