# YOLOv8 🚀 by Ultralytics, GPL-3.0 license
"""
Run YOLOv8 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.

Usage - sources:
    $ python yolov8_detect.py --weights yolov8n.pt --source 0                               # webcam
                                                          img.jpg                         # image
                                                          vid.mp4                         # video
                                                          path/                           # directory
                                                          'path/*.jpg'                    # glob
                                                          'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                          'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
"""

import argparse
import os
import platform
import sys
from pathlib import Path
import torch
import cv2
import numpy as np
from collections import deque
import glob
import re

# 导入YOLOv8
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

# Resolve the project root directory and make it importable.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv8 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to sys.path so sibling modules resolve
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path from the current working directory

class FatigueDetector:
    """Fatigue-driving detector.

    Keeps a sliding window of the behavior labels detected in recent frames
    and maps the ratio of dangerous behaviors to a discrete fatigue level.
    """

    def __init__(self):
        # Behavior class names considered dangerous while driving.
        self.dangerous_classes = ['closed_eyes', 'yawning', 'head_down', 'using_phone', 'distracted']
        # Sliding window of per-frame label lists (last 30 analyzed frames).
        self.detection_history = deque(maxlen=30)

    def analyze_fatigue_level(self, current_detections):
        """Record this frame's labels and return the current fatigue level.

        Args:
            current_detections (list[str]): class labels detected in the
                current frame (may be empty).

        Returns:
            int: 0 = normal, 1 = mild, 2 = moderate, 3 = severe fatigue.
        """
        # BUGFIX: record the frame even when it has no detections. Previously
        # an empty frame returned 0 ("normal") immediately, masking a dangerous
        # recent history and leaving the sliding window stale.
        self.detection_history.append(list(current_detections))

        # Count dangerous behaviors across the whole window.
        dangerous_count = 0
        total_detections = 0
        for detections in self.detection_history:
            for detection in detections:
                total_detections += 1
                if detection in self.dangerous_classes:
                    dangerous_count += 1

        if total_detections == 0:
            return 0  # nothing observed yet

        danger_ratio = dangerous_count / total_detections

        # Map the dangerous-behavior ratio to a discrete level.
        # NOTE: empty frames do not dilute the ratio (only detections count).
        if danger_ratio > 0.6:
            return 3  # severe fatigue
        elif danger_ratio > 0.4:
            return 2  # moderate fatigue
        elif danger_ratio > 0.2:
            return 1  # mild fatigue
        return 0  # normal

def run(
        weights=ROOT / 'yolov8n.pt',  # model path
        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        imgsz=640,  # inference size
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        vid_stride=1,  # video frame-rate stride
        fatigue_detection=False,  # enable fatigue detection
):
    """Run YOLOv8 inference on *source* and optionally overlay a fatigue level.

    Loads the model, iterates frames from images/videos/streams, draws
    detections, optionally writes label files, crops and rendered media, and —
    when ``fatigue_detection`` is True — analyzes the detected behavior classes
    with a FatigueDetector and draws the resulting level on each frame.

    NOTE(review): ``data``, ``visualize``, ``update``, ``half`` and ``dnn`` are
    accepted for CLI compatibility but are never used on this YOLOv8 code path.
    """
    # Fatigue analyzer (sliding-window vote over detected behavior classes).
    fatigue_detector = FatigueDetector() if fatigue_detection else None

    # 1. Classify the source: local file, URL, webcam index, or screen capture.
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in IMG_FORMATS + VID_FORMATS
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')  # NOTE(review): computed but never used below

    # 2. Create the (auto-incremented) results directory.
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)

    # 3. Select device and load the model.
    device = select_device(device)  # NOTE(review): selected device is not passed to YOLO/predict — confirm intended
    model = YOLO(weights)

    # 4. Build the data loader (streams for webcam/URL, files otherwise).
    if webcam:
        view_img = check_imshow()
        dataset = LoadStreams(source, img_size=imgsz, vid_stride=vid_stride)
        bs = len(dataset)  # batch_size
    else:
        dataset = LoadImages(source, img_size=imgsz, vid_stride=vid_stride)
        bs = 1

    # 5. Inference loop. Timers: dt[0] pre-process, dt[1] inference, dt[2] post-process.
    seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
    for path, im, im0s, vid_cap, s in dataset:
        with dt[0]:
            # YOLOv8 handles image pre-processing internally; nothing to do here.
            pass

        # Inference
        with dt[1]:
            # Run YOLOv8 prediction directly on the raw frame(s).
            results = model.predict(
                im0s,  # original image(s), no manual letterboxing
                conf=conf_thres,
                iou=iou_thres,
                imgsz=imgsz,
                max_det=max_det,
                classes=classes,
                augment=augment,
                verbose=False
            )

        # Process predictions
        with dt[2]:
            for i, result in enumerate(results):
                if webcam:  # batch_size >= 1
                    p, im0, frame = path[i], im0s[i].copy(), dataset.count
                    s += f'{i}: '
                else:
                    p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

                p = Path(p)
                save_path = str(save_dir / p.name)  # output media path
                txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')
                s += '%gx%g ' % im0.shape[:2]  # print string

                # Drawing helper for boxes/labels on this frame.
                annotator = Annotator(im0, line_width=line_thickness, example=str(model.names))

                # Per-detection processing.
                if result.boxes is not None and len(result.boxes):
                    # Class labels detected in this frame (input to fatigue analysis).
                    current_detections = []
                    boxes = result.boxes

                    for box in boxes:
                        # Box coordinates, confidence and integer class index.
                        xyxy = box.xyxy[0].cpu().numpy()
                        conf = box.conf[0].cpu().numpy()
                        cls = box.cls[0].cpu().numpy().astype(int)

                        label = model.names[cls]
                        current_detections.append(label)

                        # Draw the labeled box.
                        if not hide_labels:
                            label_text = f'{label}' if hide_conf else f'{label} {conf:.2f}'
                            annotator.box_label(xyxy, label_text, color=colors(cls, True))

                        # Append one YOLO-format line per detection to the label file.
                        if save_txt:
                            # Normalize xywh by image size (whwh gain).
                            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
                            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)
                            with open(f'{txt_path}.txt', 'a') as f:
                                f.write(('%g ' * len(line)).rstrip() % line + '\n')

                        # Save the cropped detection box.
                        if save_crop:
                            save_one_box(xyxy, im0, file=save_dir / 'crops' / model.names[cls] / f'{p.stem}.jpg', BGR=True)

                    # Per-class counts for the log line.
                    # NOTE(review): n is a 0-dim tensor; 's' * (n > 1) relies on tensor
                    # integer conversion — confirm it renders as intended.
                    for c in torch.unique(boxes.cls):
                        n = (boxes.cls == c).sum()
                        s += f"{n} {model.names[int(c)]}{'s' * (n > 1)}, "

                    # Fatigue analysis overlay.
                    if fatigue_detection and fatigue_detector:
                        fatigue_level = fatigue_detector.analyze_fatigue_level(current_detections)

                        # Level -> (banner text, BGR color).
                        level_texts = {0: "正常驾驶", 1: "轻度疲劳", 2: "中度疲劳", 3: "重度疲劳"}
                        level_colors = {0: (0, 255, 0), 1: (0, 255, 255), 2: (0, 165, 255), 3: (0, 0, 255)}

                        text = level_texts.get(fatigue_level, "未知")
                        color = level_colors.get(fatigue_level, (255, 255, 255))

                        # NOTE(review): cv2.putText Hershey fonts cannot render CJK glyphs;
                        # these Chinese strings will likely appear as '?' — verify on target.
                        cv2.putText(im0, f"疲劳等级: {text}", (10, 30), 
                                   cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)

                        if fatigue_level >= 2:
                            cv2.putText(im0, "警告: 请休息!", (10, 70), 
                                       cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)

                # Display the annotated frame.
                im0 = annotator.result()
                if view_img:
                    if platform.system() == 'Linux' and p not in windows:
                        windows.append(p)
                        cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
                        cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
                    cv2.imshow(str(p), im0)
                    cv2.waitKey(1)  # 1 millisecond

                # Persist results (still image, video file, or stream).
                if save_img:
                    if dataset.mode == 'image':
                        cv2.imwrite(save_path, im0)
                    else:
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]

                        save_path = str(Path(save_path).with_suffix('.mp4'))
                        # Lazily create one VideoWriter stored on the dataset object.
                        # NOTE(review): the single writer is reused across source videos —
                        # frames of a second video would go to the first file; confirm.
                        if not hasattr(dataset, 'vid_writer') or dataset.vid_writer is None:
                            dataset.vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                        dataset.vid_writer.write(im0)

        # Print time (inference-only)
        print(f"{s}{'' if len(results[0].boxes) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")

    # Summary.
    # NOTE(review): dt values are seconds but the format says ms — likely missing * 1E3.
    t = tuple(x.dt for x in dt)
    print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, imgsz, imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")

# ========== 工具函数 ==========

class Profile:
    """Context-manager stopwatch.

    Usage::

        p = Profile()
        with p:
            work()
        p.dt  # elapsed seconds of the last measured interval
    """

    def __init__(self, t=0.0):
        self.t = t      # start timestamp of the current measurement
        self.dt = 0.0   # duration of the last measured interval (seconds)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):  # renamed: don't shadow builtin `type`
        self.stop()

    def start(self):
        """Mark the start of a measured interval."""
        # BUGFIX: use perf_counter — monotonic and high-resolution. time.time()
        # can jump backwards (NTP/clock adjustments) and corrupt timings.
        self.t = time.perf_counter()

    def stop(self):
        """Record elapsed time since start() into self.dt."""
        self.dt = time.perf_counter() - self.t

def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """Return *path* unchanged, or the first free variant path{sep}2, path{sep}3, ...

    Args:
        path: base file or directory path.
        exist_ok (bool): if True, return *path* even when it exists.
        sep (str): separator placed before the numeric suffix.
        mkdir (bool): if True, create the resulting directory.
    """
    path = Path(path)
    if path.exists() and not exist_ok:
        # For files, insert the number before the extension; for dirs, append it.
        if path.is_file():
            stem, suffix = path.with_suffix(''), path.suffix
        else:
            stem, suffix = path, ''
        for n in range(2, 9999):
            candidate = f'{stem}{sep}{n}{suffix}'
            if not os.path.exists(candidate):
                path = Path(candidate)
                break
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)
    return path

def xyxy2xywh(x):
    """Convert boxes from corners (x1, y1, x2, y2) to center format (cx, cy, w, h).

    Accepts a torch.Tensor or numpy array of shape (..., 4); returns the same type.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    out[..., 0] = (x[..., 0] + x[..., 2]) / 2  # center x
    out[..., 1] = (x[..., 1] + x[..., 3]) / 2  # center y
    out[..., 2] = x[..., 2] - x[..., 0]        # width
    out[..., 3] = x[..., 3] - x[..., 1]        # height
    return out

def xywh2xyxy(x):
    """Convert boxes from center format (cx, cy, w, h) to corners (x1, y1, x2, y2).

    Accepts a torch.Tensor or numpy array of shape (..., 4); returns the same type.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[..., 2] / 2
    half_h = x[..., 3] / 2
    out[..., 0] = x[..., 0] - half_w  # top-left x
    out[..., 1] = x[..., 1] - half_h  # top-left y
    out[..., 2] = x[..., 0] + half_w  # bottom-right x
    out[..., 3] = x[..., 1] + half_h  # bottom-right y
    return out

def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False):
    """Crop a detection box (with margin) out of image *im* and save it to *file*.

    Args:
        xyxy: box as (x1, y1, x2, y2), tensor/array-like.
        im: source image (HWC numpy array).
        file: destination image path.
        gain (float): multiplicative margin applied to box width/height.
        pad (int): additive margin in pixels.
        square (bool): if True, expand the box to a square before cropping.
        BGR (bool): True if *im* is already BGR (OpenCV order); otherwise the
            channel axis is reversed before writing.

    Returns:
        numpy.ndarray: the cropped image region.
    """
    xyxy = torch.tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # xyxy -> (cx, cy, w, h)
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # expand rectangle to square
    b[:, 2:] = b[:, 2:] * gain + pad  # enlarge: wh * gain + pad
    xyxy = xywh2xyxy(b).long()
    clip_boxes(xyxy, im.shape)
    crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
    # BUGFIX: ensure the destination directory exists. run() writes into
    # save_dir/'crops'/<class>/ which is never created elsewhere, so
    # cv2.imwrite silently failed (returned False) for every crop.
    file = Path(file)
    file.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(file), crop)
    return crop

def clip_boxes(boxes, shape):
    """Clamp xyxy boxes in place to the image bounds given by shape = (h, w, ...)."""
    height, width = shape[0], shape[1]
    if isinstance(boxes, torch.Tensor):
        # Clamp each coordinate column individually (in-place tensor ops).
        boxes[..., 0].clamp_(0, width)   # x1
        boxes[..., 1].clamp_(0, height)  # y1
        boxes[..., 2].clamp_(0, width)   # x2
        boxes[..., 3].clamp_(0, height)  # y2
    else:
        # numpy path: clip x and y coordinate pairs together.
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, width)
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, height)

def select_device(device='', batch_size=0):
    """Select a torch device ('cpu' or 'cuda:0') from a device string.

    Sets CUDA_VISIBLE_DEVICES as a side effect and prints a device summary.

    Args:
        device (str): '' (auto), 'cpu', or CUDA indices such as '0' or '0,1,2,3'.
        batch_size (int): if > 0 and multiple GPUs are requested, must be
            divisible by the GPU count.

    Returns:
        torch.device: the selected device.

    Raises:
        AssertionError: if the requested CUDA devices are unavailable or the
            batch size does not split across the requested GPUs.
    """
    s = f'YOLOv8 🚀 {torch.__version__} '
    device = str(device).strip().lower().replace('cuda:', '').replace('none', '')  # normalize, e.g. 'cuda:0' -> '0'
    cpu = device == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() == False
    elif device:
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # restrict visible GPUs to the request
        assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
            f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
    
    if not cpu and torch.cuda.is_available():
        # NOTE(review): when device == '', this is the string '0', not a list;
        # it works only because len('0') == 1 and iterating yields '0'.
        devices = device.split(',') if device else '0'
        n = len(devices)
        if n > 1 and batch_size > 0:  # multi-GPU: batch must split evenly
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n"  # bytes -> MiB
        arg = 'cuda:0'
    else:
        s += 'CPU\n'
        arg = 'cpu'
    
    print(s)
    return torch.device(arg)

def check_imshow():
    """Return True when the environment supports cv2.imshow (e.g. has a display)."""
    try:
        # Probe with a tiny dummy image; headless environments raise here.
        probe = np.zeros((1, 1, 3))
        cv2.imshow('test', probe)
        cv2.waitKey(1)
        cv2.destroyAllWindows()
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow(): {e}')
        return False
    return True

# Supported media file extensions (lowercase, without the leading dot).
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv', 'webm'  # include video suffixes

import time  # NOTE(review): mid-file import (used by Profile above); consider moving to the top import block

class LoadImages:
    """Iterator over image/video files yielding (path, im, im0, cap, log_string).

    Images are yielded one per file; videos are yielded frame by frame
    (respecting ``vid_stride``), rolling over to the next video at EOF.
    ``im`` and ``im0`` are the same BGR array here, since YOLOv8 handles
    pre-processing internally.
    """
    
    def __init__(self, path, img_size=640, vid_stride=1):
        # Expand the input into a flat, sorted file list (glob / directory / file).
        files = []
        for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
            p = str(Path(p).resolve())
            if '*' in p:
                files.extend(sorted(glob.glob(p, recursive=True)))  # glob pattern
            elif os.path.isdir(p):
                files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))  # every file in the directory
            elif os.path.isfile(p):
                files.append(p)
            else:
                raise FileNotFoundError(f'{p} does not exist')
        
        # Partition by extension; all images are served before any videos.
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)
        
        self.img_size = img_size
        self.files = images + videos
        self.nf = ni + nv  # total number of files
        self.video_flag = [False] * ni + [True] * nv  # True where files[i] is a video
        self.mode = 'image'  # current mode: 'image' or 'video'
        self.vid_stride = vid_stride  # keep every Nth video frame
        
        if any(videos):
            self._new_video(videos[0])  # open the first video up front
        else:
            self.cap = None
        self.frame = 0  # frame index within the current video
        self.count = 0  # index of the current file
    
    def __iter__(self):
        self.count = 0
        return self
    
    def __next__(self):
        """Return the next (path, im, im0, cap, log_string) tuple."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        
        if self.video_flag[self.count]:
            # Video: read vid_stride frames (keeping the last), rolling over
            # to the next file when the current video is exhausted.
            self.mode = 'video'
            for _ in range(self.vid_stride):
                ret_val, im0 = self.cap.read()
                if not ret_val:
                    self.count += 1
                    self.cap.release()
                    if self.count == self.nf:  # last video finished
                        raise StopIteration
                    else:
                        path = self.files[self.count]
                        self._new_video(path)
                        ret_val, im0 = self.cap.read()
            
            self.frame += 1
            s = f'video {self.count + 1}/{self.nf} ({self.frame}) {path}: '
        else:
            # Still image: load the whole file at once.
            self.count += 1
            im0 = cv2.imread(path)
            if im0 is None:
                raise FileNotFoundError(f'Image Not Found {path}')
            s = f'image {self.count}/{self.nf} {path}: '
            self.mode = 'image'
        
        # The raw frame doubles as the model input (returned twice).
        return path, im0, im0, self.cap, s
    
    def _new_video(self, path):
        # Open a new capture and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    
    def __len__(self):
        return self.nf  # number of files

class LoadStreams:
    """Iterator over live video streams (webcam / RTSP / HTTP) for inference.

    Yields (sources, im0s, im0s, None, log_string) where ``im0s`` is a list
    containing one BGR frame per stream. A stream that fails to read is
    re-opened and retried once.
    """

    def __init__(self, sources='0', img_size=640, vid_stride=1):
        """
        Args:
            sources: webcam index ('0'), a single stream URL, a '*.txt' file
                listing one source per line, or a list of sources.
            img_size (int): nominal inference size (stored; frames are not resized here).
            vid_stride (int): keep every Nth frame of each stream.
        """
        self.mode = 'stream'
        self.img_size = img_size
        self.vid_stride = vid_stride

        # BUGFIX: previously only numeric strings were wrapped in a list, so a
        # URL string was iterated character by character below. Wrap any string
        # source, and expand a '*.txt' file into one source per line.
        if isinstance(sources, str):
            if sources.endswith('.txt') and os.path.isfile(sources):
                sources = Path(sources).read_text().split()
            else:
                sources = [sources]

        n = len(sources)
        self.sources = [clean_str(x) for x in sources]  # sanitized source names
        self.cap = [None] * n
        self.ims = [None] * n  # latest frame per stream

        for i, s in enumerate(self.sources):
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100  # guard against bogus FPS values
            self.frames = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # inf for live streams
            self.cap[i] = cap
            ret, self.ims[i] = cap.read()  # warm-up read
            if not ret:
                raise ValueError(f'Failed to read frame from {s}')

        self.n = n
        self.count = 0

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Read the next frame from every stream, re-opening dead streams."""
        self.count += 1

        im0s = []
        for i in range(self.n):
            im0 = None
            for _ in range(self.vid_stride):
                ret_val, im0 = self.cap[i].read()
                if not ret_val:
                    # Stream dropped: re-open it and retry once.
                    self.cap[i].release()
                    self.cap[i] = cv2.VideoCapture(0 if self.sources[i] == '0' else self.sources[i])
                    ret_val, im0 = self.cap[i].read()
            # BUGFIX: append once per stream (the last strided frame). Previously
            # the append was inside the stride loop, so vid_stride > 1 produced
            # vid_stride frames per stream and broke the batch alignment in run().
            im0s.append(im0)

        s = f'stream {self.count}: '

        return self.sources, im0s, im0s, None, s

    def __len__(self):
        return len(self.sources)

def clean_str(s):
    """Remove special characters unsafe for window titles/filenames from *s*."""
    pattern = r'[|@#!¡·$€%&()=?¿^*;:,¨´><+]'  # characters stripped outright
    return re.sub(pattern, '', s)

def parse_opt():
    """Build the CLI argument parser and return the parsed options."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    # Model and data sources
    add('--weights', nargs='+', type=str, default=ROOT / 'yolov8n.pt', help='model path')
    add('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
    add('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
    # Inference parameters
    add('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size')
    add('--conf-thres', type=float, default=0.25, help='confidence threshold')
    add('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
    add('--max-det', type=int, default=1000, help='maximum detections per image')
    add('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    # Display and persistence
    add('--view-img', action='store_true', help='show results')
    add('--save-txt', action='store_true', help='save results to *.txt')
    add('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    add('--save-crop', action='store_true', help='save cropped prediction boxes')
    add('--nosave', action='store_true', help='do not save images/videos')
    # Filtering and augmentation
    add('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    add('--augment', action='store_true', help='augmented inference')
    add('--visualize', action='store_true', help='visualize features')
    add('--update', action='store_true', help='update all models')
    # Output directory layout
    add('--project', default=ROOT / 'runs/detect', help='save results to project/name')
    add('--name', default='exp', help='save results to project/name')
    add('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    # Rendering options
    add('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    add('--hide-labels', action='store_true', help='hide labels')
    add('--hide-conf', action='store_true', help='hide confidences')
    # Runtime options
    add('--half', action='store_true', help='use FP16 half-precision inference')
    add('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    add('--vid-stride', type=int, default=1, help='video frame-rate stride')
    add('--fatigue-detection', action='store_true', help='enable fatigue detection')
    return parser.parse_args()

def main(opt):
    """Entry point: unpack the parsed CLI namespace into run()."""
    kwargs = vars(opt)
    run(**kwargs)

if __name__ == "__main__":
    # CLI entry point: parse arguments and launch inference.
    main(parse_opt())