import argparse
import os
import sys
import cv2
import torch
from pathlib import Path
from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.data.loaders import LoadStreams, LoadScreenshots, LoadImagesAndVideos
from ultralytics.utils.checks import check_file
from ultralytics.utils.files import increment_path
from ultralytics import YOLO
from Dlib_Utils import DlibUtils

# Resolve this script's location and make the project root importable.
FILE = Path(__file__).resolve()
ROOT = FILE.parent  # YOLO root directory
_root_str = str(ROOT)
if _root_str not in sys.path:
    sys.path.append(_root_str)  # add ROOT to PATH
# Re-express ROOT relative to the current working directory.
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))


def run(
        weights="yolov8x.pt",
        source=None,
        device="cpu",
        project=ROOT / 'runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,
        save_txt=False,  # save results to *.txt
):
    """Run YOLO object detection plus Dlib facial-landmark overlay on a source.

    Args:
        weights (str | Path): path to the YOLO weights file.
        source (str | int | Path): image/video path, directory, URL, webcam
            index (e.g. "0"), a *.txt list of streams, or "screen".
        device (str): "0" selects CUDA (when available); anything else is CPU.
        project (str | Path): root directory for saved results.
        name (str): run subdirectory under ``project``.
        exist_ok (bool): reuse an existing project/name dir without incrementing.
        save_txt (bool): also create a ``labels/`` subdirectory for *.txt output.
    """
    source = str(source)
    # BUG FIX: the original `suffix[1:] in IMG_FORMATS, VID_FORMATS` built a
    # 2-tuple (always truthy), so `is_file` was never False and URL streams
    # were misclassified. Test membership in the union of both format sets.
    is_file = Path(source).suffix[1:].lower() in set(IMG_FORMATS) | set(VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    # Stream input: webcam index, *.txt list of streams, or a non-file URL.
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    screenshot = source.lower().startswith('screen')
    if is_url and is_file:
        source = check_file(source)  # download remote file locally

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Dataloader: streams (webcam/URL), screenshots, or images/videos.
    if webcam:
        dataset = LoadStreams(source)
    elif screenshot:
        dataset = LoadScreenshots(source)
    else:
        dataset = LoadImagesAndVideos(source)

    # Load the YOLOv8 model and move it to the requested device.
    # Guard with torch.cuda.is_available() so device="0" on a CPU-only host
    # degrades gracefully instead of raising.
    model = YOLO(f"{weights}")
    model.to("cuda" if device == "0" and torch.cuda.is_available() else "cpu")

    # Dlib face-detection helper (singleton per DlibUtils' own design).
    dlib_utils = DlibUtils()
    for path, img, info in dataset:
        # Inference: one Results object per input image.
        preds = model.predict(img)
        for pred in preds:
            orig_img = pred.orig_img
            if isinstance(orig_img, torch.Tensor):
                # CHW float tensor in [0, 1] -> HWC uint8 numpy for OpenCV.
                orig_img = (orig_img.detach().permute(1, 2, 0).contiguous() * 255).to(
                    torch.uint8).cpu().numpy()
            if pred.boxes is not None:
                for d in reversed(pred.boxes):
                    cls_idx = int(d.cls)
                    conf = float(d.conf)
                    track_id = None if d.id is None else int(d.id.item())
                    cls_name = pred.names[cls_idx]
                    label = ("" if track_id is None else f"id:{track_id} ") + cls_name
                    box = d.xyxy.squeeze()
                    x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
                    # BUG FIX: compare the raw class name, not the id-prefixed
                    # label ("id:3 person" != "person"), so tracked persons also
                    # get facial-landmark processing. Threshold: conf > 0.5.
                    if cls_name == "person" and conf > 0.5:
                        # Crop the person region and run Dlib landmark detection
                        # (offsets x1, y1 map clip coords back to the full frame).
                        img_clip = orig_img[y1:y2, x1:x2]
                        facial_list = dlib_utils.detect_facial_landmarks(img_clip, x1, y1)
                        for ear, left_eye_hull, right_eye_hull in facial_list:
                            # Draw both eye contours.
                            cv2.drawContours(orig_img, [left_eye_hull], -1, (0, 255, 0), 1)
                            cv2.drawContours(orig_img, [right_eye_hull], -1, (0, 255, 0), 1)
                    # Draw the bounding box and its label.
                    cv2.rectangle(orig_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    cv2.putText(orig_img, f"{label} {conf:.2f}", (x1, y1 - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # BUG FIX: display once per prediction. The original called imshow
            # inside the per-box loop, so frames without detections were never
            # shown and frames with many boxes were redrawn redundantly.
            cv2.imshow("safety_driving", orig_img)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()


def parse_opt():
    """Parse command line arguments.

    Returns:
        argparse.Namespace: parsed options matching run()'s keyword arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights", type=str, default="../weights/detect_blink_belt.pt", help="initial weights path")
    # BUG FIX: default must be the string "0", not the int 0 — argparse only
    # applies `type` to values read from the command line, so an int default
    # silently bypassed the declared str type.
    parser.add_argument("--source", type=str, default="0", help="video file path")
    parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    return parser.parse_args()


def main(opt):
    """Forward the parsed CLI options to run() as keyword arguments."""
    options = vars(opt)
    run(**options)


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)
