# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail :
# @Date   : 2020-08-22 16:19:37
# --------------------------------------------------------
"""
import sys
import os

sys.path.insert(0, os.path.dirname(__file__))

import cv2
import numpy as np
import easydict
from pybaseutils import image_utils, file_utils, time_utils
from pybaseutils.cvutils import video_utils

# Directory containing this file; used to resolve relative model weight paths.
# (os.path.join() with a single argument was a redundant no-op wrapper.)
project_root = os.path.dirname(__file__)


class Detector(object):
    """Unified front-end over several face / object detectors.

    Supported backends: "mtcnn", "rfb_face", "rfb_face_landm",
    "yolov5_person", "yolov5_hand", "yolov5_pen_tip", "yolov5_hand_pen",
    and "dfsd".  All backends are exposed through the same
    detect() / draw_result() API.
    """

    def __init__(self, net_name="rfb_face", input_size=(), conf_thresh=0.5, nms_thresh=0.3, device="cuda:0"):
        """
        Build the underlying detector (used for pre-annotation).
        :param net_name: mtcnn (recommended)
                mtcnn gives good overall detection results;
                dfsd works well on small faces but poorly on selfie-style images (e.g. ID photos);
                rfb_face works well on small faces but poorly on selfie-style images (e.g. ID photos)
        :param input_size: (width, height) network input size; an empty tuple keeps the model default.
        :param conf_thresh: confidence threshold for keeping detections.
        :param nms_thresh: IOU threshold used by NMS.
        :param device: device string, e.g. "cuda:0" or "cpu".
        :return:
        """
        self.device = device
        self.class_names = None
        # normalize once so every branch below compares against the same form
        self.net_name = net_name.lower()
        top_k = 500        # max candidates kept before NMS
        keep_top_k = 750   # max detections kept after NMS
        if self.net_name == "mtcnn":
            sys.path.append(os.path.join(os.path.dirname(__file__), "mtcnn"))
            from mtcnn.mtcnn_detector import MTCNNDetector
            self.class_names = ["BACKGROUND", "face"]
            self.detector = MTCNNDetector(input_size=[416, None],
                                          conf_thresh=[0.75, 0.85, 0.95],
                                          iou_thresh=[0.7, 0.7, 0.7],
                                          device=device)
        elif self.net_name == "rfb_face":
            from libs.detection.light_detector.light_detector import Detector, root
            from libs.detection.light_detector.detection import ssd_inference
            self.class_names = ["BACKGROUND", "face"]
            cfg = {"model_file": os.path.join(root, "pretrained/pth/rfb_face_416.pth"),  # default model path
                   "net_type": "rfb",
                   "priors_type": "face",
                   "input_size": input_size if input_size else (416, 416),
                   "width_mult": 1.0,
                   "prob_threshold": conf_thresh,
                   "iou_threshold": nms_thresh,
                   "top_k": top_k,
                   "keep_top_k": keep_top_k}
            cfg = easydict.EasyDict(cfg)
            self.detector = ssd_inference.SSDInference(cfg, padding=True, freeze_header=False, device=device)
        elif self.net_name == "rfb_face_landm":
            from libs.detection.light_detector.light_detector import Detector, root
            self.class_names = ["BACKGROUND", "face"]
            cfg = {"model_file": os.path.join(root, "pretrained/pth/rfb_landm_face_416_416.pth"),  # default model path
                   "net_type": "rfb_landm",
                   "priors_type": "face",
                   "input_size": input_size if input_size else (416, 416),
                   "width_mult": 1.0,
                   "prob_threshold": conf_thresh,
                   "iou_threshold": nms_thresh,
                   "top_k": top_k,
                   "keep_top_k": keep_top_k}
            cfg = easydict.EasyDict(cfg)
            self.detector = Detector(cfg, padding=True, freeze_header=False, device=device)
        elif self.net_name.startswith("yolov5"):
            sys.path.insert(0, os.path.join(os.path.dirname(__file__), "yolov5"))
            from yolov5.demo import Yolov5Detector
            models = {"yolov5_person": {"class_names": ["person"],
                                        "weights": "yolov5/data/model/person/yolov5s_640/weights/best.pt"
                                        },
                      "yolov5_hand": {"class_names": ["hand"],
                                      "weights": "yolov5/data/model/hand/yolov5s_640/weights/best.pt"
                                      },
                      "yolov5_pen_tip": {"class_names": ["pen_tip"],
                                         "weights": "yolov5/data/model/pen_tip/yolov5s_640/weights/best.pt"
                                         },
                      "yolov5_hand_pen": {"class_names": ["hand_pen", "hand"],
                                          "weights": "yolov5/data/model/hand_pen/yolov5s_640/weights/best.pt"
                                          },
                      }
            # BUGFIX: index with the lowercased name; the branch matched on
            # self.net_name, so models[net_name] raised KeyError for mixed-case input.
            self.class_names = models[self.net_name]["class_names"]
            weights = models[self.net_name]["weights"]
            weights = os.path.join(project_root, weights)
            self.detector = Yolov5Detector(weights=weights,  # model.pt path(s)
                                           imgsz=640,  # inference size (pixels)
                                           conf_thres=conf_thresh,  # confidence threshold
                                           iou_thres=nms_thresh,  # NMS IOU threshold
                                           max_det=1000,  # maximum detections per image
                                           class_name=None,  # filter by class: --class 0, or --class 0 2 3
                                           classes=None,  # filter by class: --class 0, or --class 0 2 3
                                           agnostic_nms=False,  # class-agnostic NMS
                                           augment=False,  # augmented inference
                                           device=device,  # cuda device, i.e. 0 or 0,1,2,3 or cpu
                                           )
        elif self.net_name == "dfsd":
            self.class_names = ["BACKGROUND", "face"]
            sys.path.append(os.path.join(os.path.dirname(__file__), "dfsd_landmark"))
            from dfsd_landmark.demo import FaceLandmarkDetection
            self.detector = FaceLandmarkDetection(prob_threshold=conf_thresh, iou_threshold=nms_thresh)
        else:
            raise Exception("Error:{}".format(self.net_name))
        print("detect_type:{},class_names:{}".format(self.net_name, self.class_names))

    def start_capture(self, video_file, save_video=None, interval=1, vis=True):
        """
        start capture video
        :param video_file: *.avi,*.mp4,...
        :param save_video: *.avi
        :param interval: process every `interval`-th frame
        :param vis: show each processed frame
        :return:
        """
        video_cap = video_utils.video_iterator(video_file, save_video, interval=interval)
        for data_info in video_cap:
            frame = data_info["frame"]
            result = self.detect(frame, vis=False)
            if vis:
                frame = self.draw_result(frame, result, vis=vis, delay=30)
                data_info["frame"] = frame

    @time_utils.performance()
    def detect(self, image, use_rgb=True, vis=False):
        """
        The models expect RGB input; pass use_rgb=True when `image` is BGR.
        :param image: BGR image when use_rgb=True; RGB image when use_rgb=False
        :param use_rgb: convert BGR->RGB before inference
        :param vis: show the detection result
        :return: dict with keys "boxes", "score", "landm", "label", "names"
        """
        if use_rgb: image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        label, score, boxes, landm = [], [], [], []
        if self.net_name == "mtcnn":
            boxes, score, landm = self.detector.detect(image, vis=False)
            label = [1] * len(boxes)  # mtcnn detects faces only (class index 1)
            score = score.reshape(-1)
        elif "rfb_face_landm" == self.net_name:
            # dets is (xmin,ymin,xmax,ymax,score,label)
            dets, landm = self.detector.detect(image, vis=False)
            if len(dets) > 0: boxes, score, label = dets[:, 0:4], dets[:, 4], dets[:, 5]
        elif "rfb" in self.net_name:
            # dets is (xmin,ymin,xmax,ymax,score,label)
            dets = self.detector.detect(image, vis=False)
            if len(dets) > 0: boxes, score, label = dets[:, 0:4], dets[:, 4], dets[:, 5]
        elif self.net_name.startswith("yolov5"):
            # swaps channels back: `image` is RGB here, and the yolov5 wrapper
            # presumably expects BGR — TODO confirm against Yolov5Detector
            bgr = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            dets = self.detector.detect(bgr)[0]
            boxes, score, label = dets[:, 0:4], dets[:, 4], dets[:, 5]
        elif self.net_name == "dfsd":
            boxes, score, landm = self.detector.detect(image, vis=False)
            label = [1] * len(boxes)
        else:
            raise Exception("Error:{}".format(self.net_name))
        label = np.asarray(label, dtype=np.int32).reshape(-1).tolist()
        result = {"boxes": boxes, "score": score, "landm": landm, "label": label, "names": []}
        # BUGFIX: draw_result() previously had no `use_rgb` parameter, so this
        # call raised TypeError whenever detect(..., vis=True) was used.
        if vis:  self.draw_result(image, result, use_rgb=use_rgb)
        return result

    def get_class_names(self, labels):
        """Map integer class labels to their names via self.class_names."""
        return [self.class_names[int(l)] for l in labels]

    def detect_image_dir(self, image_dir, vis=True):
        """
        Run detection over every image found under image_dir.
        :param image_dir: directory or image file path
        :param vis:<bool>
        :return:
        """
        image_list = file_utils.get_files_lists(image_dir)
        for img_path in image_list:
            print(img_path)
            image = cv2.imread(img_path)
            result = self.detect(image, vis=False)
            if vis: self.draw_result(image, result)

    def draw_result(self, image, result: dict, vis=True, delay=0, use_rgb=False):
        """
        Draw boxes, scores and landmarks on the image.
        :param image: BGR image (RGB when use_rgb=True)
        :param result: dict with "boxes"<np.ndarray>(num_boxes,4) [xmin,ymin,xmax,ymax],
                       "score"<np.ndarray>(num_boxes,), "label"<np.ndarray>(num_boxes,),
                       and optional "landm"/"segms"/"names"
        :param vis: show the annotated image in a window
        :param delay: cv_show_image wait time in ms (0 = wait for a key press)
        :param use_rgb: input image is RGB; convert to BGR before drawing/showing
        :return: the annotated image
        """
        # new trailing parameter keeps existing positional/keyword callers working
        if use_rgb: image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        color = (0, 255, 0)
        boxes = result.get("boxes", [])
        score = result.get("score", [])
        landm = result.get("landm", [])
        segms = result.get("segms", [])
        label = result.get("label", [])
        names = result.get("names", [])
        if not names: names = label
        texts = ["{} {:3.3f}".format(n, s) for n, s in zip(names, score)]
        image = image_utils.draw_image_bboxes_text(image, boxes, texts, color=color, thickness=2,
                                                   fontScale=0.8, drawType="ch")
        image = image_utils.draw_landmark(image, landm, radius=2, fontScale=1.0, color=color)
        if vis: image_utils.cv_show_image("image", image, delay=delay)
        return image


if __name__ == "__main__":
    # Demo: run the dfsd face detector over a directory of test images.
    # Other backends: rfb_face / rfb_face_landm (device="cpu"), yolov5_person,
    # yolov5_hand, mtcnn.  For video input use det.start_capture(video_path, vis=True).
    image_dir = "../../dataset/test_image"
    det = Detector(net_name="dfsd")
    det.detect_image_dir(image_dir, vis=True)
