import argparse
import time
from pathlib import Path
import cv2
import torch
import numpy as np
from numpy import random

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel


class Detector:
    """
    Object detector based on YOLOv7.

    Wraps model loading, image preprocessing, inference, NMS and
    detection post-processing behind `detect` / `detect_class`.
    """

    def __init__(self, device_id="0",
                 img_size=640,
                 weight="weights/2022-11-25-chock.pt",
                 conf_thres=0.25,
                 iou_thres=0.45):
        """
        Args:
            device_id : cuda device, i.e. 0 or 0,1,2,3 or cpu
            img_size : inference size (pixels)
            weight : model.pt path
            conf_thres : object confidence threshold
            iou_thres : NMS IoU threshold
        """
        # Initialize
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        self.device = select_device(device_id)
        # FP16 inference is disabled; flip to True only on CUDA devices.
        self.half = False
        # Load model
        model = attempt_load(weight, map_location=self.device)  # load FP32 model
        model.to(self.device).eval()
        if self.half:
            model.half()  # to FP16
        self.model = model
        # DataParallel-wrapped models expose names on .module
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.stride = int(self.model.stride.max())  # model stride
        self.img_size = check_img_size(img_size, s=self.stride)  # ensure img_size is a stride multiple

    def preprocess(self, img=None):
        """Convert an OpenCV BGR image into a model-ready tensor.

        Args:
            img : image that opencv reads (HxWx3 BGR uint8)

        Returns:
            (tensor, img0) — the normalized NCHW tensor on the model
            device, and an untouched copy of the original image.

        Raises:
            ValueError: if no image is given.
        """
        if img is None:
            raise ValueError("preprocess() requires an image")
        img0 = img.copy()
        # Padded resize to the network input size
        img = letterbox(img, self.img_size, stride=self.stride)[0]
        # Convert BGR HWC -> RGB CHW
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        return img, img0

    def _infer(self, img):
        """Shared pipeline: preprocess, forward pass, NMS, box rescale.

        Returns:
            (pred, img0, t_nms) — per-image detection tensors with boxes
            rescaled to img0 coordinates, the original image, and the
            timestamp taken right after NMS (for caller-side timing).
        """
        tensor, img0 = self.preprocess(img)
        t1 = time_synchronized()
        # Calculating gradients would cause a GPU memory leak
        with torch.no_grad():
            pred = self.model(tensor)[0]
        t2 = time_synchronized()
        print(f'Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference')
        # Apply NMS
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres)
        t3 = time_synchronized()
        print(f'Done. ({(1E3 * (t3 - t2)):.1f}ms) NMS')
        # Rescale boxes from padded inference size back to img0 size
        for det in pred:
            if len(det):
                det[:, :4] = scale_coords(tensor.shape[2:], det[:, :4], img0.shape).round()
        return pred, img0, t3

    def detect(self, img=None):
        """Detect all objects in an image.

        Args:
            img : image that opencv reads

        Returns:
            (pred, info) — raw NMS output, and an info dict with
            'box_nums' and 'raw_img' plus, when detections exist,
            'boxes' (xyxy), 'scores' and 'class_ids' numpy arrays.
        """
        pred, img0, t3 = self._infer(img)
        info = {'box_nums': 0, 'raw_img': img0}
        if (pred is not None) and (len(pred) >= 1) and (pred[0] is not None):
            prediction = pred[0].cpu().numpy()
            info['boxes'] = prediction[:, 0:4]      # predicted boxes (xyxy)
            info['scores'] = prediction[:, 4]       # prediction confidences
            info['class_ids'] = prediction[:, 5]    # class indices
            info['box_nums'] = prediction.shape[0]  # number of boxes
        t4 = time_synchronized()
        print(f'Done. ({(1E3 * (t4 - t3)):.1f}ms) Processing')
        return pred, info

    def detect_class(self, img=None, class_names=None):
        """Detect objects, keeping only the given class names.

        Args:
            img : image that opencv reads
            class_names : iterable of class-name strings to keep;
                None/empty keeps nothing.

        Returns:
            info dict with 'box_nums', 'raw_img', and per-detection
            lists 'boxes', 'scores', 'class_ids'.
        """
        # Hoist membership target into a set for O(1) lookups.
        wanted = set(class_names) if class_names is not None else set()
        pred, img0, t3 = self._infer(img)
        info = {'box_nums': 0, 'raw_img': img0,
                'boxes': [], 'scores': [], 'class_ids': []}
        if (pred is not None) and (len(pred) >= 1) and (pred[0] is not None):
            prediction = pred[0].cpu().numpy()
            for row in prediction:
                if self.names[int(row[5])] in wanted:
                    info['boxes'].append(row[0:4])    # predicted box (xyxy)
                    info['scores'].append(row[4])     # prediction confidence
                    info['class_ids'].append(row[5])  # class index
            # Number of boxes kept after class filtering
            info['box_nums'] = len(info['boxes'])
        t4 = time_synchronized()
        print(f'Done. ({(1E3 * (t4 - t3)):.1f}ms) Processing')
        return info


if __name__ == "__main__":
    # Smoke test: load the model and print every class name it knows.
    detector = Detector(weight="weights/yolov7_hq_safeguard.pt")
    for name in detector.names:
        print(f"{name}")