import torch
from torch import nn
import numpy as np

from utiils.utills_functions import non_max_suppression,scale_coords

from utiils.utills_functions import letterbox

class DetectMultiBackend(nn.Module):
    """YOLOv5 multi-backend inference wrapper; only the ONNX Runtime path is implemented here."""

    def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
        """Load an ONNX model and read stride/class-names from its metadata.

        Args:
            weights: path to an ONNX model file (or a list whose first item is the path).
            device: torch device; only consulted to decide whether CUDA providers are requested.
            dnn, data, fuse: accepted for signature compatibility; unused on the ONNX path.
            fp16: accepted for compatibility; the ONNX path always runs FP32 (see below).

        Raises:
            NotImplementedError: if the model metadata does not carry 'stride'/'names'.
        """
        super().__init__()
        import ast  # local import, matching the onnxruntime import style below

        w = str(weights[0] if isinstance(weights, list) else weights)
        fp16 = False  # ONNX path always runs FP32, regardless of the fp16 argument
        nhwc = False  # inputs stay in torch BCHW layout
        onnx = True
        stride = 32  # default stride; overridden by model metadata below
        cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA

        import onnxruntime
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
        # BUG FIX: providers were computed but never passed, so CUDA was silently never used.
        session = onnxruntime.InferenceSession(w, providers=providers)
        output_names = [x.name for x in session.get_outputs()]
        meta = session.get_modelmeta().custom_metadata_map  # e.g. {'names': "{0: 'person'}", 'stride': '32'}
        if 'stride' in meta:
            # literal_eval instead of eval: metadata is data from a model file, not trusted code
            stride, names = int(meta['stride']), ast.literal_eval(meta['names'])
        else:
            raise NotImplementedError(f'ERROR: {w} is not a supported format')

        self.__dict__.update(locals())  # assign all locals to self (bypasses nn.Module.__setattr__)

    def forward(self, im, augment=False, visualize=False):
        """Run inference on batch `im` (BCHW, torch tensor or numpy array).

        Returns the single raw output when the model has one output, else a
        list of outputs (numpy arrays from ONNX Runtime).
        """
        b, ch, h, w = im.shape  # batch, channel, height, width
        if self.fp16 and im.dtype != torch.float16:
            im = im.half()  # to FP16
        if self.nhwc:
            im = im.permute(0, 2, 3, 1)  # torch BCHW -> BHWC

        # BUG FIX: this was an `elif`, leaving `y` unbound whenever the nhwc branch ran.
        if self.onnx:  # ONNX Runtime
            if isinstance(im, torch.Tensor):
                im = im.cpu().numpy()  # ONNX Runtime requires numpy input
            y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})

        return y[0] if len(y) == 1 else [x for x in y]


class Person_YOLO5_Detector():
    """Person detector built on a YOLOv5 ONNX model.

    Loads weights from `conf["weights_blindperson"]`; when called with a BGR
    image it returns detections filtered to class 0, with boxes mapped back
    to the original image's coordinate space.
    """

    def __init__(self, conf):
        """Read inference settings from the `conf` mapping and load the model."""
        self.model_weights_path = conf["weights_blindperson"]
        self.img_size = int(conf["img_size"])
        self.conf_thres = float(conf["conf_thres"])
        self.num_classes = conf["classes"]
        # BUG FIX: the original ternary returned conf["device"] on both branches.
        self.device = torch.device(conf["device"])
        self.iou_thres = float(conf["iou_thres"])
        self.max_det = int(conf["max_det"])
        self.agnostic_nms = False
        self.line_thickness = 3
        self.hide_labels = False
        self.hide_conf = False
        self.half = bool(conf["half"])
        self.dnn = False
        # NOTE(review): unused in this class (preprocessing reads self.model.stride);
        # 3 looks like a typo for the YOLOv5 default 32 — confirm before relying on it.
        self.stride = 3
        self.auto = True
        self.load_model()

    def load_model(self):
        """Instantiate the multi-backend wrapper from the configured weights."""
        print('Loading YOLO model..')
        self.model = DetectMultiBackend(self.model_weights_path, device=self.device,
                                        dnn=self.dnn, fp16=self.half)

    def image_preprocess(self, img):
        """
        Pre-process the img before it is fed to the object detection network.

        Input: raw BGR image data (HWC ndarray)
        Output: pre-processed image data (float32 ndarray, shape (1, 3, h, w), values in [0, 1])
        """
        stride = self.model.stride
        img = letterbox(img, self.img_size, stride=stride)[0]
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        img = img.astype(np.float32)
        img /= 255  # 0 - 255 to 0.0 - 1.0
        if len(img.shape) == 3:
            img = img[None]  # expand for batch dim
        return img

    def __call__(self, image):
        """Detect persons in `image` (BGR ndarray).

        Returns:
            (detections, names): detections is a tensor whose rows are
            [x1, y1, x2, y2, conf, cls] in original-image coordinates,
            filtered to class 0; names is the model's class-name mapping.
        """
        img = self.image_preprocess(image)
        # Run inference; the ONNX backend returns a numpy array.
        pred = self.model(img)
        name = self.model.names
        # NMS expects a torch tensor.
        pred = torch.from_numpy(pred)
        pred = non_max_suppression(pred, conf_thres=self.conf_thres, iou_thres=self.iou_thres,
                                   classes=None, agnostic=self.agnostic_nms)
        # Map boxes from the letterboxed input back to the original image scale.
        pred[0][:, :4] = scale_coords(img.shape[2:], pred[0][:, :4], image.shape).round()
        # Keep only class 0 (person) detections.
        pred[0] = pred[0][torch.where(pred[0][:, -1] == 0)]

        return pred[0], name  # e.g. tensor([[1489., 83., 1598., 429., 0.93354, 0.]])









