
import torch
from models.experimental import attempt_load
from utils.datasets import  LoadImages
from utils.general import non_max_suppression, scale_coords, xyxy2xywh,  set_logging,get_image_size,get_result_by_xywh
from utils.torch_utils import select_device


class YOLO_BIN:
    """YOLOv5-based detector that reports selected COCO classes under custom labels.

    Loads the model once in ``__init__``; call :meth:`detect_single_image`
    repeatedly. Only the classes listed in ``CLASS_LABELS`` are reported;
    everything else YOLO detects is ignored.
    """

    # COCO class name -> custom label prefix used in the result string.
    # "bicycle"/"motorcycle" collapse to "bike"; "cat"/"dog" collapse to "pets".
    CLASS_LABELS = {
        "tie": "tie_",
        "person": "pers",
        "bicycle": "bike",
        "motorcycle": "bike",
        "cat": "pets",
        "dog": "pets",
    }

    def __init__(self, weights="./weights/yolov5x.pt", device="0",
                 conf_thres=0.25, iou_thres=0.45):
        """Load the YOLOv5 model onto the requested device.

        Args:
            weights: path to the YOLOv5 checkpoint (loaded as FP32).
            device: "cpu", or a CUDA device string such as "0" or "0,1,2,3".
            conf_thres: confidence threshold passed to NMS.
            iou_thres: IoU threshold passed to NMS.
        """
        self.weights = weights
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        set_logging()

        self.device = select_device(device)
        # Half precision is only supported on CUDA.
        self.half = self.device.type != 'cpu'
        # Load FP32 model, then convert to FP16 when running on GPU.
        self.model = attempt_load(self.weights, map_location=self.device)
        if self.half:
            self.model.half()

    def detect_single_image(self, img_abs_path, img_size=640):
        """Detect objects of interest in one image.

        Args:
            img_abs_path: absolute path of the image file.
            img_size: letterbox size fed to the model (default 640).

        Returns:
            The concatenation of ``get_result_by_xywh`` strings for every
            detection whose class appears in ``CLASS_LABELS``. Returns ""
            (never None) when nothing of interest is found.
        """
        model = self.model
        stride = int(model.stride.max())  # model stride
        dataset = LoadImages(img_abs_path, img_size=img_size, stride=stride)
        names = model.names
        image_size = get_image_size(img_abs_path)

        result_all = ""
        for path, img, shape in dataset:
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0-255 -> 0.0-1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension

            pred = model(img, augment=False)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.conf_thres, self.iou_thres,
                                       classes=None, agnostic=False)

            for det in pred:  # detections for this (single) image
                if not len(det):
                    continue
                # Normalization gain w,h,w,h -- assumes `shape` yielded by the
                # (project-modified) LoadImages is the original image shape
                # (h, w, ...); TODO confirm against utils.datasets.LoadImages.
                gn = torch.tensor(shape)[[1, 0, 1, 0]]
                # Rescale boxes from the letterboxed size back to the original.
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], shape).round()

                for *xyxy, conf, cls in reversed(det):
                    label_prefix = self.CLASS_LABELS.get(names[int(cls)])
                    if label_prefix is None:
                        continue  # class not of interest
                    # x, y: box centre; w, h: box size -- all normalized 0..1.
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
                    x, y, w, h = xywh
                    label = '{}{:.2f}'.format(label_prefix, conf)
                    result_all += get_result_by_xywh(x, y, w, h, label, image_size)

        return result_all




