
import torch
from models.experimental import attempt_load
from utils.datasets import  LoadImages
from utils.general import check_img_size, check_requirements,  non_max_suppression, scale_coords, xyxy2xywh,  set_logging,get_image_size,get_result_by_xywh
from utils.torch_utils import select_device


class YOLO_V5:
    # Namespace-style holder for detector configuration and the loaded network.
    # NOTE(review): every statement below runs at class-definition time, so
    # merely importing this module loads the model from disk.
    weights = "yolov5s.pt"  # path to the pretrained checkpoint
    conf_thres = 0.25  # confidence threshold passed to NMS

    imgsz = 640  # inference image size (pixels)
    iou_thres = 0.45  # IoU threshold passed to NMS
    set_logging()
    device = select_device("cpu")  # hard-coded to CPU here
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # NOTE(review): with device forced to "cpu" above, `half` is always False,
    # so the FP16 branch below never runs as written.
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    if half:
        model.half()  # to FP16
        
def detect_single_image(img_abs_path, model=None):
    """Run YOLOv5 detection on a single image and print the formatted results.

    Only "tie" and "person" detections are reported; each box is converted to
    normalized xywh (center x, center y, width, height) and rendered through
    get_result_by_xywh.

    BUG FIX: the original body referenced bare names (imgsz, device, half,
    conf_thres, iou_thres) that only exist as YOLO_V5 class attributes, which
    raised NameError at runtime; they are now read from YOLO_V5 explicitly.

    Args:
        img_abs_path: Path to the image file to process.
        model: A loaded YOLOv5 model. Defaults to YOLO_V5.model so the
            one-argument call in __main__ also works.

    Returns:
        The concatenated result string for the last processed frame (also
        printed), or "" when no detections of interest were found.
    """
    if model is None:
        model = YOLO_V5.model
    device = YOLO_V5.device
    half = YOLO_V5.half

    stride = int(model.stride.max())  # model stride
    dataset = LoadImages(img_abs_path, img_size=YOLO_V5.imgsz, stride=stride)
    names = model.names
    image_size = get_image_size(img_abs_path)

    # Label prefix per class of interest; all other classes are ignored.
    # (Prefixes kept byte-identical to the original output strings.)
    class_prefixes = {"tie": "tie_", "person": "pers"}

    result_all = ""
    for path, img, shape in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # scale 0-255 to 0.0-1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension

        pred = model(img, augment=False)[0]
        # Apply NMS
        pred = non_max_suppression(pred, YOLO_V5.conf_thres, YOLO_V5.iou_thres,
                                   classes=None, agnostic=False)

        for det in pred:  # detections per image
            if not len(det):
                continue
            # Normalization gain whwh (original image width/height).
            gn = torch.tensor(shape)[[1, 0, 1, 0]]
            # Rescale boxes from inference size back to original image size.
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], shape).round()

            result_all = ""
            for *xyxy, conf, cls in reversed(det):
                prefix = class_prefixes.get(names[int(cls)])
                if prefix is None:
                    continue  # class not of interest
                # x/y: box center coordinates; w/h: box width/height,
                # all normalized to the image dimensions.
                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
                x, y, w, h = xywh
                label = '{}{:.2f}'.format(prefix, conf)
                result_all += get_result_by_xywh(x, y, w, h, label, image_size)

            print(result_all)

    return result_all


if __name__ == '__main__':
    check_requirements(exclude=('pycocotools', 'thop'))
    # Inference only: disable gradient tracking to save memory and compute.
    with torch.no_grad():
        # BUG FIX: the original call omitted the required `model` argument
        # (TypeError); pass the model that YOLO_V5 loaded at import time.
        detect_single_image("2.jpg", YOLO_V5.model)

