# import cvzone
import torch

from preparation import *
from yolov5.utils.general import (non_max_suppression, scale_coords, cv2,
                                  xyxy2xywh)
from yolov5.utils.plots import Annotator, colors
from yolov5.utils.torch_utils import time_sync
from oak_Cam import OakCam



# --- runtime state -----------------------------------------------------------
# Cumulative per-stage timings: [preprocess, inference, NMS, tracker].
dt = [0.0, 0.0, 0.0, 0.0]
# Frames processed so far, plus per-stage timestamps filled in the main loop.
seen = 0
t1 = t2 = t3 = t4 = t5 = t6 = t7 = 0
ts = time_sync()

# --- optional recording of the demo as a video file --------------------------
# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
cap_fps = 30
# video = cv2.VideoWriter('result.mp4', fourcc, cap_fps, (1920,1080))

# --- camera ------------------------------------------------------------------
oakCam = OakCam()
msg = oakCam.get_msg()

# ---------------------------------------------------------------------------
# Main loop: grab frame -> preprocess -> YOLOv5 inference -> NMS -> draw.
# Press 'q' in the display window to quit.
# ---------------------------------------------------------------------------
while True:

    # Next frame from the OAK camera message generator (original-size BGR).
    img0 = next(msg)[0]

    # Letterbox to model input size, then build a batch of one in CHW order.
    img = letterbox(img0, imgsz, stride=stride)[0]
    img = np.array([img])
    img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
    img = np.ascontiguousarray(img)

    t1 = time_sync()
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # normalize to [0, 1]
    t2 = time_sync()
    dt[0] += t2 - t1  # preprocessing time

    # Inference
    pred = model(img, augment=augment, visualize=visualize)
    t3 = time_sync()
    dt[1] += t3 - t2  # inference time

    # Non-maximum suppression
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes,
                               agnostic_nms, max_det=max_det)
    dt[2] += time_sync() - t3  # NMS time

    # The batch holds exactly one frame, so pred has exactly one entry.
    det = pred[0]
    seen += 1

    # NOTE(review): `ascii` here is the Python builtin (always truthy), so
    # pil is always False — confirm this matches the intent.
    annotator = Annotator(img0, line_width=1, pil=not ascii, font_size=1)

    if len(det):
        # Rescale boxes from model-input space back to the original frame.
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

        # Draw one labelled box per detection: class name + confidence %.
        for xyxy, conf, cls in zip(det[:, 0:4], det[:, 4], det[:, 5]):
            c = int(cls)  # integer class index
            label = f'{names[c]} {conf * 100:.2f}%'
            annotator.box_label(xyxy, label, color=colors(c, True))

    t6 = time_sync()
    img0 = annotator.result()

    # Per-frame FPS. Fixed: the original divided by (t5 - t1), but t5 was
    # only updated when detections existed; t6 is refreshed every frame.
    fps = 15 if t6 - t1 <= 0 else 1 / (t6 - t1)

    # cvzone.putTextRect(img0, f' YOLO:({t3 - t2:.3f}s), FPS:({fps :.2f})', (0, 20), scale=1, thickness=1)

    cv2.imshow('demo', img0)

    # video.write(img0)

    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break

# video.release()

