# Launch command:
# python3 deep_learning_object_detection.py -p deploy.prototxt -m mobilenet_iter_73000.caffemodel

# 导入第三方库
# from imutils.video import VideoStream
# from imutils.video import FPS
import numpy as np
import argparse
# import imutils
import time
import cv2

# Build the command-line argument parser and parse the arguments.
ap = argparse.ArgumentParser(
    description="MobileNet-SSD object detection on a webcam stream")
# Optional still-image path. The original launch example passed -i but the
# option was commented out, so that command failed with "unrecognized
# arguments"; accept it again (detection itself runs on the webcam below).
ap.add_argument("-i", "--image",
                help="path to input image (optional, currently unused)")
ap.add_argument("-p", "--prototxt", default='./deploy.prototxt',
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", default='./mobilenet_iter_73000.caffemodel',
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.3,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# Class labels the SSD model was trained on (index 0 is the background
# class), plus one random BGR color per class for drawing bounding boxes.
CLASSES = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor",
]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# Load the serialized MobileNet-SSD Caffe model via OpenCV's dnn module.
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args['prototxt'], args['model'])


# Open the video stream.
print("[INFO] loading video...")
# vs=VideoStream(src=1).start()
# time.sleep(5.0)
# fps=FPS().start()
# print(vs)

# Open the default camera (device index 0) with OpenCV.
cap =cv2.VideoCapture(0)
def openCap():
    """Wait until the capture device is open, then start the detection loop.

    Retries ``cap.open(0)`` every 2 seconds until the camera becomes
    available. The original version retried via unbounded recursion, which
    grows the call stack on every failed attempt (and would eventually hit
    the recursion limit if the camera never opens); a plain loop is used
    instead. Uses the module-level ``cap`` and calls ``startRead()``.
    """
    while not cap.isOpened():
        print('视频打开中……')
        cap.open(0)
        # Give the device a moment before re-checking.
        time.sleep(2.0)
    print('视频打开成功！')
    startRead()

def startRead():
    """Read frames from the global ``cap``, run MobileNet-SSD detection on
    each one, and display annotated frames until 'q' is pressed.

    Uses the module-level ``cap``, ``net``, ``args``, ``CLASSES`` and
    ``COLORS``. Boxes/labels are drawn for every detection whose confidence
    exceeds ``args['confidence']``.
    """
    while True:
        # Grab one frame. On a failed read, wait briefly and retry —
        # the original fell through and crashed in cv2.resize(None).
        ret, frame = cap.read()
        if not ret or frame is None:
            print("--(!) No captured frame -- Break!")
            time.sleep(1.0)
            continue

        # Frame dimensions, used to scale the normalized detection boxes.
        # cap.get() returns floats; cast so drawing coordinates are ints.
        (h, w) = (int(cap.get(4)), int(cap.get(3)))

        # Convert the frame into a dnn blob (300x300 input; 0.007843 scale
        # and 127.5 mean are the standard MobileNet-SSD preprocessing).
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                     0.007843, (300, 300), 127.5)

        # Forward pass through the network.
        net.setInput(blob)
        detections = net.forward()

        # Draw every detection above the confidence threshold.
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > args['confidence']:
                idx = int(detections[0, 0, i, 1])
                # Box coords are normalized [0,1]; scale to pixel space.
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                # Put the label above the box, or below it near the top edge.
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        cv2.imshow('Frame', frame)

        # Quit when 'q' is pressed.
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

openCap()







