# 2019 edited by Philip Gao
# coding:utf-8

import argparse
import sys
import numpy as np
import time
import cv2
import face
from array_storage import the_face_dic

# Timestamp (1-second resolution) of the most recent console report.
# add_overlays() compares against this to suppress duplicate prints
# within the same second.
temple_time = time.strftime("%Y-%m-%d %X")

def add_overlays(frame, faces, frame_rate, proba):
    """Draw detection boxes, name labels and an FPS counter onto *frame*.

    Args:
        frame: BGR image (numpy array); drawn on in place.
        faces: iterable of detection objects exposing ``bounding_box``,
            ``probability`` and ``name`` attributes, or None.
        frame_rate: current frames-per-second value to display.
        proba: confidence threshold; detections at or below it (or with
            no name) are drawn as "unknown".

    Side effects: mutates *frame*, prints at most one report per second
    (tracked via the module-global ``temple_time``).
    """
    global temple_time
    if faces is not None:
        # NOTE: the loop variable must NOT be called ``face`` -- that
        # would shadow the imported ``face`` module used elsewhere.
        for detected in faces:
            face_bb = detected.bounding_box.astype(int)
            if detected.probability > proba and detected.name is not None:
                # Recognized face: red box, name and confidence in percent.
                cv2.rectangle(frame,
                              (face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),
                              (0, 0, 255), 2)
                pro = detected.probability * 100  # probability as a percentage
                cv2.putText(frame, '%s: %.1f%%' % (detected.name, pro),
                            (face_bb[0], face_bb[3]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
                            thickness=2, lineType=2)
                # Report at most once per second to avoid duplicate lines.
                now = time.strftime("%Y-%m-%d %X")
                if now != temple_time:
                    # .get() falls back to the raw label so an unmapped
                    # name cannot raise KeyError.
                    display_name = the_face_dic.get(detected.name, detected.name)
                    print(now + ' 发现%s: %.1f%%' % (display_name, pro))
                    temple_time = now
            else:
                # Low-confidence or unnamed detection: green "unknown" box.
                cv2.rectangle(frame,
                              (face_bb[0], face_bb[1]), (face_bb[2], face_bb[3]),
                              (0, 255, 0), 2)
                cv2.putText(frame, 'unknown', (face_bb[0], face_bb[3]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                            thickness=2, lineType=2)
                # Same once-per-second throttling for unknown reports.
                now = time.strftime("%Y-%m-%d %X")
                if now != temple_time:
                    print('发现unknown: ' + now)
                    temple_time = now

    cv2.putText(frame, str(frame_rate) + " fps", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                thickness=2, lineType=2)
# Video source table indexed by position; element 0 is the local USB
# camera, element 1 a local test video, the rest are RTSP camera URLs.
address= [
        0, #0  local USB camera
        "D:/Philip/test_angelababy.flv", #1  local test video file
        "rtsp://test:abc123456@192.168.3.38:554/Streaming/Channels/1", #2  1F main building lobby 1
        "rtsp://test:abc123456@192.168.3.40:554/Streaming/Channels/1", #3  1F main building lobby 2
        "rtsp://test:abc123456@192.168.3.41:554/Streaming/Channels/1", #4  1F main building elevator entrance
        "rtsp://test:abc123456@192.168.3.55:554/Streaming/Channels/1", #5  3F main building elevator entrance
        "rtsp://test:abc123456@192.168.3.190:554/Streaming/Channels/1", #6 3F annex building elevator entrance
        "rtsp://test:abc123456@192.168.3.191:554/Streaming/Channels/1", #7 3F office corner
        "rtsp://test:abc123456@192.168.3.192:554/Streaming/Channels/1", #8 3F office far end
        "rtsp://test:abc123456@192.168.3.203:554/Streaming/Channels/1", #9 main building elevator interior
        "rtsp://test:abc123456@192.168.3.200:554/Streaming/Channels/1", #10 outdoor dome camera
    ]

def main(args):
    """Run the capture / detect / annotate / display loop.

    Args:
        args: parsed argparse namespace with ``mode`` (1 = USB camera,
            2 = video file, other = RTSP stream), ``debug``,
            ``probability`` and ``save`` attributes.
    """
    frame_interval = 5  # run face detection every N frames (3 is ideal but slower)
    fps_display_interval = 5  # seconds between FPS recomputations
    frame_rate = 0
    frame_count = 0
    faces = None  # last detection result, reused on non-detection frames

    # 1: camera mode, 2: video mode, others: rtsp mode
    if args.mode == 1:
        url = 0
    elif args.mode == 2:
        url = 'C:/Philip_train/test_part/video/test_out_door.avi'
    else:
        url = address[8]

    # face rec start
    face_recognition = face.Recognition()
    start_time = time.time()

    # loading video data
    video_capture = cv2.VideoCapture(url)
    print(video_capture.isOpened())
    print('video rate: ' + str(video_capture.get(5)))  # property 5 = CAP_PROP_FPS

    # Define the codec and create VideoWriter object.
    out = None
    if args.save:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # Output size must match the 0.8-scaled frames written below.
        out = cv2.VideoWriter('output.avi', fourcc, 20.0,
                              (int(1920 * 0.8), int(1080 * 0.8)))

    if args.debug:
        print("Debug enabled")
        face.debug = True

    while video_capture.isOpened():
        # Capture frame-by-frame.
        ret, frame = video_capture.read()
        if not ret:
            # End of stream or read failure -- stop instead of passing a
            # None frame to cv2.imshow (which raises an error).
            break

        if args.mode == 1:
            # Frame needs a horizontal flip when using the USB camera.
            frame = cv2.flip(frame, 1)
        else:
            frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Recompute the displayed FPS every fps_display_interval seconds.
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate, args.probability)

        frame_count += 1
        # Write the annotated frame.
        if args.save:
            out.write(frame)

        cv2.imshow('Frame', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture.
    video_capture.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()


def parse_arguments(argv):
    """Parse command-line arguments.

    Args:
        argv: list of argument strings (normally ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``mode`` (int), ``debug`` (bool),
        ``probability`` (float) and ``save`` (bool).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('mode', type=int,
        help='Choose the video mode, 0: url, 1: self usb camera, 2: video file')
    parser.add_argument('--debug', action='store_true',
                        help='Enable some debug outputs.')
    parser.add_argument('--probability', type=float,
                        help='the threshold value.', default=0.72)
    # store_true makes --save a proper boolean flag; the old
    # default=False form required a value and treated ANY string
    # (even "False") as truthy.
    parser.add_argument('--save', action='store_true',
                        help='save the annotated video to output.avi')
    return parser.parse_args(argv)


if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the recognition loop.
    main(parse_arguments(sys.argv[1:]))
