import torch
import torch.nn as nn
import torch.nn.functional as F

import os
import cv2
import numpy as np
import argparse
from utils.box import non_max_suppression, bbox_iou
import json

from modules.retinaface.detector import FaceDetector

# <function detect_img/>
# <function detect_img/>
def detect_img(detector, image, conf_thres=0.5, nms_thres=0.5, waitkey=0):
    """Detect faces in one image, draw boxes/scores/landmarks, and show it.

    Args:
        detector: object providing apply_image(image, conf_thres, nms_thres)
            returning (boxes, scores, landmarks) — presumably a FaceDetector.
        image: BGR image (numpy array); drawn on in place.
        conf_thres: confidence threshold forwarded to the detector.
        nms_thres: NMS threshold forwarded to the detector.
        waitkey: milliseconds passed to cv2.waitKey (0 blocks until a key).

    Returns:
        The key code returned by cv2.waitKey.
    """
    boxes, scores, landmarks = detector.apply_image(image, conf_thres, nms_thres)
    for box, score, landmark in zip(boxes, scores, landmarks):
        # Cast coordinates to int: OpenCV drawing functions reject floats,
        # and detector outputs are typically float arrays.
        x1, y1, x2, y2 = (int(v) for v in box[:4])
        cv2.rectangle( image, (x1, y1), (x2, y2), (0, 255, 0), 1 )
        cv2.putText( image, "{:.4f}".format(score), (x1, y1+12), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255) )
        # Draw the five facial landmarks (was five copy-pasted calls).
        for point in landmark[:5]:
            cv2.circle( image, (int(point[0]), int(point[1])), 1, (0, 0, 255), 2)
    # end-for
    cv2.imshow("frame", image)
    return cv2.waitKey(waitkey)
# </function detect_img>

# <function detect_video/>
# <function detect_video/>
def detect_video(detector, vid):
    """Run face detection frame-by-frame on a video file or camera stream.

    Args:
        detector: detector instance forwarded to detect_img.
        vid: path to a video file (str) or a camera device id (int).

    Raises:
        RuntimeError: if the video stream or file cannot be opened.
    """
    # Create a VideoCapture object and read from the input.
    # If the input is a camera, vid is the device id (int) instead of a path.
    cap = cv2.VideoCapture(vid)
    # Check if the capture opened successfully.
    if not cap.isOpened():
        raise RuntimeError("Error opening video stream or file")
    # end-if
    # Read until the video is completed.
    while cap.isOpened():
        # Capture frame-by-frame.
        ret, frame = cap.read()
        # BUGFIX: check ret BEFORE touching frame — on read failure frame is
        # None and cv2.flip would crash (the original flipped first).
        if not ret:
            break
        # end-if
        if isinstance(vid, int):
            # Mirror camera input so it behaves like a selfie view.
            frame = cv2.flip(frame, 1)
        # end-if
        # Display the resulting frame; press Q on keyboard to exit.
        if detect_img(detector=detector, image=frame, conf_thres=0.5, nms_thres=0.5, waitkey=1) & 0xFF == ord('q'):
            break
        # end-if
    # end-while
    cap.release() # When everything done, release the video capture object
    cv2.destroyAllWindows() # Closes all the frames
# </function detect_video>

if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Parse command-line arguments for the demo.
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_name', type=str, default="retinaface_2020_06_25_18_19_08", help="run name")
    parser.add_argument("--vid", type=str, default="/home/yuda/Videos/face_clip.mp4", help="path to video file demo use.")
    parser.add_argument("--cam", type=int, default=-1, help="device id of camera demo used.")
    demo_args = parser.parse_args()

    # Build the detector from the saved run configuration under ./checkpoints.
    detector = FaceDetector(os.path.join("./checkpoints", demo_args.run_name, "args.json"))

    if demo_args.cam >= 0:
        # A non-negative --cam takes precedence: read from the camera device.
        detect_video(detector, demo_args.cam)
    elif os.path.exists(demo_args.vid):
        detect_video(detector, demo_args.vid)
    else:
        # Previously a missing video path exited silently with no output;
        # report it so the user knows why nothing happened.
        parser.error("video file not found: {}".format(demo_args.vid))