import time
import cv2
import numpy as np
import torch
import math
from FaceDetect import FaceDetectAPI,VideoRecoder
from Rec.models import parse_args,resnet101
from torchvision import transforms
from PIL import Image
from Rec.align_faces import get_reference_facial_points, warp_and_crop_face

# Edge length (pixels) of the square face crop fed to the embedding network.
im_size = 112

# Preprocessing pipelines keyed by phase. 'train' adds flip/color-jitter
# augmentation; 'val' is the deterministic inference path used below.
# Mean/std are the standard ImageNet channel statistics.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.125, contrast=0.125, saturation=0.125),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# Module-level transform used at inference time by FaceRecognAPI.GetFeature.
transformer = data_transforms['val']
# imgs_0[idx] = get_image(transformer, filepath, flip=False)


def align_face(raw, facial5points):
    """Align a face in `raw` (BGR image) to the canonical landmark layout.

    `facial5points` holds the five detected landmarks; it is reshaped to
    (2, 5) as expected by `warp_and_crop_face`. Returns a square crop of
    `im_size` x `im_size` pixels.
    """
    landmarks = np.reshape(facial5points, (2, 5))
    size = (im_size, im_size)

    # Reference landmark positions for the chosen crop geometry.
    ref_pts = get_reference_facial_points(
        size,
        0.25,      # inner_padding_factor
        (0, 0),    # outer_padding
        True)      # default_square

    return warp_and_crop_face(raw, landmarks, reference_pts=ref_pts, crop_size=size)


class FaceRecognAPI:
    """Face-recognition wrapper around an InsightFace ResNet-101 model.

    Loads pretrained weights once at construction and exposes helpers to
    align faces, extract embedding vectors, and compare embeddings by
    angular distance (degrees).
    """

    def __init__(self):
        args = parse_args()
        model = resnet101(args)
        # Choose the device BEFORE loading weights so torch.load can remap
        # CUDA-saved tensors onto CPU-only machines (without map_location
        # the original code crashed on hosts without a GPU).
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print("found device: ", self.device)
        weight_path = "./weights/insight-face-v3.pt"
        state_dict = torch.load(weight_path, map_location=self.device)
        model.load_state_dict(state_dict)
        self.model = model.to(self.device)
        self.model.eval()  # inference mode: freezes dropout / batch-norm stats

    def GetAlignFace(self, img, landmark):
        """Return `img` cropped and aligned to the 5-point `landmark` set."""
        return align_face(img, landmark)

    def GetFeature(self, img_aligned):
        """Compute the embedding for one aligned face image.

        `img_aligned` is an HxWx3 uint8 array (112x112 expected — TODO
        confirm against the alignment crop size). Returns a 1-D numpy array.
        """
        with torch.no_grad():
            pil_img = Image.fromarray(img_aligned)
            # unsqueeze(0) builds the batch dimension directly; the original
            # preallocated a zeros tensor and moved data to the device twice.
            batch = transformer(pil_img).unsqueeze(0).to(self.device)
            features = self.model(batch)
            return features.cpu().numpy()[0]

    def GetFeatures(self, img, list_landmarks):
        """Embed every face in `img`; returns a list of embedding arrays."""
        return [self.GetFeature(self.GetAlignFace(img, lm))
                for lm in list_landmarks]

    def EvalCosDis(self, feature0, feature1):
        """Angular distance in degrees between two embeddings.

        0 means identical direction, 90 means orthogonal. Input vectors are
        normalized, so magnitude does not matter.
        """
        x0 = feature0 / np.linalg.norm(feature0)
        x1 = feature1 / np.linalg.norm(feature1)
        # Clip guards acos against tiny floating-point overshoot past ±1.
        cosine = np.clip(np.dot(x0, x1), -1.0, 1.0)
        return math.degrees(math.acos(cosine))


if __name__=="__main__":
   
    """src="/home/guo/eng/Face/Pytorch_Retinaface/faceVideo3.avi"
#     src="rtsp://admin:qd123456@10.39.245.166:554/Streaming/Channels/1"
    vc=cv2.VideoCapture(src)
    res,img_raw=vc.read()
    vr=VideoRecoder(img_raw,"faceVideo_0.avi")
    fd=FaceDetectAPI(img_raw)
    fr=FaceRecognAPI()
    # testing begin
    counter=0
    while True:
        face_flag=False
        image_path = "./curve/test.jpg"
#         img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
        res,img_raw=vc.read()        
        if res==False:
            continue
        img_write=img_raw.copy()
        counter+=1
        if counter%1000==0:
            print(counter)
        list_face,list_landmarks=fd.Detect(img_raw)
        list_features=fr.GetFeatures(img_raw, list_landmarks)                   
        img_dis=fd.DrawFaces(img_raw, list_face)
        vr.feed(img_write)
        cv2.imshow("img_dis",img_dis)
        cv2.waitKey(1)"""
    
    
    #src="/home/guo/eng/Face/Pytorch_Retinaface/faceVideo3.avi"
#     src="rtsp://admin:qd123456@10.39.245.166:554/Streaming/Channels/1"
    #vc=cv2.VideoCapture(src)
    #res,img_raw=vc.read()
    #vr=VideoRecoder(img_raw,"faceVideo_0.avi")
    image_path = "./curve/a86bav90410472093.jpg"
    img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
    start = time.time() 
    fd=FaceDetectAPI(img_raw)
    fr=FaceRecognAPI()
    end = time.time()
    print("load model time cost(s): ", end - start)

    # testing begin
    #counter=0
    #while True:
    #face_flag=False
    for i in range(10):
        image_path = "./curve/a86bav90410472093.jpg"
        img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)

        start = time.time()
        list_face,list_landmarks=fd.Detect(img_raw)
        end = time.time()
        print("detect face time cost(s): ", end - start)

        start = time.time()
        list_features=fr.GetFeatures(img_raw, list_landmarks)                   
        end = time.time()
        print("get features time cost(s): ", end - start)
        print("lenght of list_features: ", len(list_features))
        print("type of list_features: ", type(list_features))
        print("lenght of feature: ", len(list_features[0]))
        print("lenght of feature: ", len(list_features[1]))
        print("type of feature: ", type(list_features[1]))
    
