import cv2
import time
import numpy
import torch
import base64
import requests
import io
from PIL import Image
import config as cfg
import pickle
from collections import Counter
from models.facenet.facenet import Facenet
from utils import plot_one_box
from numpy import load, argmin, array, zeros
from face_recognition import face_distance, face_locations, face_landmarks


DEBUG = False  # True: send() only prints the name instead of POSTing to the server

def image_to_base64(image_np):
    """Encode a BGR image array as a base64 string of its JPEG bytes."""
    jpeg_bytes = cv2.imencode(".jpg", image_np)[1].tobytes()
    return base64.b64encode(jpeg_bytes).decode()


def send(name, image):
    """Report a recognized person to the server.

    POSTs the name, a base64 JPEG snapshot and a timestamp as JSON.
    When DEBUG is set, only prints the name and makes no request.
    Network errors are caught and logged, not raised.
    """
    if DEBUG:
        print(name)
        return

    url = ''
    payload = {
        "userName": name,
        "downWordImg": image_to_base64(image),
        "downWordTime": time.strftime("%Y-%m-%d %H:%M:%S"),
    }
    try:
        response = requests.post(url, json=payload).text
    except Exception as e:
        print('send fail with {}'.format(e))
        return
    print(response)

def compute_IOU(rec1, rec2):
    """Compute the intersection-over-union (IoU) of two boxes.

    NOTE(review): the previous docstring claimed ``(x0, y0, x1, y1)``
    corner boxes, but the arithmetic below is only consistent with the
    ``(top, right, bottom, left)`` order produced by
    ``face_recognition.face_locations`` — which is exactly what
    ``iou_match`` passes in. Documented accordingly.

    :param rec1: box as (top, right, bottom, left), y growing downward.
    :param rec2: box as (top, right, bottom, left).
    :return: IoU in [0.0, 1.0]; 0.0 when the boxes do not overlap.
    """
    # Intersection extents on each axis.
    top_max = max(rec1[0], rec2[0])
    bottom_min = min(rec1[2], rec2[2])
    left_max = max(rec1[3], rec2[3])
    right_min = min(rec1[1], rec2[1])
    # No overlap vertically or horizontally.
    if top_max >= bottom_min or right_min <= left_max:
        return 0.0
    # Overlapping case: IoU = intersection / union.
    area1 = (rec1[2] - rec1[0]) * (rec1[1] - rec1[3])
    area2 = (rec2[2] - rec2[0]) * (rec2[1] - rec2[3])
    inter = (right_min - left_max) * (bottom_min - top_max)
    return inter / (area1 + area2 - inter)


def iou_match(old_frame, frame):
    """Match current detections to the previous frame's detections by IoU.

    Each element of both lists starts with a box (``item[0]`` is fed to
    ``compute_IOU``).

    :param old_frame: detections from the previous frame.
    :param frame: detections from the current frame.
    :return: dict mapping index in ``frame`` -> index of its
             best-overlapping entry in ``old_frame``; detections with no
             positive overlap are omitted. Ties keep the earliest old index.
    """
    matches = {}
    for new_idx, new_item in enumerate(frame):
        scored = [
            (compute_IOU(old_item[0], new_item[0]), old_idx)
            for old_idx, old_item in enumerate(old_frame)
        ]
        scored = [pair for pair in scored if pair[0] > 0]
        if scored:
            # max(..., key=...) keeps the first maximum, matching the
            # original first-wins tie behavior.
            matches[new_idx] = max(scored, key=lambda pair: pair[0])[1]
    return matches

def detect_img(img, codeing_model, knn, thres=0.9, size_thres=100*100):
    """Detect and identify faces in one BGR frame.

    :param img: BGR image array as read by cv2.
    :param codeing_model: torch module mapping a (N, 3, 160, 160) float
        batch to one embedding vector per face.
    :param knn: fitted KNN classifier (sklearn-style ``kneighbors`` /
        ``predict``) over face embeddings.
    :param thres: maximum 1-NN distance to accept the predicted name;
        farther faces are labelled 'unknow'.
    :param size_thres: unused in this body — presumably a minimum face
        area filter; TODO confirm intent before relying on it.
    :return: (annotated image, msg) where msg is a list of
        ``[box, [name], 1]`` entries; box is (top, right, bottom, left)
        scaled back to the full-size image.
    """
    # Shrink the input 4x to speed up detection (sacrifices small faces);
    # [:, :, ::-1] reverses the channel order (BGR -> RGB) for face_locations.
    small_img = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)[:, :, ::-1]
    boxes = face_locations(small_img, model='hog')
    boxes = array(boxes)*4  # map the detection boxes back onto the original image

    if len(boxes) == 0:
        return img, []

    faces = []
    for box in boxes:
        # Crop the face: rows top:bottom, columns left:right.
        face = img[box[0]:box[2], box[3]:box[1]]
        faces.append(cv2.resize(face, (160, 160)) / 255.)
        # Encode the cropped faces below.
    faces = array(faces)
    with torch.no_grad():
        # NHWC -> NCHW float batch for the torch encoder.
        faces = torch.from_numpy(faces.transpose(0, 3, 1, 2)).type(torch.FloatTensor)
        features = codeing_model(faces).detach().numpy()

    msg = []
    for box, feature in zip(boxes, features):
        feature = [feature]
        closest_distances = knn.kneighbors(feature, n_neighbors=1)
        # Accept the KNN label only when the nearest neighbour is close enough.
        name = knn.predict(feature)[0] if closest_distances[0][0][0] <= thres else 'unknow'
        img = plot_one_box(array(box), img, label=name, line_thickness=1)
        msg.append([box, [name], 1])
    
    return img, msg


def detect_video(codeing_model, video_path=0):
    """Run the face-recognition loop over a video stream.

    Reads frames from ``video_path`` (0 = default webcam), recognizes
    faces via ``detect_img`` with a pickled KNN classifier, tracks each
    face across frames by IoU, and announces a person once one name
    dominates enough accumulated detections. Press 'q' in the preview
    window to stop.

    Fixes over the original: the pickle file handle is now closed, the
    capture/window are released on exit, the unused fps computation is
    removed, and the loop gained an exit key (it previously ignored
    ``cv2.waitKey`` and could only be killed externally).

    :param codeing_model: torch module producing face embeddings.
    :param video_path: cv2.VideoCapture source (device index or path).
    """
    capture = cv2.VideoCapture(video_path)
    # Close the classifier file instead of leaking the handle.
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # this path must stay trusted.
    with open('data\knnd3.pickle', 'rb') as f:
        knn = pickle.load(f)

    # Thresholds deciding when a tracked face is reported.
    send_thr = 15   # detections required before considering a report
    conf_thr = 0.8  # fraction of detections that must agree on one name

    # old_frame: previous frame's entries [box, [name, ...], send_flag].
    old_frame = []
    try:
        while True:
            ref, frame = capture.read()
            if not ref:
                print('get video fail')
                time.sleep(1)
                continue

            # img: annotated frame; msg: [box, [name], 1] per face.
            img, msg = detect_img(frame, codeing_model, knn)
            match = iou_match(old_frame, msg)
            for i, item in enumerate(msg):
                if i in match:
                    # Extend the name history and inherit the send flag.
                    msg[i][1] = old_frame[match[i]][1] + item[1]
                    msg[i][2] = old_frame[match[i]][2]
                find_times = len(msg[i][1])
                if find_times > send_thr:
                    # Majority vote over the accumulated names.
                    most_comm = Counter(msg[i][1]).most_common(1)
                    name, times = most_comm[0]
                    if times > find_times*conf_thr and msg[i][2]:
                        # send(name, frame)
                        print(name)
                        msg[i][2] = 0  # report each tracked face only once

            old_frame = msg

            cv2.imshow('video', img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release the camera and close the preview window.
        capture.release()
        cv2.destroyAllWindows()



if __name__=='__main__':
    # Build the FaceNet encoder in inference mode and load the
    # pretrained MobileNet weights onto the CPU.
    weights_path = 'models/facenet/facenet_mobilenet.pth'
    encoder = Facenet(mode='predict').eval()
    encoder.load_state_dict(
        torch.load(weights_path, map_location='cpu'), strict=False)

    # Run recognition on the default webcam stream.
    detect_video(codeing_model=encoder, video_path=0)