import websockets
import asyncio
import face_model
import argparse
from datasource import load_data_from_sqlite
import base64
import cv2
import numpy as np
import json
import sqlite3
import threading
import logging
from track.sort import Sort
import time
from queue import Queue
import sqlitehelper

logger = logging.getLogger(__name__)

# SORT multi-object tracker shared by the reader/tracking/recognition threads.
tracker = Sort()
# Queue of (track_id, aligned_face_crop) pairs awaiting recognition.
recog_queue = Queue()
# Track ids still pending recognition; an id is discarded once matched.
trk_id_set = set()
# track id -> recognized user id (filled by RecogFaceThread).
trk_id__user_id = {}
# track id -> [gender, age] (filled only when rec_ga is True).
trk_id_ga = {}
# Whether to run gender/age estimation alongside face recognition.
rec_ga = False
# Id of the camera this process serves (looked up in the sys_device table).
CAMERA_ID = 1


def arg_parse():
    """Parse command-line options for the face recognition service.

    :return: argparse.Namespace carrying camera mode, model paths, detector
             choice, gpu id, image size, flip flag and the verification
             distance threshold.
    """
    arg_parser = argparse.ArgumentParser(description='face model test')
    # general
    arg_parser.add_argument('--camera', type=int, default=1, help='1：library; 0: show')
    arg_parser.add_argument('--image-size', default='112,112', help='')
    arg_parser.add_argument('--model', default='model/model-r100-ii/model,0', help='path to load model.')
    arg_parser.add_argument('--ga-model', default='ga-model/model,0', help='path to load model.')
    arg_parser.add_argument('--gpu', default=0, type=int, help='gpu id')
    arg_parser.add_argument(
        '--det', default=2, type=int,
        help='mtcnn option, 1 means using R+O, 0 means detect from begining,2 meanus retinaface-r50')
    arg_parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
    arg_parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
    return arg_parser.parse_args()


# region+ load model
print('start to load model!')  # fixed: original logged "done" before loading started
args = arg_parse()

model = face_model.FaceModel(args)

# Warm-up: run one detection on a bundled test image so the first real
# request does not pay the lazy model-initialization cost.
pre_load = cv2.imread('RetinaFace/t3.png')
resize_pre_load = cv2.resize(pre_load, (480, 270))
ret = model.detector.detect(resize_pre_load, 0.8, do_flip=False)

print('load model done!')
# endregion
# region+ load data
print('start to load features database')
# data      — per-user rows from the database; indexed by compare()'s match
#             index (row[0] is the user id, row[1] the name — see the reg_*
#             functions below).
# user_dict — user id (str) -> user name.
# peoples   — feature matrix used by compare(); one row per stored feature.
# NOTE(review): the original comment referenced person_Ids / recent_Features /
# peoplesIndex, which are not returned here — verify against
# datasource.load_data_from_sqlite.
data, user_dict, peoples = load_data_from_sqlite()
print('load features database done')


# endregion

def get_name_by_userId(Id):
    """Return the display name stored for a user id.

    Bug fix: the original returned ``user_dict[str(id)]`` — ``id`` is the
    *builtin* function, not the ``Id`` parameter — so every call raised
    KeyError on the key ``'<built-in function id>'``.

    :param Id: user id (int or str); keys of ``user_dict`` are strings.
    :return: the user's name.
    :raises KeyError: when the id is not present in ``user_dict``.
    """
    return user_dict[str(Id)]


# Read an image file containing a single face and return that face's
# bounding box (left, top, right, bottom) plus its feature vector.
def get_feature_face_from_file(img_file):
    """Extract the bounding box and feature vector of the single face in a file."""
    frame = cv2.imread(img_file)
    bbox, aligned = model.get_input(frame)
    return bbox, model.get_feature(aligned)


def get_feature_faces_from_file(img_file):
    """Detect every face in an image file and extract one feature per face.

    :param img_file: path to the image.
    :return: (boxes, features) — detection boxes and the matching feature
             vectors, in the same order.
    """
    frame = cv2.imread(img_file)
    boxes, aligned_faces = model.get_inputs(frame)
    features = [model.get_feature(aligned_faces[i]) for i in range(boxes.shape[0])]
    return boxes, features


# Given a BGR (cv2) image array containing a single face, return that face's
# bounding box (left, top, right, bottom) plus its feature vector.
def get_feature_face_from_bgrarray(img_array):
    """Extract the bounding box and feature vector of the single face in a BGR array."""
    bbox, aligned = model.get_input(img_array)
    feature = model.get_feature(aligned)
    return bbox, feature


def get_feature_faces_from_bgrarray(img_array):
    """Detect all faces in a BGR image array and extract features for the
    usable ones.

    Faces narrower or shorter than 40 px are skipped — too small to produce
    a reliable feature vector.

    :param img_array: BGR image (numpy array) as produced by cv2.
    :return: (face_boxes, features) — parallel lists; both empty when no
             usable face was found.
    """
    # `trackers` carries the detector's Kalman-filter state; unused here.
    boxes, imgs, trackers = model.get_inputs(img_array)
    features = []
    face_boxes = []
    # len() handles both a list and a numpy array; the original `boxes == []`
    # comparison is fragile (elementwise/ambiguous) when boxes is an ndarray.
    if len(boxes) == 0:
        return face_boxes, features
    for box, img in zip(boxes, imgs):
        # Skip faces smaller than 40x40 pixels.
        if box[2] - box[0] < 40 or box[3] - box[1] < 40:
            continue
        features.append(model.get_feature(img))
        face_boxes.append(box)
    return face_boxes, features


def compare(arrayobj):
    """Identify a face feature vector against the global feature database.

    Computes the cosine similarity between ``arrayobj`` and every row of the
    module-level ``peoples`` feature matrix and keeps the best-scoring row.

    NOTE: the original docstring described a three-stage pipeline (database /
    recent-features / stranger library with 0.6 and 0.8 thresholds); only the
    single database pass with a 0.5 threshold is implemented here.

    :param arrayobj: 1-D feature vector (anything ``np.asarray`` accepts).
    :return: ``(matched, index, similarity)`` — ``matched`` is True when the
             best cosine similarity exceeds 0.5; ``index`` is the row of the
             best match in ``peoples``; ``similarity`` is its score.
    """
    global peoples
    query = np.asarray(arrayobj)
    scale = np.linalg.norm(query) * np.array(np.linalg.norm(peoples, axis=1))
    similarities = np.dot(query, peoples.T) / scale
    best = np.argmax(similarities)
    return bool(similarities[best] > 0.5), best, similarities[best]


def reg_all_and_format_from_base64(input_data):
    """Decode a base64-encoded image, then detect and recognize every face.

    :param input_data: base64 string of an encoded (e.g. JPEG/PNG) image.
    :return: list of per-face dicts with keys:
             "bbox" — [left, top, width, height];
             "prob" — best cosine similarity, as a string;
             "id"   — user id, or 'u<index>' for strangers;
             "name" — user name (recognized faces only);
             "IsUnKnown" — 1 for strangers, 0 for recognized users.
    """
    raw = base64.b64decode(input_data)
    # np.frombuffer replaces the long-deprecated np.fromstring.
    img_array = np.frombuffer(raw, np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    boxes, features = get_feature_faces_from_bgrarray(img)
    results = []
    # The original compared against [[]], which an empty list never equals,
    # so the early return was dead code; `not boxes` catches the empty case.
    if not boxes:
        return results
    print("Detect {} faces".format(len(boxes)))

    for box, fea in zip(boxes, features):
        temp_ret_result = {}
        # compare() returns True when the feature matched a known user; the
        # original bound this to a variable misleadingly named `IsUnknown`.
        matched, index, prob = compare(fea)
        temp_ret_result["bbox"] = [int(box[0]), int(box[1]), int(box[2] - box[0]), int(box[3] - box[1])]
        temp_ret_result["prob"] = str(round(prob, 4))
        if not matched:
            # Stranger: synthesize an id from the best-match index.
            temp_ret_result["id"] = 'u' + str(index)
            temp_ret_result["IsUnKnown"] = 1
        else:
            temp_ret_result["id"] = str(data[index][0])
            temp_ret_result["name"] = str(data[index][1])
            temp_ret_result["IsUnKnown"] = 0
        results.append(temp_ret_result)
    return results


def reg_all_and_format_from_frame(input_data):
    """Run Kalman tracking on a video frame and report every tracked face.

    :param input_data: BGR frame (numpy array).
    :return: list of per-face dicts with "bbox" ([left, top, width, height]),
             tracker "id", "IsUnKnown" flag and, for recognized faces, "name".
    """
    model.kalman_track(input_data)
    tracked = tracker.trackers
    results = []
    if not tracked:
        print("Detect 0 faces")
        return results
    print("Detect {} faces".format(len(tracked)))
    for trk in tracked:
        state = trk.get_state()
        entry = {
            "bbox": [int(state[0]), int(state[1]),
                     int(state[2] - state[0]), int(state[3] - state[1])],
            "id": str(trk.id),
        }
        # A track without a user id has not been recognized yet — a stranger.
        if trk.user_Id is None:
            entry["IsUnKnown"] = 1
        else:
            entry["name"] = get_name_by_userId(trk.user_Id)
            entry["IsUnKnown"] = 0
        results.append(entry)
    return results


def get_device_addr(device_id):
    """Return the stream address (``addr`` column) of a device.

    :param device_id: value of the ``Id`` column in ``sys_device``.
    :return: the stored address, e.g. an RTSP URL.
    :raises LookupError: when no device with that id exists.
    """
    sql = "select addr from sys_device where Id=?"
    conn = sqlite3.connect('test.db')
    try:
        # Bind as a 1-tuple: the original passed the bare string as the
        # parameter sequence, which only works by accident for single-
        # character ids (sqlite3 treats a string as a sequence of chars).
        row = conn.execute(sql, (device_id,)).fetchone()
    finally:
        # The original leaked the connection on every call.
        conn.close()
    if row is None:
        raise LookupError('no device with Id=%r' % (device_id,))
    return row[0]


# Latest camera frame: written by ReadRtspTread, read by KalmanTrackTread.
newimg = []
# Latest detection results: written by KalmanTrackTread (model.kalman_track),
# consumed by ReadRtspTread's tracker.update() call.
re_bboxes, aligneds, img_size, addtional_attribute_list = [], [], [], []


async def capture(conn, path):
    """Websocket handler: push the current state of every tracked face to the
    connected client as JSON messages in a continuous send loop.

    Message shape: {"errorcode", "result": [per-face dicts], "users_id":
    [recognized user ids], "show_ga": bool}.

    NOTE(review): the client is read only once (the initial ``recv``); after
    that the loop sends continuously with no pacing delay — confirm this
    flood-style streaming is intended.

    :param conn: websockets server-side connection.
    :param path: request path (unused; required by websockets' handler API).
    """
    global re_bboxes, aligneds, img_size, addtional_attribute_list
    global user_dict
    global trk_id_ga, trk_id__user_id, trk_id_set, rec_ga
    # Wait for the client's opening message before streaming.
    msg = await conn.recv()
    while True:
        send_msg = ''
        result = []
        try:
            user_id = []
            for i, tr in enumerate(tracker.trackers):
                box = tr.get_state()
                # Scale boxes back to full resolution.
                # NOTE(review): the x4 factor presumably matches a 1/4 resize
                # somewhere in the tracking pipeline — confirm against the
                # tracker/detector code.
                box = box.astype(np.int32) * 4
                temp_ret_result = {}
                temp_ret_result['id'] = tr.id
                # bbox is reported as [left, top, width, height].
                temp_ret_result["bbox"] = [int(box[0]), int(box[1]), int(box[2] - box[0]), int(box[3] - box[1])]
                temp_ret_result["IsUnKnown"] = 1
                # Attach the recognized user id if the recognition thread has
                # already matched this track.
                if tr.id in trk_id__user_id:
                    tr.user_Id = trk_id__user_id[tr.id]
                    user_id.append(str(tr.user_Id))

                # Gender/age, when estimation is enabled and available.
                if rec_ga and tr.id in trk_id_ga:
                    tr.age = trk_id_ga[tr.id][1]
                    tr.gender = trk_id_ga[tr.id][0]
                    temp_ret_result["age"] = str(tr.age)
                    temp_ret_result["gender"] = str(tr.gender)
                if tr.user_Id is not None:
                    temp_ret_result["user_Id"] = tr.user_Id
                    name = str(user_dict[str(tr.user_Id)])
                    temp_ret_result["name"] = name
                    temp_ret_result["IsUnKnown"] = 0
                result.append(temp_ret_result)
            send_msg = json.dumps({"errorcode": "0", "result": result, "users_id": user_id, "show_ga": rec_ga})
            await conn.send(send_msg)
        except Exception as e:
            # Any failure (typically the client disconnecting) ends the loop.
            print(e)
            print('客户端异常')
            break
    print('断开客户端输出')


class ReadRtspTread(threading.Thread):
    """Thread that reads frames from the camera's RTSP stream into the global
    ``newimg`` and feeds the latest detections to the SORT tracker.

    NOTE(review): "Tread" (sic) — the misspelling is kept because the class
    is instantiated by name at module bottom.
    """

    def __init__(self, threadID, name, counter, event):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        # Set by this thread after the first frame is read, releasing the
        # tracking/recognition threads that wait on it.
        self.event = event

    def run(self):  # Thread body; executed once start() is called.
        print('开始读取流')
        global re_bboxes, aligneds, img_size, addtional_attribute_list
        global newimg, tracker, model
        global data, peoples
        global recog_queue, trk_id_set, trk_id__user_id, trk_id_ga
        # Open the RTSP stream whose URL is stored for this camera.
        cap = cv2.VideoCapture(get_device_addr(str(CAMERA_ID)))
        # Read one frame before signalling so dependents see a valid frame.
        _, newimg = cap.read()
        self.event.set()
        while True:
            _, newimg = cap.read()
            # Feed detections (produced by KalmanTrackTread via the shared
            # globals) to SORT; this also queues unrecognized faces onto
            # recog_queue and saves track crops under 'static/trackers'.
            tracker.update(recog_queue, trk_id_set, trk_id__user_id, trk_id_ga, re_bboxes, aligneds, img_size,
                           'static/trackers',
                           addtional_attribute_list, 1)
            # NOTE(review): cv2.waitKey only sleeps when a HighGUI window is
            # active — confirm this actually throttles the loop to ~25 fps.
            cv2.waitKey(40)


class KalmanTrackTread(threading.Thread):
    """Thread that runs detection + Kalman tracking on the latest frame and
    publishes the results through the shared module-level globals."""

    def __init__(self, threadID, name, counter, event):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        # Set by ReadRtspTread once the first frame has been captured.
        self.event = event

    def run(self):  # Thread body; executed once start() is called.
        print('开始执行卡尔曼滤波')
        global re_bboxes, aligneds, img_size, addtional_attribute_list
        global newimg, model
        # Do not touch ``newimg`` until the reader thread produced a frame.
        self.event.wait()
        while True:
            # Results are consumed by ReadRtspTread's tracker.update() call.
            re_bboxes, aligneds, img_size, addtional_attribute_list = model.kalman_track(newimg)
            # NOTE(review): cv2.waitKey only sleeps when a HighGUI window is
            # active — confirm this actually paces the loop.
            cv2.waitKey(40)


class RecogFaceThread(threading.Thread):
    """Worker that pulls aligned face crops off ``recog_queue`` and runs
    recognition (plus optional gender/age estimation) on them."""

    def __init__(self, threadID, name, counter, event):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
        # Set by the frame-reader thread once the first frame is available.
        self.event = event

    def run(self):
        # Thread body; executed once start() is called.
        print('开始执行人脸识别程序')
        global newimg, model
        global recog_queue, trk_id_set
        global trk_id__user_id, rec_ga
        self.event.wait()
        while True:
            trk_id, aligned = recog_queue.get()
            # Skip tracks that were deleted while the crop sat in the queue.
            if trk_id not in trk_id_set:
                continue
            feature = model.get_feature(aligned)
            if rec_ga:
                gender, age = model.get_ga(aligned)
                # Cached per track so capture() can report gender/age.
                trk_id_ga[trk_id] = [gender, age]
            known, match_index, _score = compare(feature)
            if known:
                matched_user = str(data[match_index][0])
                trk_id__user_id[trk_id] = matched_user
                # Identified: stop queueing recognition for this track.
                trk_id_set.discard(trk_id)
                sqlitehelper.AddUserRecord(matched_user, CAMERA_ID)
                print('trk_id:%d ---------- user_id: %s' % (trk_id, matched_user))


class UpdateDataThread(threading.Thread):
    """Background refresher: reloads users and features from SQLite every
    five seconds so newly enrolled users are recognized without a restart."""

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    def run(self):
        # Poll the database forever, rebinding the module-level structures.
        global data, user_dict, peoples
        print('开始检测数据库')
        while True:
            data, user_dict, peoples = load_data_from_sqlite()
            time.sleep(5)


# Gate: set by ReadRtspTread after the first frame is available; the tracking
# and recognition threads wait on it before touching ``newimg``.
evt_has_frame = threading.Event()
evt_has_frame.clear()

# Frame reader: fills ``newimg`` from the RTSP stream and drives the tracker.
thread1 = ReadRtspTread(1, 'read', 1, evt_has_frame)
thread1.start()

# Detection/Kalman step: turns frames into boxes and aligned face crops.
thread2 = KalmanTrackTread(2, 'kalmantrack', 2, evt_has_frame)
thread2.start()

# Recognition: matches queued face crops against the feature database.
thread3 = RecogFaceThread(3, 'recface', 3, evt_has_frame)
thread3.start()

# Periodic reload of users/features from SQLite.
thread4 = UpdateDataThread(4, 'updateData', 4)
thread4.start()

# Websocket endpoint that streams tracking results to clients.
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# in modern Python — consider asyncio.run with websockets' async serve API.
start_server = websockets.serve(capture, 'localhost', 8100, max_size=2 ** 25)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
