#!/usr/bin/python3
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#   This example shows how to use dlib's face recognition tool.  This tool maps
#   an image of a human face to a 128 dimensional vector space where images of
#   the same person are near to each other and images from different people are
#   far apart.  Therefore, you can perform face recognition by mapping faces to
#   the 128D space and then checking if their Euclidean distance is small
#   enough. 
#
#   When using a distance threshold of 0.6, the dlib model obtains an accuracy
#   of 99.38% on the standard LFW face recognition benchmark, which is
#   comparable to other state-of-the-art methods for face recognition as of
#   February 2017. This accuracy means that, when presented with a pair of face
#   images, the tool will correctly identify if the pair belongs to the same
#   person or is from different people 99.38% of the time.
#
#   Finally, for an in-depth discussion of how dlib's tool works you should
#   refer to the C++ example program dnn_face_recognition_ex.cpp and the
#   attendant documentation referenced therein.
#

#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
#   You can install dlib using the command:
#       pip install dlib
#
#   Alternatively, if you want to compile dlib yourself then go into the dlib
#   root folder and run:
#       python setup.py install
#
#   Compiling dlib should work on any operating system so long as you have
#   CMake installed.  On Ubuntu, this can be done easily by running the
#   command:
#       sudo apt-get install cmake
#
#   Also note that this example requires Numpy which can be installed
#   via the command:
#       pip install numpy

import sqlite3
import sys
import os
import pika
import json
import pyzbar.pyzbar as pyzbar
import time
import datetime
import cv2
import numpy as np
import torch
import socket
import math
from FaceDetect import FaceDetectAPI,VideoRecoder
from Rec.models import parse_args,resnet101
from torchvision import transforms
from PIL import Image, ImageDraw, ImageFont
from Rec.align_faces import get_reference_facial_points, warp_and_crop_face
from Face2Vec import FaceRecognAPI
import vec2faceid
import camreader
import QRcodeDecoding
import getTemperature
import readConfig
import loger

# Fix the working directory so the relative paths below resolve correctly.
os.chdir(r'/home/aiot/workspace/EpidemicPreventionProj/EP_faceAnalysis')
log = loger.Log("faceanalysis")
camInfoRecordPath = "../info_Cam.txt"  # camera ip/user/password info file
failedUrlRecordPath = "../record_DbOpErr.txt"  # failed DB-operation record file

# frame sizes: vr = visible-radiation camera, ir = infrared camera
imgH_vr = 480 #380
imgW_vr = 640 #672
imgH_ir = 480 #240
imgW_ir = 640 #320
out_win = "showWindow"
# fullscreen display window for the annotated frames
cv2.namedWindow(out_win, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(out_win, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
insteadimg = np.zeros((imgH_vr, imgW_vr, 3), dtype=np.uint8)  # black placeholder frame

bbox_last = []   # face bbox in last frame
recogFaceGap = 1 #5 # if bbox stable, recog face every 5 frames
recogFaceGapCounter = 0 # used for count recogFaceGap
time_lastSaveFace = time.time() # last time found face and save face img's time
saveFaceTimeGap = 5 # only after 5s, can save another jpg file
global last_showinfo # last time shown info: about door open status -- NOTE(review): "global" at module level is a no-op
last_showinfo = ""
last_showinfo_famesNum = 6 # how many frames to show one showinfo

# load config params
conf = readConfig.paramConfiger()
conf.loadConfigParams()

# load face-detection and face-recognition models
img_init = cv2.imread("./frame_vr_resized.jpg", cv2.IMREAD_COLOR) # just need for img size
start = time.time()
log.info("FaceDetectAPI init...")
fd=FaceDetectAPI(img_init)
log.info("FaceDetectAPI init done.")
log.info("FaceRecognAPI init...")
fr=FaceRecognAPI()
log.info("FaceRecognAPI init done.")
end = time.time()
log.debug("load model time cost(s): " + str(end - start))

# udp socket used to push pass records to the PassDB manager
BUFSIZE = 1024
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#ip_port = ('10.39.243.221', 5678)
ip_port = ('127.0.0.1', 5678)
#ip_port = ('127.0.0.1', 9999)

# preload template images f0.png..f9.png consumed by getTemperature
# (presumably display-digit templates for reading the temperature -- confirm)
log.info("initializing read temperature data...")
list_templates = []
for k in range(10):
    img0 = cv2.imread("./templatesDir/f"+str(k)+".png")
    img0gray = cv2.cvtColor(img0,cv2.COLOR_BGR2GRAY)
    list_templates.append(img0gray.astype(int))

# read calib info from file (posCalibM is passed through to getTemperature)
log.info("initializing read calib data...")
posCalibM = []
with open("posCalibData.dat", "r") as f:
    msg = f.read()
msgdict = json.loads(msg)
posCalibM = msgdict["posCalibM"]

#load faceid DB
log.info("loading faceid Database...")
faceRecoger = vec2faceid.faceRecoger()
#reloadDbCountFlag = 20 #9000 # every xxxx frames, reload db

# disabled Chinese-text overlay helper kept for reference
'''class DisplayChinese:
    def __init__(self):
        self.textSize=20
        self.fontText = ImageFont.truetype("./msyh.ttf", self.textSize, encoding="utf-8")

    def DrawImage(self,img,text,pos,color):
        if (isinstance(img, np.ndarray)):  #判断是否OpenCV图片类型
            img = Image.fromarray(img)
        draw = ImageDraw.Draw(img)
        draw.text(pos, text, color, font=self.fontText)
        return np.asarray(img)

#load font
dc = DisplayChinese()'''

def camInfo():
    """
    Read the camera connection info (ip / user / password) from the
    camInfoRecordPath file, logging each value as it is found.

    Lines are expected to look like "IP=...", "USER=...", "PASSWORD=...".
    :return: tuple (ip, user, psw); a field stays "" when its line is absent
    """
    fields = {"IP": "", "USER": "", "PASSWORD": ""}
    labels = {"IP": "cam ip: ", "USER": "cam user: ", "PASSWORD": "cam psw: "}
    with open(camInfoRecordPath) as fh:
        for raw in fh.readlines():
            for key in ("IP", "USER", "PASSWORD"):
                if key in raw:
                    fields[key] = raw.split("=")[1].strip()
                    log.info(labels[key] + fields[key])
    return fields["IP"], fields["USER"], fields["PASSWORD"]


def failedUrlRecord(url, reason):
    """
    Append one failure record (timestamp, reason, url, blank separator line)
    to the failed-DB-operation record file.

    :param url: the url whose processing failed
    :param reason: short human-readable failure reason
    """
    stamp = time.strftime('%Y-%m-%d-%H-%M', time.localtime())
    record = [
        "time: " + stamp + "\n",
        "reason: " + reason + "\n",
        "url: " + url + "\n",
        "\n",
    ]
    with open(failedUrlRecordPath, "a") as file:
        file.writelines(record)


def bboxIOU(rec1, rec2, iou_threshold=0.9):
    """
    Check whether two bounding boxes overlap almost completely.

    Note: despite the name, this returns a boolean verdict, not the IoU
    scalar itself (the original docstring was wrong about that).

    :param rec1: (y0, x0, y1, x1), i.e. (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :param iou_threshold: minimum IoU for the boxes to count as "the same"
                          (default 0.9, the historical hard-coded value)
    :return: True if IoU(rec1, rec2) > iou_threshold, else False
             (also False when the tuples have different lengths)
    """
    if len(rec1) != len(rec2):
        return False

    # areas of the two rectangles
    s_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    s_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

    # edges of the intersection rectangle
    left_line = max(rec1[1], rec2[1])
    right_line = min(rec1[3], rec2[3])
    top_line = max(rec1[0], rec2[0])
    bottom_line = min(rec1[2], rec2[2])

    # no overlap at all -> IoU is zero
    if left_line >= right_line or top_line >= bottom_line:
        iou = 0.0
    else:
        intersect = (right_line - left_line) * (bottom_line - top_line)
        # union = sum of areas minus the double-counted intersection;
        # intersect > 0 guarantees a non-zero denominator here
        iou = intersect / (s_rec1 + s_rec2 - intersect)

    return iou > iou_threshold


def img2vecProcess_Cam(frame_vr, frame_ir, ifFoundQRcode):
    '''
    Detect faces in the visible frame, read each face's temperature from the
    IR frame, and (when the biggest face is stable across frames and no QR
    code was found) compute face feature vectors.

    :param frame_vr: visible-light BGR frame, already imgW_vr x imgH_vr
    :param frame_ir: infrared frame
    :param ifFoundQRcode: True when a QR code was already found this frame
    :return: (isFaceStable, list of feature vectors as plain lists,
              list of temperatures, side-by-side display image,
              list of bbox top-left positions)
    '''
    global showimg, bbox_last

    img_raw = frame_vr

    # detect face
    list_face, list_landmarks = fd.Detect(img_raw, conf)  # input: one image

    # keep only faces whose center lies inside the configured central ROI
    roiw = conf.config_vrFrameRoiRate_x * imgW_vr
    roih = conf.config_vrFrameRoiRate_y * imgH_vr
    roix0 = int((imgW_vr - roiw) / 2)
    roiy0 = int((imgH_vr - roih) / 2)
    roix1 = int(roix0 + roiw)
    roiy1 = int(roiy0 + roih)
    cv2.rectangle(img_raw, (roix0,roiy0), (roix1,roiy1), (255,0,0),3)

    # iterate backwards so pop() does not shift the not-yet-visited indices
    for i in range(len(list_face)-1, -1, -1):
        facecenterx = (list_face[i]['bbox'][0] + list_face[i]['bbox'][2]) / 2
        facecentery = (list_face[i]['bbox'][1] + list_face[i]['bbox'][3]) / 2
        if facecenterx < roix0 or facecenterx > roix1 or facecentery < roiy0 or facecentery > roiy1:
            list_face.pop(i)
            list_landmarks.pop(i)

    # get the person's temperature from the IR frame
    irimg_raw = frame_ir
    isGetTemperatureErr, list_temperatures = getTemperature.getTemperature(irimg_raw, list_face, posCalibM, list_templates, conf)

    # cache bbox positions, find the biggest face, and draw all boxes
    list_bboxpos = []
    bbox_area_max = 0
    bbox_index = -1
    for k in range(len(list_face)):
        face = list_face[k]  # renamed: original shadowed the builtin "dict"
        curbbox_area = (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1])
        if curbbox_area > bbox_area_max:
            bbox_area_max = curbbox_area
            bbox_index = k

        # draw: red when over the fever threshold, green otherwise
        # NOTE(review): assumes list_temperatures has one entry per face -- confirm
        list_bboxpos.append((int(face['bbox'][0]),int(face['bbox'][1])))
        if int(list_temperatures[k])/10 > conf.config_bodyTemperatureThreshold: #37.5
            cv2.rectangle(img_raw, (int(face['bbox'][0]),int(face['bbox'][1])), (int(face['bbox'][2]),int(face['bbox'][3])), (0,0,255),3)
        else:
            cv2.rectangle(img_raw, (int(face['bbox'][0]),int(face['bbox'][1])), (int(face['bbox'][2]),int(face['bbox'][3])), (0,255,0),3)
    irimg_raw = cv2.resize(irimg_raw, (imgW_vr, imgH_vr))
    showimg = np.hstack([img_raw, irimg_raw])

    # face is "stable" when the biggest bbox barely moved since last frame.
    # fixed: original used "is not -1" / "is not []" identity comparisons;
    # "bbox_last is not []" was always True (a literal [] is a fresh object)
    if bbox_index != -1 and len(bbox_last) > 0:
        isFaceStable = bboxIOU(list_face[bbox_index]['bbox'], bbox_last)
    else:
        isFaceStable = False

    # get face feature only on the recogFaceGap-th frame of a stable face
    if ifFoundQRcode == False and isFaceStable and recogFaceGapCounter == 0:
        list_features = fr.GetFeatures(img_raw, list_landmarks) #output: list. len=faces in one img. element in list: numpy.ndarray
    else:
        list_features = []

    # remember this frame's biggest bbox for the next frame's stability check
    if bbox_index != -1:
        bbox_last = list_face[bbox_index]['bbox']
    else:
        bbox_last = []

    # convert np.array features to plain lists (JSON-serializable)
    tmplist = [feat.tolist() for feat in list_features]

    return isFaceStable, tmplist, list_temperatures, showimg, list_bboxpos


def img2QRprocess_Cam(frame_vr, frame_ir):
    '''
    Look for QR/bar codes in the visible-light frame.

    :param frame_vr: visible-light BGR frame; detected codes are outlined in red
    :param frame_ir: infrared frame (only resized and stacked for display)
    :return: (ifFoundQRcode, QRcodeStr, showimg) where QRcodeStr is the
             payload of the last decoded code ('' when none found)
    '''
    img_raw = frame_vr

    # detect QR code; if several codes are present the last payload wins
    barcodeData = ''
    for barcode in pyzbar.decode(img_raw):
        (x, y, w, h) = barcode.rect
        cv2.rectangle(img_raw, (x, y), (x + w, y + h), (0, 0, 255), 2)
        barcodeData = barcode.data.decode("utf-8")

    # side-by-side display image (vr | ir)
    irimg_raw = cv2.resize(frame_ir, (imgW_vr, imgH_vr))
    showimg = np.hstack([img_raw, irimg_raw])

    # fixed: original compared with "is not ''" (identity, not equality)
    ifFoundQRcode = barcodeData != ''

    return ifFoundQRcode, barcodeData, showimg


def process_QRandFaceRecog(frame_vr, frame_ir, channel, showinfo):
    '''
    Run one full analysis pass over a frame pair: QR-code search, face
    detection with temperature reading, face recognition, on-screen display,
    and forwarding of the result to the PassDB manager over UDP.

    :param frame_vr: visible-light BGR frame (already resized)
    :param frame_ir: infrared frame
    :param channel: pika channel (used when inserting good faces into the DB)
    :param showinfo: status text (door open status) drawn on the frame
    '''
    global recogFaceGapCounter, time_lastSaveFace
    recogFaceGapCounter = (recogFaceGapCounter + 1) % recogFaceGap
    ifKnownPerson = False
    start = time.time()

    # detect QR code (optional, controlled by config)
    if conf.config_ifSearchQR:
        ifFoundQRcode, QRcodeStr, showimg = img2QRprocess_Cam(frame_vr, frame_ir)
    else:
        ifFoundQRcode = False

    # detect face, get temperature (this also rebuilds showimg)
    isFaceStable, list_facesfeaturevecs, list_temperatures, showimg, list_bboxpos = img2vecProcess_Cam(frame_vr, frame_ir, ifFoundQRcode)

    # recognize face; save img to a local jpg file;
    # if the feature is very good, insert it into the faceDB
    tempjpgpath = ""
    if ifFoundQRcode == False and isFaceStable and recogFaceGapCounter == 0:
        list_facesid, ifKnownPerson, ifInsertToDb = faceRecoger.vec2faceidProcess(list_facesfeaturevecs, conf)

        # save img (rate-limited to one file per saveFaceTimeGap seconds)
        if ifKnownPerson:
            curtime = time.time()
            if curtime - time_lastSaveFace > saveFaceTimeGap:
                tempjpgpath = "/home/aiot/workspace/EpidemicPreventionProj/EP_imgTmpSpace/passRecord_"+str(curtime)+".jpg"
                cv2.imwrite(tempjpgpath, frame_vr)
                time_lastSaveFace = curtime

        # insert to faceDB
        if ifInsertToDb:
            insertGoodFaceToDb(channel, list_facesfeaturevecs, list_facesid)
    else:
        list_facesid = []
    end = time.time()
    log.debug("time cost: " + str(end - start))

    # draw face ids, fps and the status text, then show the frame
    facenum = len(list_facesid)
    for i in range(facenum):
        posx = list_bboxpos[i][0]
        posy = list_bboxpos[i][1] + 15
        cv2.putText(showimg, str(list_facesid[i]), (posx, posy), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0,0,255))
    fps = 1.0 / ((end - start))
    fps = int(fps * 10) / 10.0
    cv2.putText(showimg, "fps: "+str(fps), (0,imgH_vr-15), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0,0,255))
    cv2.putText(showimg, showinfo, (0,imgH_vr-30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 0, 255))
    cv2.imshow("showWindow", showimg)
    cv2.waitKey(10)

    # send the QR-code result to the PassDB manager
    if ifFoundQRcode:
        try:
            # QR payload is expected to look like "123:123456"
            QRcodeStr = QRcodeStr.split(":")[1]
            QRcodeStrLen = len(QRcodeStr)
            QRcode = int(QRcodeStr)
            passCode = QRcodeDecoding.Decode(QRcode)
            # left-pad the decoded pass code back to the original length
            passCodeStr = str(passCode).zfill(QRcodeStrLen)
            curtime = time.time()
            if curtime - time_lastSaveFace > saveFaceTimeGap and len(list_temperatures) > 0:
                payload = {
                    'type': 2,
                    'id': passCodeStr,
                    'temperature': list_temperatures[0],  # do not consider many faces
                    'jpglocalpath': '',
                }
                client.sendto(json.dumps(payload).encode('utf-8'), ip_port)
                time_lastSaveFace = curtime
        except Exception:
            # fixed: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt
            log.warning("warning: QRcode data is not like: 123:123456.")

    # send the face-recognition result to the PassDB manager
    if ifFoundQRcode == False and ifKnownPerson:
        facenum = len(list_facesid)
        if facenum > 0 and tempjpgpath != "":
            for j in range(facenum): # num of faces in one photo
                # NOTE(review): assumes one temperature entry per face -- confirm
                payload = {
                    'type': 1,
                    'id': list_facesid[j],
                    'temperature': list_temperatures[j],
                    'jpglocalpath': tempjpgpath,
                }
                client.sendto(json.dumps(payload).encode('utf-8'), ip_port)
        else:
            log.warning('not found face.')


def imgpath2vecProcess_DB(inputmsg):
    '''
    Decode a DB-operation message, load its local image, normalize the image
    to the working size, and compute face feature vectors.

    :param inputmsg: JSON string with keys localtmpjpgpath/faceid/operate/url
    :return: (list of feature vectors as plain lists, faceid, operate, url)
    '''
    log.info("inputmsg: " + str(inputmsg))
    msgdict = json.loads(inputmsg)
    localtmpjpgpath = msgdict["localtmpjpgpath"]
    faceid = msgdict["faceid"]
    operate = msgdict["operate"]
    url = msgdict["url"]

    img_raw = cv2.imread(localtmpjpgpath, cv2.IMREAD_COLOR)
    height = img_raw.shape[0]
    width = img_raw.shape[1]
    aspect = width / height
    aspect_standard = imgW_vr / imgH_vr

    # normalize the image to exactly (imgW_vr, imgH_vr)
    if aspect_standard * 0.8 < aspect < aspect_standard * 1.2:
        # aspect close to the target: a plain resize is good enough
        img_raw_resize = cv2.resize(img_raw, (imgW_vr, imgH_vr))
    elif aspect >= aspect_standard * 1.2:
        # too wide: resize to the target height, then crop the horizontal center
        wide = cv2.resize(img_raw, (int(imgH_vr*aspect), imgH_vr))
        offset = int((imgH_vr*aspect-imgW_vr)/2)
        img_raw_resize = wide[0:imgH_vr, offset:offset+imgW_vr]
    else:
        # too tall: resize to the target height and pad the right side black
        tall = cv2.resize(img_raw, (int(imgH_vr*aspect), imgH_vr))
        img_raw_resize = np.zeros((imgH_vr, imgW_vr, 3), dtype=np.uint8)
        img_raw_resize[0:imgH_vr, 0:int(imgH_vr*aspect)] = tall

    # detect landmarks, then extract one feature vector per face
    list_face, list_landmarks = fd.Detect(img_raw_resize,  conf)
    list_features = fr.GetFeatures(img_raw_resize, list_landmarks)

    # convert np.array features to plain lists
    tmplist = [feature.tolist() for feature in list_features]

    # remove the local temp image
    os.remove(localtmpjpgpath)

    return tmplist, faceid, operate, url


def process_faceToVec(ch, msg):
    '''
    Convert a face photo (referenced in a DB-operation message) into a
    feature vector and publish it to the DbVecQueue.

    :param ch: pika channel used to publish the result
    :param msg: JSON message string; see imgpath2vecProcess_DB for the keys
    '''
    list_facesfeaturevecs, faceid, operate, url = imgpath2vecProcess_DB(msg)

    if len(list_facesfeaturevecs) == 0:
        # nothing to publish; record the url so the photo can be re-uploaded
        log.error("err! input photo has no face!")
        failedUrlRecord(url, "photo has no face")
    else:
        # only the first face found in the photo is used.
        # renamed: original shadowed the builtin "dict"
        result = {
            'faceid': faceid,
            'operate': operate,
            'facefeature': list_facesfeaturevecs[0],
        }
        ch.basic_publish(exchange='',
                         routing_key='DbVecQueue',
                         body=json.dumps(result))


def insertGoodFaceToDb(channel, list_facesfeaturevecs, list_facesid):
    """
    Publish a high-quality face feature so it gets added to the face DB.

    Only publishes when exactly one recognized face id and exactly one
    feature vector are present; otherwise it does nothing.

    :param channel: pika channel used to publish the "face_add" message
    :param list_facesfeaturevecs: feature vectors (plain lists, JSON-ready)
    :param list_facesid: recognized face ids
    """
    if len(list_facesid) == 1 and len(list_facesfeaturevecs) == 1: # only 1 face
        # renamed: original shadowed the builtin "dict"
        message = {
            'faceid': list_facesid[0],
            'operate': "face_add",
            'facefeature': list_facesfeaturevecs[0],
        }
        channel.basic_publish(exchange='',
                              routing_key='DbVecQueue',
                              body=json.dumps(message))


if __name__ == "__main__":
    '''
    从rabbitMQ服务器消息队列中取消息
    注意：pika版本为1.x
    '''
    
    # 连接至rabbitMQ服务器
    credentials = pika.PlainCredentials('aiot', 'aiot')
    Parameter = pika.ConnectionParameters('127.0.0.1', 5672, '/', credentials)
    connection = pika.BlockingConnection(Parameter)
    channel = connection.channel()

    # 声明/指定消息队列
    channel.queue_declare(queue='RecogQueue')
    channel.queue_declare(queue='DbImgpathQueue')
    channel.queue_declare(queue='DbVecQueue')
    channel.queue_declare(queue='DbReloadQueue')
    channel.queue_declare(queue='ShowDoorOpenStatusQueue')
    
    # read cam
    # include visible radiation frame and infrared radiation frame
    # using onvif
    ip, user, psw = camInfo()
    while True:
        log.info("init cam ...")
        #src_camera_vr = "rtsp://admin:qd123456@10.39.245.253:554/h265/ch1/main/av_stream"
        #src_camera_ir = "rtsp://admin:qd123456@10.39.245.253:554/h265/ch2/main/av_stream"
        #src_camera_vr = "rtsp://admin:qd123123@10.39.245.249/h265/ch1/main/av_stream"
        #src_camera_ir = "rtsp://admin:qd123123@10.39.245.249/h265/ch2/main/av_stream"
        src_camera_vr = "rtsp://" + user + ":" + psw + "@" + ip + "/h265/ch1/main/av_stream"
        src_camera_ir = "rtsp://" + user + ":" + psw + "@" + ip + "/h265/ch2/main/av_stream"
        rtscap_vr = camreader.RTSCapture.create(src_camera_vr) # visible radiation
        rtscap_ir = camreader.RTSCapture.create(src_camera_ir) # infrared radiation
        rtscap_vr.start_read()
        rtscap_ir.start_read()
        log.info("cam init done.")

        errcode = 0
        errtimes = 0
        unprocessed_img_num = 0
        last_showinfo_famesNumCounter = 0
        while rtscap_vr.isStarted() and rtscap_ir.isStarted():
            if errtimes > 50:
                log.warning("reconnect to IPC.")
                break

            # if cache too many img, slower down the capture fps
            time.sleep(0.05)

            #read frame
            try:
                ok1, frame_vr = rtscap_vr.read_latest_frame()
                ok1, frame_ir = rtscap_ir.read_latest_frame()
                frame_vr = cv2.resize(frame_vr, (imgW_vr, imgH_vr))
                unprocessed_img_num += 1
            except:
                log.warning("get empty frame.")
                errtimes += 1
                continue

            # listen to reload face Db msg 
            r = channel.basic_get(queue='DbReloadQueue', auto_ack=True)
            if r[2] is not None:
                log.info("reload face db now ....")
                msg = r[2].decode("utf-8")
                faceRecoger.reloadDb()

            # listen to show door open status msg
            r = channel.basic_get(queue='ShowDoorOpenStatusQueue', auto_ack=True)
            if r[2] is not None:
                showinfo = r[2].decode("utf-8")
                last_showinfo = showinfo
                last_showinfo_famesNumCounter = 0

            last_showinfo_famesNumCounter += 1
            last_showinfo_famesNumCounter = last_showinfo_famesNumCounter % last_showinfo_famesNum
            if last_showinfo_famesNumCounter == 0:
                last_showinfo = ""

            #detect QR code
            #detect face, get temperature and recognize face
            #also show frame
            process_QRandFaceRecog(frame_vr, frame_ir, channel, last_showinfo)
            unprocessed_img_num -= 1

            #listen to face DB operation msg
            r = channel.basic_get(queue='DbImgpathQueue', auto_ack=True)
            if r[2] is not None:
                msg = r[2].decode("utf-8")
                process_faceToVec(channel, msg)

        rtscap_vr.stop_read()
        rtscap_ir.stop_read()
        rtscap_vr.release()
        rtscap_ir.release()

    # 断开连接
    connection.close()

