from typing import Union

from fastapi import FastAPI

import gc
import glob
import cv2
import math
import sys
import json
import time
import numpy as np
import mediapipe as mp
import tensorflow as tf
import matplotlib.pyplot as plt
from face import show_keypoint_region, show_keypoint_pair

# mp_drawing_styles = mp.solutions.drawing_styles
# mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
# Single shared FaceMesh instance; at most one face is tracked per frame.
mp_FACE_MESH = mp_face_mesh.FaceMesh(max_num_faces=1)
# Landmark indices of the outer face contour (an oval starting at the
# forehead, running down the left side of the face and back up the right).
# face_oval = mp_face_mesh.FACEMESH_FACE_OVAL
face_oval_ids = [10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377, 152, 148, 176,
                 149, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109, 10]
# Landmark indices of the region below the lower lip (a contour from the left
# side of the jaw across the right side, closed along the lower lip).
xiachunxia_ids = [400, 377, 152, 148, 176, 78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 400]

# Landmark indices around the mouth/lips (zuichun = lips); appears unused in
# this file — presumably kept for other tooling, confirm before removing.
zuichun_ids = [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291, 78, 95,
               88, 178, 87, 14, 317, 402, 318, 324, 308, 78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308]

# NUM_POINTS = 468
# NORMAL_IMAGE_FILES = 'F:/laibo/Data/face/one_meter/high/2022-07-31 110059.JPG'
# ZIYA_IMAGE_FILES = 'F:/laibo/Data/face/one_meter/high/2022-07-31 110101.jpg'
# VIDEO_FILES = ['F:/laibo/Data/face/20220825/094034_bug.mp4']
# VIDEO_FILES_DIR = 'F:/laibo/Data/face/20220811'
# VIDEO_FILES = glob.glob(VIDEO_FILES_DIR + "/" + "*.mov")

# Process every EXTRACT_FRENQUENCY-th frame (1 = every frame).
EXTRACT_FRENQUENCY = 1

app = FastAPI()

# First read the frontal image at frame 0, then scan through the video file,
# skipping frames as configured above.
def normalize_to_coordinate(lm, h, w):
    '''
    Map a normalized landmark to integer pixel coordinates.
    :param lm: landmark object exposing normalized .x and .y
    :param h: height of the original image in pixels
    :param w: width of the original image in pixels
    :return: (x, y) pixel coordinates as integers
    '''
    pixel_x = int(lm.x * w)
    pixel_y = int(lm.y * h)
    return pixel_x, pixel_y

def cal_jianju(id1, id2, faceLms_landmark, h, w):
    '''
    Euclidean pixel distance between two face landmarks.
    :param id1: index of the first landmark
    :param id2: index of the second landmark
    :param faceLms_landmark: indexable landmark collection with normalized .x/.y
    :param h: image height in pixels
    :param w: image width in pixels
    :return: distance in pixels (float)
    '''
    lm1 = faceLms_landmark[id1]
    lm2 = faceLms_landmark[id2]
    # Truncate to integer pixel positions first (same rounding as
    # normalize_to_coordinate) so results match on-screen coordinates.
    x1, y1 = int(lm1.x * w), int(lm1.y * h)
    x2, y2 = int(lm2.x * w), int(lm2.y * h)
    # math.hypot is the idiomatic, numerically stable sqrt(dx^2 + dy^2).
    return math.hypot(x2 - x1, y2 - y1)

def cal_height(id1, id2, faceLms_landmark, h, w):
    '''
    Signed pixel offset between two landmarks along the x axis.
    NOTE(review): despite the name "height", this measures along x, not y —
    presumably intended for a turned/profile view; confirm with callers.
    :param id1: index of the protruding landmark (subtracted)
    :param id2: index of the flat/reference landmark
    :param faceLms_landmark: indexable landmark collection with normalized .x
    :param h: image height in pixels (unused)
    :param w: image width in pixels
    :return: signed offset in pixels (float)
    '''
    base_pt = faceLms_landmark[id1]
    ref_pt = faceLms_landmark[id2]
    return ref_pt.x * w - base_pt.x * w

def cal_mianji(face_lms, ids, h, w):
    '''
    Area (in pixels^2) of the polygon formed by the given landmark indices.
    :param face_lms: landmark result object exposing .landmark[idx].x/.y
    :param ids: ordered landmark indices tracing the closed contour
    :param h: image height in pixels
    :param w: image width in pixels
    :return: contour area as computed by cv2.contourArea
    '''
    contour = [(face_lms.landmark[idx].x * w, face_lms.landmark[idx].y * h)
               for idx in ids]
    return cv2.contourArea(np.array(contour, dtype=np.float32))

def show_pt(faceLm, iw, ih, tmp_img):
    '''
    Draw a single landmark on an image and display it for one second.
    :param faceLm: landmark object with normalized .x/.y
    :param iw: image width in pixels
    :param ih: image height in pixels
    :param tmp_img: image (a copy) to draw on — mutated in place
    :return: None
    '''
    px, py = int(faceLm.x * iw), int(faceLm.y * ih)
    cv2.circle(tmp_img, (px, py), 1, (0, 0, 255), thickness=5)
    # Resizable window so large frames still fit on screen.
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    cv2.imshow("img", tmp_img)
    cv2.waitKey(1000)

def get_faceLms(mp_FACE_MESH, frame):
    '''
    Run FaceMesh on a single frame and return the first face's landmarks.
    :param mp_FACE_MESH: initialized mediapipe FaceMesh instance
    :param frame: single BGR frame (as read by OpenCV)
    :return: landmark collection of the first detected face, or None when
             no face was found
    '''
    # MediaPipe expects RGB while OpenCV delivers BGR; convert a copy and
    # leave the caller's frame untouched. (The original also converted back
    # to BGR afterwards, but that result was never used — dead code removed.)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = mp_FACE_MESH.process(rgb)
    if results.multi_face_landmarks is None:
        return None
    # max_num_faces=1 at construction, so only the first entry matters.
    return results.multi_face_landmarks[0]

def compare_faceLms(referFaceLms, newFaceLms):
    '''
    Decide whether two frames show the face in (nearly) the same pose.
    Compares landmark #10 (top of the forehead) between the two frames;
    the pose counts as unchanged when both normalized offsets stay below 0.01.
    :param referFaceLms: reference-frame landmarks
    :param newFaceLms: candidate-frame landmarks
    :return: True when the face has barely moved, False otherwise
    '''
    dx = abs(referFaceLms.landmark[10].x - newFaceLms.landmark[10].x)
    dy = abs(referFaceLms.landmark[10].y - newFaceLms.landmark[10].y)
    return dx < 0.01 and dy < 0.01

@app.get("/")
def read_root():
    """Health-check root endpoint returning a static greeting."""
    return {"Hello": "World"}


def show_keypoint_pairs(frame, iw, ih, mp_FACE_MESH):
    '''
    Visualize the measured keypoint pairs/regions on copies of the frame.
    :param frame: BGR frame to annotate (a copy is made)
    :param iw: image width in pixels
    :param ih: image height in pixels
    :param mp_FACE_MESH: initialized mediapipe FaceMesh instance
    :return: five annotated frames (one per measurement group), or the plain
             copy when no face was detected.
             NOTE(review): the no-face path returns a single image while the
             success path returns a 5-tuple — kept as-is for compatibility.
    '''
    image = frame.copy()
    faceLm = get_faceLms(mp_FACE_MESH, image)

    if faceLm is None:
        return image

    # BUGFIX: the original looped `for faceLm in faceLms:` over a single
    # face's landmark container (not a list of faces), which could leave
    # frame1..frame5 unbound at the return below. Use the detection directly.

    # Inner canthus distance (red)
    frame1 = show_keypoint_pair(112, 362, image, faceLm, iw, ih, color=(0, 0, 255))
    # Nasal wing width (red)
    frame1 = show_keypoint_pair(64, 344, frame1, faceLm, iw, ih, color=(0, 0, 255))

    # Lips to lower edge of the mandible (blue)
    frame2 = show_keypoint_pair(16, 152, image, faceLm, iw, ih, color=(255, 0, 0))
    # Full face length (blue)
    frame2 = show_keypoint_pair(9, 152, frame2, faceLm, iw, ih, color=(255, 0, 0))

    # Upper-lip top edge to columella (green)
    frame3 = show_keypoint_pair(2, 0, image, faceLm, iw, ih, color=(0, 255, 0))
    # Lower-lip top edge to columella (green)
    frame3 = show_keypoint_pair(2, 13, frame3, faceLm, iw, ih, color=(0, 255, 0))

    # Area below the lower lip
    frame4 = show_keypoint_region(xiachunxia_ids, image, faceLm, iw, ih)
    # Full face area
    frame4 = show_keypoint_region(face_oval_ids, frame4, faceLm, iw, ih)

    # External ear canal to front of the zygomatic bone (red)
    frame5 = show_keypoint_pair(93, 50, image, faceLm, iw, ih, color=(0, 0, 255))
    # External ear canal to front of the mandible (red)
    frame5 = show_keypoint_pair(93, 152, frame5, faceLm, iw, ih, color=(0, 0, 255))

    return frame1, frame2, frame3, frame4, frame5

@app.get("/items/{item_id}")
def read_item(item_id: str, q: Union[str, None] = None):
    '''
    Analyze a video: locate a frontal-face frame and a profile-face frame,
    derive facial-ratio features from their landmarks, and run the four
    symptom-prediction Keras models on those features.
    :param item_id: path of the video file to process
    :param q: unused optional query parameter (kept for API compatibility)
    :return: (frontal frame, profile frame, result dict). The dict holds the
             measured ratios and per-symptom probabilities; fixed fallback
             probabilities are returned when no frontal/profile face is found.
    '''
    video_file = item_id
    print('文件路径：', video_file)
    result = {"filename": video_file}

    cap_video = cv2.VideoCapture(video_file)
    i = 0
    # Frame rate
    fps = int(round(cap_video.get(cv2.CAP_PROP_FPS)))
    print('帧率：', fps)
    # Resolution: width
    iw = int(cap_video.get(cv2.CAP_PROP_FRAME_WIDTH))
    print('分辨率-宽度：', iw)
    # Resolution: height
    ih = int(cap_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('分辨率-高度：', ih)
    # Total frame count
    frameCounter = int(cap_video.get(cv2.CAP_PROP_FRAME_COUNT))
    print('总帧数：', frameCounter)

    frontFaceLms = None
    sideFaceLms = None
    referFaceLms = None
    newFaceLms = None
    # BUGFIX: pre-initialize the frames used by the early-return paths below;
    # previously they could be referenced before assignment (NameError) when
    # the video was empty or no profile frame was ever found.
    zheng_frame = None
    ce_frame = None

    while cap_video.isOpened():
        success, frame = cap_video.read()

        if not success:
            break

        if i == 0:
            # Keep the very first frame as the frontal-face image.
            zheng_frame = frame

        if i % EXTRACT_FRENQUENCY == 0:
            faceLms = get_faceLms(mp_FACE_MESH, frame)

            # Skip frames with no detection result.
            if faceLms is None:
                # BUGFIX: advance the counter even when skipping, otherwise
                # the reported frame numbers drift out of sync.
                i += 1
                continue

            # The first detected face becomes both the frontal-face landmarks
            # and the moving reference for pose comparison.
            if referFaceLms is None:
                frontFaceLms = faceLms
                referFaceLms = faceLms
                print('正脸在第%d帧' % (i + 1))

            newFaceLms = faceLms

            # Small drift -> still the same pose, advance the reference.
            # Large jump -> the previous reference is taken as the profile
            # (side) face and scanning stops.
            if compare_faceLms(referFaceLms, newFaceLms):
                referFaceLms = newFaceLms
            else:
                ce_frame = frame
                sideFaceLms = referFaceLms
                print('侧脸在第%d帧' % (i + 1))
                break
        i += 1

    cap_video.release()

    if frontFaceLms is None:
        print("正脸值为None")
        return zheng_frame, ce_frame, {"predict_xianyangti": 0.57, "predict_biantaoti": 0.43,
                "predict_zhangkouhuxi": 0.65, "predict_shuimiandahan": 0.32}

    if sideFaceLms is None:
        print("侧脸值为None")
        return zheng_frame, ce_frame, {"predict_xianyangti": 0.57, "predict_biantaoti": 0.43,
                "predict_zhangkouhuxi": 0.65, "predict_shuimiandahan": 0.32}

    # ---- Frontal-face measurements ----
    # 1.1 Inner canthus distance
    zijianju = cal_jianju(112, 362, frontFaceLms.landmark, ih, iw)
    print("眼内眦间距：", zijianju)
    # 1.2 Nasal wing width
    biyi_width = cal_jianju(64, 344, frontFaceLms.landmark, ih, iw)
    print("鼻翼宽度：", biyi_width)

    # 2.1 Lips to lower edge of the mandible
    kouchun_xiahexiayuan = cal_jianju(16, 152, frontFaceLms.landmark, ih, iw)
    print("口唇至下颌下缘：", kouchun_xiahexiayuan)
    # 2.2 Full face length
    mianbu = cal_jianju(9, 152, frontFaceLms.landmark, ih, iw)
    print("整个面部长度：", mianbu)

    # 3.1 Upper-lip top edge to columella
    shangchunshangyuan_bixiaozhu = cal_jianju(0, 2, frontFaceLms.landmark, ih, iw)
    print("上唇上缘至鼻小柱距离：", shangchunshangyuan_bixiaozhu)
    # 3.2 Lower-lip top edge to columella
    xiachunshangyuan_bixiaozhu = cal_jianju(2, 13, frontFaceLms.landmark, ih, iw)
    print("下唇上缘至鼻小柱距离：", xiachunshangyuan_bixiaozhu)

    xiachunxia_mianji = cal_mianji(frontFaceLms, xiachunxia_ids, ih, iw)
    face_oval_mianji = cal_mianji(frontFaceLms, face_oval_ids, ih, iw)
    # 4.1 Area below the lower lip
    print("下唇下面积:", xiachunxia_mianji)
    # 4.2 Full face area
    print("脸部全面积:", face_oval_mianji)

    # 5.1 Upper-to-lower lip distance
    shangchun_xiachun = cal_jianju(0, 16, frontFaceLms.landmark, ih, iw)
    print("唇上下间距:", shangchun_xiachun)
    # 5.2 Columella to lower edge of the mandible
    bixiaozhu_xiahe = cal_jianju(2, 152, frontFaceLms.landmark, ih, iw)
    print("鼻小柱至下颌下缘间距:", bixiaozhu_xiahe)

    # 6.1 Horizontal distance at the zygomatic plane
    quangu_hengju = cal_jianju(93, 323, frontFaceLms.landmark, ih, iw)
    print("颧骨平面横距:", quangu_hengju)
    # 6.2 Horizontal distance at the mouth plane
    koulie_hengju = cal_jianju(58, 288, frontFaceLms.landmark, ih, iw)
    print("口裂平面横距:", koulie_hengju)

    result["眼内眦间距与鼻翼宽度"] = zijianju / biyi_width
    result["口唇至下颌下缘与整个面部长度"] = kouchun_xiahexiayuan / mianbu
    result["上唇上缘至鼻小柱距离与下唇上缘至鼻小柱距离"] = shangchunshangyuan_bixiaozhu / xiachunshangyuan_bixiaozhu
    result["下唇下面积与脸部全面积"] = xiachunxia_mianji / face_oval_mianji
    result["唇上下间距与鼻小柱至下颌下缘间距"] = shangchun_xiachun / bixiaozhu_xiahe
    result["颧骨平面横距与口裂平面横距"] = quangu_hengju / koulie_hengju

    # ---- Profile-face measurements (landmark ids depend on turn direction) ----
    if sideFaceLms.landmark[10].x > frontFaceLms.landmark[10].x:
        print("侧脸往左转...")

        # 5. External ear canal to zygomatic front / to mandible front
        waierdao_quangu = cal_jianju(93, 50, sideFaceLms.landmark, ih, iw)
        print("外耳道口至颧骨前缘:", waierdao_quangu)
        waierdao_xiahegu = cal_jianju(93, 152, sideFaceLms.landmark, ih, iw)
        print("外耳道口至下颌骨前缘:", waierdao_xiahegu)

        # 6. Mandible angle to mouth corner / to mandible front
        xiahejiao_koujiao = cal_jianju(172, 57, sideFaceLms.landmark, ih, iw)
        print("下颌角至口角:", xiahejiao_koujiao)
        xiahejiao_xiahegu = cal_jianju(172, 152, sideFaceLms.landmark, ih, iw)
        print("下颌角至下颌前缘:", xiahejiao_xiahegu)

        # 7. Nose bridge height and nose tip height
        # NOTE(review): this branch uses the FRONTAL landmarks while the
        # right-turn branch uses the PROFILE landmarks — possibly intentional,
        # but looks inconsistent; confirm with the original author.
        bijian_height = cal_height(4, 49, frontFaceLms.landmark, ih, iw)
        print("鼻尖高度:", bijian_height)
        biliang_height = cal_height(195, 49, frontFaceLms.landmark, ih, iw)
        print("鼻梁高度:", biliang_height)
    else:
        print("侧脸往右转...")

        # 5. External ear canal to zygomatic front / to mandible front
        waierdao_quangu = cal_jianju(366, 280, sideFaceLms.landmark, ih, iw)
        print("外耳道口至颧骨前缘:", waierdao_quangu)
        waierdao_xiahegu = cal_jianju(366, 152, sideFaceLms.landmark, ih, iw)
        print("外耳道口至下颌骨前缘:", waierdao_xiahegu)

        # 6. Mandible angle to mouth corner / to mandible front
        xiahejiao_koujiao = cal_jianju(397, 287, sideFaceLms.landmark, ih, iw)
        print("下颌角至口角:", xiahejiao_koujiao)
        xiahejiao_xiahegu = cal_jianju(397, 152, sideFaceLms.landmark, ih, iw)
        print("下颌角至下颌前缘:", xiahejiao_xiahegu)

        # 7. Nose bridge height and nose tip height
        bijian_height = cal_height(4, 279, sideFaceLms.landmark, ih, iw)
        print("鼻尖高度:", bijian_height)
        biliang_height = cal_height(195, 279, sideFaceLms.landmark, ih, iw)
        print("鼻梁高度:", biliang_height)

    # Save the profile-derived ratios.
    result["鼻梁高度与鼻尖高度"] = biliang_height / bijian_height
    result["下颌角至口角与下颌角至下颌前缘"] = xiahejiao_koujiao / xiahejiao_xiahegu
    result["外耳道口至颧骨前缘与外耳道口至下颌骨前缘"] = waierdao_quangu / waierdao_xiahegu

    # ---- Predict symptom probabilities from the nine ratio features ----
    X, x = [], []
    x.append(result['眼内眦间距与鼻翼宽度'])
    x.append(result['口唇至下颌下缘与整个面部长度'])
    x.append(result['上唇上缘至鼻小柱距离与下唇上缘至鼻小柱距离'])
    x.append(result['下唇下面积与脸部全面积'])
    x.append(result['唇上下间距与鼻小柱至下颌下缘间距'])
    x.append(result['颧骨平面横距与口裂平面横距'])
    x.append(result['鼻梁高度与鼻尖高度'])
    x.append(result['下颌角至口角与下颌角至下颌前缘'])
    x.append(result['外耳道口至颧骨前缘与外耳道口至下颌骨前缘'])
    X.append(x)
    load_model = tf.keras.models.load_model('model_20230227_xianyangti')
    predict_xianyangti = load_model.predict(np.array(X))
    load_model = tf.keras.models.load_model('model_20230227_biantaoti')
    predict_biantaoti = load_model.predict(np.array(X))
    load_model = tf.keras.models.load_model('model_20230227_zhangkouhuxi')
    predict_zhangkouhuxi = load_model.predict(np.array(X))
    load_model = tf.keras.models.load_model('model_20230227_shuimiandahan')
    predict_shuimiandahan = load_model.predict(np.array(X))

    # Squash the raw model outputs through a sigmoid to get probabilities.
    pro_xianyangti = 1/(np.exp(-float(predict_xianyangti[0][0]))+1)
    pro_biantaoti = 1/(np.exp(-float(predict_biantaoti[0][0]))+1)
    pro_zhangkouhuxi = 1/(np.exp(-float(predict_zhangkouhuxi[0][0]))+1)
    pro_shuimiandahan = 1/(np.exp(-float(predict_shuimiandahan[0][0]))+1)

    return zheng_frame, ce_frame, {"results": result, "predict_xianyangti": pro_xianyangti, "predict_biantaoti": pro_biantaoti,
            "predict_zhangkouhuxi": pro_zhangkouhuxi, "predict_shuimiandahan": pro_shuimiandahan}

if __name__ == '__main__':
    # Local smoke test: run the analysis pipeline directly on a sample video.
    # result = read_item(item_id="F:/laibo/Data/face_shengli/20221113/083245.mp4")
    outcome = read_item(item_id="t203557.avi")
    print(outcome)