import cv2
import mediapipe as mp
import numpy as np
import time
import math
from flask import Flask, Response, render_template_string
from flask_socketio import SocketIO, emit
from custom.iris_lm_depth import from_landmarks_to_depth

# Operating mode shared between the Socket.IO handlers and the frame
# generator:
#   0: off (camera closed, no frames sent)
#   1: stream frames and run gesture-direction recognition
#   2: stream frames and run distance measurement
mode = 0

# Latest readings, written by gen_frames() and pushed to clients by the
# Socket.IO handlers; the string "None" means no reading is available.
direction = distance = "None"

# Default system camera.
camera = cv2.VideoCapture(0)

# Flask + Socket.IO application setup; templates are served from ./web.
web_folder = './web'
app = Flask(__name__, template_folder=web_folder)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='threading')

# (width, height) that every processed frame is cropped/resized to.
image_size = (640, 360)


# Map a 2D vector to one of four compass-style directions.
def vector2direction(x, y):
    """Return the dominant direction of vector (x, y) as a string.

    The angle is measured counter-clockwise from the positive x axis:
    (-45, 45] -> "right", (45, 135] -> "up", (135, 225] -> "left",
    (225, 315] -> "down".  Returns "None" for the zero vector (the
    original raised ZeroDivisionError in that case).
    """
    if x == 0 and y == 0:
        # Direction is undefined; avoid division by zero.
        return "None"

    # atan2 handles every quadrant directly; "% 360" maps the result
    # into [0, 360), matching the original acos + (y < 0) correction.
    deg = math.degrees(math.atan2(y, x)) % 360

    if deg <= 45 or deg > 315:
        return "right"
    elif deg <= 135:
        return "up"
    elif deg <= 225:
        return "left"
    else:
        return "down"


# Resize an image to `size`, center-cropping first so the aspect ratio
# is preserved (no stretching).
def image_resize(source_image, size=(1920, 1080)):
    """Center-crop `source_image` to the aspect ratio of `size`, then
    resize it to exactly `size` (width, height).

    Fixes an UnboundLocalError in the original: when the image already
    has the target aspect ratio, neither crop branch ran and the result
    variable was never assigned.
    """
    target_aspect = size[0] / size[1]  # width / height of the target
    source_aspect = source_image.shape[1] / source_image.shape[0]

    if source_aspect > target_aspect:
        # Too wide: trim equal margins from the left and right.
        new_width = target_aspect * source_image.shape[0]
        margin = (source_image.shape[1] - new_width) / 2
        cropped = source_image[:, int(margin):int(margin + new_width)]
    elif source_aspect < target_aspect:
        # Too tall: trim equal margins from the top and bottom.
        new_height = source_image.shape[1] / target_aspect
        margin = (source_image.shape[0] - new_height) / 2
        cropped = source_image[int(margin):int(margin + new_height), :]
    else:
        # Aspect ratios already match; no crop needed.
        cropped = source_image

    return cv2.resize(cropped, size)


class handDetctor():
    """Hand detector built on MediaPipe Hands.

    Wraps landmark detection, per-finger open/closed classification and
    a simple "which way is the index finger pointing" classifier.
    """

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """Create the MediaPipe Hands pipeline.

        mode:         static_image_mode flag (True = detect per image).
        maxHands:     maximum number of hands to track.
        detectionCon: minimum detection confidence.
        trackCon:     minimum tracking confidence.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon

        self.mpHands = mp.solutions.hands
        # Bug fix: the original stored mode/detectionCon/trackCon but never
        # passed them to MediaPipe, so those parameters had no effect.
        self.hands = self.mpHands.Hands(
            static_image_mode=mode,
            max_num_hands=maxHands,
            min_detection_confidence=detectionCon,
            min_tracking_confidence=trackCon,
        )
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        """Run hand detection on a BGR frame; optionally draw the skeleton.

        Stores the raw MediaPipe results on self.results for later calls
        to findPosition().  Returns the (possibly annotated) image.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
        self.results = self.hands.process(imgRGB)

        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Return [[id, x_px, y_px], ...] for hand `handNo`.

        Must be called after findHands().  Returns [] when no hand was
        detected.  When draw is True, landmark ids are written onto img.
        """
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            h, w, c = img.shape  # hoisted: invariant across landmarks
            for id, lm in enumerate(myHand.landmark):
                # Landmarks are normalized; convert to pixel coordinates.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.putText(img, str(id), (cx + 10, cy + 10),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
        return lmList

    # Per-finger extended/folded classification.
    def fingerStatus(self, lmList):
        """Return [thumb, index, middle, ring, pinky] booleans.

        A finger counts as extended when its tip is strictly farther from
        the wrist (landmark 0) than its middle joint.  Returns [] when
        lmList is empty.
        """
        if not lmList:
            return []
        _, originx, originy = lmList[0]  # wrist landmark
        fingerList = []
        # (joint, tip) landmark index pairs, one pair per finger.
        for joint, tip in [[2, 4], [6, 8], [10, 12], [14, 16], [18, 20]]:
            _, x1, y1 = lmList[joint]
            _, x2, y2 = lmList[tip]
            fingerList.append(
                math.hypot(x2 - originx, y2 - originy) >
                math.hypot(x1 - originx, y1 - originy))
        return fingerList

    # Direction the index finger points at.
    def get_index_finger_direction(self, lmList):
        """Return "up"/"down"/"left"/"right" for the index finger, or
        "None" when no hand is visible or the index finger is folded."""
        fingerup = self.fingerStatus(lmList)
        if not fingerup or not fingerup[1]:
            return "None"
        # Vector from the index MCP joint (5) to the fingertip (8); the
        # y component is flipped because image coordinates grow downward.
        return vector2direction(lmList[8][1] - lmList[5][1],
                                lmList[5][2] - lmList[8][2])

class irisDector():
    """Iris-based distance estimator built on MediaPipe FaceMesh.

    Uses the iris landmarks of each eye (resolved by the project helper
    ``from_landmarks_to_depth``) to estimate the camera-to-eye distance,
    smoothed over time with an exponential moving average.
    """

    # Subset of FaceMesh landmark indices drawn for debugging.
    points_idx = [33, 133, 362, 263, 61, 291, 199]
    points_idx = list(set(points_idx))
    points_idx.sort()

    # FaceMesh landmark ids marking the corners of each eye.
    left_eye_landmarks_id = np.array([33, 133])
    right_eye_landmarks_id = np.array([362, 263])

    # No lens distortion assumed.
    dist_coeff = np.zeros((4, 1))

    # pseudo camera internals
    # focal_length = frame_width
    focal_length = 900

    # Last seen face landmarks as a 3xN array (rows x, y, z); None until
    # the first successful detection.
    landmarks = None
    # Exponentially smoothed per-eye depths (mm, presumably — units come
    # from from_landmarks_to_depth; verify there); -1 means "no reading yet".
    smooth_left_depth = -1
    smooth_right_depth = -1
    # EMA weight for the newest sample.
    smooth_factor = 0.1

    # BGR drawing colors and circle radii for the debug overlay.
    YELLOW = (0, 255, 255)
    GREEN = (0, 255, 0)
    BLUE = (255, 0, 0)
    RED = (0, 0, 255)
    SMALL_CIRCLE_SIZE = 1
    LARGE_CIRCLE_SIZE = 2

    def __init__(self, image_size, max_num_faces=1):
        """Create the FaceMesh pipeline for frames of ``image_size`` (w, h)."""
        self.image_size = image_size
        self.mp_face_mesh = mp.solutions.face_mesh
        self.face_mesh = self.mp_face_mesh.FaceMesh(
            max_num_faces=max_num_faces)
        self.mpDraw = mp.solutions.drawing_utils

    def get_iris_depth(self,
                       img,
                       image_size=None,
                       draw=True,
                       image_is_RGB=False,
                       focal_length=None):
        """Estimate the camera-to-eye distance from iris landmarks.

        img:          input frame (BGR unless image_is_RGB is True);
                      annotated in place when draw is True.
        image_size:   (w, h) override; defaults to the size given at init.
        draw:         draw landmarks and the depth readout onto img.
        image_is_RGB: skip the BGR->RGB conversion when already RGB.
        focal_length: focal length override in pixels.  NOTE: it is
                      sticky — it replaces self.focal_length for all
                      subsequent calls too.

        Returns (smooth_left_depth, smooth_right_depth), or None when no
        face is detected, either eye fails to resolve, or no depth has
        been accumulated yet.
        """
        if focal_length is not None:
            self.focal_length = focal_length

        if image_size is None:
            image_size = self.image_size

        if image_is_RGB is False:
            imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert to RGB
        else:
            imgRGB = img

        self.results = self.face_mesh.process(imgRGB)

        if self.results.multi_face_landmarks:
            # Only the first detected face is used.
            face_landmarks = self.results.multi_face_landmarks[0]
            landmarks = np.array([(lm.x, lm.y, lm.z)
                                  for lm in face_landmarks.landmark])
            # Transpose to 3xN so columns can be selected by landmark id.
            self.landmarks = landmarks.T
            ret = from_landmarks_to_depth(
                imgRGB,
                self.landmarks[:, self.left_eye_landmarks_id],
                image_size,
                is_right_eye=False,
                focal_length=self.focal_length,
            )
            if ret is None:
                return None

            (left_depth, left_iris_size, left_iris_landmarks,
             left_eye_contours) = ret

            ret = from_landmarks_to_depth(
                imgRGB,
                self.landmarks[:, self.right_eye_landmarks_id],
                image_size,
                is_right_eye=True,
                focal_length=self.focal_length,
            )

            if ret is None:
                return None

            (right_depth, right_iris_size, right_iris_landmarks,
             right_eye_contours) = ret

            # Exponential moving average; seed with the first raw reading.
            if self.smooth_right_depth < 0:
                self.smooth_right_depth = right_depth
            else:
                self.smooth_right_depth = (self.smooth_right_depth *
                                           (1 - self.smooth_factor) +
                                           right_depth * self.smooth_factor)

            if self.smooth_left_depth < 0:
                self.smooth_left_depth = left_depth
            else:
                self.smooth_left_depth = (self.smooth_left_depth *
                                          (1 - self.smooth_factor) +
                                          left_depth * self.smooth_factor)

            # print(
            #     f"depth in cm: {smooth_left_depth / 10:.2f}, {smooth_right_depth / 10:.2f}"
            # )
            # print(f"size: {left_iris_size:.2f}, {right_iris_size:.2f}")

            if draw is True and self.landmarks is not None:

                # draw subset of facemesh
                for ii in self.points_idx:
                    # Landmarks are normalized; scale to pixel coordinates.
                    pos = (np.array(image_size) *
                           self.landmarks[:2, ii]).astype(np.int32)
                    cv2.circle(img, tuple(pos), self.LARGE_CIRCLE_SIZE,
                               self.GREEN, -1)

                # draw eye contours
                eye_landmarks = np.concatenate([
                    right_eye_contours,
                    left_eye_contours,
                ])
                for landmark in eye_landmarks:
                    pos = (np.array(image_size) * landmark[:2]).astype(
                        np.int32)
                    cv2.circle(img, tuple(pos), self.SMALL_CIRCLE_SIZE,
                               self.RED, -1)

                # draw iris landmarks
                iris_landmarks = np.concatenate([
                    right_iris_landmarks,
                    left_iris_landmarks,
                ])
                for landmark in iris_landmarks:
                    pos = (np.array(image_size) * landmark[:2]).astype(
                        np.int32)
                    cv2.circle(img, tuple(pos), self.SMALL_CIRCLE_SIZE,
                               self.YELLOW, -1)

                # write depth values into frame
                depth_string = "{:.2f}cm, {:.2f}cm".format(
                    self.smooth_left_depth / 10, self.smooth_right_depth / 10)
                cv2.putText(
                    img,
                    depth_string,
                    (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    self.GREEN,
                    2,
                    cv2.LINE_AA,
                )
        else:
            # No face in frame.
            return None

        if self.smooth_left_depth == -1 or self.smooth_right_depth == -1:
            return None
        else:
            return self.smooth_left_depth, self.smooth_right_depth


# Frame generator: reads the camera, runs the analysis for the current
# mode and yields JPEG chunks for an MJPEG (multipart) HTTP stream.
def gen_frames(draw_fps=True):
    """Yield camera frames encoded as multipart JPEG chunks.

    While global ``mode`` is 1 the hand detector runs and updates the
    global ``direction``; while it is 2 the iris detector runs and
    updates the global ``distance``.  The loop ends as soon as ``mode``
    becomes 0 or the camera stops delivering frames.

    draw_fps: overlay the measured frames-per-second on each frame.
    """
    global mode, direction, distance
    pTime = 0

    detctor = handDetctor()
    irisd = irisDector(image_size)
    # Original read `while True and mode != 0`; the `True and` was redundant.
    while mode != 0:
        success, frame = camera.read()
        if not success:
            break

        frame = cv2.flip(frame, 1)  # mirror so gestures feel natural
        frame = image_resize(frame, image_size)

        if mode == 1:
            # Hand detection: draw the skeleton and derive finger state.
            frame = detctor.findHands(frame)
            lmList = detctor.findPosition(frame)
            fingerup1 = detctor.fingerStatus(lmList)
            direction = detctor.get_index_finger_direction(lmList)

            # Overlay per-finger open/closed states.
            cv2.putText(frame, str(fingerup1), (10, 140),
                        cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 1)
            # Overlay the index-finger direction.
            cv2.putText(frame, str(direction), (10, 200),
                        cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 2)
            # Overlay the two landmarks the direction is computed from.
            cv2.putText(
                frame, "p5:" + str(lmList[5] if lmList != [] else "") +
                " p8:" + str(lmList[8] if lmList != [] else ""), (10, 240),
                cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
        elif mode == 2:
            # Iris-based distance measurement (average of both eyes).
            ret = irisd.get_iris_depth(frame, draw=True, focal_length=430)
            distance = "None" if ret is None else str((ret[0] + ret[1]) / 2)

        if draw_fps:
            cTime = time.time()
            # max(..., 1e-6) guards against a zero time delta, which the
            # original would turn into a ZeroDivisionError.
            fps = 1 / max(cTime - pTime, 1e-6)
            pTime = cTime
            cv2.putText(frame, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN,
                        3, (255, 255, 255), 2)

        # JPEG-encode and wrap in the boundary format expected by a
        # multipart/x-mixed-replace response.
        ok, buffer = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() +
               b'\r\n')


# MJPEG video stream endpoint.
@app.route('/video_feed')
def video_feed():
    """Stream camera frames as multipart JPEG.

    Returns an empty body when the system is switched off (mode 0);
    put this route in the src attribute of an <img> tag otherwise.
    """
    global mode
    if mode == 0:
        # Camera is off: nothing to stream.
        return ""
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


# Inline HTML for the home page: embeds the MJPEG stream from
# /video_feed in an <img> tag (browsers render multipart/x-mixed-replace
# image streams natively).
indexpage = '''
<!doctype html>
<html lang="en">
<head>
    <!-- Required meta tags -->
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">

    <!-- Bootstrap CSS -->
    <!-- <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
          integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous"> -->

    <title>Live Streaming Demonstration</title>
</head>
<body>
<div class="container">
    <div class="row">
        <div class="col-lg-8  offset-lg-2">
            <h3 class="mt-5">Live Streaming</h3>
            <img src="{{ url_for('video_feed') }}" width="100%">
        </div>
    </div>
</div>
</body>
</html>
'''


@app.route('/')
def index():
    """Video streaming home page: renders the inline `indexpage` template."""
    return render_template_string(indexpage)


@socketio.on('SetMode')
def handle_SetMode(mod):
    """Socket.IO handler: switch the global capture/analysis mode.

    Valid modes: 0 = off, 1 = gesture direction, 2 = distance measuring.
    Replies "succes" on success and "error" for any other value.
    """
    global mode
    print('Set Mode: ' + str(mod))
    if mod in (0, 1, 2):
        mode = mod
        # NOTE(review): "succes" is a typo, but clients may match on this
        # exact string — fix server and web client together.
        emit("reply", "succes")
    else:
        print("error: mod = " + str(mod))
        emit("reply", "error")


@socketio.on('GetDirection')
def handle_GetDirection(rq):
    """Socket.IO handler: push the current index-finger direction.

    When rq == 1, emits 'AcceptDirection' every 0.5 s for as long as the
    system stays in gesture mode (mode 1).
    """
    global direction, mode
    print('Get Direction: ' + str(rq))
    if rq != 1:
        return
    while mode == 1:
        emit('AcceptDirection', direction)
        socketio.sleep(0.5)


@socketio.on('GetDistance')
def handle_GetDistance(rq):
    """Socket.IO handler: push the current measured distance.

    When rq == 1, emits 'AcceptDistance' every 0.5 s for as long as the
    system stays in distance mode (mode 2).
    """
    global distance, mode
    print('Get Distance: ' + str(rq))
    if rq != 1:
        return
    while mode == 2:
        emit('AcceptDistance', distance)
        socketio.sleep(0.5)


if __name__ == '__main__':
    # Serve on all interfaces; Socket.IO wraps the Flask development server.
    socketio.run(app, host="0.0.0.0", port=5002)
