import threading
import time
from threading import Thread

import cv2
import subprocess
import mediapipe as mp
import numpy as np
import websockets
import asyncio
import platform
import signal

from common import *

config = Config()

# MediaPipe module shortcuts.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
mp_hands = mp.solutions.hands

# Video capture source (device index or file/URL — whatever config.video holds).
cap = cv2.VideoCapture(config.video)

stop_threads = False  # cooperative shutdown flag polled by the worker threads
results = None        # latest MediaPipe results, shared between threads

lock = threading.Lock()     # guards the two landmark buffers below
global_hand_landmarks = []  # buffered hand landmarks awaiting publish
global_face_landmarks = []  # buffered face landmarks awaiting publish
connected = None            # set of connected websocket clients (None until first client)

# ffmpeg push-stream command: reads raw frames from stdin ('-i -').
# Numeric config values are coerced with str() because every element of a
# subprocess argv list must be a string; str() is a no-op if they already are.
command = [
    'ffmpeg',
    '-y',
    '-f', 'rawvideo',
    '-vcodec', 'rawvideo',
    '-pix_fmt', config.inPixelFormat,
    '-s', '%dx%d' % (config.width, config.height),
    '-r', str(config.fps),
    '-i', '-'
]

# Optionally capture audio via DirectShow (a Windows-only ffmpeg input device).
if config.withAudio:
    command += [
        '-f', 'dshow',
        '-i', 'audio=%s' % config.audio
    ]

rtmpUrl = 'rtmp://%s:%s/%s' % (config.serverIP, config.rtmpPort, config.rtmpPath)
command += [
    '-c:v', 'h264',
    '-pix_fmt', config.outPixelFormat,  # e.g. 'yuv420p'
    '-g', str(config.quality),
    '-max_delay', str(config.maxDelay),
    '-f', 'flv',
    rtmpUrl
]


# 绘制结果
def draw_results(image):
    lock.acquire()
    if results.multi_hand_landmarks:
        tmp = []
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())
            tmp.extend(list(map(lambda v: [v.x, v.y, v.z], hand_landmarks.landmark)))
        global_hand_landmarks.append(tmp)

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style())
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_IRISES,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_iris_connections_style())
            global_face_landmarks.append(list(map(lambda v: [v.x, v.y, v.z], face_landmarks.landmark)))

    while len(global_face_landmarks) > 3:
        global_face_landmarks.pop()
    while len(global_hand_landmarks) > 3:
        global_hand_landmarks.pop()

    lock.release()


# 推流线程
# RTMP push-stream thread
def thread_rtmp():
    """Capture frames, run MediaPipe face/hand detection, display the
    annotated frames, and (if enabled) push them to ffmpeg for RTMP.

    Runs until the capture fails, the user presses 'q', or the module-level
    ``stop_threads`` flag is set.
    """
    print('== start thread rtmp...')
    # Bind unconditionally so cleanup below cannot hit a NameError.
    rtmp_pipe = None
    if config.enableRTMP:
        rtmp_pipe = subprocess.Popen(command, stdin=subprocess.PIPE)
    print('CMD:', command)
    # Face-mesh detector
    face_mesh = mp_face_mesh.FaceMesh(
        max_num_faces=config.max_num_face,
        refine_landmarks=config.refine_landmarks,
        min_detection_confidence=config.min_detection_confidence,
        min_tracking_confidence=config.min_tracking_confidence
    )
    # Hand detector
    hands = mp_hands.Hands(
        static_image_mode=config.static_image_mode,
        max_num_hands=config.max_num_hands,
        min_detection_confidence=config.min_detection_confidence
    )

    while cap.isOpened():

        global stop_threads
        if stop_threads:
            break

        ret, frame = cap.read()
        # BUG FIX: the original never checked ret; a failed read (end of
        # stream, unplugged camera) made frame None and crashed on .shape.
        if not ret:
            break

        # Resize to the configured output dimensions if needed.
        h, w, c = frame.shape
        if h != config.height or w != config.width:
            h = config.height
            w = config.width
            frame = cv2.resize(frame, (config.width, config.height))

        # To improve performance, optionally mark the image as not writeable
        # to pass by reference.
        frame.flags.writeable = False
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        global results
        results = face_mesh.process(frame)
        hands_results = hands.process(frame)

        # Piggy-back the hand landmarks onto the face-mesh results object so
        # draw_results() only needs the one shared `results` global.
        results.multi_hand_landmarks = hands_results.multi_hand_landmarks

        # Draw the face mesh annotations on the image.
        frame.flags.writeable = True
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # If only the detection overlay should be shown, start from black.
        if config.resultOnly:
            frame = np.zeros((h, w, c), dtype=np.uint8)

        # draw_results handles its own locking.
        draw_results(frame)

        # Convert to grayscale when the stream format requires it.
        if config.inPixelFormat == 'gray':
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Flip the image horizontally for a selfie-view display.
        frame = cv2.flip(frame, 1)

        # Local preview window.
        cv2.imshow('frame[%dx%d]' % (w, h), frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

        if rtmp_pipe is not None:
            # write frame data to pipe
            rtmp_pipe.stdin.write(frame.tobytes())

    cv2.destroyAllWindows()
    cap.release()
    # Release MediaPipe graph resources (the original leaked them).
    face_mesh.close()
    hands.close()
    if rtmp_pipe is not None:
        rtmp_pipe.kill()
    print('== thread rtmp stopped...')


# 发布结果线程
def pub_results():
    global connected

    if not connected:
        return

    sub_results = {
        "hands": global_hand_landmarks,
        "face_mesh": global_face_landmarks
    }

    lock.acquire()
    websockets.broadcast(connected, json.dumps(sub_results))
    global_hand_landmarks.clear()
    global_face_landmarks.clear()
    lock.release()


def thread_pub():
    """Publisher loop: periodically push buffered landmarks to clients
    until the module-level stop flag is raised."""
    global stop_threads
    print("== thread publisher start ...")
    while not stop_threads:
        pub_results()
        # pubPeriod is in milliseconds.
        time.sleep(config.pubPeriod / 1000)
    print("== thread publisher stopped ...")


# websocket 处理函数
async def ws_handler(websocket):

    global connected
    if not connected:
        connected = {websocket}
    else:
        connected.add(websocket)

    while True:
        try:
            message = await websocket.recv()
            # todo: 处理订阅消息
            print("Get:", message)
            # pub_results()
        except websockets.ConnectionClosed:
            connected.remove(websocket)
            print("client closed")
            break


# websocket 主程序
async def ws_main(ip="", port="5500"):
    # rtmp
    rtmp = Thread(target=thread_rtmp)
    rtmp.start()

    # results publisher 会严重延迟检测速度
    if config.enablePublisher:
        pub = Thread(target=thread_pub)
        pub.start()

    loop = asyncio.get_running_loop()
    stop = loop.create_future()

    def stop_loop(*args):
        global stop_threads
        stop_threads = True
        stop.set_result(None)
        print('Websocket stopped')

    if platform.system() != 'Windows':
        loop.add_signal_handler(signal.SIGTERM, stop.set_result, None)
    else:
        signal.signal(signal.SIGINT, stop_loop)

    async with websockets.serve(ws_handler, ip, port):
        print('Websocket working on port %s' % port)
        await stop


if __name__ == '__main__':
    # Run the websocket server; ws_main starts the RTMP/detection thread
    # (and the optional publisher thread) itself.
    asyncio.run(ws_main(config.serverIP, config.websocketPort))
