import collections
import time
from pathlib import Path

import cv2
import ipywidgets as widgets
import numpy as np
from IPython.display import clear_output, display
import openvino as ov

import engine3js as engine
import threading

import zmq
import json

#
# ZMQ publisher: broadcasts each processed frame plus its pose overlay data
# on topic "hello"; any number of subscribers may connect to port 5555.
#
# NOTE(review): these are module-level side effects — the socket binds at
# import time, and run_pose_estimation() closes it in its finally block,
# so the module cannot run pose estimation twice without re-importing.
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5555")


# 3D edge index array: each [a, b] pair indexes two joints in the 19-keypoint
# 3D skeleton produced by engine.parse_poses; every pair is rendered as one
# limb segment in the 3D scene.  Note the 3D keypoint numbering differs from
# the 2D numbering used by body_edges_2d below.
body_edges = np.array(
    [
        [0, 1],
        [0, 9],
        [9, 10],
        [10, 11],  # neck - r_shoulder - r_elbow - r_wrist
        [0, 3],
        [3, 4],
        [4, 5],  # neck - l_shoulder - l_elbow - l_wrist
        [1, 15],
        [15, 16],  # nose - l_eye - l_ear
        [1, 17],
        [17, 18],  # nose - r_eye - r_ear
        [0, 6],
        [6, 7],
        [7, 8],  # neck - l_hip - l_knee - l_ankle
        [0, 12],
        [12, 13],
        [13, 14],  # neck - r_hip - r_knee - r_ankle
    ]
)


# 2D edge index array: joint-index pairs for the 2D pose output of
# engine.parse_poses; used by draw_poses to build the line list sent over ZMQ.
# The eye/ear indices (15-18) are numbered differently than in body_edges.
body_edges_2d = np.array(
    [
        [0, 1],  # neck - nose
        [1, 16],
        [16, 18],  # nose - l_eye - l_ear
        [1, 15],
        [15, 17],  # nose - r_eye - r_ear
        [0, 3],
        [3, 4],
        [4, 5],  # neck - l_shoulder - l_elbow - l_wrist
        [0, 9],
        [9, 10],
        [10, 11],  # neck - r_shoulder - r_elbow - r_wrist
        [0, 6],
        [6, 7],
        [7, 8],  # neck - l_hip - l_knee - l_ankle
        [0, 12],
        [12, 13],
        [13, 14],  # neck - r_hip - r_knee - r_ankle
    ]
)

class VideoPlayer:
    """
    Custom video player to fulfill FPS requirements. You can set target FPS and output size,
    flip the video horizontally or skip first N frames.

    A background daemon thread grabs frames at the source FPS; consumers pull
    the latest published frame via next().

    :param source: Video source. It could be either camera device or video file.
    :param size: Output frame size as (width, height), or None to keep the source size.
    :param flip: Flip source horizontally.
    :param fps: Target FPS; defaults to the source FPS when None.
    :param skip_first_frames: Skip first N frames.
    :param width: Requested capture width (a hint; the backend may ignore it).
    :param height: Requested capture height (a hint; the backend may ignore it).
    """

    def __init__(self, source, size=None, flip=False, fps=None, skip_first_frames=0, width=1280, height=720):
        self.__cap = cv2.VideoCapture(source)
        # try HD by default to get better video quality
        self.__cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.__cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        if not self.__cap.isOpened():
            raise RuntimeError(f"Cannot open {'camera' if isinstance(source, int) else ''} {source}")
        # skip first N frames
        self.__cap.set(cv2.CAP_PROP_POS_FRAMES, skip_first_frames)
        # fps of input file; some backends report 0 or negative, so fall back to 60
        self.__input_fps = self.__cap.get(cv2.CAP_PROP_FPS)
        if self.__input_fps <= 0:
            self.__input_fps = 60
        # target fps given by user
        self.__output_fps = fps if fps is not None else self.__input_fps
        self.__flip = flip
        self.__size = None
        self.__interpolation = None
        if size is not None:
            self.__size = size
            # AREA better for shrinking, LINEAR better for enlarging
            self.__interpolation = cv2.INTER_AREA if size[0] < self.__cap.get(cv2.CAP_PROP_FRAME_WIDTH) else cv2.INTER_LINEAR
        # first frame, so next() has something to return before start() is called
        _, self.__frame = self.__cap.read()
        self.__lock = threading.Lock()
        self.__thread = None
        self.__stop = False

    def start(self):
        """Start playing: launch the background frame-grabbing thread."""
        self.__stop = False
        self.__thread = threading.Thread(target=self.__run, daemon=True)
        self.__thread.start()

    def stop(self):
        """Stop playing and release the capture resource."""
        self.__stop = True
        if self.__thread is not None:
            self.__thread.join()
        self.__cap.release()

    def __run(self):
        """Grab frames at the source FPS and publish at most at the target FPS."""
        prev_time = 0
        while not self.__stop:
            t1 = time.time()
            ret, frame = self.__cap.read()
            if not ret:
                break

            # fulfill target fps: publish only when enough time has elapsed
            if 1 / self.__output_fps < time.time() - prev_time:
                prev_time = time.time()
                # replace by current frame
                with self.__lock:
                    self.__frame = frame

            t2 = time.time()
            # time to wait [s] to fulfill input fps
            wait_time = 1 / self.__input_fps - (t2 - t1)
            # wait until
            time.sleep(max(0, wait_time))

        # Signal end-of-stream. Done under the lock so next() never races
        # the None assignment against its copy() of the previous frame.
        with self.__lock:
            self.__frame = None

    def next(self):
        """Get the current frame (a copy), or None once the stream has ended."""
        with self.__lock:
            if self.__frame is None:
                return None
            # need to copy frame, because it can be cached and reused if fps is low
            frame = self.__frame.copy()
        if self.__size is not None:
            frame = cv2.resize(frame, self.__size, interpolation=self.__interpolation)
        if self.__flip:
            frame = cv2.flip(frame, 1)
        return frame

def model_infer(compile_model, scaled_img, stride):
    """
    Preprocess an image and run one inference pass, returning the three raw
    output maps of the pose-estimation network.

    Parameters:
        compile_model: callable compiled model; invoked with an NCHW float batch
            and indexed by output position (0, 1, 2)
        scaled_img: image already resized to the model's input resolution (HWC)
        stride: int, network stride; the image is cropped so both spatial
            dimensions are exact multiples of it
    """
    # Crop the bottom/right edges so height and width divide evenly by the stride.
    crop_h = scaled_img.shape[0] - (scaled_img.shape[0] % stride)
    crop_w = scaled_img.shape[1] - (scaled_img.shape[1] % stride)
    img = scaled_img[0:crop_h, 0:crop_w]

    # Normalize: subtract the mean (128) and divide by 255, roughly centering
    # pixel values around zero in [-0.5, 0.5].
    img = (img - 128.0) / 255.0

    # Reorder HWC -> CHW and prepend a batch axis: (1, channels, height, width).
    img = np.transpose(img, (2, 0, 1))[None,]

    # Run inference and unpack the first batch item of each of the three outputs.
    result = compile_model(img)
    return (result[0][0], result[1][0], result[2][0])


def draw_poses(frame, poses_2d, scaled_img, use_popup):
    """
    Convert 2D pose detections into drawable primitives scaled to the
    original frame: limb segments ("lines") and joint positions ("circles").

    :param frame: the original input image (used only for its shape)
    :param poses_2d: per-person flat arrays of (x, y, score) keypoint triples
                     with a trailing pose-confidence value
    :param scaled_img: the model-input-sized image the poses were detected on
    :param use_popup: unused here; kept for interface compatibility
    :return: dict with "lines" (pairs of [x, y] endpoints) and
             "circles" ([x, y] joint centers), all plain Python ints
    """
    lines = []
    circles = []

    for person in poses_2d:
        # Drop the trailing confidence value, then view as 3 rows: x, y, score.
        kpts = np.array(person[0:-1]).reshape((-1, 3)).transpose()
        visible = kpts[2] > 0

        # Rescale keypoints from model-input coordinates to frame coordinates.
        kpts[0], kpts[1] = (
            kpts[0] * frame.shape[1] / scaled_img.shape[1],
            kpts[1] * frame.shape[0] / scaled_img.shape[0],
        )

        # Limb segments: one line per edge whose two endpoints were detected.
        for a, b in body_edges_2d:
            if visible[a] and visible[b]:
                start = [kpts[0:2, a][0].astype(np.int32).item(), kpts[0:2, a][1].astype(np.int32).item()]
                end = [kpts[0:2, b][0].astype(np.int32).item(), kpts[0:2, b][1].astype(np.int32).item()]
                lines.append([start, end])

        # Joints: one circle per keypoint with a valid score.
        for j in range(kpts.shape[1]):
            if kpts[2, j] != -1:
                circles.append([kpts[0:2, j][0].astype(np.int32).item(), kpts[0:2, j][1].astype(np.int32).item()])

    return {"lines": lines, "circles": circles}


def run_pose_estimation(source=0, flip=False, use_popup=False, skip_frames=0):
    """
    2D image as input, using OpenVINO as inference backend,
    get joints 3D coordinates, and draw 3D human skeleton in the scene

    Each processed frame (JPEG bytes) and its 2D pose primitives (JSON bytes)
    are published over the module-level ZMQ PUB socket on topic "hello".

    :param source:      The webcam number to feed the video stream with primary webcam set to "0", or the video path.
    :param flip:        To be used by VideoPlayer function for flipping capture image.
    :param use_popup:   False for showing encoded frames over this notebook, True for creating a popup window.
    :param skip_frames: Number of frames to skip at the beginning of the video.
    """
    # -1 sentinel: focal length is derived from the first frame's width below.
    focal_length = -1
    stride = 8
    player = None
    skeleton_set = None

    # Prepare Model

    # 1. Prepare the model
    base_model_dir = Path("model")
    ov_model_path = Path(base_model_dir) / "human-pose-estimation-3d-0001.xml"
    # The .xml file describes the network topology (structure).
    # The .bin file (loaded implicitly alongside) holds the binary weights and biases.

    # 2. Load the model
    core = ov.Core()
    model = core.read_model(ov_model_path)

    # Enumerate available inference devices and pick the first one
    # (plus "AUTO" appended as a selectable option).
    supported_devices = core.available_devices + ["AUTO"]
    device = supported_devices[0]

    compile_model = core.compile_model(model=model, device_name=device)


    # Start Process
    try:
        player = VideoPlayer(source, flip=flip, fps=30, skip_first_frames=skip_frames)
        player.start()

        input_image = player.next()
        # set the window size (scale so the width becomes 450 px)
        resize_scale = 450 / input_image.shape[1]
        windows_width = int(input_image.shape[1] * resize_scale)
        windows_height = int(input_image.shape[0] * resize_scale)

        # use visualization library
        engine3D = engine.Engine3js(grid=True, axis=True, view_width=windows_width, view_height=windows_height)

        # if use_popup:
        #     display(engine3D.renderer)
        #     title = "Press ESC to Exit"
        #     cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO | cv2.WINDOW_AUTOSIZE)
        # else:
        #     imgbox = widgets.Image(format="jpg", height=windows_height, width=windows_width)
        #     display(widgets.HBox([engine3D.renderer, imgbox]))

        skeleton = engine.Skeleton(body_edges=body_edges)
        
        # Sliding window of recent inference times for FPS smoothing.
        processing_times = collections.deque()

        while True:
            frame = player.next()
            if frame is None:
                print("Source ended")
                break
                
            # Resize to the model's expected input (NCHW: shape[3]=W, shape[2]=H).
            scaled_img = cv2.resize(frame, dsize=(model.inputs[0].shape[3], model.inputs[0].shape[2]))
            if focal_length < 0:
                # Heuristic focal length of 0.8 * image width (set once).
                focal_length = np.float32(0.8 * scaled_img.shape[1])

            # inference start
            start_time = time.time()
            # get results
            inference_result = model_infer(compile_model, scaled_img, stride)

            stop_time = time.time()
            processing_times.append(stop_time - start_time)

            poses_3d, poses_2d = engine.parse_poses(inference_result, 1, stride, focal_length, True)

            # Keep only the most recent 200 samples for the moving average.
            if len(processing_times) > 200:
                processing_times.popleft()
            process_time = np.mean(processing_times) * 1000
            # NOTE(review): fps is computed but currently unused (the overlay
            # drawing below is commented out).
            fps = 1000 / process_time

            pose_result = {}

            if len(poses_3d) > 0:
                # Remap model coordinates into the 3D scene's axes:
                # scene x/y/z come from -z/-y/-x with fixed offsets (200, 100).
                poses_3d_copy = poses_3d.copy()
                x = poses_3d_copy[:, 0::4]
                y = poses_3d_copy[:, 1::4]
                z = poses_3d_copy[:, 2::4]
                poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = (
                    -z + np.ones(poses_3d[:, 2::4].shape) * 200,
                    -y + np.ones(poses_3d[:, 2::4].shape) * 100,
                    -x,
                )

                # Reshape flat pose vectors to (people, 19 joints, xyz).
                poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
                people = skeleton(poses_3d=poses_3d)

                # Swap the previous skeleton out of the scene for the new one.
                try:
                    engine3D.scene_remove(skeleton_set)
                except Exception:
                    pass

                engine3D.scene_add(people)
                skeleton_set = people

                # draw 2D
                pose_result = draw_poses(frame, poses_2d, scaled_img, use_popup)
            else:
                # No detections: clear any stale skeleton from the scene.
                try:
                    engine3D.scene_remove(skeleton_set)
                    skeleton_set = None
                except Exception:
                    pass

            # Send data: frame as JPEG bytes, pose primitives as JSON bytes.
            print("start to send data via zmq")
            # convert frame to byte
            _, buffer = cv2.imencode('.jpg', frame)
            frame_bytes = buffer.tobytes()
            # convert pose_result to byte
            result_bytes = json.dumps(pose_result).encode('utf-8')
            # set topic name
            topic = "hello"

            # send data via zmq
            socket.send_multipart([topic.encode(), frame_bytes, result_bytes])
            print(f"Sent a frame on topic: {topic}")


            # cv2.putText(
            #     frame,
            #     f"Inference time: {process_time:.1f}ms ({fps:.1f} FPS)",
            #     (10, 30),
            #     cv2.FONT_HERSHEY_COMPLEX,
            #     0.7,
            #     (0, 0, 255),
            #     1,
            #     cv2.LINE_AA,
            # )

            # if use_popup:
            #     cv2.imshow(title, frame)
            #     key = cv2.waitKey(1)
            #     # escape = 27, use ESC to exit
            #     if key == 27:
            #         break
            # else:
            #     # encode numpy array to jpg
            #     imgbox.value = cv2.imencode(
            #         ".jpg",
            #         frame,
            #         params=[cv2.IMWRITE_JPEG_QUALITY, 90],
            #     )[1].tobytes()
            
            engine3D.renderer.render(engine3D.scene, engine3D.cam)


    except KeyboardInterrupt:
        print("Interrupted")
    except RuntimeError as e:
        print(e)
    finally:
        clear_output()
        if player is not None:
            player.stop()
        if use_popup:
            cv2.destroyAllWindows()
        if skeleton_set:
            engine3D.scene_remove(skeleton_set)
        # NOTE(review): closing the module-level socket here means this
        # function can only be called once per process — confirm intended.
        socket.close()




if __name__ == "__main__":
    # Choose the capture source: set USE_WEBCAM to True for a live camera
    # feed, or leave False to read from the demo video file.
    USE_WEBCAM = False

    cam_id = 0
    video_path = "face-demographics-walking.mp4"

    if USE_WEBCAM:
        source = cam_id
    else:
        source = video_path

    # Mirror the image only for live camera input (integer source id).
    run_pose_estimation(source=source, flip=isinstance(source, int), use_popup=True)