import asyncio
import base64
import os
import tempfile

import cv2
import numpy as np
import torch
import websockets
from PIL import Image

import mmcv
import mmengine
from mmcv import imread
from mmengine.registry import init_default_scope
from mmpose.apis import inference_topdown
from mmpose.apis import init_model as init_pose_estimator
from mmpose.evaluation.functional import nms
from mmpose.registry import VISUALIZERS
from mmpose.structures import merge_data_samples
from mmdet.apis import inference_detector, init_detector

# Use GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device', device)

# Faster R-CNN person detector (mmdetection). The checkpoint URL points at
# the OpenMMLab model zoo; weights are downloaded on first use.
detector = init_detector(
    'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py',
    'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth',
    device=device
)

# HRNet-W32 top-down pose estimator (mmpose, COCO 256x192).
# output_heatmaps is forced on via cfg_options.
pose_estimator = init_pose_estimator(
    'configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',
    'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth',
    device=device,
    cfg_options={'model': {'test_cfg': {'output_heatmaps': True}}}
)

def calculate_angle_difference(keypoints1, keypoints2):
    """Return per-keypoint angle differences (in degrees) between two poses.

    Both inputs are assumed to be arrays of shape
    (num_instances, num_keypoints, 2) holding (x, y) coordinates — this
    matches how the result of ``pred_instances.keypoints`` is used by the
    caller; TODO confirm for multi-person frames.

    For each instance, a vector is drawn from keypoint 0 to every other
    keypoint, and the angle between corresponding vectors of the two poses
    is computed. Result shape: (num_instances, num_keypoints - 1).

    Raises:
        ValueError: if the two inputs do not have the same shape.
    """
    if keypoints1.shape != keypoints2.shape:
        raise ValueError("输入的关键点数量不相同")

    # Vectors from the first keypoint to every remaining keypoint.
    # (NOT "extracting x and y" as the original comment claimed.)
    vectors1 = keypoints1[:, 1:] - keypoints1[:, :1]
    vectors2 = keypoints2[:, 1:] - keypoints2[:, :1]

    lengths1 = np.linalg.norm(vectors1, axis=2)
    lengths2 = np.linalg.norm(vectors2, axis=2)

    # BUG FIX: a keypoint coinciding with keypoint 0 gives a zero-length
    # vector; the original code divided by zero, producing NaN angles that
    # silently defeat the caller's `angles_diff < threshold` check (NaN
    # comparisons are False). Substitute 1.0 for zero lengths here and
    # report 0 degrees for those degenerate entries below.
    safe_lengths1 = np.where(lengths1 == 0.0, 1.0, lengths1)
    safe_lengths2 = np.where(lengths2 == 0.0, 1.0, lengths2)
    unit_vectors1 = vectors1 / safe_lengths1[:, :, np.newaxis]
    unit_vectors2 = vectors2 / safe_lengths2[:, :, np.newaxis]

    dot_products = np.sum(unit_vectors1 * unit_vectors2, axis=2)

    # Clamp so floating-point drift cannot push arccos out of its domain.
    dot_products = np.clip(dot_products, -1.0, 1.0)

    angles_degrees = np.degrees(np.arccos(dot_products))

    # Zero-length vectors carry no direction: report 0 degrees difference.
    degenerate = (lengths1 == 0.0) | (lengths2 == 0.0)
    return np.where(degenerate, 0.0, angles_degrees)

def _detect_person_bboxes(detector, img_path, conf_thres=0.5, nms_thres=0.3):
    """Detect person bounding boxes (COCO label 0) in a single image.

    Runs the mmdet detector, keeps detections with score > conf_thres,
    applies NMS at nms_thres, and returns an (N, 4) xyxy array.
    """
    detect_result = inference_detector(detector, img_path)
    pred = detect_result.pred_instances.cpu().numpy()
    bboxes = np.concatenate((pred.bboxes, pred.scores[:, None]), axis=1)
    keep = np.logical_and(pred.labels == 0, pred.scores > conf_thres)
    bboxes = bboxes[keep]
    # nms expects (N, 5) boxes with scores; drop the score column afterwards.
    return bboxes[nms(bboxes, nms_thres)][:, :4]


def compare_poses(img_path1, img_path2, pose_estimator, detector, device,
                  threshold=100.0):
    """Decide whether the poses in two images are similar.

    Detects people in both images, estimates 2D keypoints for each box,
    and compares the keypoint-vector angles of the merged pose results.

    Args:
        img_path1: path to the first image.
        img_path2: path to the second (reference) image.
        pose_estimator: initialized mmpose top-down estimator.
        detector: initialized mmdet detector.
        device: unused here (models are already bound to a device);
            kept for backward compatibility with existing callers.
        threshold: maximum per-keypoint angle difference (degrees) still
            considered "similar". Defaults to the original hard-coded 100.0.

    Returns:
        True if every angle difference is below ``threshold``.
    """
    # Make sure mmdet's registry scope is active for inference.
    init_default_scope(detector.cfg.get('default_scope', 'mmdet'))

    # Person detection, shared pipeline for both images (was duplicated).
    bboxes1 = _detect_person_bboxes(detector, img_path1)
    bboxes2 = _detect_person_bboxes(detector, img_path2)

    # Top-down keypoint estimation per detected box.
    pose_results1 = inference_topdown(pose_estimator, img_path1, bboxes1)
    pose_results2 = inference_topdown(pose_estimator, img_path2, bboxes2)

    data_samples1 = merge_data_samples(pose_results1)
    data_samples2 = merge_data_samples(pose_results2)

    angles_diff = calculate_angle_difference(data_samples1.pred_instances.keypoints,
                                             data_samples2.pred_instances.keypoints)
    print("关键点之间的角度差异（度数）：", angles_diff)

    # Similar only if every keypoint's angle difference is under threshold.
    return np.all(angles_diff < threshold)


async def handle(websocket, path):
    """Serve one WebSocket connection.

    Each incoming message is expected to be a base64-encoded image; it is
    decoded to a temporary file, compared against the fixed reference image
    'data/test/0007.jpg', and the similarity verdict is sent back.

    The ``path`` argument is required by the websockets handler signature
    but is not used.
    """
    try:
        async for message in websocket:
            print("message:", message)
            img_data = base64.b64decode(message)

            # BUG FIX: the original wrote every image to the fixed path
            # 'temp_image.jpg', so concurrent connections clobbered each
            # other, and the file leaked if compare_poses raised. Use a
            # unique temp file and always clean it up.
            fd, temp_img_path = tempfile.mkstemp(suffix='.jpg')
            try:
                with os.fdopen(fd, 'wb') as f:
                    f.write(img_data)

                result = compare_poses(temp_img_path, 'data/test/0007.jpg',
                                       pose_estimator, detector, device)
            finally:
                if os.path.exists(temp_img_path):
                    os.remove(temp_img_path)

            await websocket.send(f"相似性结果：{result}")
    except websockets.ConnectionClosed:
        # Client disconnected mid-stream; nothing to do.
        pass


# 将函数封装在main()函数中，使得可异步循环调用
async def main():
    # # 用ngrok提供的公共地址替换 "https://xxxxxxxx.ngrok.io"
    # ngrok_address = "https://928d-36-161-164-53.ngrok-free.app"
    #
    # # 启动WebSocket服务器，监听在指定的主机和端口
    # start_server = await websockets.serve(handle, ngrok_address)
    start_server = await websockets.serve(handle, "localhost", 8768)
    # 等待服务器关闭
    await start_server.wait_closed()

if __name__ == "__main__":
    print("Starting server...")
    # asyncio.run creates the event loop, runs main() until the server
    # closes, then tears the loop down.
    asyncio.run(main())
    print("Server closed.")

