from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog

import json
import cv2
import os
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from matplotlib import pyplot as plt

from pose_labeling_utils import PoseLabelingUtils


# Global matplotlib text settings: SimHei font (renders Chinese glyphs),
# bold 40pt red text, and proper minus-sign display with a CJK font.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],
    'axes.unicode_minus': False,
    'font.size': 40,
    'font.weight': 'bold',
    'text.color': 'red',
})


class CustomVisualizer(Visualizer):
    """Detectron2 Visualizer that additionally annotates each detected person
    with a pose description (head/arm/leg labels) computed by PoseLabelingUtils.
    """

    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
        super().__init__(img_rgb, metadata, scale, instance_mode)
        # PIL copy of the rendered image, used for drawing CJK text
        # (matplotlib/OpenCV default fonts cannot render Chinese glyphs).
        self.pil_img = Image.fromarray(self.output.get_image())
        self.draw = ImageDraw.Draw(self.pil_img)
        self.font = ImageFont.truetype("../data/simhei.ttf", 40)  # CJK-capable font, size 40
        self.text_color = (1.0, 0.0, 0.0)  # label text color: red, in 0-1 range
        self.text_weight = 'bold'
        self._default_font_color = (1.0, 1.0, 1.0)  # default overlay color: white

    def draw_instance_predictions(self, predictions):
        """Draw boxes/keypoints for all instances and a pose-description label
        per person.

        Args:
            predictions: a detectron2 ``Instances`` object on CPU, expected to
                carry ``pred_boxes``, ``scores``, ``pred_classes`` and
                ``pred_keypoints`` fields.

        Returns:
            The ``VisImage`` output of the base Visualizer.
        """
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes if predictions.has("pred_classes") else None
        keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
        assigned_colors = [self._default_font_color] * len(predictions)

        file_path = '../data/keypoints_and_thresholds.json'
        keypoint_index = load_keypoints_and_thresholds(file_path)[0]

        pl_utils = PoseLabelingUtils()

        # Build one label per instance; the list must stay the same length as
        # `predictions` so indices line up in the drawing loop below.
        action_labels = []
        for kp in keypoints:
            kp_2d = kp[:, :2]  # 2D coordinates of each keypoint
            valid_kps = [kp_2d[i] for i, vis in enumerate(kp[:, 2]) if vis > 0]  # drop invisible points

            # BUG FIX: the original used `break` here, which stopped labeling
            # every remaining person AND left `action_labels` shorter than
            # `predictions`, crashing the text-drawing loop with an IndexError.
            if len(valid_kps) < 5:  # need at least 5 keypoints to judge a pose
                action_labels.append("")
                continue

            # Map named keypoints to their coordinates.
            keypoints_dict = {key: kp[index].tolist() for key, index in keypoint_index.items()}

            # Head pose
            head_labels = pl_utils.label_head_pose(keypoints_dict)
            # Arm pose
            arm_labels = pl_utils.label_arm_pose(keypoints_dict)
            # Leg pose
            leg_labels = pl_utils.label_leg_pose(keypoints_dict)

            # Combine the partial labels into one description.
            action_labels.append(f"{head_labels}, {arm_labels}, {leg_labels}")

        # Draw the predicted instances.
        super().overlay_instances(
            boxes=boxes,
            masks=None,
            keypoints=keypoints,
            assigned_colors=assigned_colors,
            labels=action_labels,
            alpha=0.5,
        )

        # Draw the pose label near each person's first keypoint.
        # BUG FIX: the loop variable was named `keypoints`, shadowing the
        # keypoint tensor extracted above.
        for i, inst_kps in enumerate(predictions.pred_keypoints):
            x, y = inst_kps[0][:2]  # position of the first keypoint
            # Convert color from 0-1 floats to 0-255 ints.
            text_color_int = tuple(int(c * 255) for c in self.text_color)
            # NOTE(review): this draws on `self.pil_img`, a snapshot taken in
            # __init__ that is never composited back into `self.output`; the
            # returned image does not contain this text — confirm intent.
            self.draw.text((x, y), action_labels[i], font=self.font, fill=text_color_int)

        return self.output


class Detectron2Utils:
    """Wraps a detectron2 COCO keypoint R-CNN predictor (CPU) and renders
    pose-labeled images via CustomVisualizer.
    """

    def __init__(self):
        # Work around the OpenMP "duplicate library" abort seen with some
        # conda/MKL installs.
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

        # Build the model config: pretrained keypoint R-CNN, CPU inference,
        # detections kept only above 0.7 confidence.
        self.cfg = get_cfg()
        self.cfg.MODEL.DEVICE = 'cpu'
        self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
        self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")

        # Create the predictor.
        self.predictor = DefaultPredictor(self.cfg)

    def body_posture_backbone_image(self, img_path, output_filename, scale_percent):
        """Run keypoint detection on an image, draw pose labels, and save the
        result under ``../output/``.

        Args:
            img_path: path of the input image readable by OpenCV.
            output_filename: file name for the rendered image.
            scale_percent: NOTE(review): currently unused — confirm whether
                input resizing was intended; kept for interface compatibility.

        Returns:
            Path of the saved output image.

        Raises:
            FileNotFoundError: if the input image cannot be read.
        """
        im = cv2.imread(img_path)  # read the image (BGR)
        # BUG FIX: cv2.imread returns None on a missing/unreadable file; fail
        # fast instead of crashing later inside the predictor.
        if im is None:
            raise FileNotFoundError(f"Could not read image: {img_path}")

        # Run inference.
        outputs = self.predictor(im)

        # Use the custom Visualizer (expects RGB, hence the channel flip).
        v = CustomVisualizer(im[:, :, ::-1], MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]), scale=1.2)
        v = v.draw_instance_predictions(outputs['instances'].to("cpu"))  # ensure outputs are on CPU

        # Get the rendered image (RGB).
        vis_im = v.get_image()

        # Ensure the output directory exists (exist_ok avoids the
        # check-then-create race of the original exists()/makedirs() pair).
        output_dir = '../output/'
        os.makedirs(output_dir, exist_ok=True)

        # Build the output file path.
        output_path = os.path.join(output_dir, output_filename)
        print(output_path)

        # Save the rendered image (convert back to BGR for OpenCV).
        cv2.imwrite(output_path, cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR))

        return output_path


# 加载关键点索引和动作特征向量
def load_keypoints_and_thresholds(file_path):
    """Load keypoint indexes and pose-threshold tables from a JSON config.

    Args:
        file_path: path to a JSON file containing the keys
            'KEYPOINT_INDEXES', 'FACE_THRESHOLDS', 'ARM_THRESHOLDS' and
            'LEG_THRESHOLDS'.

    Returns:
        Tuple ``(keypoint_index, face_thresholds, arm_thresholds,
        leg_thresholds)`` in that order.

    Raises:
        KeyError: if a required key is missing from the JSON.
        OSError / json.JSONDecodeError: if the file is unreadable or malformed.
    """
    # BUG FIX: open with an explicit encoding — the platform-default codec
    # (e.g. cp936 on Chinese Windows) can fail on a UTF-8 config file.
    with open(file_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    keypoint_index = data['KEYPOINT_INDEXES']
    face_thresholds = data['FACE_THRESHOLDS']
    arm_thresholds = data['ARM_THRESHOLDS']
    leg_thresholds = data['LEG_THRESHOLDS']
    return keypoint_index, face_thresholds, arm_thresholds, leg_thresholds


def distance(pt1, pt2):
    """Return the Euclidean (L2) distance between two points."""
    displacement = np.subtract(pt1, pt2)
    return np.linalg.norm(displacement)


def is_valid_point(point, img_shape):
    """Return True if ``point`` (x, y) lies inside an image whose shape is
    (height, width, ...)."""
    x, y = point[0], point[1]
    height, width = img_shape[0], img_shape[1]
    if x < 0 or x >= width:
        return False
    return 0 <= y < height
