import cv2
import datetime
import supervision as sv
from typing import Optional, Sequence, Union
import warnings
from pathlib import Path
from mmengine.config import Config
from mmengine.registry import init_default_scope
from mmengine.model.utils import revert_sync_batchnorm
from mmengine.runner import load_checkpoint
from mmdet.registry import MODELS
from mmdet.evaluation import get_classes
from image_demo import LABEL_ANNOTATOR,BOUNDING_BOX_ANNOTATOR
from abc import ABCMeta, abstractmethod
import numpy as np
from mmengine.utils import is_str
from facedet import FaceDetectionSingleton
import time
import os
import json
# Directory where event reports are persisted; consumed by visualize().
save_directory = '/root/paraformer/example/'
cs_file_path = os.path.join(save_directory, 'event.bin')
# Epoch-seconds timestamp of the most recently persisted fall event;
# updated by visualize() to throttle repeated fall reports.
last_fall_time = 0
# Minimum seconds between two persisted fall reports.
fall_interval = 10



def init_detector(
    config: Union[str, Path, Config],
    checkpoint: Optional[str] = None,
    device: str = 'cuda:0',
    cfg_options: Optional[dict] = None,
):
    """Initialize a detector from a config file.

    Args:
        config: Config file path, :obj:`Path`, or an already-loaded
            :obj:`Config` object.
        checkpoint: Checkpoint path. If ``None``, the model keeps its random
            initialization and COCO class names are assumed.
        device: Device the model is moved to, e.g. ``'cuda:0'`` or ``'cpu'``.
        cfg_options: Optional dict merged into the loaded config, overriding
            existing values.

    Returns:
        The built detector in eval mode, with ``dataset_meta`` and ``cfg``
        attached for downstream convenience.

    Raises:
        TypeError: If ``config`` is neither a path-like nor a ``Config``.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    elif 'init_cfg' in config.model.backbone:
        # No overrides given: drop the backbone's pretrained init so the
        # checkpoint (or random init) fully determines the weights.
        config.model.backbone.init_cfg = None

    scope = config.get('default_scope', 'mmdet')
    if scope is not None:
        # Reuse the value already read above instead of a second lookup.
        init_default_scope(scope)

    model = MODELS.build(config.model)
    model = revert_sync_batchnorm(model)
    if checkpoint is None:
        warnings.simplefilter('once')
        warnings.warn('checkpoint is None, use COCO classes by default.')
        model.dataset_meta = {'classes': get_classes('coco')}
    else:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        # Fixed typo in the original message ("sucess load checkpoint.").
        print("successfully loaded checkpoint.")
        # Weights converted from elsewhere may not have meta fields.
        checkpoint_meta = checkpoint.get('meta', {})

        # Save the dataset_meta in the model for convenience.
        if 'dataset_meta' in checkpoint_meta:
            # mmdet 3.x: all keys should be lowercase.
            model.dataset_meta = {
                k.lower(): v
                for k, v in checkpoint_meta['dataset_meta'].items()
            }
        elif 'CLASSES' in checkpoint_meta:
            # < mmdet 3.x stored class names under an upper-case key.
            classes = checkpoint_meta['CLASSES']
            model.dataset_meta = {'classes': classes}
        else:
            warnings.simplefilter('once')
            warnings.warn(
                'dataset_meta or class names are not saved in the '
                'checkpoint\'s meta data, use COCO classes by default.')
            model.dataset_meta = {'classes': get_classes('coco')}

    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model

def detect_posture(w, h, fall_threshold=1.1, bent_threshold_low=0.7, bent_threshold_high=1.1, standing_threshold=0.7):
    """Classify a person's posture from their bounding-box aspect ratio.

    A wide box (large w/h) suggests a lying person; a tall box a standing one.

    Args:
        w: Bounding-box width in pixels.
        h: Bounding-box height in pixels.
        fall_threshold: Ratio at or above which the posture is 'Falling'.
        bent_threshold_low: Inclusive lower bound of the 'Bending' band.
        bent_threshold_high: Exclusive upper bound of the 'Bending' band.
        standing_threshold: Ratio below which the posture is 'Standing'.

    Returns:
        tuple: (label, BGR color) — e.g. ('Falling', (0, 0, 255)) — or
        (None, None) when the box is degenerate (h <= 0) or the ratio falls
        in no configured band.
    """
    if h <= 0:
        # Degenerate box: avoid ZeroDivisionError (the original crashed here).
        return None, None
    ratio = w / h  # computed once instead of once per branch
    if ratio >= fall_threshold:
        return 'Falling', (0, 0, 255)  # red (BGR)
    if bent_threshold_low <= ratio < bent_threshold_high:
        return 'Bending', (0, 255, 255)  # yellow
    if ratio < standing_threshold:
        return 'Standing', (0, 255, 0)  # green
    return None, None

def save_fall_image(image):
    """Save a snapshot of a detected fall as a timestamped PNG in the CWD.

    Args:
        image: BGR image (OpenCV convention) to write to disk.

    Returns:
        str: The filename the image was written to.
    """
    # Timestamp makes each saved fall frame unique.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"fall_detected_{timestamp}.png"
    # Save the image.
    cv2.imwrite(filename, image)
    # Bug fix: the original printed the literal text "(unknown)" instead of
    # interpolating the actual filename.
    print(f"跌倒图像保存为: {filename}")
    return filename

def visualize(pred_instances, texts, image,dogmessage_queue):
    """Annotate detections on ``image`` and emit event/fall messages.

    Args:
        pred_instances: Detection results exposing ``.cpu().numpy()`` and the
            fields 'bboxes', 'labels', 'scores' (presumably an mmdet
            InstanceData — TODO confirm with the caller).
        texts: Sequence where ``texts[class_id][0]`` is the class name string.
        image: BGR frame (OpenCV convention), drawn on in place.
        dogmessage_queue: Queue that receives UTF-8 JSON event/fall payloads.

    Returns:
        tuple: (annotated image, list of (cx, cy) detection centers).

    Side effects: puts messages on ``dogmessage_queue``, writes fall reports
    to ``cs_file_path`` (throttled by ``fall_interval``), and updates the
    module-level ``last_fall_time``.
    """
    global last_fall_time

    # Move GPU tensors to the CPU and convert to NumPy arrays.
    pred_instances = pred_instances.cpu().numpy()

    # Wrap the raw arrays in a supervision Detections object.
    detections = sv.Detections(
        xyxy=pred_instances['bboxes'],
        class_id=pred_instances['labels'],
        confidence=pred_instances['scores']
    )

    # Face-detection singleton (shared instance, model loaded once).
    face_detection = FaceDetectionSingleton()

    # Per-detection outputs collected while iterating.
    center_coords = []  # (cx, cy) centers of the boxes
    labels = []  # label strings handed to the annotator

    # Class-name keywords that mark a detection as a person.
    human_keywords = ["person", "man", "woman", "people", "girl", "boy"]

    # Timestamp used to throttle repeated fall reports.
    current_timestamp = time.time()

    # Iterate over the detected objects.
    for bbox, class_id, confidence in zip(detections.xyxy, detections.class_id, detections.confidence):
        x_min, y_min, x_max, y_max = map(int, bbox)
        w = x_max - x_min
        h = y_max - y_min

        # Base label: class name plus confidence.
        base_label = f"{texts[class_id][0]} {confidence:0.2f}"

        # Extra label parts (gender/age, posture) appended below.
        additional_label = ""

        # Box center.
        center_x = (x_min + x_max) // 2
        center_y = (y_min + y_max) // 2

        # Default box color: green.
        box_color = (0, 255, 0)

        # Person-like detections get face/age/posture analysis.
        if any(keyword in texts[class_id][0].lower() for keyword in human_keywords):
            # Crop the detected region for face analysis.
            # NOTE(review): this is the whole person box, not a face crop —
            # confirm detect_age_gender expects that.
            face = image[y_min:y_max, x_min:x_max]
            if face.size > 0:  # skip empty crops
                # Estimate gender and age.
                gender, age = face_detection.detect_age_gender(face)
                additional_label += f", {gender} {age}"

            # Posture from the bounding-box aspect ratio.
            # NOTE(review): `color` is unused; box_color is set separately below.
            posture, color = detect_posture(w, h)
            if posture:
                additional_label += f", {posture}"

            # Person-like class that is not a generic "person": report it as
            # a generic event with its box position.
            if texts[class_id][0].lower() not in ['person', 'people', 'the man']:
                data2 = {
                    "type": "event",
                    "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "position": [
                        x_min,
                        y_min,
                        x_max,
                        y_max
                    ]
                }
                dogmessage_queue.put(json.dumps(data2).encode('utf-8'))
                # with open('/root/paraformer/example/dog.bin', 'wb') as f:
                #     f.write(json.dumps(data2).encode('utf-8'))


            # Fall posture detected.
            if posture == 'Falling':
                # Red box for falls.
                box_color = (0, 0, 255)
                data0 = {
                    "type": "falling",
                    "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "position": [
                        x_min,
                        y_min,
                        x_max,
                        y_max
                    ]
                }
                # with open('/root/paraformer/example/dog.bin', 'wb') as f:
                #     f.write(json.dumps(data0).encode('utf-8'))
                dogmessage_queue.put(json.dumps(data0).encode('utf-8'))

                # Only persist a fall report every fall_interval seconds.
                if current_timestamp - last_fall_time > fall_interval:
                    currtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    # Record the fall time and the box-center coordinates.
                    fall_info = f"{currtime}, 检测到有人跌倒, 位置坐标: ({center_x}, {center_y})"

                    # NOTE(review): 'wb' truncates the file on every fall; the
                    # original comment claimed binary append ('ab') — confirm
                    # which behavior is intended.
                    with open(cs_file_path, 'wb') as f:
                        f.write(fall_info.encode('utf-8'))  # encode str to bytes
                    last_fall_time = current_timestamp  # update throttle timestamp
            # else:
            #     data1 = {
            #         "type": "person",
            #         "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            #         "position": [
            #             x_min,
            #             y_min,
            #             x_max,
            #             y_max
            #         ]
            #     }
                # with open('/root/paraformer/example/dog.bin', 'wb') as f:
                #     f.write(json.dumps(data1).encode('utf-8'))

        # data2 = {
        #     "type": "event",
        #     "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        #     "position": [
        #         x_min,
        #         y_min,
        #         x_max,
        #         y_max
        #     ]
        # }
        # with open('/root/paraformer/example/dog.bin', 'wb') as f:
        #     f.write(json.dumps(data2).encode('utf-8'))

        # Merge the base label with the extra parts.
        final_label = base_label + additional_label
        labels.append(final_label)

        # Draw the box; color reflects the detected posture.
        cv2.rectangle(image, (x_min, y_min), (x_max, y_max), box_color, 2)

        # Record the center point.
        center_coords.append((center_x, center_y))

    # Draw the labels with LABEL_ANNOTATOR.
    image = LABEL_ANNOTATOR.annotate(image, detections, labels=labels)

    # Stamp the current time (ms precision) onto the frame.
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
    cv2.putText(image, current_time, (10, 60), cv2.FONT_HERSHEY_SIMPLEX,
                1, (255, 0, 0), 2, cv2.LINE_AA)

    return image, center_coords

class BaseVisualizer(metaclass=ABCMeta):
    """Base class for visualization tools.

    Subclasses implement :meth:`draw_one_image`; this base decides which
    frames of a clip window get drawn on.

    Args:
        max_labels_per_bbox (int): Max number of action labels drawn per box.
    """

    def __init__(self, max_labels_per_bbox):
        self.max_labels_per_bbox = max_labels_per_bbox

    def draw_predictions(self, task):
        """Visualize stdet predictions on raw frames.

        Draws on a window of ``task.clip_vis_length`` frames centered on the
        keyframe and writes them back into ``task.frames``.
        """
        # read bboxes from task
        bboxes = task.display_bboxes.cpu().numpy()

        # draw predictions and update task
        keyframe_idx = len(task.frames) // 2
        draw_range = [
            keyframe_idx - task.clip_vis_length // 2,
            keyframe_idx + (task.clip_vis_length - 1) // 2
        ]
        assert draw_range[0] >= 0 and draw_range[1] < len(task.frames)
        task.frames = self.draw_clip_range(task.frames, task.action_preds,
                                           bboxes, draw_range, task.prompt)

        return task

    def draw_clip_range(self, frames, preds, bboxes, draw_range, prompt):
        """Draw a range of frames with the same bboxes and predictions."""
        # no predictions to be drawn
        if bboxes is None or len(bboxes) == 0:
            return frames

        # split into untouched left/right parts and the middle being drawn
        left_frames = frames[:draw_range[0]]
        right_frames = frames[draw_range[1] + 1:]
        draw_frames = frames[draw_range[0]:draw_range[1] + 1]

        # get labels(texts) and draw predictions
        draw_frames = [
            self.draw_one_image(frame, bboxes, preds, prompt) for frame in draw_frames
        ]

        return list(left_frames) + draw_frames + list(right_frames)

    @abstractmethod
    def draw_one_image(self, frame, bboxes, preds, prompt):
        """Draw bboxes and corresponding texts on one frame.

        Bug fix: the abstract signature previously omitted ``prompt`` even
        though draw_clip_range always passes it.
        """

    @staticmethod
    def abbrev(name):
        """Get the abbreviation of label name:

        'take (an object) from (a person)' -> 'take ... from ...'
        """
        # Repeatedly collapse the first '(...)' group into '...'.
        while name.find('(') != -1:
            st, ed = name.find('('), name.find(')')
            name = name[:st] + '...' + name[ed + 1:]
        return name

class DefaultVisualizer(BaseVisualizer):
    """Tools to visualize predictions.

    Args:
        max_labels_per_bbox (int): Max number of labels to visualize for a
            person box. Default: 5.
        plate (str): The color plate used for visualization. Two recommended
            plates are blue plate `03045e-023e8a-0077b6-0096c7-00b4d8-48cae4`
            and green plate `004b23-006400-007200-008000-38b000-70e000`. These
            plates are generated by https://coolors.co/.
            Default: '03045e-023e8a-0077b6-0096c7-00b4d8-48cae4'.
        text_fontface (int): Fontface from OpenCV for texts.
            Default: cv2.FONT_HERSHEY_DUPLEX.
        text_fontscale (float): Fontscale from OpenCV for texts.
            Default: 0.5.
        text_fontcolor (tuple): Font color from OpenCV for texts.
            Default: (255, 255, 255).
        text_thickness (int): Thickness from OpenCV for texts.
            Default: 1.
        text_linetype (int): Linetype from OpenCV for texts.
            Default: 1.
    """

    def __init__(
            self,
            max_labels_per_bbox=5,
            plate='03045e-023e8a-0077b6-0096c7-00b4d8-48cae4',
            text_fontface=cv2.FONT_HERSHEY_DUPLEX,
            text_fontscale=0.5,
            text_fontcolor=(255, 255, 255),  # white
            text_thickness=1,
            text_linetype=1):
        super().__init__(max_labels_per_bbox=max_labels_per_bbox)
        self.text_fontface = text_fontface
        self.text_fontscale = text_fontscale
        self.text_fontcolor = text_fontcolor
        self.text_thickness = text_thickness
        self.text_linetype = text_linetype

        def hex2color(h):
            """Convert the 6-digit hex string to tuple of 3 int value (RGB)"""
            return (int(h[:2], 16), int(h[2:4], 16), int(h[4:], 16))

        # Parse the dash-separated hex plate into color tuples once.
        plate = plate.split('-')
        self.plate = [hex2color(h) for h in plate]

    def draw_one_image(self, frame, bboxes, preds, prompt):
        """Draw predictions (boxes, per-box label list, prompt) on one image."""
        for bbox, pred in zip(bboxes, preds):
            # draw bbox
            box = bbox.astype(np.int64)
            st, ed = tuple(box[:2]), tuple(box[2:])
            cv2.rectangle(frame, st, ed, (0, 0, 255), 2)

            # draw texts, one 18-px row per label under the box's top edge
            for k, (label, score) in enumerate(pred):
                if k >= self.max_labels_per_bbox:
                    break
                text = f'{self.abbrev(label)}: {score:.4f}'
                location = (0 + st[0], 18 + k * 18 + st[1])
                textsize = cv2.getTextSize(text, self.text_fontface,
                                           self.text_fontscale,
                                           self.text_thickness)[0]
                textwidth = textsize[0]
                diag0 = (location[0] + textwidth, location[1] - 14)
                diag1 = (location[0], location[1] + 2)
                # Bug fix: wrap the plate index so a max_labels_per_bbox
                # larger than the plate no longer raises IndexError
                # (was: self.plate[k + 1]).
                cv2.rectangle(frame, diag0, diag1,
                              self.plate[(k + 1) % len(self.plate)], -1)
                cv2.putText(frame, text, location, self.text_fontface,
                            self.text_fontscale, self.text_fontcolor,
                            self.text_thickness, self.text_linetype)

            # Draw prompt text above each bounding box
            prompt_location = (st[0], st[1] - 10)  # above the top-left corner
            cv2.putText(frame, prompt, prompt_location, self.text_fontface,
                        self.text_fontscale, (0,0,0),
                        self.text_thickness, self.text_linetype)

        return frame

def get_classes(dataset) -> list:
    """Get class names of a dataset.

    Args:
        dataset: Dataset name or one of its aliases (currently only the
            COCO family: 'coco', 'mscoco', 'ms_coco').

    Returns:
        list: Class names of the requested dataset.

    Raises:
        TypeError: If ``dataset`` is not a string.
        ValueError: If the dataset name is not recognized.

    Note: this definition shadows the ``get_classes`` imported from
    ``mmdet.evaluation`` at the top of the file.
    """
    if not isinstance(dataset, str):
        raise TypeError(f'dataset must a str, but got {type(dataset)}')

    # Bug fix: the original built a name -> list-of-aliases dict, so
    # `alias2name[dataset]` was a LIST and `list + '_classes()'` raised
    # TypeError for every recognized dataset. Build the intended
    # alias -> canonical-name mapping instead.
    dataset_aliases = {'coco': ['coco', 'mscoco', 'ms_coco']}
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }

    if dataset not in alias2name:
        raise ValueError(f'Unrecognized dataset: {dataset}')

    # Resolve the '<name>_classes' loader by name via globals() instead of
    # eval() — same dispatch, no arbitrary-code-execution surface.
    return globals()[f'{alias2name[dataset]}_classes']()