import importlib
import os
import random
import time

import cv2
import imageio
import matplotlib
import numpy

from Detection import Detection
from GenericError import GenericError
from feature_extract.config import MODEL_CONFIGS
from feature_extract.registry import MODEL_REGISTRY
from feature_extract.load_dict_method.load_dict_methods import *
import torch
import model.torch_reid.osnet_ain
from myutils import *

MODEL_DIR = 'model'


def in_roi(r, ds, exclude=None):
    """Keep detections whose bottom-center point falls inside ROI polygon *r*.

    Parameters
    ----------
    r : numpy.ndarray
        Polygon contour in the format accepted by ``cv2.pointPolygonTest``.
    ds : iterable
        Detections as 6-element sequences ``[x0, y0, x1, y1, conf, cls_name]``.
    exclude : list, optional
        Class names that are kept regardless of the ROI test. NOTE: despite
        the name, matching classes are *included*, not excluded.

    Returns
    -------
    list
        The detections that passed the ROI test or matched *exclude*.
    """
    # BUGFIX: the original used a mutable default argument (exclude=[]),
    # which is shared across calls; use None as the sentinel instead.
    if exclude is None:
        exclude = []
    kept = []
    for d in ds:
        x0, y0, x1, y1, _, cls_name = d
        # Anchor on the bottom-center of the bbox (roughly the feet position).
        point = ((x0 + x1) // 2, y1)
        # pointPolygonTest >= 0 means the point is on the edge or inside.
        is_in = cv2.pointPolygonTest(r, point, False)
        if is_in >= 0 or cls_name in exclude:
            kept.append(d)
    return kept


def register_models():
    """Import every Python module under ``MODEL_DIR`` so models self-register.

    Walks one level of subdirectories of ``MODEL_DIR`` and imports each module
    as ``model.<folder>.<module>``; the imported modules are expected to
    register themselves (presumably into MODEL_REGISTRY — confirm against the
    model packages) as an import side effect.
    """
    print(os.getcwd())
    for folder in os.listdir(MODEL_DIR):
        folder_path = os.path.join(MODEL_DIR, folder)
        # BUGFIX: skip stray top-level files (e.g. README, __init__.py) —
        # os.listdir() on a file raises NotADirectoryError.
        if not os.path.isdir(folder_path):
            continue
        for file in os.listdir(folder_path):
            # BUGFIX: do not import __init__.py as "model.x.__init__" — that
            # creates a duplicate module object alongside the package itself.
            if file.endswith('.py') and file != '__init__.py':
                module_name = file[:-3]
                print(module_name)
                importlib.import_module(f'model.{folder}.{module_name}')


if __name__ == '__main__':
    # --- Model setup -------------------------------------------------------
    register_models()
    device = torch.device('cuda')
    model = initialize_model('osnet_ain_x1_0', device)
    # model = initialize_model('osnet_transformer', device)
    data_dir = r'D:\work\DD_SORT-main\MOT\res\id_sort_2'

    # Inspect which models the registry picked up.
    print("Registered models:", MODEL_REGISTRY.list_models())
    model.eval()
    class_features = {}

    # --- Build the ReID gallery -------------------------------------------
    # Each identity folder yields TWO gallery entries (keys 2k-1 and 2k),
    # each the mean feature of an independent random sample of up to 20 crops.
    # The second list element (0) is an auxiliary counter/flag slot.
    for class_name in os.listdir(data_dir):
        class_dir = os.path.join(data_dir, class_name)
        if os.path.isdir(class_dir):
            features_list = []
            features_list2 = []
            piclist = [pic for pic in
                       os.listdir(class_dir)]
            chooselist1 = random.sample(piclist, min(20, len(piclist)))
            chooselist2 = random.sample(piclist, min(20, len(piclist)))
            for image_name in chooselist1:
                image_path = os.path.join(class_dir, image_name)
                features = extract_features(model, image_path, device)
                features_list.append(features)
            for image_name in chooselist2:
                image_path = os.path.join(class_dir, image_name)
                features = extract_features(model, image_path, device)
                features_list2.append(features)
            # Mean feature vector represents this identity.
            class_features[int(class_name) * 2 - 1] = [np.mean(features_list, axis=0), 0]
            class_features[int(class_name) * 2] = [np.mean(features_list2, axis=0), 0]
    torch.set_printoptions(threshold=torch.inf)
    np.set_printoptions(threshold=np.inf)
    # test_images_dir = 'reid/query_07'

    # --- Detector setup ----------------------------------------------------
    # Load a local YOLOv7 checkpoint through the torch.hub "custom" entry point.
    yolo_model = torch.hub.load(r'C:\Users\12169\.cache\torch\hub\WongKinYiu_yolov7_main', 'custom',
                                r'C:\Users\12169\Desktop\work\pythonProject\edi-framewrok-develop\edi-framewrok-develop\models\yolov7-e6e.pt',
                                trust_repo=True, source='local')
    my_class = ['person', 'suitcase', 'backpack', 'head', 'guardian', 'guest', 'basket', 'hand']
    detect_classes = ['guest', 'person']
    yolo_model.to(device)
    print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-")
    # Warm up CUDA kernels on a dummy frame so steady-state inference is timed.
    black_image = numpy.zeros((416, 416, 3), dtype=numpy.uint8)
    warm_start = time.time()
    for _ in range(10):
        yolo_model(black_image)
    # BUGFIX: original passed a logging-style "%s" format string to print(),
    # which printed the literal placeholder and the value as two arguments.
    print(f"Warm up time: {time.time() - warm_start}")
    print(yolo_model.names)
    confidence = 0.4
    classes = []
    roi = [(14, 580), (1118, 1018), (1244, 504), (316, 228), (0, 306)]
    roi_np = np.array(roi)
    # Map wanted class names to the detector's numeric class ids.
    for class_index, class_name in enumerate(yolo_model.names):
        print(class_name)
        if class_name in detect_classes:
            classes.append(class_index)
    if len(classes) == 0:
        raise GenericError(
            "No classes are detected, please check the configuration."
        )
    framecount = 0
    matplotlib.use('QT5Agg')

    # --- Video I/O setup ---------------------------------------------------
    readvideo = r'D:\work\sort\5320-2024-08-05_15-22-03\5320-2024-08-05_15-22-03.mp4'
    cap = cv2.VideoCapture(readvideo)
    frame_rate = int(cap.get(cv2.CAP_PROP_FPS))  # source video frame rate
    img_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    video = cv2.VideoWriter(r'D:\work\sort\5320-2024-08-05_15-22-03_reid-11-6.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
                            frame_rate,
                            img_size)

    # --- Main loop: detect, match against gallery, annotate, write ---------
    while cap.isOpened():
        ret, frame = cap.read()
        framecount += 1
        if not ret:
            break
        # Only frames 800..5899 of the clip are processed.
        if framecount < 800:
            continue
        if framecount >= 5900:
            break
        print(f'=========={framecount}==========')
        detections = []
        # The detector (and the ReID crops below) work on RGB; OpenCV reads BGR.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        result = yolo_model(frame)
        for *xyxy, conf, cls in result.xyxy[0].tolist():
            # print(f'conf:{conf}')
            _x1, _y1, _x2, _y2 = map(int, xyxy)
            class_id = int(cls)
            if class_id not in classes:
                continue
            if conf > confidence:
                # NOTE(review): the original also built an unused
                # Detection(name, conf, bbox) object here; removed because
                # nothing consumed it.
                detections.append([_x1, _y1, _x2, _y2, conf, class_id])
        if detections:
            if roi:
                detections = in_roi(roi_np, detections, exclude=[])
            matched = []
            for item in detections:
                x0, y0, x1, y1, _, _ = item
                cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 0, 255), 2)
                bbox_pic = frame[y0:y1, x0:x1]
                # BUGFIX: the original called myutils.extract_features /
                # myutils.find_most_similar_class_merge, but only
                # "from myutils import *" is imported, so the module name
                # "myutils" is not bound — use the bare names (as done when
                # building the gallery above).
                input_feature = extract_features(model, bbox_pic, device, from_array=True)
                most_similar_class, most_similarity, resdict = find_most_similar_class_merge(input_feature,
                                                                                             class_features)
                # Greedy one-to-one assignment: highest-similarity gallery id
                # not already matched this frame wins, if above the threshold.
                sorted_res = dict(sorted(resdict.items(), key=lambda item: item[1], reverse=True))
                for key, value in sorted_res.items():
                    if value >= 0.72:
                        if key not in matched:
                            font = cv2.FONT_HERSHEY_SIMPLEX  # font face
                            font_scale = 1  # font size
                            color = (0, 255, 0)  # label color (B, G, R)
                            thickness = 2  # stroke width
                            # Gallery keys 2k-1 and 2k both map back to identity k.
                            frame = cv2.putText(frame, str(int((key + 1) / 2)), (x0, y0 - 10), font,
                                                font_scale,
                                                color,
                                                thickness, cv2.LINE_AA)
                            matched.append(key)
                            break
                    else:
                        # Scores are sorted descending; no later entry can pass.
                        break

        frame = cv2.polylines(frame, [roi_np.reshape((-1, 1, 2))], isClosed=True, color=(0, 255, 0),
                              thickness=2)
        # Swap channels back to BGR for VideoWriter (the BGR2RGB swap is its
        # own inverse; the frame is currently RGB).
        frame_out = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        video.write(frame_out)
        print(len(detections))
    video.release()
    print(f"视频已保存")
