import cv2
import torch
import glob as glob
import os
import time
import xml.etree.ElementTree as ET
from collections import defaultdict
import numpy as np
from models.FastRCNN import FastRCNN_model
from utils import load_test_set,calculate_iou,calculate_map
from matplotlib import pyplot as plt

def parse_annotation(annotation_path, classes):
    """Parse a VOC-format XML annotation file.

    Args:
        annotation_path: path (or open file object) of the XML annotation.
        classes: ordered list of class names; labels are indices into it.

    Returns:
        dict with keys:
            'boxes'  -- list of (xmin, ymin, xmax, ymax) integer tuples
            'labels' -- list of int class indices (parallel to 'boxes')
        Objects whose class name is not in `classes` are skipped.
    """
    tree = ET.parse(annotation_path)
    root = tree.getroot()
    boxes = []
    labels = []
    for obj in root.iter('object'):
        label = obj.find('name').text
        if label not in classes:
            # Unknown category: ignore this object entirely.
            continue
        labels.append(classes.index(label))
        bndbox = obj.find('bndbox')
        # BUGFIX: parse via float() first -- some VOC files store coordinates
        # as floats (e.g. "30.0"), which int() rejects with ValueError.
        boxes.append(tuple(int(float(bndbox.find(tag).text))
                           for tag in ('xmin', 'ymin', 'xmax', 'ymax')))
    return {'boxes': boxes, 'labels': labels}

def evaluate_image(config):
    """Run the trained Fast R-CNN model over the test split, draw and save
    the predictions, and report per-image IoU, average FPS, and the mean of
    the per-image average IoUs.

    Args:
        config: dict-like configuration providing CLASSES, PRED_PLOT_PATH,
            BEST_MODEL_PATH_TO_PRED, N_CLASSES, SPLIT_FILE_IDX and DEVICE.

    Side effects: creates PRED_PLOT_PATH if missing, writes one annotated
    .jpg per test image there, and shows each prediction in a cv2 window.
    """
    classes = config['CLASSES']
    pred_image_path = config['PRED_PLOT_PATH']
    # Replace this path if you want to evaluate your own trained model.
    best_model_path = config['BEST_MODEL_PATH_TO_PRED']
    num_classes = config['N_CLASSES']
    split_datasets_file_idx = config['SPLIT_FILE_IDX']
    device = config['DEVICE']

    if not os.path.exists(pred_image_path):
        # Create the output directory if it does not exist yet.
        os.makedirs(pred_image_path)
        print(f"Directory '{pred_image_path}' was created.")
    else:
        print(f"Directory '{pred_image_path}' already exists.")

    # One random color per class so every category is drawn distinctly.
    COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

    # Load the best checkpoint and move the model to the configured device.
    model = FastRCNN_model(num_classes=num_classes)
    best_model = torch.load(best_model_path, map_location=device)
    model.load_state_dict(best_model['model_state_dict'])
    model.to(device).eval()

    test_images, test_labels = load_test_set(split_datasets_file_idx)
    print(f"Test instances: {len(test_images)}")

    # Any detection scoring below this threshold is discarded.
    detection_threshold = 0.8

    frame_count = 0  # total number of images iterated through
    total_fps = 0    # accumulated FPS over all images

    avg_ious = []    # per-image average IoU of kept detections
    for i in range(len(test_images)):
        # Image file name (without extension) for naming the saved output.
        image_name = test_images[i].split(os.path.sep)[-1].split('.')[0]
        image = cv2.imread(test_images[i])
        ground_truth = parse_annotation(test_labels[i], classes)
        orig_image = image.copy()

        # Preprocess: BGR -> RGB, scale to [0, 1], HWC -> CHW, add batch dim.
        image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        # BUGFIX: use .to(device) instead of the hard-coded .cuda() so the
        # evaluation also works on CPU-only machines.
        image = torch.tensor(image, dtype=torch.float).to(device)
        image = torch.unsqueeze(image, 0)

        start_time = time.time()
        with torch.no_grad():
            outputs = model(image)
        end_time = time.time()

        # Track inference speed for the final average-FPS report.
        fps = 1 / (end_time - start_time)
        total_fps += fps
        frame_count += 1

        # Move all detections to the CPU for NumPy post-processing.
        outputs = [{k: v.to('cpu') for k, v in t.items()} for t in outputs]
        # Carry on only if the model produced any boxes at all.
        if len(outputs[0]['boxes']) != 0:
            pred_boxes = outputs[0]['boxes'].data.numpy()
            pred_scores = outputs[0]['scores'].data.numpy()
            pred_labels = outputs[0]['labels'].data.numpy()

            # BUGFIX: filter boxes and labels with ONE shared mask so their
            # indices stay aligned even if detections are not sorted by score
            # (the original filtered boxes but indexed unfiltered labels).
            keep = pred_scores >= detection_threshold
            kept_boxes = pred_boxes[keep]
            kept_labels = pred_labels[keep]
            draw_boxes = kept_boxes.astype(np.int32)
            pred_classes = [classes[lbl] for lbl in kept_labels]

            # Per-detection IoU: best overlap with a same-class ground truth.
            iou_values = []
            for box, label in zip(kept_boxes, kept_labels):
                gt_boxes = [gt_box for gt_box, gt_label
                            in zip(ground_truth['boxes'], ground_truth['labels'])
                            if gt_label == label]
                box_ious = [calculate_iou(box, gt_box) for gt_box in gt_boxes]
                # 0 when no same-class ground truth exists (false positive).
                iou_values.append(max(box_ious) if box_ious else 0)

            if iou_values:
                avg_iou = sum(iou_values) / len(iou_values)
                avg_ious.append(avg_iou)
                print(f"Image {i + 1}, ave IoUs per image: {avg_iou}")
            else:
                print(f"Image {i + 1}, No detections above threshold.")

            # Draw the bounding boxes and write the class name on top.
            for box, class_name in zip(draw_boxes, pred_classes):
                color = COLORS[classes.index(class_name)]
                cv2.rectangle(orig_image,
                              (int(box[0]), int(box[1])),
                              (int(box[2]), int(box[3])),
                              color, 2)
                cv2.putText(orig_image, class_name,
                            (int(box[0]), int(box[1] - 5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, color,
                            2, lineType=cv2.LINE_AA)
            # (Removed dead code: an RGB copy was computed and None-checked
            # but never displayed; cv2.cvtColor cannot return None.)
            cv2.imshow('Prediction', orig_image)
            cv2.waitKey(1)
            cv2.imwrite(os.path.join(pred_image_path, f'{image_name}.jpg'),
                        orig_image)
        print(f"Image {i + 1} done...")
        print('-' * 50)

    print('TEST PREDICTIONS COMPLETE')
    cv2.destroyAllWindows()

    # BUGFIX: guard against an empty test split (ZeroDivisionError).
    avg_fps = total_fps / frame_count if frame_count else 0.0
    print(f"Average FPS: {avg_fps:.3f}")
    # NOTE(review): this value is the mean of per-image average IoUs, not a
    # true mAP (no precision/recall integration) -- consider relabeling.
    print(f"mAP: {np.average(avg_ious) if avg_ious else 0.0}")

# Imported here (not at the top of the file) to preserve the original
# module load order.
from config import get_config_from_xml
from data_proc.dataset_prepare import prepare_dataset


def _main():
    """Entry point: load the XML config, prepare the dataset, evaluate."""
    config_path = "configurations/config_fastrcnn_bce_epoch_10.xml"
    config = get_config_from_xml(config_path)
    prepare_dataset(config)
    evaluate_image(config)


if __name__ == '__main__':
    _main()