import config.yolov3_config_voc as cfg
import os
import shutil
from eval import voc_eval
from utils.datasets import *
from utils.gpu import *
import cv2
import numpy as np
from utils.data_augment import *
import torch
from utils.tools import *
from tqdm import tqdm
from utils.visualize import *
import torch.nn.functional as F


class PLNEvaluator(object):
    """
    Evaluator dedicated to the PLN network.

    Converts the network's point predictions into bounding-box predictions
    and computes per-class VOC APs on the VOC2007 test split.
    """
    def __init__(self, model, visiual=True):
        """
        :param model: torch module, already placed on its target device.
        :param visiual: when True, annotated detection images are saved
                        to ``data/visual_results``.
        """
        self.classes = cfg.DATA["CLASSES"]
        self.pred_result_path = os.path.join(cfg.PROJECT_PATH, 'data', 'results')
        self.val_data_path = os.path.join(cfg.DATA_PATH, 'VOC2007')
        self.conf_thresh = 0.00001  # keep almost everything; NMS does the pruning
        self.nms_thresh = cfg.TEST["NMS_THRESH"]
        self.val_shape = cfg.TEST["TEST_IMG_SIZE"]

        self.__visiual = visiual
        self.__visual_imgs = 0  # count of images visualized so far

        # Directory where annotated result images are written.
        if self.__visiual:
            self.visual_dir = os.path.join(cfg.PROJECT_PATH, 'data', 'visual_results')
            os.makedirs(self.visual_dir, exist_ok=True)

        self.model = model
        self.device = next(model.parameters()).device

        # PLN-specific parameters
        self.S = 14  # grid size
        self.num_classes = len(self.classes)

    def APs_voc(self, multi_test=False, flip_test=False):
        """
        Run detection over the VOC2007 test list, write the per-class
        comp4-format result files, and return the per-class APs.

        :param multi_test: evaluate at multiple input scales.
        :param flip_test: additionally evaluate horizontally flipped images.
        :return: result of ``__calc_APs`` (per-class AP values).
        """
        img_inds_file = os.path.join(self.val_data_path, 'ImageSets', 'Main', 'test.txt')
        with open(img_inds_file, 'r') as f:
            img_inds = [line.strip() for line in f.readlines()]

        # Start from a clean result directory so stale detections from a
        # previous run cannot pollute the AP computation.
        if os.path.exists(self.pred_result_path):
            shutil.rmtree(self.pred_result_path)
        # makedirs (was mkdir) so a missing parent 'data' dir is not fatal.
        os.makedirs(self.pred_result_path)

        # Cap the number of validation images to speed up evaluation
        # (at most 200 images).
        max_val_images = min(200, len(img_inds))
        img_inds = img_inds[:max_val_images]

        for img_ind in tqdm(img_inds):
            img_path = os.path.join(self.val_data_path, 'JPEGImages', img_ind + '.jpg')
            img = cv2.imread(img_path)
            bboxes_prd = self.get_bbox(img, multi_test, flip_test)

            if bboxes_prd.shape[0] != 0 and self.__visiual:
                boxes = bboxes_prd[..., :4]
                class_inds = bboxes_prd[..., 5].astype(np.int32)
                scores = bboxes_prd[..., 4]

                visualize_boxes(image=img, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.classes)

                # Save the annotated detection image ...
                save_path = os.path.join(self.visual_dir, f"{img_ind}_result.jpg")
                cv2.imwrite(save_path, img)

                # ... and the untouched original next to it for comparison.
                orig_path = os.path.join(self.visual_dir, f"{img_ind}_original.jpg")
                cv2.imwrite(orig_path, cv2.imread(img_path))

                self.__visual_imgs += 1

            # Append every detection to its class's comp4 result file:
            # "<image_id> <score> <xmin> <ymin> <xmax> <ymax>".
            for bbox in bboxes_prd:
                coor = np.array(bbox[:4], dtype=np.int32)
                score = bbox[4]
                class_ind = int(bbox[5])

                class_name = self.classes[class_ind]
                score = '%.4f' % score
                xmin, ymin, xmax, ymax = map(str, coor)
                s = ' '.join([img_ind, score, xmin, ymin, xmax, ymax]) + '\n'

                with open(os.path.join(self.pred_result_path, 'comp4_det_test_' + class_name + '.txt'), 'a') as f:
                    f.write(s)

        return self.__calc_APs()

    def get_bbox(self, img, multi_test=False, flip_test=False):
        """
        Predict boxes for one BGR image, optionally over multiple scales
        and horizontal flips, and apply NMS.

        :return: ndarray of shape (N, 6): x1, y1, x2, y2, score, class.
        """
        if multi_test:
            test_input_sizes = range(320, 640, 96)
            bboxes_list = []
            for test_input_size in test_input_sizes:
                valid_scale = (0, np.inf)
                bboxes_list.append(self.__predict(img, test_input_size, valid_scale))
                if flip_test:
                    bboxes_flip = self.__predict(img[:, ::-1], test_input_size, valid_scale)
                    # Mirror the x-coordinates back into the original frame.
                    bboxes_flip[:, [0, 2]] = img.shape[1] - bboxes_flip[:, [2, 0]]
                    bboxes_list.append(bboxes_flip)
            bboxes = np.row_stack(bboxes_list)
        else:
            bboxes = self.__predict(img, self.val_shape, (0, np.inf))

        bboxes = nms(bboxes, self.conf_thresh, self.nms_thresh)
        return bboxes

    def __predict(self, img, test_shape, valid_scale):
        """
        Run the model on one image at a single scale and return the
        filtered boxes mapped back to original-image coordinates.
        """
        org_img = np.copy(img)
        org_h, org_w, _ = org_img.shape

        img = self.__get_img_tensor(img, test_shape).to(self.device)
        self.model.eval()
        with torch.no_grad():
            # model.eval() was just called above, so the model is always in
            # inference mode here and returns a (predictions, extra) tuple.
            # (The old check of self.model.training was dead code: the
            # attribute is guaranteed False at this point.)
            pln_output, _ = self.model(img)

        # Convert the PLN point output into candidate bounding boxes.
        pred_bbox = self.__convert_pln_to_bbox(pln_output, test_shape)
        bboxes = self.__convert_pred(pred_bbox, test_shape, (org_h, org_w), valid_scale)

        # Lightweight debug hint when filtering removed every candidate.
        if len(bboxes) == 0 and len(pred_bbox) > 0:
            print(f"Warning: {len(pred_bbox)} predictions filtered out")

        return bboxes

    def __convert_pln_to_bbox(self, pln_output, img_size):
        """Delegate point-to-box conversion to the shared converter module."""
        from eval.bbox_converter import generate_detection_boxes
        return generate_detection_boxes(pln_output, img_size)

    def _filter_duplicate_predictions(self, predictions, iou_threshold=0.5):
        """
        Greedy per-class duplicate suppression.

        :param predictions: list of dicts with keys 'bbox' (cx, cy, w, h),
                            'conf' and 'class_id'. Sorted in place by 'conf'.
        :param iou_threshold: IoU above which two same-class boxes are
                              considered duplicates.
        :return: list of [cx, cy, w, h, conf, class_id] rows, highest
                 confidence kept per duplicate group.
        """
        if len(predictions) <= 1:
            return [[p['bbox'][0], p['bbox'][1], p['bbox'][2], p['bbox'][3], p['conf'], p['class_id']]
                   for p in predictions]

        # Highest confidence first so the greedy pass keeps the best box.
        predictions.sort(key=lambda x: x['conf'], reverse=True)

        filtered = []
        for pred in predictions:
            is_duplicate = False
            for existing in filtered:
                # Same class and high overlap -> duplicate of a kept box.
                iou = self._calculate_iou(pred['bbox'], existing[:4])
                if iou > iou_threshold and pred['class_id'] == existing[5]:
                    is_duplicate = True
                    break

            if not is_duplicate:
                filtered.append([pred['bbox'][0], pred['bbox'][1], pred['bbox'][2], pred['bbox'][3],
                               pred['conf'], pred['class_id']])

        return filtered

    def _calculate_iou(self, box1, box2):
        """
        Intersection-over-union of two boxes given as (cx, cy, w, h).

        :return: IoU in [0, 1]; 0.0 when the boxes do not overlap or the
                 union area is zero.
        """
        x1, y1, w1, h1 = box1
        x2, y2, w2, h2 = box2

        # Convert center/size to corner (xyxy) coordinates.
        x1_min, y1_min = x1 - w1 / 2, y1 - h1 / 2
        x1_max, y1_max = x1 + w1 / 2, y1 + h1 / 2
        x2_min, y2_min = x2 - w2 / 2, y2 - h2 / 2
        x2_max, y2_max = x2 + w2 / 2, y2 + h2 / 2

        # Intersection rectangle.
        inter_x_min = max(x1_min, x2_min)
        inter_y_min = max(y1_min, y2_min)
        inter_x_max = min(x1_max, x2_max)
        inter_y_max = min(y1_max, y2_max)

        if inter_x_max <= inter_x_min or inter_y_max <= inter_y_min:
            return 0.0

        inter_area = (inter_x_max - inter_x_min) * (inter_y_max - inter_y_min)
        box1_area = w1 * h1
        box2_area = w2 * h2
        union_area = box1_area + box2_area - inter_area

        return inter_area / union_area if union_area > 0 else 0.0

    def __get_img_tensor(self, img, test_shape):
        """Resize to (test_shape, test_shape), HWC->CHW, add batch dim."""
        img = Resize((test_shape, test_shape), correct_box=False)(img, None).transpose(2, 0, 1)
        return torch.from_numpy(img[np.newaxis, ...]).float()

    def __convert_pred(self, pred_bbox, test_input_size, org_img_shape, valid_scale):
        """
        Filter predicted boxes and map them from the letterboxed test
        resolution back to original-image coordinates.

        :param pred_bbox: (N, 6) array: cx, cy, w, h, conf, class.
        :param valid_scale: (min, max) allowed sqrt(box area) range.
        :return: (M, 6) array: x1, y1, x2, y2, conf, class.
        """
        if pred_bbox.shape[0] == 0:
            return np.zeros((0, 6))

        pred_coor = xywh2xyxy(pred_bbox[:, :4])
        pred_conf = pred_bbox[:, 4]
        pred_cls = pred_bbox[:, 5]

        # Undo the letterbox: remove padding, then rescale to original size.
        org_h, org_w = org_img_shape
        resize_ratio = min(1.0 * test_input_size / org_w, 1.0 * test_input_size / org_h)
        dw = (test_input_size - resize_ratio * org_w) / 2
        dh = (test_input_size - resize_ratio * org_h) / 2
        pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
        pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio

        # Clip boxes to the original image bounds.
        pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
                                    np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)

        # Zero out degenerate boxes (min corner past max corner after clip).
        invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
        pred_coor[invalid_mask] = 0

        # Drop boxes whose scale (sqrt of area) is outside the valid range.
        bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
        scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))

        # Drop boxes whose confidence is below the threshold.
        score_mask = pred_conf > self.conf_thresh

        mask = np.logical_and(scale_mask, score_mask)

        coors = pred_coor[mask]
        scores = pred_conf[mask]
        classes = pred_cls[mask]

        bboxes = np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)

        return bboxes

    def __calc_APs(self, iou_thresh=0.5, use_07_metric=False):
        """
        Compute the per-class AP values from the written result files.

        Note: ``iou_thresh`` and ``use_07_metric`` are kept for interface
        compatibility but are currently unused — the computation is fully
        delegated to ``compute_class_ap_metrics`` (which takes no args).
        """
        cachedir = os.path.join(self.pred_result_path, 'cache')
        from eval.result_generator import compute_class_ap_metrics
        APs = compute_class_ap_metrics()
        # Remove the annotation cache so stale data cannot leak into the
        # next evaluation run.
        if os.path.exists(cachedir):
            shutil.rmtree(cachedir)

        return APs


class Evaluator(PLNEvaluator):
    """Thin backward-compatible alias for PLNEvaluator.

    Kept so existing code that instantiates ``Evaluator`` keeps working;
    all behavior lives in the parent class.
    """

    def __init__(self, model, visiual=True):
        # Delegate the entire setup to the PLN evaluator, then announce.
        PLNEvaluator.__init__(self, model, visiual)
        print("PLN Evaluator initialized")