# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import time
import os
import cv2
from PIL import Image, ImageDraw, ImageFont
import torch
from tqdm import tqdm
import numpy as np
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
from .bbox_aug import im_detect_bbox_aug


def compute_on_dataset(model, data_loader, device, bbox_aug, timer=None):
    """Run the model over every batch of `data_loader` and collect predictions.

    Args:
        model: detection model; switched to eval mode before inference.
        data_loader: iterable yielding (images, targets, image_ids) batches.
        device (torch.device): device on which to run the model.
        bbox_aug (bool): if True, use test-time bbox augmentation.
        timer (Timer, optional): accumulates pure model-inference time.

    Returns:
        dict: image_id -> prediction, with every prediction moved to CPU.
    """
    model.eval()
    results_dict = {}
    cpu_device = torch.device("cpu")
    for batch in tqdm(data_loader):
        images, targets, image_ids = batch
        with torch.no_grad():
            if timer:
                timer.tic()
            if bbox_aug:
                output = im_detect_bbox_aug(model, images, device)
            else:
                output = model(images.to(device))
            if timer:
                # CUDA kernels launch asynchronously; synchronize so toc()
                # measures the real inference time, not just launch time.
                if device.type != "cpu":
                    torch.cuda.synchronize()
                timer.toc()
            output = [o.to(cpu_device) for o in output]
        results_dict.update(
            {img_id: result for img_id, result in zip(image_ids, output)}
        )
    return results_dict


def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    """Gather per-process prediction dicts onto the main process and flatten.

    Args:
        predictions_per_gpu (dict): image_id -> prediction from this process.

    Returns:
        list or None: predictions ordered by image id on the main process;
        None on every non-main process.
    """
    all_predictions = all_gather(predictions_per_gpu)
    if not is_main_process():
        return
    # Merge the list of per-process dicts into one image_id -> prediction map.
    predictions = {}
    for p in all_predictions:
        predictions.update(p)
    # The keys are dataset indices; sort them to reconstruct list order.
    image_ids = list(sorted(predictions.keys()))
    if not image_ids:
        # Guard: image_ids[-1] below would raise IndexError on an empty set
        # (e.g. an empty dataset); return an empty list instead of crashing.
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.warning("No predictions were gathered from any process.")
        return []
    if len(image_ids) != image_ids[-1] + 1:
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        logger.warning(
            "Number of images that were gathered from multiple processes is not "
            "a contiguous set. Some images might be missing from the evaluation"
        )

    # convert to a list indexed by image id
    predictions = [predictions[i] for i in image_ids]
    return predictions

def denorm_to_rgb_uint8(image_chw,
                        mean=(102.9801, 115.9465, 122.7717),
                        std=(1.0, 1.0, 1.0),
                        to_bgr255=True):
    """Undo BGR-255 normalization and return a displayable RGB uint8 image.

    Args:
        image_chw: torch.Tensor or numpy.ndarray of shape [3, H, W], the
            output of a Normalize(to_bgr255=True)-style transform.
        mean: per-channel means used by the forward normalization.
        std: per-channel stds used by the forward normalization.
        to_bgr255: when True, the input channels are BGR and are swapped
            back to RGB for display/saving.

    Returns:
        numpy.ndarray of shape [H, W, 3], dtype uint8, values in [0, 255].
    """
    tensor = torch.from_numpy(image_chw) if isinstance(image_chw, np.ndarray) else image_chw

    assert tensor.ndim == 3 and tensor.shape[0] == 3, f"Expect CHW with C=3, got {tuple(tensor.shape)}"
    tensor = tensor.detach().to(dtype=torch.float32, device="cpu").clone()

    mean_t = torch.tensor(mean, dtype=tensor.dtype).view(3, 1, 1)
    std_t = torch.tensor(std, dtype=tensor.dtype).view(3, 1, 1)

    # Reverse the normalization back to the 0-255 range, then move the
    # channel axis last (CHW -> HWC) as image libraries expect.
    restored = (tensor * std_t + mean_t).permute(1, 2, 0)

    # Most viewers/savers expect RGB, so flip the BGR channel order.
    if to_bgr255:
        restored = restored[..., [2, 1, 0]]

    # Clip to the valid pixel range and convert to uint8.
    return restored.clamp(0, 255).byte().numpy()


def visualize_and_save(image_data, predictions, ground_truths, labels, output_path):
    """Draw GT boxes (blue) and predicted boxes (yellow) on an image and save it.

    Args:
        image_data: normalized CHW image tensor/array; denormalized here
            via `denorm_to_rgb_uint8` before drawing.
        predictions: list of [box, score, label] entries, where box is
            [x1, y1, x2, y2]; may be empty.
        ground_truths: iterable of GT boxes as (x1, y1, x2, y2).
        labels: GT class labels, aligned with `ground_truths`.
        output_path: destination file path for the rendered image.
    """
    canvas = Image.fromarray(denorm_to_rgb_uint8(image_data))
    painter = ImageDraw.Draw(canvas)
    default_font = ImageFont.load_default()

    # Ground-truth boxes in blue.
    for box, gt_label in zip(ground_truths, labels):
        left, top, right, bottom = box
        painter.rectangle([left, top, right, bottom], outline="blue", width=2)
        painter.text((left, top), f"GT: {gt_label}", fill="blue", font=default_font)

    # Predicted boxes in yellow (only when any predictions exist).
    if not predictions:
        print("No predictions found, only GT boxes will be drawn.")
    else:
        for box, score, pred_label in predictions:
            left, top, right, bottom = box
            painter.rectangle([left, top, right, bottom], outline="yellow", width=2)
            painter.text((left, top), f"Pre: {pred_label} ({score:.2f})", fill="yellow", font=default_font)

    # Persist the rendered image via PIL.
    canvas.save(output_path)
    print(f"Saved image with GT and prediction boxes to {output_path}")


def inference(
        model,
        data_loader,
        dataset_name,
        iou_types=("bbox",),
        box_only=False,
        bbox_aug=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run (possibly distributed) inference on a dataset, optionally dump
    per-image visualizations, then evaluate the predictions.

    Args:
        model: detection model to evaluate.
        data_loader: DataLoader over the evaluation dataset.
        dataset_name (str): human-readable dataset name, used for logging.
        iou_types (tuple): IoU types forwarded to `evaluate` (e.g. "bbox").
        box_only (bool): forwarded to `evaluate`.
        bbox_aug (bool): enable test-time bbox augmentation.
        device (str or torch.device): inference device.
        expected_results: forwarded to `evaluate` (expected-metric checks).
        expected_results_sigma_tol: forwarded to `evaluate`.
        output_folder (str, optional): if set, `predictions.pth` and a
            `res_img/` folder of visualizations are written here.

    Returns:
        The result of `evaluate(...)` on the main process; None on every
        non-main process (they return early after gathering).
    """
    # convert to a torch.device for efficiency
    device = torch.device(device)
    num_devices = get_world_size()
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    dataset = data_loader.dataset
    logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))

    # total_timer covers the whole loop (incl. data loading); inference_timer
    # only accumulates the model forward passes inside compute_on_dataset.
    total_timer = Timer()
    inference_timer = Timer()
    total_timer.tic()

    predictions = compute_on_dataset(model, data_loader, device, bbox_aug, inference_timer)
    # wait for all processes to complete before measuring the time
    synchronize()
    total_time = total_timer.toc()
    total_time_str = get_time_str(total_time)
    logger.info(
        "Total run time: {} ({} s / img per device, on {} devices)".format(
            total_time_str, total_time * num_devices / len(dataset), num_devices
        )
    )
    total_infer_time = get_time_str(inference_timer.total_time)
    logger.info(
        "Model inference time: {} ({} s / img per device, on {} devices)".format(
            total_infer_time,
            inference_timer.total_time * num_devices / len(dataset),
            num_devices,
        )
    )

    # Gather all per-GPU prediction dicts into one ordered list; only the
    # main process receives a non-None result.
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return

    if output_folder:
        torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
        # Render and save per-image visualizations (GT + predicted boxes).
        os.makedirs(os.path.join(output_folder, "res_img"), exist_ok=True)
        # NOTE(review): zip pairs dataset items with predictions by position;
        # assumes the gathered predictions are in dataset order - confirm.
        for idx, (image, prediction) in enumerate(zip(data_loader.dataset, predictions)):
            if isinstance(image, tuple):
                image_data = image[0]  # image tensor (CHW)
                ground_truths = image[1]  # GT boxes container (BoxList-style)
                gt_boxes = ground_truths.bbox
                gt_labels = ground_truths.get_field("labels")
            else:
                # Dict-style dataset item; keys assumed per this codebase's
                # dataset convention - TODO confirm against the dataset class.
                image_data = image["image"]
                ground_truths = image["annotations"]
                gt_boxes = ground_truths["boxes"]
                gt_labels = ground_truths["labels"]

            H, W = image_data.shape[1], image_data.shape[2]
            pred_list = []
            if hasattr(prediction, "resize") and prediction.bbox.shape[0] > 0:
                # Rescale predicted boxes to the visualized image size;
                # resize takes (width, height).
                pred_on_vis = prediction.resize((W, H))
                print(f"[DEBUG] vis_size=(W={W},H={H}), pred_orig_size={prediction.size}, pred_vis_size={pred_on_vis.size}")
                pred_boxes  = pred_on_vis.bbox
                pred_scores = pred_on_vis.get_field('scores')
                pred_labels = pred_on_vis.get_field('labels')
                for box, score, label in zip(pred_boxes, pred_scores, pred_labels):
                    x1, y1, x2, y2 = [int(v) for v in box.tolist()]
                    pred_list.append([[x1, y1, x2, y2], float(score), int(label)])
            else:
                print("[WARN] prediction is empty or has no resize method; skipping boxes.")

            output_path = os.path.join(output_folder, "res_img", f"inference_{idx}.jpg")
            visualize_and_save(image_data, pred_list, gt_boxes, gt_labels, output_path)

    extra_args = dict(
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )

    return evaluate(dataset=dataset,
                    predictions=predictions,
                    output_folder=output_folder,
                    **extra_args)
