# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import os
import time

import torch
import torch.distributed as dist
from tqdm import tqdm

from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.utils.comm import get_world_size, synchronize
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
from maskrcnn_benchmark.engine.inference import inference, visualize_and_save
from maskrcnn_benchmark.structures.image_list import ImageList
from apex import amp

def reduce_loss_dict(loss_dict):
    """
    Reduce a dict of loss tensors across all processes so that rank 0 ends
    up with the averaged values.

    In single-process runs the input dict is returned unchanged. Otherwise
    the losses are stacked (in sorted-key order for a deterministic layout
    across ranks), summed onto rank 0 via ``dist.reduce``, and divided by
    the world size on rank 0 only — the accumulation happens only there.

    Returns a dict with the same keys as ``loss_dict``.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        keys = sorted(loss_dict)
        stacked = torch.stack([loss_dict[key] for key in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # Only the destination rank holds the accumulated sum, so only
            # it divides by world_size to obtain the average.
            stacked /= world_size
        return dict(zip(keys, stacked))

def pack_images_targets(images, targets, divalign_on, anchor_view, device):
    """
    Pack a multi-view (B, A, C, H, W) ImageList/targets batch into a
    standard ImageList:

    - divalign_on=True : flatten to (B*A, C, H, W); targets are flattened
      in the same order.
    - divalign_on=False: keep only ``anchor_view``, preserving the
      ImageList wrapper with correct image_sizes.
    - Single-view / 4D input: simply move everything to ``device``.
    """
    multi_view_batch = hasattr(images, "tensors") and images.tensors.dim() == 5
    if not multi_view_batch:
        # Plain 4D batch (or non-ImageList input): nothing to repack.
        return images.to(device), [t.to(device) for t in targets]

    B, A, C, H, W = images.tensors.shape
    per_image_is_list = isinstance(targets[0], (list, tuple))

    if divalign_on and per_image_is_list:
        flattened = images.tensors.view(B * A, C, H, W)
        sizes = []
        flat_targets = []
        for view_targets in targets:           # one entry per image (len B)
            assert len(view_targets) == A
            for tgt in view_targets:           # one entry per view (len A)
                w, h = tgt.size                # BoxList.size is (W, H)
                sizes.append((h, w))           # ImageList expects (H, W)
                flat_targets.append(tgt)
        packed = ImageList(flattened, sizes).to(device)
        return packed, [t.to(device) for t in flat_targets]

    # Anchor-view path: pick one view per image.
    anc = int(anchor_view)
    if not (0 <= anc < A):
        anc = 0
    selected = images.tensors[:, anc, ...]     # (B, C, H, W)
    if per_image_is_list:
        # Multi-view targets: keep the anc-th view of each image.
        sizes = [(vt[anc].size[1], vt[anc].size[0]) for vt in targets]  # (H, W)
        out_targets = [vt[anc].to(device) for vt in targets]
    else:
        sizes = [(t.size[1], t.size[0]) for t in targets]               # (H, W)
        out_targets = [t.to(device) for t in targets]
    return ImageList(selected, sizes).to(device), out_targets


def _extract_eval_score(eval_result):
    """
    从 inference(...) 的返回值里尽可能提取一个“更大更好”的单标量作为best判据。
    兼容 VOC（mAP）与 COCO（bbox->AP）等常见格式。
    返回: (score: float | None, name: str)
    """
    # 典型：直接就是 {'mAP': 0.123, 'nodule': 0.456, ...}
    if isinstance(eval_result, dict):
        for k in ["mAP", "map", "MAP", "ap", "AP"]:
            if k in eval_result and isinstance(eval_result[k], (int, float)):
                return float(eval_result[k]), k
        # COCO 风格：{'bbox': {'AP': 0.372, 'AP50': ...}, ...}
        if "bbox" in eval_result and isinstance(eval_result["bbox"], dict):
            for k in ["AP", "mAP", "map"]:
                if k in eval_result["bbox"] and isinstance(eval_result["bbox"][k], (int, float)):
                    return float(eval_result["bbox"][k]), f"bbox.{k}"
        # 其它：在所有数值项里取一个代表（不推荐，但兜底）
        nums = []
        def collect(d, prefix=""):
            for kk, vv in d.items():
                if isinstance(vv, (int, float)):
                    nums.append((prefix+kk, float(vv)))
                elif isinstance(vv, dict):
                    collect(vv, prefix+kk+".")
        collect(eval_result)
        if nums:
            # 取名字里带 'AP' 或 'mAP' 的优先
            cand = [x for x in nums if any(t in x[0].lower() for t in ["map", "ap"])]
            key, val = (cand[0] if cand else nums[0])
            return val, key
        return None, "none"

    # 有些实现会返回 (results, ...) 之类的元组
    if isinstance(eval_result, (list, tuple)) and eval_result:
        return _extract_eval_score(eval_result[0])

    return None, "none"


def save_best_val_visualization(
    model,
    data_loader_val,
    checkpointer,
    cfg,
    iteration: int,
    best_val_loss: float,
    divalign_on: bool,
    anchor_view: int,
    device,
):
    """
    When a new best model is saved, take one small validation batch, run the
    model on it, and save a visualization of the first image's predictions.

    The file is named ``best_val_loss_{loss}_iter{iter}_{imgid}.jpg`` and is
    written under ``best_model_viz/`` inside the checkpoint directory.

    Args:
        model: detector to run; its train/eval mode is restored afterwards.
        data_loader_val: validation loader (skipped if None).
        checkpointer: provides ``save_dir`` (falls back to cfg.OUTPUT_DIR).
        cfg: experiment config.
        iteration: current training iteration (used in the filename).
        best_val_loss: score embedded in the filename.
        divalign_on / anchor_view: multi-view packing flags, passed through
            to ``pack_images_targets`` so packing matches training.
        device: device the batch is moved to.
    """
    if data_loader_val is None:
        print('data_loader_val is None')
        return

    # 1) Grab one small validation batch.
    try:
        sample_images, sample_targets, sample_ids = next(iter(data_loader_val))
    except StopIteration:
        print('sample_images is None')
        return

    # 2) Pack exactly the same way training/validation does.
    sample_images, sample_targets = pack_images_targets(
        sample_images, sample_targets, divalign_on, anchor_view, device
    )

    # 3) Forward pass (eval mode + no_grad); restore train mode afterwards.
    was_training = model.training
    model.eval()
    with torch.no_grad():
        preds = model(sample_images)
    if was_training:
        model.train()

    # 4) Use only the first image: build the four arguments that
    #    visualize_and_save expects.
    if not hasattr(sample_images, "tensors"):
        return
    image_data = sample_images.tensors[0].cpu()        # CHW, normalized tensor
    H, W = image_data.shape[1], image_data.shape[2]

    pred0 = preds[0]
    pred_list = []
    # Rescale predicted boxes from original-image coordinates to the
    # visualization size (W, H) before extracting box/score/label.
    if hasattr(pred0, "resize") and pred0.bbox.numel() > 0:
        pred_on_vis = pred0.resize((W, H))             # crucial: align boxes with drawn image
        for box, score, label in zip(
            pred_on_vis.bbox,
            pred_on_vis.get_field("scores"),
            pred_on_vis.get_field("labels"),
        ):
            x1, y1, x2, y2 = [int(v) for v in box.tolist()]
            pred_list.append([[x1, y1, x2, y2], float(score), int(label)])

    tgt0 = sample_targets[0].to("cpu")
    gt_boxes = tgt0.bbox.tolist() if tgt0.bbox.numel() > 0 else []
    gt_labels = tgt0.get_field("labels").tolist() if tgt0.has_field("labels") else []

    # 5) Build the output path/filename (uses the module-level ``os`` import;
    #    the previous redundant function-local ``import os`` was removed).
    save_dir = getattr(checkpointer, "save_dir", cfg.OUTPUT_DIR)
    viz_dir = os.path.join(save_dir, "best_model_viz")
    os.makedirs(viz_dir, exist_ok=True)
    sid = str(sample_ids[0]) if isinstance(sample_ids, (list, tuple)) and len(sample_ids) > 0 else "sample"
    out_name = f"best_val_loss_{best_val_loss:.6f}_iter{iteration}_{sid}.jpg"
    out_path = os.path.join(viz_dir, out_name)

    # 6) Reuse inference.py's visualization (denormalize + draw + save).
    visualize_and_save(image_data, pred_list, gt_boxes, gt_labels, out_path)


def do_train(
    cfg,
    model,
    data_loader,
    data_loader_val,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    test_period,
    arguments,
):
    """
    Run the main training loop.

    Iterates over ``data_loader`` starting at ``arguments["iteration"]``,
    performing forward/backward/optimizer/scheduler steps per batch and
    logging smoothed metrics every 20 iterations. Every ``test_period``
    iterations it runs ``inference`` on ``data_loader_val``, extracts a
    scalar score via ``_extract_eval_score``, and saves ``model_best`` (plus
    one visualization) when the score improves. ``model_final`` is saved at
    the last iteration.

    Args:
        cfg: experiment config (reads MODEL.MASK_ON, MODEL.KEYPOINT_ON,
            MODEL.DEVICE, MODEL.DIVALIGN.*, DATASETS.TEST).
        model: detector to train; left in train mode between validations.
        data_loader: training loader; ``len(data_loader)`` defines max_iter.
        data_loader_val: validation loader, or None to disable evaluation.
        optimizer: torch optimizer stepped once per batch.
        scheduler: LR scheduler stepped once per batch.
        checkpointer: object providing ``save(name, **arguments)``.
        device: device batches are moved to.
        checkpoint_period: kept for interface compatibility; the periodic
            checkpoint call below is currently commented out.
        test_period: validate every this many iterations (<= 0 disables).
        arguments: mutable state dict persisted into checkpoints; keys used
            here: "iteration", "best_eval", "best_val_loss".
    """
    # ---- safely read the DIVALIGN switches from the config ----
    def _divalign_flags(_cfg):
        # Fall back to (False, 0) when the config lacks MODEL.DIVALIGN.
        try:
            enable = bool(_cfg.MODEL.DIVALIGN.ENABLE)
            anchor = int(getattr(_cfg.MODEL.DIVALIGN, "ANCHOR_VIEW", 0))
        except Exception:
            enable, anchor = False, 0
        return enable, anchor
    divalign_on, anchor_view = _divalign_flags(cfg)
    best_val_loss = float(arguments.get("best_val_loss", float("inf")))
    # Debugging aids for locating CUDA/autograd errors.
    # NOTE(review): anomaly detection and synchronous CUDA launches slow
    # training considerably — presumably left on intentionally for debugging;
    # confirm before long production runs.
    torch.autograd.set_detect_anomaly(True)
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()

    # IoU types evaluated during validation, extended by the active heads.
    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints",)
    dataset_names = cfg.DATASETS.TEST

    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        # Skip degenerate batches where any image has no annotations.
        if any(len(target) < 1 for target in targets):
            logger.error(f"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}" )
            continue
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration
        images, targets = pack_images_targets(images, targets, divalign_on, anchor_view, device)

        loss_dict = model(images, targets)
        # Normalize every loss entry to a 0-dim float tensor so all entries
        # can be stacked and summed uniformly below.
        loss_dict_scalar = {}
        for k, v in loss_dict.items():
            if isinstance(v, torch.Tensor):
                if v.numel() == 0:
                    # Empty tensor: treat as a zero loss of matching dtype/device.
                    v = torch.zeros((), device=v.device, dtype=v.dtype)
                elif v.dim() != 0:
                    # Non-scalar loss: reduce to its mean.
                    v = v.mean()
                else:
                    v = v 
                if not torch.is_floating_point(v):
                    v = v.float()
            else:
                # Plain Python number: wrap into a tensor on the target device.
                v = torch.tensor(float(v), device=device)
            loss_dict_scalar[k] = v

        losses = torch.stack(list(loss_dict_scalar.values())).sum()
        assert losses.dim() == 0, f"losses must be scalar, got shape {tuple(losses.shape)}"

        optimizer.zero_grad()
        losses.backward()
        # Clip gradient norm to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)
        optimizer.step()
        scheduler.step()

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict_scalar)
        losses_reduced = torch.stack(list(loss_dict_reduced.values())).sum()
        meters.update(loss=float(losses_reduced.item()), **{k: float(v.item()) for k, v in loss_dict_reduced.items()})

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)

        # ETA based on the smoothed per-iteration time.
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        # Periodic checkpointing is currently disabled:
        # if iteration % checkpoint_period == 0:
        #     checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if data_loader_val is not None and test_period > 0 and iteration % test_period == 0:
            # inference
            eval_result = inference(
                model,
                data_loader_val,
                dataset_name="[Validation]",
                iou_types=iou_types,
                device=cfg.MODEL.DEVICE,
                expected_results=(),
                expected_results_sigma_tol=4,
                output_folder=None,
            )
            synchronize()
            model.train()
            # Parse the evaluation metric into a single scalar.
            score, score_name = _extract_eval_score(eval_result)
            if score is not None:
                logger.info(f"[Validation] {score_name}={score:.6f} (best={arguments.get('best_eval', float('-inf')):.6f})")
                best_eval = float(arguments.get("best_eval", float("-inf")))
                if score > best_eval:
                    arguments["best_eval"] = score
                    checkpointer.save("model_best", **arguments)
                    logger.info(f"[Validation] New best model (by {score_name}) saved with score={score:.6f}")
                    # Save one visualization alongside the best checkpoint.
                    save_best_val_visualization(model, data_loader_val, checkpointer, cfg, iteration, score, divalign_on, anchor_view, device)
            else:
                logger.warning("[Validation] Couldn't extract a scalar eval score from inference result; skip best-check.")
        if iteration == max_iter:
            # NOTE(review): best_val_loss is read at startup but never updated
            # in this loop, so the value persisted here equals the initial one.
            arguments["best_val_loss"] = float(best_val_loss)
            checkpointer.save("model_final", **arguments)

    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )