# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable

import torch

import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator

# NOTE(review): reading marker — train_one_epoch in this training file has been read through.
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Train ``model`` for one full pass over ``data_loader``.

    Per-iteration losses and the learning rate are tracked through a
    ``MetricLogger``; the globally averaged statistics for the epoch are
    returned as a plain dict. Training aborts via ``sys.exit(1)`` if the
    total loss becomes non-finite (NaN/Inf).
    """
    model.train()
    criterion.train()

    logger = utils.MetricLogger(delimiter="  ")
    logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = f'Epoch: [{epoch}]'
    print_freq = 10

    for samples, targets in logger.log_every(data_loader, print_freq, header):
        # Move the batch to the target device (samples already carry a mask).
        samples = samples.to(device)
        targets = [{key: val.to(device) for key, val in tgt.items()} for tgt in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)  # e.g. loss_ce / loss_bbox / loss_giou (+ aux terms)
        weights = criterion.weight_dict
        # Total loss = weighted sum of every criterion term that has a weight.
        total_loss = sum(value * weights[name]
                         for name, value in loss_dict.items() if name in weights)

        # Reduce the individual losses across processes — for logging only.
        reduced = utils.reduce_dict(loss_dict)
        reduced_unscaled = {f'{name}_unscaled': value for name, value in reduced.items()}
        reduced_scaled = {name: value * weights[name]
                          for name, value in reduced.items() if name in weights}
        loss_value = sum(reduced_scaled.values()).item()

        # A non-finite loss means training has diverged: report and abort.
        if not math.isfinite(loss_value):
            print(f"Loss is {loss_value}, stopping training")
            print(reduced)
            sys.exit(1)

        optimizer.zero_grad()
        total_loss.backward()
        if max_norm > 0:
            # Clip the global gradient norm to guard against exploding gradients.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()

        logger.update(loss=loss_value, **reduced_scaled, **reduced_unscaled)
        logger.update(class_error=reduced['class_error'])
        logger.update(lr=optimizer.param_groups[0]["lr"])

    # Make every process see the globally averaged statistics.
    logger.synchronize_between_processes()
    print("Averaged stats:", logger)
    return {name: meter.global_avg for name, meter in logger.meters.items()}

# NOTE(review): reading/debug marker — the evaluate function below has been read through in outline.
@torch.no_grad()  # disable gradient tracking for the whole evaluation pass
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """Evaluate ``model`` on ``data_loader`` and report detection metrics.

    Args:
        model: detection model, called as ``model(samples)``.
        criterion: loss module; during eval it is only used so the same
            losses can be logged, not for optimization.
        postprocessors: dict mapping task name (``'bbox'``, ``'segm'``,
            ``'panoptic'``) to its postprocessing callable.
        data_loader: iterable yielding ``(samples, targets)`` batches.
        base_ds: COCO-style dataset handed to ``CocoEvaluator``.
        device: device inputs and targets are moved to.
        output_dir: directory used for panoptic evaluation artifacts.

    Returns:
        Tuple ``(stats, coco_evaluator)`` — ``stats`` holds the averaged
        logged metrics plus COCO / panoptic summary numbers.
    """
    # Eval mode: freezes BatchNorm statistics and disables Dropout.
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'

    # Evaluate only the IoU types we actually have postprocessors for.
    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]

    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )

    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # Reduce losses over all GPUs for logging purposes.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: v * weight_dict[k]  # weighted losses
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v  # raw (unweighted) losses
                                      for k, v in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
                             **loss_dict_reduced_scaled,
                             **loss_dict_reduced_unscaled)
        metric_logger.update(class_error=loss_dict_reduced['class_error'])

        # Sizes used by the postprocessors to map predictions back to image space.
        orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
        # BUGFIX: compute target_sizes unconditionally. It was previously
        # assigned only inside the 'segm' branch, so a postprocessor set
        # containing 'panoptic' but not 'segm' raised NameError at the
        # panoptic call below.
        target_sizes = torch.stack([t["size"] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if 'segm' in postprocessors.keys():
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        # Key predictions by ground-truth image id for the COCO evaluator.
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)

        if panoptic_evaluator is not None:
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name

            panoptic_evaluator.update(res_pano)

    # Synchronize logged statistics and evaluator state across processes so
    # every rank sees the global numbers.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()

    # Accumulate predictions from all images and compute summary metrics.
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if 'bbox' in postprocessors.keys():
            stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
        if 'segm' in postprocessors.keys():
            stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
    if panoptic_res is not None:
        stats['PQ_all'] = panoptic_res["All"]
        stats['PQ_th'] = panoptic_res["Things"]
        stats['PQ_st'] = panoptic_res["Stuff"]
    return stats, coco_evaluator
