# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable

import torch

import util.misc as misc
import util.lr_sched as lr_sched


def train_one_epoch(model: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler,
                    log_writer=None,
                    args=None):
    """Run one epoch of MAE pre-training.

    Iterates over ``data_loader`` doing mixed-precision forward passes,
    scaled backward passes through ``loss_scaler``, and an optimizer step
    every ``args.accum_iter`` micro-batches (gradient accumulation).

    Args:
        model: MAE model; called as ``model(samples, mask_ratio=...)`` and
            expected to return ``(loss, pred, mask)``.
        data_loader: iterable of ``(samples, target)``; targets are ignored
            during pre-training.
        optimizer: optimizer; its first param group's ``lr`` is logged.
        device: device the input batches are moved to.
        epoch: current epoch index, used for LR scheduling and logging.
        loss_scaler: callable performing AMP loss scaling, backward, and
            (when ``update_grad`` is true) the optimizer step.
        log_writer: optional TensorBoard-style writer with ``add_scalar``.
        args: namespace providing ``accum_iter`` and ``mask_ratio``.

    Returns:
        Dict mapping each tracked metric name to its global average.
    """
    model.train(True)

    # Track smoothed loss / lr for periodic console logging.
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20  # log every 20 batches

    accum_iter = args.accum_iter

    optimizer.zero_grad()

    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))

    for step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):

        # Per-iteration LR schedule: fractional epoch = step/len(loader) + epoch.
        if step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, step / len(data_loader) + epoch, args)

        samples = samples.to(device, non_blocking=True)

        # AMP forward pass; the model computes its own reconstruction loss.
        with torch.cuda.amp.autocast():
            loss, _, _ = model(samples, mask_ratio=args.mask_ratio)

        loss_value = loss.item()

        # Abort on NaN/Inf loss rather than corrupting the weights.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        # Scale the loss down so accumulated gradients average correctly;
        # only step the optimizer on the last micro-batch of each group.
        loss /= accum_iter
        apply_update = (step + 1) % accum_iter == 0
        loss_scaler(loss, optimizer, parameters=model.parameters(),
                    update_grad=apply_update)
        if apply_update:
            optimizer.zero_grad()

        torch.cuda.synchronize()  # make timing/metrics reflect finished work

        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]["lr"]
        metric_logger.update(lr=lr)

        # Average the scalar loss across distributed workers for logging.
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if log_writer is not None and apply_update:
            # x-axis in thousandths of an epoch so curves from runs with
            # different batch sizes can be calibrated against each other.
            epoch_1000x = int((step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)

    # Sync meters across processes before reporting epoch-level stats.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
