# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable
import numpy as np
import torch

import util.misc as misc
import util.lr_sched as lr_sched


def train_one_epoch(model: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler,
                    log_writer=None,
                    args=None):
    """Run one self-supervised MAE pre-training epoch.

    Args:
        model: MAE model; its forward returns ``(loss, pred, mask)``.
        data_loader: yields ``(samples, labels)``; labels are discarded
            because MAE pre-training is unsupervised.
        optimizer: optimizer whose param groups receive the scheduled lr.
        device: device each batch is moved to.
        epoch: current epoch index, consumed by the lr schedule.
        loss_scaler: AMP loss-scaler wrapper (backward + conditional step).
        log_writer: optional tensorboard SummaryWriter.
        args: namespace providing ``accum_iter`` and ``mask_ratio``.

    Returns:
        Dict mapping meter name to its global average over the epoch.
    """
    model.train(True)

    logger = misc.MetricLogger(delimiter="  ")
    logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter

    optimizer.zero_grad()

    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))

    n_batches = len(data_loader)
    for step, (samples, _) in enumerate(logger.log_every(data_loader, print_freq, header)):
        # Per-iteration (not per-epoch) lr schedule, applied only at
        # gradient-accumulation boundaries.
        if step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, step / n_batches + epoch, args)

        samples = samples.to(device, non_blocking=True)

        # Mixed-precision forward pass.
        with torch.cuda.amp.autocast():
            loss, _, _ = model(samples, mask_ratio=args.mask_ratio)

        loss_value = loss.item()
        # Abort on NaN/inf loss rather than continuing with a corrupted model.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        # Scale down so accumulated gradients average over accum_iter batches.
        loss /= accum_iter
        is_update_step = (step + 1) % accum_iter == 0
        loss_scaler(loss, optimizer, parameters=model.parameters(),
                    update_grad=is_update_step)
        if is_update_step:
            optimizer.zero_grad()

        torch.cuda.synchronize()

        logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]["lr"]
        logger.update(lr=lr)

        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if log_writer is not None and is_update_step:
            # epoch_1000x as the tensorboard x-axis calibrates curves across
            # different batch sizes.
            epoch_1000x = int((step / n_batches + epoch) * 1000)
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)

    # Gather statistics from all distributed processes.
    logger.synchronize_between_processes()
    print("Averaged stats:", logger)
    return {k: meter.global_avg for k, meter in logger.meters.items()}


# Module-level buffer filled by `feature_hook`. Callers clear it before each
# forward pass and read the latest capture afterwards.
saved_features = []


def feature_hook(module, inputs, output):
    """Forward hook that records a detached copy of a layer's output."""
    saved_features.append(output.detach())



def train_one_epoch_MedMamba(model, 
                             medmamba, data_loader, optimizer, 
                             device, epoch, loss_scaler, 
                             log_writer=None, 
                             args=None):
    """Run one MAE pre-training epoch guided by a frozen MedMamba feature map.

    A forward hook on a fixed inner layer of ``medmamba`` captures that
    layer's activation for each batch; the capture is passed to ``model`` as
    ``feature_map`` alongside the images.

    Args:
        model: MAE-style model; ``model(images, mask_ratio, feature_map)``
            returns ``(loss, pred, mask)``.
        medmamba: frozen feature extractor, kept in eval mode.
        data_loader: yields ``(images, labels)``; labels are unused.
        optimizer: optimizer driven by the per-iteration lr schedule.
        device: device each batch is moved to.
        epoch: current epoch index.
        loss_scaler: AMP scaler wrapper (assumes a misc.NativeScaler-style
            ``clip_grad`` keyword — verify against util.misc).
        log_writer: optional tensorboard SummaryWriter.
        args: namespace providing ``accum_iter`` and ``mask_ratio``.

    Returns:
        Dict mapping meter name to its global average over the epoch.
    """
    model.train(True)
    medmamba.eval()  # the feature extractor is never trained here

    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20

    accum_iter = args.accum_iter
    optimizer.zero_grad()

    # Register a hook on the layer whose output we want to capture.
    hook = medmamba.layers[2].blocks[3].conv33conv33conv11[3].register_forward_hook(feature_hook)

    try:
        for data_iter_step, (images, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
            # Per-iteration lr schedule, applied at accumulation boundaries.
            if data_iter_step % accum_iter == 0:
                lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

            images = images.to(device, non_blocking=True)

            # Drop feature maps captured for the previous batch.
            saved_features.clear()

            # Run the frozen extractor; the hook stores the layer output.
            with torch.no_grad():
                _ = medmamba(images)

            feature_maps = saved_features[-1]  # most recent capture

            with torch.cuda.amp.autocast():
                loss, _, _ = model(images, mask_ratio=args.mask_ratio, feature_map=feature_maps)

            loss_value = loss.item()
            if not math.isfinite(loss_value):
                print("Loss is {}, stopping training".format(loss_value))
                sys.exit(1)

            # Average the loss over the accumulation window.
            loss /= accum_iter
            # BUG FIX: gradient clipping used to be a separate
            # clip_grad_norm_ call placed *after* this scaler call had
            # already stepped the optimizer, acting on still-scaled
            # gradients — it never affected the update. Passing clip_grad
            # lets the scaler unscale, clip, then step in the right order.
            loss_scaler(loss, optimizer, clip_grad=1.0,
                        parameters=model.parameters(),
                        update_grad=(data_iter_step + 1) % accum_iter == 0)
            if (data_iter_step + 1) % accum_iter == 0:
                optimizer.zero_grad()

            torch.cuda.synchronize()

            metric_logger.update(loss=loss_value)
            lr = optimizer.param_groups[0]["lr"]
            metric_logger.update(lr=lr)

            loss_value_reduce = misc.all_reduce_mean(loss_value)
            if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
                # epoch_1000x as the tensorboard x-axis calibrates curves
                # across different batch sizes.
                epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
                log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
                log_writer.add_scalar('lr', lr, epoch_1000x)
    finally:
        # Always detach the hook, even if training aborts mid-epoch
        # (sys.exit raises SystemExit, so this still runs).
        hook.remove()

    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}



def train_one_epoch_MedMamba_maskingnet(model, medmamba, data_loader, optimizer, device, epoch, loss_scaler, loss_scaler_mask, log_writer=None, args=None):
    """Alternating two-phase epoch: train the MAE backbone, then the masking net.

    Per batch: phase 1 freezes the masking net (model.freeze_maskingnet) and
    updates the reconstruction path via ``loss_scaler``; phase 2 freezes the
    backbone (model.freeze_backbone) and updates the masking net via
    ``loss_scaler_mask`` with ``train_mask=True``. A frozen ``medmamba``
    supplies a feature map per batch through a forward hook.

    Note: only the phase-1 reconstruction loss is recorded in the metric
    logger; the masking-net loss is checked for finiteness but not logged.

    Returns:
        Dict mapping meter name to its global average over the epoch.
    """
    model.train(True)
    medmamba.eval()  # keep the frozen feature extractor in eval mode
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    
#     for name, param in model.named_parameters():
#         print(f"{name} requires grad: {param.requires_grad}")

    # Register a forward hook to capture the target layer's output
    hook = medmamba.layers[2].blocks[3].conv33conv33conv11[3].register_forward_hook(feature_hook)
    
    for data_iter_step, (images, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        
        # Per-iteration lr schedule, applied at accumulation boundaries
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
            
        images = images.to(device, non_blocking=True)

        # Discard feature maps captured for the previous batch
        saved_features.clear()
        
        # Run the frozen medmamba; the hook stores the layer output
        with torch.no_grad():
            _ = medmamba(images)
       
        # Most recent capture from the hook
        feature_maps = saved_features[-1]
        
        # # Check requires_grad of feature_maps
        # print(f"Feature maps requires grad: {feature_maps.requires_grad}")
        
        
        # ======================================= train mae =========================================
        # Phase 1: train the MAE structure with the maskingnet frozen
        model.freeze_maskingnet()
        
        with torch.cuda.amp.autocast():
            loss_recon, _, _ = model(images, mask_ratio=args.mask_ratio,feature_map=feature_maps)
        
        
        # # Debug info
        # print(f"Loss recon: {loss_recon}")
        # print(f"Loss recon requires grad: {loss_recon.requires_grad}")
        # print(f"Loss recon grad fn: {loss_recon.grad_fn}")
        
        
        loss_value = loss_recon.item()
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        # Average over the gradient-accumulation window
        loss_recon = loss_recon/accum_iter
        # print(f"Before scaling: loss_recon requires grad: {loss_recon.requires_grad}")
        loss_scaler(loss_recon, optimizer, parameters=model.parameters(), update_grad=(data_iter_step + 1) % accum_iter == 0)
        # print(f"After scaling: loss_recon requires grad: {loss_recon.requires_grad}")
        # NOTE(review): this clip runs after loss_scaler may have already
        # stepped, and on still-scaled gradients — likely a no-op for the
        # update; consider passing clip_grad to loss_scaler instead.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()

        torch.cuda.synchronize()

        model.unfreeze_maskingnet()
        
        
        # ====================================== train maskingnet ======================================
        # Phase 2: train the maskingnet with the MAE structure frozen
        model.freeze_backbone()
        
        with torch.cuda.amp.autocast():
            loss, _, _ = model(images, mask_ratio=args.mask_ratio,feature_map=feature_maps, train_mask=True)
        
        # NOTE(review): forcing requires_grad on a computed loss is
        # suspicious — if the graph was detached upstream this makes
        # backward() succeed while reaching no parameters; verify that
        # gradients actually flow to the masking net.
        loss.requires_grad_(True)
        # # Debug
        # print(f"Loss: {loss}")
        # print(f"Loss requires grad: {loss.requires_grad}")
        # print(f"Loss grad fn: {loss.grad_fn}")
        
        loss_value_mm = loss.item()

        if not math.isfinite(loss_value_mm):
            print("Loss is {}, stopping training".format(loss_value_mm))
            sys.exit(1)

        # Average over the gradient-accumulation window
        loss = loss/accum_iter
        # print(f"Before scaling: loss requires grad: {loss.requires_grad}")
        loss_scaler_mask(loss, optimizer, parameters=model.parameters(), update_grad=(data_iter_step + 1) % accum_iter == 0)
        # print(f"After scaling: loss requires grad: {loss.requires_grad}")
        # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()
    
        torch.cuda.synchronize()

        model.unfreeze_backbone()
        
        # Only the phase-1 reconstruction loss is logged
        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]["lr"]
        metric_logger.update(lr=lr)

        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            # epoch_1000x as the tensorboard x-axis calibrates curves across
            # different batch sizes
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)

    hook.remove()        
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


def train_one_epoch_MedMamba_cl(model, medmamba, data_loader, optimizer, device, epoch, loss_scaler, num_epochs, log_writer=None, args=None):
    """MAE pre-training epoch with a curriculum factor and MedMamba features.

    Like ``train_one_epoch_MedMamba`` but also passes a ``curriculum_factor``
    that ramps linearly from 1/num_epochs to 1 across training. The hooked
    layer index ([8]) differs from the non-curriculum variant ([3]).

    Args:
        model: MAE-style model; forward takes ``mask_ratio``, ``feature_map``
            and ``curriculum_factor`` and returns ``(loss, pred, mask)``.
        medmamba: frozen feature extractor, kept in eval mode.
        data_loader: yields ``(images, labels)``; labels are unused.
        optimizer: optimizer driven by the per-iteration lr schedule.
        device: device each batch is moved to.
        epoch: current epoch index.
        loss_scaler: AMP scaler wrapper (assumes a misc.NativeScaler-style
            ``clip_grad`` keyword — verify against util.misc).
        num_epochs: total number of epochs, used by the curriculum ramp.
        log_writer: optional tensorboard SummaryWriter.
        args: namespace providing ``accum_iter`` and ``mask_ratio``.

    Returns:
        Dict mapping meter name to its global average over the epoch.
    """
    model.train(True)
    medmamba.eval()  # the feature extractor is never trained here
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20

    accum_iter = args.accum_iter
    optimizer.zero_grad()

    # Register a hook on the layer whose output we want to capture.
    hook = medmamba.layers[2].blocks[3].conv33conv33conv11[8].register_forward_hook(feature_hook)
    try:
        for data_iter_step, (images, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
            # Per-iteration (not per-epoch) lr schedule at accumulation boundaries.
            if data_iter_step % accum_iter == 0:
                lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

            images = images.to(device, non_blocking=True)

            # Drop feature maps captured for the previous batch.
            saved_features.clear()

            # Run the frozen extractor; the hook stores the layer output.
            with torch.no_grad():
                _ = medmamba(images)

            feature_maps = saved_features[-1]  # most recent capture

            with torch.cuda.amp.autocast():
                # Linear curriculum ramp (earlier experiments also tried
                # quadratic, sinusoidal, and sigmoid schedules).
                curriculum_factor = (epoch+1)*1.0/num_epochs
                loss, _, _ = model(images, mask_ratio=args.mask_ratio, feature_map=feature_maps, curriculum_factor=curriculum_factor)

            loss_value = loss.item()
            if not math.isfinite(loss_value):
                print("Loss is {}, stopping training".format(loss_value))
                sys.exit(1)

            # Average the loss over the accumulation window.
            loss /= accum_iter
            # BUG FIX: clipping used to be a separate clip_grad_norm_ call
            # placed *after* this scaler call had already stepped, acting on
            # still-scaled gradients — it never affected the update. Passing
            # clip_grad lets the scaler unscale, clip, then step in order.
            loss_scaler(loss, optimizer, clip_grad=1.0,
                        parameters=model.parameters(),
                        update_grad=(data_iter_step + 1) % accum_iter == 0)
            if (data_iter_step + 1) % accum_iter == 0:
                optimizer.zero_grad()

            torch.cuda.synchronize()

            metric_logger.update(loss=loss_value)
            lr = optimizer.param_groups[0]["lr"]
            metric_logger.update(lr=lr)

            loss_value_reduce = misc.all_reduce_mean(loss_value)
            if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
                # epoch_1000x as the tensorboard x-axis calibrates curves
                # across different batch sizes.
                epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
                log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
                log_writer.add_scalar('lr', lr, epoch_1000x)
    finally:
        # Always detach the hook, even if training aborts mid-epoch.
        hook.remove()
    # Gather the stats from all processes.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}

def train_one_epoch_vmamba(model, vss_model, data_loader, optimizer, device, epoch, loss_scaler, log_writer=None, args=None):
    """MAE pre-training epoch guided by features from a frozen VMamba model.

    A forward hook on ``vss_model.layers[0].blocks[0].op.conv2d`` captures
    that layer's activation per batch and feeds it to ``model`` as
    ``feature_map``.

    Args:
        model: MAE-style model; ``model(images, mask_ratio, feature_map)``
            returns ``(loss, pred, mask)``.
        vss_model: frozen VMamba feature extractor, kept in eval mode.
        data_loader: yields ``(images, labels)``; labels are unused.
        optimizer: optimizer driven by the per-iteration lr schedule.
        device: device each batch is moved to.
        epoch: current epoch index.
        loss_scaler: AMP scaler wrapper (assumes a misc.NativeScaler-style
            ``clip_grad`` keyword — verify against util.misc).
        log_writer: optional tensorboard SummaryWriter.
        args: namespace providing ``accum_iter`` and ``mask_ratio``.

    Returns:
        Dict mapping meter name to its global average over the epoch.
    """
    model.train(True)
    vss_model.eval()  # the feature extractor is never trained here
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20

    accum_iter = args.accum_iter
    optimizer.zero_grad()

    # Register a hook on the layer whose output we want to capture.
    hook = vss_model.layers[0].blocks[0].op.conv2d.register_forward_hook(feature_hook)

    try:
        for data_iter_step, (images, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
            # Per-iteration (not per-epoch) lr schedule at accumulation boundaries.
            if data_iter_step % accum_iter == 0:
                lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)

            images = images.to(device, non_blocking=True)

            # Drop feature maps captured for the previous batch.
            saved_features.clear()

            # Run the frozen extractor; the hook stores the layer output.
            with torch.no_grad():
                _ = vss_model(images)

            feature_maps = saved_features[-1]  # most recent capture

            with torch.cuda.amp.autocast():
                loss, _, _ = model(images, mask_ratio=args.mask_ratio, feature_map=feature_maps)

            loss_value = loss.item()
            if not math.isfinite(loss_value):
                print("Loss is {}, stopping training".format(loss_value))
                sys.exit(1)

            # Average the loss over the accumulation window.
            loss /= accum_iter
            # BUG FIX: clipping used to be a separate clip_grad_norm_ call
            # placed *after* this scaler call had already stepped, acting on
            # still-scaled gradients — it never affected the update. Passing
            # clip_grad lets the scaler unscale, clip, then step in order.
            loss_scaler(loss, optimizer, clip_grad=1.0,
                        parameters=model.parameters(),
                        update_grad=(data_iter_step + 1) % accum_iter == 0)
            if (data_iter_step + 1) % accum_iter == 0:
                optimizer.zero_grad()

            torch.cuda.synchronize()

            metric_logger.update(loss=loss_value)
            lr = optimizer.param_groups[0]["lr"]
            metric_logger.update(lr=lr)

            loss_value_reduce = misc.all_reduce_mean(loss_value)
            if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
                # epoch_1000x as the tensorboard x-axis calibrates curves
                # across different batch sizes.
                epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
                log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
                log_writer.add_scalar('lr', lr, epoch_1000x)
    finally:
        # BUG FIX: the hook was previously never removed, leaking across
        # epochs — a stale hook kept appending to saved_features on every
        # later forward pass of vss_model.
        hook.remove()

    # Gather the stats from all processes.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}

