

import math
import random
from copy import deepcopy

import numpy as np
import torch
from matplotlib import pyplot as plt
from timm.models.layers import DropPath
from torch.nn.modules.dropout import _DropoutNd

from mmseg.core import add_prefix
from mmseg.models import UDA, build_segmentor
from mmseg.models.uda.uda_decorator import UDADecorator, get_module
from mmseg.models.utils.dacs_transforms import (denorm, get_class_masks,
                                                get_mean_std, strong_transform)
from mmseg.models.utils.visualization import subplotimg

import os
import sys


import torch
import numpy as np
import random
"""跨域混合"""

# 修改后的 generate_tokenmix 函数（支持跨域混合）
def generate_tokenmix(src_img, src_label, tgt_img, tgt_label, lam, num_tokens=16, device=None):
    """Cross-domain patch ("token") mixing between a source and a target batch.

    The H x W plane is split into ``num_tokens`` x ``num_tokens`` square
    patches; a random fraction ``lam`` of patch positions is taken from the
    target batch and the rest from the source batch.  Images and labels are
    mixed with the same patch mask so they stay spatially aligned.

    Args:
        src_img (Tensor): Source-domain images [B, C, H, W].
        src_label (Tensor): Source-domain labels [B, H, W] (a singleton
            channel dim, [B, 1, H, W], is squeezed away first).
        tgt_img (Tensor): Target-domain images [B, C, H, W].
        tgt_label (Tensor): Target-domain (pseudo-)labels [B, H, W].
        lam (float): Fraction of patches replaced by the target domain.
        num_tokens (int): Number of patches per spatial side.
        device (torch.device | str | None): Device for the patch mask.
            Defaults to ``src_img.device`` (previously hard-coded 'cuda',
            which broke CPU inputs).

    Returns:
        tuple(Tensor, Tensor): ``(mixed_img [B, C, H, W], mixed_label
        [B, H, W])``.  The label dtype follows type promotion with the float
        mask; callers cast back to ``long`` as needed.
    """
    # Accept [B, 1, H, W] ground-truth labels (mmseg convention).
    if src_label.dim() == 4 and src_label.shape[1] == 1:
        src_label = src_label.squeeze(1)

    # --- input validation ---
    assert src_img.shape == tgt_img.shape, "源域与目标域图像尺寸必须一致"
    assert src_label.shape == tgt_label.shape, "源域与目标域标签尺寸必须一致"

    B, C, H, W = src_img.size()
    # The patch views below only reshape correctly for a square plane whose
    # side is an exact multiple of num_tokens; fail fast otherwise.
    assert H == W and H % num_tokens == 0, (
        f'spatial size {H}x{W} must be square and divisible by num_tokens={num_tokens}')
    token_size = H // num_tokens

    if device is None:
        device = src_img.device

    # --- build the binary patch mask: 1 -> take the target patch ---
    num_mask_tokens = int(lam * num_tokens ** 2)  # total number of replaced patches
    token_mask = torch.zeros((B, num_tokens * num_tokens), device=device)
    for b in range(B):
        selected_tokens = random.sample(range(num_tokens ** 2), num_mask_tokens)
        token_mask[b, selected_tokens] = 1
    token_mask = token_mask.view(B, num_tokens, num_tokens)

    # --- split images into patches: [B, nt, nt, C, ts, ts] ---
    src_img_tokens = src_img.view(
        B, C, num_tokens, token_size, num_tokens, token_size
    ).permute(0, 2, 4, 1, 3, 5)
    tgt_img_tokens = tgt_img.view(
        B, C, num_tokens, token_size, num_tokens, token_size
    ).permute(0, 2, 4, 1, 3, 5)

    # --- mix and reassemble the images ---
    img_mask = token_mask.view(B, num_tokens, num_tokens, 1, 1, 1)
    mixed_img_tokens = src_img_tokens * (1 - img_mask) + tgt_img_tokens * img_mask
    mixed_img = mixed_img_tokens.permute(0, 3, 1, 4, 2, 5).contiguous().view(B, C, H, W)

    # --- mix the labels with the same mask ---
    # Labels are viewed as [B, nt, ts, nt, ts]; broadcasting the mask as
    # [B, nt, 1, nt, 1] applies one mask value per patch.  (This view was
    # previously hard-coded to 16 tokens, which broke any other num_tokens.)
    src_label_tokens = src_label.view(B, num_tokens, token_size, num_tokens, token_size)
    tgt_label_tokens = tgt_label.view(B, num_tokens, token_size, num_tokens, token_size)
    label_mask = token_mask.view(B, num_tokens, 1, num_tokens, 1)
    mixed_label_tokens = (src_label_tokens * (1 - label_mask)
                          + tgt_label_tokens * label_mask)
    mixed_label = mixed_label_tokens.view(B, H, W)

    return mixed_img, mixed_label


# Make the project root (two directory levels above this file) importable.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)


# 逐层比较两个模型的参数
def _params_equal(ema_model, model):
    for ema_param, param in zip(ema_model.named_parameters(),
                                model.named_parameters()):
        if not torch.equal(ema_param[1].data, param[1].data):
            # print("Difference in", ema_param[0])
            return False
    return True


# 计算一个张量的gram矩阵
def gram_matrix(tensor):
    """Compute the Gram matrix of a (d, h, w) feature tensor.

    Each of the ``d`` channels is flattened to a row vector; the result is
    the ``d x d`` matrix of pairwise inner products between channels.
    """
    depth, height, width = tensor.size()
    flattened = tensor.view(depth, height * width)
    return torch.mm(flattened, flattened.t())


# 计算梯度的范数（梯度的大小）
def calc_grad_magnitude(grads, norm_type=2.0):
    """Compute the total norm (magnitude) of a sequence of gradient tensors.

    For ``norm_type == inf`` this is the largest absolute entry across all
    tensors; otherwise the p-norm of the per-tensor p-norms.
    """
    norm_type = float(norm_type)
    if norm_type == math.inf:
        return max(g.abs().max() for g in grads)
    per_tensor_norms = [torch.norm(g, norm_type) for g in grads]
    return torch.norm(torch.stack(per_tensor_norms), norm_type)


# 初始化教师网络权重
def _init_ema_weights(module, module_ema):
    for param in module_ema.parameters():
        param.detach_()
    mp = list(module.parameters())
    mcp = list(module_ema.parameters())
    for i in range(0, len(mp)):
        if not mcp[i].data.shape:  # scalar tensor
            mcp[i].data = mp[i].data.clone()
        else:
            mcp[i].data[:] = mp[i].data[:].clone()


def freeze_module(module):
    """Disable gradient computation for every parameter of ``module``."""
    for p in module.parameters():
        p.requires_grad_(False)


def unfreeze_module(module):
    """Re-enable gradient computation for every parameter of ``module``."""
    for p in module.parameters():
        p.requires_grad_(True)


# 计算软标签的熵
def calc_entropy(prob):
    """Per-pixel Shannon entropy of a softmax probability tensor.

    :param prob: softmax of the score, with the class dimension at dim=1
    :return: entropy map with the class dimension reduced away
    """
    # small epsilon guards against log(0)
    return (-prob * torch.log(prob + 1e-7)).sum(dim=1)


@UDA.register_module()
class MultiTeacherIMDTGT(UDADecorator):
    """Unsupervised domain adaptation with two EMA teachers.

    One teacher is maintained for the intermediate (reference) domain and one
    for the target domain.  Training alternates per iteration: on even
    iterations pseudo-labels come from the intermediate-domain teacher and
    source/intermediate token-mixed images are trained on; on odd iterations
    the target-domain teacher provides pseudo-labels for source/target mixing.
    Gradients are produced inside :meth:`forward_train` via per-loss
    ``backward()`` calls; :meth:`train_step` only zeroes gradients and steps
    the optimizer.
    """
    def __init__(self, **cfg):
        super(MultiTeacherIMDTGT, self).__init__(**cfg)
        self.local_iter = 0  # local iteration counter
        self.max_iters = cfg['max_iters']  # maximum number of training iterations
        self.alpha = cfg['alpha']  # EMA decay cap used by _update_ema
        self.pseudo_threshold = cfg['pseudo_threshold']  # confidence threshold for pseudo-labels
        self.psweight_ignore_top = cfg['pseudo_weight_ignore_top']
        self.psweight_ignore_bottom = cfg['pseudo_weight_ignore_bottom']
        self.psweight_ref_ignore_top = cfg['pseudo_ref_weight_ignore_top']
        self.mix = cfg['mix']
        self.blur = cfg['blur']
        self.color_jitter_s = cfg['color_jitter_strength']
        self.color_jitter_p = cfg['color_jitter_probability']
        self.debug_img_interval = cfg['debug_img_interval']
        self.class_probs = {}
        self.teacher_model_target = build_segmentor(deepcopy(cfg['model']))  # target-domain teacher segmentor
        self.teacher_model_imd = build_segmentor(deepcopy(cfg['model']))  # intermediate-domain teacher segmentor (teacher assistant)

    def _update_ema(self, iter, module, module_ema):
        """EMA-update ``module_ema`` towards ``module``.

        The decay ramps up as ``1 - 1/(iter + 1)`` and is capped at
        ``self.alpha``, so early iterations track the student closely.
        """
        alpha_teacher = min(1 - 1 / (iter + 1), self.alpha)
        for ema_param, param in zip(module_ema.parameters(), module.parameters()):
            if not param.data.shape:  # scalar tensor
                ema_param.data = alpha_teacher * ema_param.data + (1 - alpha_teacher) * param.data
            else:
                ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]

    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training.  Note that in
        this implementation back propagation happens inside
        ``forward_train`` (each loss term calls ``backward()`` there), so
        this method only zeroes the gradients, runs the forward pass, and
        steps the optimizer.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """

        optimizer.zero_grad()
        log_vars = self(**data_batch)
        optimizer.step()

        log_vars.pop('loss', None)  # remove the unnecessary 'loss'
        outputs = dict(
            log_vars=log_vars, num_samples=len(data_batch['img_metas']))
        return outputs

    def forward_train(self,
                      img, img_metas, gt_semantic_seg=None,
                      imd_img=None, imd_img_metas=None,
                      target_img=None, target_img_metas=None):
        """Forward function for training.

        Args:
            img (Tensor): Source-domain input images (e.g. Cityscapes).
            img_metas (list[dict]): Meta info of the source images; each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Ground-truth semantic segmentation maps
                of the source images.
            imd_img (Tensor): Intermediate-domain (reference) images.
            imd_img_metas (list[dict]): Meta info of the intermediate images.
            target_img (Tensor): Target-domain images (e.g. ACDC fog).
            target_img_metas (list[dict]): Meta info of the target images.

        Returns:
            dict[str, Tensor]: A dictionary of loss components / log
            variables accumulated over the branches executed this iteration.
        """
        log_vars = {}
        batch_size = img.shape[0]
        dev = img.device

        # Normalization mean/std of the input images (from the image metas).
        means, stds = get_mean_std(img_metas, dev)
        strong_parameters = {
            'mix': None,
            'color_jitter': random.uniform(0, 1),
            'color_jitter_s': self.color_jitter_s,
            'color_jitter_p': self.color_jitter_p,
            'blur': random.uniform(0, 1) if self.blur else 0,
            'mean': means[0].unsqueeze(0),  # assume same normalization
            'std': stds[0].unsqueeze(0)
        }

        # Init/update ema model
        if self.local_iter == 0:
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_imd))
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_target))

            # always freeze teacher_model
            freeze_module(get_module(self.teacher_model_imd))
            freeze_module(get_module(self.teacher_model_target))

        if self.local_iter > 0:
            # 1. Train on source images, no matter the iter changes
            # This step keeps the model learning from labeled source images
            # every iteration.
            clean_losses = self.get_model().forward_train(
                img, img_metas, gt_semantic_seg, return_feat=False)
            # Prefix the losses with 'src' so source-domain losses are
            # distinguishable in logs and downstream processing.
            clean_losses = add_prefix(clean_losses, 'src')
            # Parse the returned losses into a scalar for backprop plus
            # per-term log variables.
            clean_loss, clean_log_vars = self._parse_losses(clean_losses)
            log_vars.update(clean_log_vars)  # record the parsed loss values
            # Backpropagate the source-domain loss to update model parameters.
            clean_loss.backward(retain_graph=False)


        if self.local_iter % 2 == 0:
            # Update the intermediate-domain teacher via EMA.
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_imd))

            # 2. Generate pseudo_label by teacher assist model
            # Disable dropout and drop-path in the teacher so pseudo-label
            # generation is deterministic/evaluation-like.
            for m in get_module(self.teacher_model_imd).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False
            imd_ema_logits = get_module(self.teacher_model_imd).encode_decode(imd_img, imd_img_metas)
            imd_ema_softmax = torch.softmax(imd_ema_logits.detach(), dim=1)
            imd_pseudo_prob, imd_pseudo_label = torch.max(imd_ema_softmax, dim=1)
            imd_ps_large_p = imd_pseudo_prob.ge(self.pseudo_threshold).long() == 1
            imd_ps_size = np.size(np.array(imd_pseudo_label.cpu()))
            # Scalar confidence ratio, broadcast over the pseudo-label map.
            imd_pseudo_weight = torch.sum(imd_ps_large_p).item() / imd_ps_size
            imd_pseudo_weight = imd_pseudo_weight * torch.ones(
                imd_pseudo_prob.shape, device=dev)

            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                imd_pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                imd_pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            imd_gt_pixel_weight = torch.ones(imd_pseudo_weight.shape, device=dev)

            # --- cross-domain token mixing (source + intermediate) ---
            # Mix source images/GT with intermediate images/pseudo-labels.
            # gt_semantic_seg: [B, 1, H, W]; imd_pseudo_label: [B, H, W]
            # (generate_tokenmix squeezes the GT channel dim internally).

            src_imd_mixed_img, src_imd_mixed_lbl = generate_tokenmix(
                src_img=img,
                src_label=gt_semantic_seg,  # source-domain ground-truth labels
                tgt_img=imd_img,  # intermediate-domain images
                tgt_label=imd_pseudo_label,  # intermediate-domain pseudo-labels
                lam=0.5
            )
            if src_imd_mixed_lbl.dim() == 3:
                src_imd_mixed_lbl = src_imd_mixed_lbl.unsqueeze(1)
            src_imd_mixed_lbl = src_imd_mixed_lbl.long()

            # NOTE(review): imd_pseudo_weight / imd_gt_pixel_weight are
            # computed above but not passed to forward_train below (the
            # strong_transform weighting is commented out) — confirm intended.
            # for i in range(batch_size):
            #     _, imd_pseudo_weight[i] = strong_transform(
            #         strong_parameters,
            #         target=torch.stack((imd_gt_pixel_weight[i], imd_pseudo_weight[i])))

                # Train on the mixed images and labels.
            src_imd_mix_losses = self.get_model().forward_train(
                src_imd_mixed_img, img_metas, src_imd_mixed_lbl, return_feat=True)
            src_imd_mix_losses.pop('features')
            src_imd_mix_losses = add_prefix(src_imd_mix_losses, 'src_imd_mix')
            src_imd_mix_loss, src_imd_mix_log_vars = self._parse_losses(src_imd_mix_losses)
            log_vars.update(src_imd_mix_log_vars)
            src_imd_mix_loss.backward()

        if self.local_iter % 2 == 1:
            # Update the target-domain teacher via EMA.
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_target))
            # 3. Train on src_tgt image
            # Disable dropout/drop-path in the teacher (see branch above).
            for m in get_module(self.teacher_model_target).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False

            ema_logits = get_module(self.teacher_model_target).encode_decode(target_img, target_img_metas)
            ema_softmax = torch.softmax(ema_logits.detach(), dim=1)
            pseudo_prob, pseudo_label = torch.max(ema_softmax, dim=1)
            ps_large_p = pseudo_prob.ge(self.pseudo_threshold).long() == 1  # pseudo-label confidence mask
            ps_size = np.size(np.array(pseudo_label.cpu()))
            # Confidence ratio broadcast over the whole pseudo-label tensor,
            # used as the per-pixel weight in the loss below.
            pseudo_weight = torch.sum(ps_large_p).item() / ps_size
            pseudo_weight = pseudo_weight * torch.ones(pseudo_prob.shape, device=dev)

            # Zero out pseudo-label weight at the image borders to prevent
            # pseudo-label drift.
            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            # NOTE(review): gt_pixel_weight is computed but unused — confirm.
            gt_pixel_weight = torch.ones(pseudo_weight.shape, device=dev)

            # --- cross-domain token mixing (source + target) ---
            mixed_img, mixed_lbl = generate_tokenmix(
                src_img=img,
                src_label=gt_semantic_seg,  # source-domain ground-truth labels
                tgt_img=target_img,  # target-domain images
                tgt_label=pseudo_label,  # target-domain pseudo-labels
                lam=0.5
            )
            if mixed_lbl.dim() == 3:
                mixed_lbl = mixed_lbl.unsqueeze(1)
            mixed_lbl = mixed_lbl.long()

            # Train on mixed images
            mix_losses = self.get_model().forward_train(
                mixed_img, img_metas, mixed_lbl, pseudo_weight, return_feat=True)
            mix_losses.pop('features')
            mix_losses = add_prefix(mix_losses, 'mix')
            mix_loss, mix_log_vars = self._parse_losses(mix_losses)  # parse losses
            log_vars.update(mix_log_vars)
            mix_loss.backward()  # backpropagate this loss w.r.t. model parameters

        if self.local_iter % self.debug_img_interval == 0:
            # Periodically dump a grid of source/intermediate/target inputs,
            # predictions, and this iteration's mixed image+label.
            out_dir = os.path.join(self.train_cfg['work_dir'],
                                   'visualization')
            os.makedirs(out_dir, exist_ok=True)
            with torch.no_grad():
                src_logits = self.get_model().encode_decode(img, img_metas)
                imd_logits = self.get_model().encode_decode(imd_img, imd_img_metas)
                trg_logits = self.get_model().encode_decode(target_img, target_img_metas)

                src_softmax_prob = torch.softmax(src_logits, dim=1)
                _, pred_src = torch.max(src_softmax_prob, dim=1)

                imd_softmax_prob = torch.softmax(imd_logits, dim=1)
                _, pred_imd = torch.max(imd_softmax_prob, dim=1)

                trg_softmax_prob = torch.softmax(trg_logits, dim=1)
                _, pred_trg = torch.max(trg_softmax_prob, dim=1)

                vis_img = torch.clamp(denorm(img, means, stds), 0, 1)
                vis_imd_img = torch.clamp(denorm(imd_img, means, stds), 0, 1)
                vis_trg_img = torch.clamp(denorm(target_img, means, stds), 0, 1)
                # Only the branch that ran this iteration produced a mix.
                if self.local_iter % 2 == 0:
                    vis_src_imd_mix_img = torch.clamp(denorm(src_imd_mixed_img, means, stds), 0, 1)
                if self.local_iter % 2 == 1:
                    vis_src_tgt_mix_img = torch.clamp(denorm(mixed_img, means, stds), 0, 1)

                for j in range(batch_size):
                    rows, cols = 3, 4
                    fig, axs = plt.subplots(
                        rows,
                        cols,
                        figsize=(3 * cols, 3 * rows),
                        gridspec_kw={
                            'hspace': 0.1,
                            'wspace': 0,
                            'top': 0.95,
                            'bottom': 0,
                            'right': 1,
                            'left': 0
                        },
                    )
                    # Source domain related
                    subplotimg(axs[0][0], vis_img[j], 'Source Image')
                    subplotimg(axs[0][1], gt_semantic_seg[j], 'Source Seg GT', cmap='cityscapes')
                    subplotimg(axs[0][2], pred_src[j], 'Pred Source', cmap='cityscapes')

                    # Intermediate domain related
                    subplotimg(axs[1][0], vis_imd_img[j], 'Intermediate (Ref) Image')
                    subplotimg(axs[1][1], pred_imd[j], 'Pred Intermediate (Ref)', cmap='cityscapes')
                    if self.local_iter % 2 == 0:
                        subplotimg(axs[1][2], vis_src_imd_mix_img[j], 'Src_Imd_Mix Image')
                        subplotimg(axs[1][3], src_imd_mixed_lbl[j], 'Src_Imd_Mix Label', cmap='cityscapes')

                    # Target domain related
                    subplotimg(axs[2][0], vis_trg_img[j], 'Target Image')
                    subplotimg(axs[2][1], pred_trg[j], 'Pred target', cmap='cityscapes')
                    if self.local_iter % 2 == 1:
                        subplotimg(axs[2][2], vis_src_tgt_mix_img[j], 'Src_Tgt_Mix Image')
                        subplotimg(axs[2][3], mixed_lbl[j], 'Src_Tgt_Mix Label', cmap='cityscapes')

                    for ax in axs.flat:
                        ax.axis('off')
                    plt.savefig(os.path.join(out_dir, f'{(self.local_iter + 1):06d}_{j}.png'))
                    plt.close()

        self.local_iter += 1
        return log_vars
