# ---------------------------------------------------------------
# Copyright (c) 2021-2022 ETH Zurich, Lukas Hoyer. All rights reserved.
# Licensed under the Apache License, Version 2.0
# ---------------------------------------------------------------

# The ema model update and the domain-mixing are based on:
# https://github.com/vikolss/DACS
# Copyright (c) 2020 vikolss. Licensed under the MIT License.
# A copy of the license is available at resources/license_dacs

import math
import random
from copy import deepcopy

import numpy as np
import torch
from matplotlib import pyplot as plt
from timm.models.layers import DropPath
from torch.nn.modules.dropout import _DropoutNd

from mmseg.core import add_prefix
from mmseg.models import UDA, build_segmentor
from mmseg.models.uda.uda_decorator import UDADecorator, get_module
from mmseg.models.utils.dacs_transforms import (denorm, get_class_masks,
                                                get_mean_std, strong_transform)
from mmseg.models.utils.visualization import subplotimg

import os
import sys


import torch
import numpy as np
import random


def generate_tokenmix(img, label, lam, num_tokens=16, device='cuda'):
    """Mix samples within a batch by swapping a random subset of square tokens.

    Each image is split into a ``num_tokens x num_tokens`` grid of square
    patches ("tokens"). For every sample, a fraction ``lam`` of the grid
    cells is replaced by the corresponding cells of the batch-reversed
    sample (``img.flip(0)``). The label is mixed with the exact same mask
    and the exact same spatial decomposition, so mixed image and mixed
    label stay pixel-aligned.

    NOTE: with batch size 1, ``flip(0)`` is the identity, so the output
    equals the input.

    Args:
        img (Tensor): Input images of shape [B, C, H, W] (H == W assumed
            for square tokens).
        label (Tensor): Corresponding labels of shape [B, H, W].
        lam (float): Fraction of tokens to replace, in [0, 1].
        num_tokens (int): Grid size per spatial dimension.
        device (str): Device on which the token mask is created.

    Returns:
        (Tensor, Tensor): Mixed image [B, C, H, W] and mixed label
        [B, H, W]. The label comes back as a floating tensor because of
        the mask multiplication; callers cast it back with ``.long()``.
    """
    B, C, H, W = img.size()
    token_size = H // num_tokens  # side length of one token in pixels

    # Ensure that H and W are divisible by num_tokens
    assert H % num_tokens == 0 and W % num_tokens == 0, \
        "Image dimensions must be divisible by num_tokens"

    # Sample masked cells over the FULL 2-D grid (num_tokens**2 cells) so
    # that `lam` really is the fraction of the image that gets swapped.
    # (Sampling from range(num_tokens) would confine the mask to the first
    # grid row, because token // num_tokens would always be 0.)
    total_tokens = num_tokens * num_tokens
    num_mask_tokens = int(lam * total_tokens)
    token_mask = torch.zeros((B, num_tokens, num_tokens), device=device)
    for b in range(B):
        for token in random.sample(range(total_tokens), num_mask_tokens):
            # Integer division gives the grid row, modulo gives the column.
            token_mask[b, token // num_tokens, token % num_tokens] = 1

    # Tokenize image and label with the SAME spatial decomposition:
    # grid cell (i, j) covers pixel rows i*ts:(i+1)*ts and
    # columns j*ts:(j+1)*ts for both tensors.
    img_tokens = img.view(B, C, num_tokens, token_size, num_tokens,
                          token_size).permute(0, 2, 4, 1, 3, 5).contiguous()
    label_tokens = label.view(B, num_tokens, token_size, num_tokens,
                              token_size).permute(0, 1, 3, 2, 4).contiguous()
    # img_tokens: [B, nT, nT, C, ts, ts]; label_tokens: [B, nT, nT, ts, ts]

    # Broadcast the mask over the per-token dimensions.
    img_mask = token_mask.view(B, num_tokens, num_tokens, 1, 1, 1)
    lbl_mask = token_mask.view(B, num_tokens, num_tokens, 1, 1)

    # Mix tokens with the batch-reversed sample.
    mixed_img_tokens = img_tokens * (1 - img_mask) + img_tokens.flip(0) * img_mask
    mixed_label_tokens = label_tokens * (1 - lbl_mask) + label_tokens.flip(0) * lbl_mask

    # Invert the tokenization to reconstruct [B, C, H, W] and [B, H, W].
    mixed_img = mixed_img_tokens.permute(0, 3, 1, 4, 2, 5).contiguous().view(B, C, H, W)
    mixed_label = mixed_label_tokens.permute(0, 1, 3, 2, 4).contiguous().view(B, H, W)

    return mixed_img, mixed_label


# Make the directory two levels above this file importable, so the module
# can be run from outside the package layout.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)


# 逐层比较两个模型的参数
def _params_equal(ema_model, model):
    for ema_param, param in zip(ema_model.named_parameters(),
                                model.named_parameters()):
        if not torch.equal(ema_param[1].data, param[1].data):
            # print("Difference in", ema_param[0])
            return False
    return True


# Gram matrix of a feature tensor (used for style/feature statistics).
def gram_matrix(tensor):
    """Return the (d x d) Gram matrix of a [d, h, w] feature tensor."""
    depth, height, width = tensor.size()
    flat = tensor.view(depth, height * width)
    return torch.mm(flat, flat.t())


# Magnitude (p-norm) of a collection of gradient tensors.
def calc_grad_magnitude(grads, norm_type=2.0):
    """Return the joint `norm_type`-norm of `grads`.

    For ``norm_type == inf`` this is the maximum absolute entry across all
    tensors; otherwise it is the p-norm of the per-tensor p-norms.
    """
    norm_type = float(norm_type)
    if norm_type == math.inf:
        return max(g.abs().max() for g in grads)
    per_tensor_norms = [torch.norm(g, norm_type) for g in grads]
    return torch.norm(torch.stack(per_tensor_norms), norm_type)


# 初始化教师网络权重
def _init_ema_weights(module, module_ema):
    for param in module_ema.parameters():
        param.detach_()
    mp = list(module.parameters())
    mcp = list(module_ema.parameters())
    for i in range(0, len(mp)):
        if not mcp[i].data.shape:  # scalar tensor
            mcp[i].data = mp[i].data.clone()
        else:
            mcp[i].data[:] = mp[i].data[:].clone()


def freeze_module(module):
    """Stop gradients from flowing into any parameter of `module`."""
    for weight in module.parameters():
        weight.requires_grad_(False)


def unfreeze_module(module):
    """Re-enable gradient computation for every parameter of `module`."""
    for weight in module.parameters():
        weight.requires_grad_(True)


# Entropy of a soft (softmax) prediction, per spatial location.
def calc_entropy(prob):
    """Return the entropy map of a softmax output, summed over the class dim.

    :param prob: softmax of the score, class scores along dim 1
    :return: entropy per location (class dimension reduced away)
    """
    log_prob = torch.log(prob + 1e-7)  # epsilon guards against log(0)
    return (-prob * log_prob).sum(dim=1)


@UDA.register_module()
class MultiTeacherIMDTGT(UDADecorator):
    """UDA training scheme with two EMA ("mean teacher") networks.

    The student model is trained with streams that alternate by iteration
    parity:

    * every iteration after the first: supervised loss on source images;
    * even iterations: the intermediate-domain teacher is EMA-updated and
      produces pseudo-labels for the intermediate (reference) images, which
      are TokenMix-ed with the source images for training;
    * odd iterations: the target-domain teacher is EMA-updated and produces
      pseudo-labels for the target images, which are TokenMix-ed with the
      source images for training.

    Both teachers are frozen copies of the student updated only via EMA.
    """

    def __init__(self, **cfg):
        super(MultiTeacherIMDTGT, self).__init__(**cfg)
        # print("#########################",cfg)
        self.local_iter = 0  # local iteration counter
        self.max_iters = cfg['max_iters']  # maximum number of training iterations
        # EMA decay upper bound; NOTE(review): also reused as the TokenMix
        # ratio for the target-domain mix in forward_train — confirm intended.
        self.alpha = cfg['alpha']
        self.pseudo_threshold = cfg['pseudo_threshold']  # confidence threshold for pseudo-labels
        self.psweight_ignore_top = cfg['pseudo_weight_ignore_top']
        self.psweight_ignore_bottom = cfg['pseudo_weight_ignore_bottom']
        self.psweight_ref_ignore_top = cfg['pseudo_ref_weight_ignore_top']
        self.mix = cfg['mix']
        self.blur = cfg['blur']
        self.color_jitter_s = cfg['color_jitter_strength']
        self.color_jitter_p = cfg['color_jitter_probability']
        self.debug_img_interval = cfg['debug_img_interval']
        self.class_probs = {}
        self.teacher_model_target = build_segmentor(deepcopy(cfg['model']))  # target-domain teacher segmentor
        self.teacher_model_imd = build_segmentor(deepcopy(cfg['model']))  # intermediate-domain teacher segmentor (teacher assistant)

    def _update_ema(self, iter, module, module_ema):
        """EMA-update `module_ema` towards `module`.

        The decay ramps up as ``1 - 1/(iter + 1)`` and is capped at
        ``self.alpha``, so early iterations track the student closely.
        """
        alpha_teacher = min(1 - 1 / (iter + 1), self.alpha)
        for ema_param, param in zip(module_ema.parameters(), module.parameters()):
            if not param.data.shape:  # scalar tensor
                ema_param.data = alpha_teacher * ema_param.data + (1 - alpha_teacher) * param.data
            else:
                ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]

    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        NOTE(review): in this implementation the backward passes happen
        inside ``forward_train`` (each partial loss calls ``.backward()``
        there); this method only zeroes gradients and steps the optimizer.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """

        optimizer.zero_grad()
        log_vars = self(**data_batch)
        optimizer.step()

        log_vars.pop('loss', None)  # remove the unnecessary 'loss'
        outputs = dict(
            log_vars=log_vars, num_samples=len(data_batch['img_metas']))
        return outputs

    def forward_train(self,
                      img, img_metas, gt_semantic_seg=None,
                      imd_img=None, imd_img_metas=None,
                      target_img=None, target_img_metas=None):
        """Forward function for training.

        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks (ground-truth
                segmentation maps of the source-domain images),
                used if the architecture supports semantic segmentation task.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
            :param img:
                src image from cityscapes
            :param img_metas:
                meta information of src images
            :param gt_semantic_seg:
                gt seg map of src images
            :param imd_img:
                intermediate reference images
            :param imd_img_metas:
                meta information of intermediate images
            :param gt_semantic_seg_imd:
                generated pseudo label of intermediate images (not very correct)
            :param target_img:
                acdc fog images
            :param target_img_metas:
                meta information of acdc fog images
        """
        log_vars = {}
        batch_size = img.shape[0]
        dev = img.device

        # Mean and std of the original (source) images, used for
        # de-normalization and strong augmentation parameters.
        means, stds = get_mean_std(img_metas, dev)
        strong_parameters = {
            'mix': None,
            'color_jitter': random.uniform(0, 1),
            'color_jitter_s': self.color_jitter_s,
            'color_jitter_p': self.color_jitter_p,
            'blur': random.uniform(0, 1) if self.blur else 0,
            'mean': means[0].unsqueeze(0),  # assume same normalization
            'std': stds[0].unsqueeze(0)
        }

        # Init/update ema model
        if self.local_iter == 0:
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_imd))
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_target))

            # always freeze teacher_model
            freeze_module(get_module(self.teacher_model_imd))
            freeze_module(get_module(self.teacher_model_target))

        if self.local_iter > 0:
            # 1. Train on source images, no matter the iter changes
            # This step ensures the model keeps learning from source-domain
            # images in every iteration.
            clean_losses = self.get_model().forward_train(
                img, img_metas, gt_semantic_seg, return_feat=False)
            # Prefix the losses with 'src' so source-domain losses can be
            # distinguished in downstream processing and monitoring.
            clean_losses = add_prefix(clean_losses, 'src')
            # Parse the returned losses for backpropagation and logging.
            clean_loss, clean_log_vars = self._parse_losses(clean_losses)
            log_vars.update(clean_log_vars)  # record the parsed loss values
            # Backpropagate the source-domain loss to update the model.
            clean_loss.backward(retain_graph=False)

        # 1. EMA update:
        #    Each teacher's EMA parameters are updated every other iteration
        #    so the teacher weights change smoothly.
        # 2. Pseudo-label generation:
        #    The teacher produces pseudo-labels for its domain and a per-pixel
        #    confidence; the fraction of high-confidence pixels becomes the
        #    pseudo-label weight.
        # 3. Augmentation and mixing:
        #    Source and pseudo-labeled images are combined via TokenMix to
        #    produce mixed images and labels for training.
        # 4. Training on mixed images:
        #    The mixed batch is fed to the student, and the resulting loss is
        #    backpropagated to update the student's parameters.

        if self.local_iter % 2 == 0:
            # Update the intermediate-domain teacher network parameters.
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_imd))

            # 2. Generate pseudo_label by teacher assist model
            # Disable dropout and drop-path in all teacher submodules so the
            # teacher behaves deterministically during pseudo-label inference.
            for m in get_module(self.teacher_model_imd).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False
            imd_ema_logits = get_module(self.teacher_model_imd).encode_decode(imd_img, imd_img_metas)
            imd_ema_softmax = torch.softmax(imd_ema_logits.detach(), dim=1)
            imd_pseudo_prob, imd_pseudo_label = torch.max(imd_ema_softmax, dim=1)
            imd_ps_large_p = imd_pseudo_prob.ge(self.pseudo_threshold).long() == 1
            imd_ps_size = np.size(np.array(imd_pseudo_label.cpu()))
            # Fraction of confident pixels, broadcast to a per-pixel weight map.
            imd_pseudo_weight = torch.sum(imd_ps_large_p).item() / imd_ps_size
            imd_pseudo_weight = imd_pseudo_weight * torch.ones(
                imd_pseudo_prob.shape, device=dev)

            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                imd_pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                imd_pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            imd_gt_pixel_weight = torch.ones(imd_pseudo_weight.shape, device=dev)

            # Mix source images with intermediate-domain pseudo-labels via TokenMix.
            src_imd_mixed_img, src_imd_mixed_lbl = generate_tokenmix(img, imd_pseudo_label, lam=0.5)
            if src_imd_mixed_lbl.dim() == 3:
                src_imd_mixed_lbl = src_imd_mixed_lbl.unsqueeze(1)
            src_imd_mixed_lbl = src_imd_mixed_lbl.long()

            # for i in range(batch_size):
            #     _, imd_pseudo_weight[i] = strong_transform(
            #         strong_parameters,
            #         target=torch.stack((imd_gt_pixel_weight[i], imd_pseudo_weight[i])))

                # Train on the mixed image and label.
            src_imd_mix_losses = self.get_model().forward_train(
                src_imd_mixed_img, img_metas, src_imd_mixed_lbl, return_feat=True)
            src_imd_mix_losses.pop('features')
            src_imd_mix_losses = add_prefix(src_imd_mix_losses, 'src_imd_mix')
            src_imd_mix_loss, src_imd_mix_log_vars = self._parse_losses(src_imd_mix_losses)
            log_vars.update(src_imd_mix_log_vars)
            src_imd_mix_loss.backward()

        if self.local_iter % 2 == 1:
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_target))
            # 3. Train on src_tgt image
            # Disable dropout and drop-path for deterministic teacher inference.
            for m in get_module(self.teacher_model_target).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False

            ema_logits = get_module(self.teacher_model_target).encode_decode(target_img, target_img_metas)
            ema_softmax = torch.softmax(ema_logits.detach(), dim=1)
            pseudo_prob, pseudo_label = torch.max(ema_softmax, dim=1)
            ps_large_p = pseudo_prob.ge(self.pseudo_threshold).long() == 1  # pseudo-label confidence mask
            ps_size = np.size(np.array(pseudo_label.cpu()))
            # Pseudo-label weight: the fraction of confident pixels, expanded
            # to the full pseudo-label shape for use in the loss.
            pseudo_weight = torch.sum(ps_large_p).item() / ps_size
            pseudo_weight = pseudo_weight * torch.ones(pseudo_prob.shape, device=dev)

            # Adjust pseudo-label weights to prevent pseudo-label drift.
            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            gt_pixel_weight = torch.ones(pseudo_weight.shape, device=dev)

            # Mix source images with target-domain pseudo-labels via TokenMix.
            mixed_img, mixed_lbl = generate_tokenmix(img, pseudo_label, lam=self.alpha)
            if mixed_lbl.dim() == 3:
                mixed_lbl = mixed_lbl.unsqueeze(1)
            mixed_lbl = mixed_lbl.long()

            # Train on mixed images
            mix_losses = self.get_model().forward_train(
                mixed_img, img_metas, mixed_lbl, pseudo_weight, return_feat=True)
            mix_losses.pop('features')
            mix_losses = add_prefix(mix_losses, 'mix')
            mix_loss, mix_log_vars = self._parse_losses(mix_losses)  # parse the losses
            log_vars.update(mix_log_vars)
            mix_loss.backward()  # compute gradients of this loss w.r.t. the model parameters

        if self.local_iter % self.debug_img_interval == 0:
            out_dir = os.path.join(self.train_cfg['work_dir'],
                                   'visualization')
            os.makedirs(out_dir, exist_ok=True)
            with torch.no_grad():
                src_logits = self.get_model().encode_decode(img, img_metas)
                imd_logits = self.get_model().encode_decode(imd_img, imd_img_metas)
                trg_logits = self.get_model().encode_decode(target_img, target_img_metas)

                src_softmax_prob = torch.softmax(src_logits, dim=1)
                _, pred_src = torch.max(src_softmax_prob, dim=1)

                imd_softmax_prob = torch.softmax(imd_logits, dim=1)
                _, pred_imd = torch.max(imd_softmax_prob, dim=1)

                trg_softmax_prob = torch.softmax(trg_logits, dim=1)
                _, pred_trg = torch.max(trg_softmax_prob, dim=1)

                vis_img = torch.clamp(denorm(img, means, stds), 0, 1)
                vis_imd_img = torch.clamp(denorm(imd_img, means, stds), 0, 1)
                vis_trg_img = torch.clamp(denorm(target_img, means, stds), 0, 1)
                # The mixed images only exist on the parity branch that
                # produced them, so guard the visualization the same way.
                if self.local_iter % 2 == 0:
                    vis_src_imd_mix_img = torch.clamp(denorm(src_imd_mixed_img, means, stds), 0, 1)
                if self.local_iter % 2 == 1:
                    vis_src_tgt_mix_img = torch.clamp(denorm(mixed_img, means, stds), 0, 1)

                for j in range(batch_size):
                    rows, cols = 3, 4
                    fig, axs = plt.subplots(
                        rows,
                        cols,
                        figsize=(3 * cols, 3 * rows),
                        gridspec_kw={
                            'hspace': 0.1,
                            'wspace': 0,
                            'top': 0.95,
                            'bottom': 0,
                            'right': 1,
                            'left': 0
                        },
                    )
                    # Source domain related
                    subplotimg(axs[0][0], vis_img[j], 'Source Image')
                    subplotimg(axs[0][1], gt_semantic_seg[j], 'Source Seg GT', cmap='cityscapes')
                    subplotimg(axs[0][2], pred_src[j], 'Pred Source', cmap='cityscapes')

                    # Intermediate domain related
                    subplotimg(axs[1][0], vis_imd_img[j], 'Intermediate (Ref) Image')
                    subplotimg(axs[1][1], pred_imd[j], 'Pred Intermediate (Ref)', cmap='cityscapes')
                    if self.local_iter % 2 == 0:
                        subplotimg(axs[1][2], vis_src_imd_mix_img[j], 'Src_Imd_Mix Image')
                        subplotimg(axs[1][3], src_imd_mixed_lbl[j], 'Src_Imd_Mix Label', cmap='cityscapes')

                    # Target domain related
                    subplotimg(axs[2][0], vis_trg_img[j], 'Target Image')
                    subplotimg(axs[2][1], pred_trg[j], 'Pred target', cmap='cityscapes')
                    if self.local_iter % 2 == 1:
                        subplotimg(axs[2][2], vis_src_tgt_mix_img[j], 'Src_Tgt_Mix Image')
                        subplotimg(axs[2][3], mixed_lbl[j], 'Src_Tgt_Mix Label', cmap='cityscapes')

                    for ax in axs.flat:
                        ax.axis('off')
                    plt.savefig(os.path.join(out_dir, f'{(self.local_iter + 1):06d}_{j}.png'))
                    plt.close()

        self.local_iter += 1
        return log_vars
