# ---------------------------------------------------------------
# Copyright (c) 2021-2022 ETH Zurich, Lukas Hoyer. All rights reserved.
# Licensed under the Apache License, Version 2.0
# ---------------------------------------------------------------

# The ema model update and the domain-mixing are based on:
# https://github.com/vikolss/DACS
# Copyright (c) 2020 vikolss. Licensed under the MIT License.
# A copy of the license is available at resources/license_dacs

import math
import os
import random
from copy import deepcopy

import mmcv
import numpy as np
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from timm.models.layers import DropPath
from torch.nn.modules.dropout import _DropoutNd

from mmseg.core import add_prefix
from mmseg.models import UDA, build_segmentor
from mmseg.models.uda.uda_decorator import UDADecorator, get_module
from mmseg.models.utils.dacs_transforms import (denorm, get_class_masks,
                                                get_mean_std, strong_transform)
from mmseg.models.utils.visualization import subplotimg



# NOTE(review): `os` is re-imported here (it is already imported at the top
# of the file); harmless but redundant — consider consolidating the imports.
import os
import sys

# Prepend the project root (two directory levels above this file) to the
# module search path so sibling packages resolve when this module is used
# outside its normal package context.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)

# 逐层比较两个模型的参数
def _params_equal(ema_model, model):
    for ema_param, param in zip(ema_model.named_parameters(),
                                model.named_parameters()):
        if not torch.equal(ema_param[1].data, param[1].data):
            # print("Difference in", ema_param[0])
            return False
    return True

def gram_matrix(tensor):
    """Gram matrix (pairwise channel inner products) of a 3-D tensor.

    Args:
        tensor: tensor of shape (d, h, w).

    Returns:
        A (d, d) matrix where entry (i, j) is the dot product of the
        flattened i-th and j-th channels.
    """
    d, h, w = tensor.size()
    flat = tensor.view(d, h * w)
    return flat.matmul(flat.t())

def calc_grad_magnitude(grads, norm_type=2.0):
    """Total norm of a sequence of gradient tensors.

    Args:
        grads: iterable of gradient tensors.
        norm_type: order of the norm; ``math.inf`` gives the max-abs norm.

    Returns:
        A scalar tensor with the combined norm.
    """
    norm_type = float(norm_type)
    if norm_type == math.inf:
        return max(g.abs().max() for g in grads)
    # Norm-of-norms: reduce each tensor, then reduce across tensors.
    per_tensor = torch.stack([torch.norm(g, norm_type) for g in grads])
    return torch.norm(per_tensor, norm_type)

# 初始化教师网络权重
def _init_ema_weights(module, module_ema):
    for param in module_ema.parameters():
        param.detach_()
    mp = list(module.parameters())
    mcp = list(module_ema.parameters())
    for i in range(0, len(mp)):
        if not mcp[i].data.shape:  # scalar tensor
            mcp[i].data = mp[i].data.clone()
        else:
            mcp[i].data[:] = mp[i].data[:].clone()


def freeze_module(module):
    """Disable gradient tracking for all parameters of ``module``."""
    module.requires_grad_(False)


def unfreeze_module(module):
    """Re-enable gradient tracking for all parameters of ``module``."""
    module.requires_grad_(True)

def calc_entropy(prob):
    """Per-pixel Shannon entropy of a softmax probability map.

    Args:
        prob: softmax probabilities with the class dimension at dim=1.

    Returns:
        Entropy map with the class dimension reduced away.
    """
    eps = 1e-7  # guards log(0) for zero-probability classes
    return (-prob * torch.log(prob + eps)).sum(dim=1)


@UDA.register_module()
class MultiTeacherIMDTGT(UDADecorator):
    """UDA training with two EMA teachers and alternating domain mixing.

    One teacher serves an intermediate (reference) domain, the other the
    target domain. On even iterations the intermediate teacher is EMA-updated
    and the student trains on source/intermediate class-mixed images; on odd
    iterations the target teacher is updated and the student trains on
    source/target class-mixed images (DACS-style class mixing).
    """

    def __init__(self, **cfg):
        super(MultiTeacherIMDTGT, self).__init__(**cfg)
        self.local_iter = 0    # iteration counter local to this UDA wrapper
        self.max_iters = cfg['max_iters'] # total number of training iterations
        self.alpha = cfg['alpha'] # EMA decay rate for the teacher updates
        self.pseudo_threshold = cfg['pseudo_threshold']
        self.psweight_ignore_top = cfg['pseudo_weight_ignore_top']
        self.psweight_ignore_bottom = cfg['pseudo_weight_ignore_bottom']
        # NOTE(review): read from the config but never used in this class —
        # confirm whether it was meant to mask the intermediate pseudo weights.
        self.psweight_ref_ignore_top = cfg['pseudo_ref_weight_ignore_top']
        self.mix = cfg['mix']
        self.blur = cfg['blur']
        self.color_jitter_s = cfg['color_jitter_strength']
        self.color_jitter_p = cfg['color_jitter_probability']
        self.debug_img_interval = cfg['debug_img_interval']
        self.class_probs = {}
        # Two teachers built from the same architecture config as the student.
        self.teacher_model_target = build_segmentor(deepcopy(cfg['model']))
        self.teacher_model_imd = build_segmentor(deepcopy(cfg['model']))  # teacher assistence

    def _update_ema(self, iter, module, module_ema):
        """EMA update of ``module_ema`` towards ``module``, in place.

        Args:
            iter (int): current iteration (name shadows the builtin ``iter``;
                kept as-is for interface compatibility).
            module: student model providing the new weights.
            module_ema: teacher model updated in place.
        """
        # Warm-up: for early iterations 1 - 1/(iter+1) < alpha, so the
        # teacher initially tracks the student closely.
        alpha_teacher = min(1 - 1 / (iter + 1), self.alpha)
        for ema_param, param in zip(module_ema.parameters(), module.parameters()):
            if not param.data.shape:  # scalar tensor
                ema_param.data = alpha_teacher * ema_param.data + (1 - alpha_teacher) * param.data
            else:
                ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]

    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """

        optimizer.zero_grad()
        # forward_train() calls loss.backward() internally for each sub-loss,
        # so only the optimizer step remains to be done here.
        log_vars = self(**data_batch)
        optimizer.step()

        log_vars.pop('loss', None)  # remove the unnecessary 'loss'
        outputs = dict(
            log_vars=log_vars, num_samples=len(data_batch['img_metas']))
        return outputs

    def forward_train(self,
                      img, img_metas, gt_semantic_seg=None,
                      imd_img=None, imd_img_metas=None,
                      target_img=None, target_img_metas=None):
        """Forward function for training.

        Args:
            img (Tensor): Input images.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
            :param img:
                src image from cityscapes
            :param img_metas:
                meta information of src images
            :param gt_semantic_seg:
                gt seg map of src images
            :param imd_img:
                intermediate reference images
            :param imd_img_metas:
                meta information of intermediate images
            :param gt_semantic_seg_imd:
                generated pseudo label of intermediate images (not very correct)
            :param target_img:
                acdc fog images
            :param target_img_metas:
                meta information of acdc fog images
        """
        log_vars = {}
        batch_size = img.shape[0]
        dev = img.device

        # Shared augmentation parameters for DACS strong_transform; the mix
        # mask is filled in per sample inside the loops below.
        means, stds = get_mean_std(img_metas, dev)
        strong_parameters = {
            'mix': None,
            'color_jitter': random.uniform(0, 1),
            'color_jitter_s': self.color_jitter_s,
            'color_jitter_p': self.color_jitter_p,
            'blur': random.uniform(0, 1) if self.blur else 0,
            'mean': means[0].unsqueeze(0),  # assume same normalization
            'std': stds[0].unsqueeze(0)
        }

        # Init/update ema model
        if self.local_iter == 0:
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_imd))
            _init_ema_weights(module=self.get_model(),
                              module_ema=get_module(self.teacher_model_target))

            # always freeze teacher_model
            freeze_module(get_module(self.teacher_model_imd))
            freeze_module(get_module(self.teacher_model_target))

        # Skipped at iteration 0, i.e. right after the teachers were
        # initialised from the (still untrained) student.
        if self.local_iter > 0:
            # 1. Train on source images, no matter the iter changes
            clean_losses = self.get_model().forward_train(
                img, img_metas, gt_semantic_seg, return_feat=False)
            clean_losses = add_prefix(clean_losses, 'src')
            clean_loss, clean_log_vars = self._parse_losses(clean_losses)
            log_vars.update(clean_log_vars)
            clean_loss.backward(retain_graph=False)

        # Even iterations: intermediate-domain branch.
        if self.local_iter % 2 == 0:
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_imd))
            # 2. Generate pseudo_label by teacher assist model
            # Disable stochastic layers so the teacher prediction is
            # deterministic (the teacher module itself stays in train mode).
            for m in get_module(self.teacher_model_imd).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False
            imd_ema_logits = get_module(self.teacher_model_imd).encode_decode(
                imd_img, imd_img_metas)
            imd_ema_softmax = torch.softmax(imd_ema_logits.detach(), dim=1)
            imd_pseudo_prob, imd_pseudo_label = torch.max(imd_ema_softmax, dim=1)
            imd_ps_large_p = imd_pseudo_prob.ge(self.pseudo_threshold).long() == 1
            imd_ps_size = np.size(np.array(imd_pseudo_label.cpu()))
            # Scalar confidence ratio (fraction of pixels above threshold)
            # broadcast into a per-pixel weight map.
            imd_pseudo_weight = torch.sum(imd_ps_large_p).item() / imd_ps_size
            imd_pseudo_weight = imd_pseudo_weight * torch.ones(
                imd_pseudo_prob.shape, device=dev)

            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                imd_pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                imd_pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            imd_gt_pixel_weight = torch.ones(imd_pseudo_weight.shape, device=dev)

            # Apply mixing
            src_imd_mixed_img, src_imd_mixed_lbl = [None] * batch_size, [None] * batch_size
            src_imd_mix_masks = get_class_masks(gt_semantic_seg)

            for i in range(batch_size):
                strong_parameters['mix'] = src_imd_mix_masks[i]
                src_imd_mixed_img[i], src_imd_mixed_lbl[i] = strong_transform(
                    strong_parameters,
                    data=torch.stack((img[i], imd_img[i])),
                    target=torch.stack((gt_semantic_seg[i][0], imd_pseudo_label[i])))
                # Mix the weight maps with the same mask: source pixels get
                # weight 1, pasted intermediate pixels keep the pseudo weight.
                _, imd_pseudo_weight[i] = strong_transform(
                    strong_parameters,
                    target=torch.stack((imd_gt_pixel_weight[i], imd_pseudo_weight[i])))
            src_imd_mixed_img = torch.cat(src_imd_mixed_img)
            src_imd_mixed_lbl = torch.cat(src_imd_mixed_lbl)

            # Train on src_imd mixed images
            src_imd_mix_losses = self.get_model().forward_train(
                src_imd_mixed_img, img_metas, src_imd_mixed_lbl, imd_pseudo_weight, return_feat=True)
            src_imd_mix_losses.pop('features')
            src_imd_mix_losses = add_prefix(src_imd_mix_losses, 'src_imd_mix')
            src_imd_mix_loss, src_imd_mix_log_vars = self._parse_losses(src_imd_mix_losses)
            log_vars.update(src_imd_mix_log_vars)
            src_imd_mix_loss.backward()

        # Odd iterations: target-domain branch (mirrors the block above).
        if self.local_iter % 2 == 1:
            self._update_ema(self.local_iter,
                             module=self.get_model(),
                             module_ema=get_module(self.teacher_model_target))
            # 3. Train on src_tgt image
            for m in get_module(self.teacher_model_target).modules():
                if isinstance(m, _DropoutNd):
                    m.training = False
                if isinstance(m, DropPath):
                    m.training = False
            ema_logits = get_module(self.teacher_model_target).encode_decode(
                target_img, target_img_metas)

            ema_softmax = torch.softmax(ema_logits.detach(), dim=1)
            pseudo_prob, pseudo_label = torch.max(ema_softmax, dim=1)
            ps_large_p = pseudo_prob.ge(self.pseudo_threshold).long() == 1
            ps_size = np.size(np.array(pseudo_label.cpu()))
            pseudo_weight = torch.sum(ps_large_p).item() / ps_size
            pseudo_weight = pseudo_weight * torch.ones(
                pseudo_prob.shape, device=dev)

            if self.psweight_ignore_top > 0:
                # Don't trust pseudo-labels in regions with potential
                # rectification artifacts. This can lead to a pseudo-label
                # drift from sky towards building or traffic light.
                pseudo_weight[:, :self.psweight_ignore_top, :] = 0
            if self.psweight_ignore_bottom > 0:
                pseudo_weight[:, -self.psweight_ignore_bottom:, :] = 0
            gt_pixel_weight = torch.ones(pseudo_weight.shape, device=dev)

            # Apply mixing
            mixed_img, mixed_lbl = [None] * batch_size, [None] * batch_size
            mix_masks = get_class_masks(gt_semantic_seg)

            for i in range(batch_size):
                strong_parameters['mix'] = mix_masks[i]
                mixed_img[i], mixed_lbl[i] = strong_transform(
                    strong_parameters,
                    data=torch.stack((img[i], target_img[i])),
                    target=torch.stack((gt_semantic_seg[i][0], pseudo_label[i])))
                _, pseudo_weight[i] = strong_transform(
                    strong_parameters,
                    target=torch.stack((gt_pixel_weight[i], pseudo_weight[i])))
            mixed_img = torch.cat(mixed_img)
            mixed_lbl = torch.cat(mixed_lbl)

            # Train on mixed images
            mix_losses = self.get_model().forward_train(
                mixed_img, img_metas, mixed_lbl, pseudo_weight, return_feat=True)
            mix_losses.pop('features')
            mix_losses = add_prefix(mix_losses, 'mix')
            mix_loss, mix_log_vars = self._parse_losses(mix_losses)
            log_vars.update(mix_log_vars)
            mix_loss.backward()

        # Periodic qualitative debugging: dump student predictions on all
        # three domains plus the mixed sample of the branch that ran this
        # iteration (the parity guards below match the branches above, so
        # the mixed tensors referenced here are always defined).
        if self.local_iter % self.debug_img_interval == 0:
            out_dir = os.path.join(self.train_cfg['work_dir'],
                                   'visualization')
            os.makedirs(out_dir, exist_ok=True)
            with torch.no_grad():
                src_logits = self.get_model().encode_decode(img, img_metas)
                imd_logits = self.get_model().encode_decode(imd_img, imd_img_metas)
                trg_logits = self.get_model().encode_decode(target_img, target_img_metas)

                src_softmax_prob = torch.softmax(src_logits, dim=1)
                _, pred_src = torch.max(src_softmax_prob, dim=1)

                imd_softmax_prob = torch.softmax(imd_logits, dim=1)
                _, pred_imd = torch.max(imd_softmax_prob, dim=1)

                trg_softmax_prob = torch.softmax(trg_logits, dim=1)
                _, pred_trg = torch.max(trg_softmax_prob, dim=1)

                vis_img = torch.clamp(denorm(img, means, stds), 0, 1)
                vis_imd_img = torch.clamp(denorm(imd_img, means, stds), 0, 1)
                vis_trg_img = torch.clamp(denorm(target_img, means, stds), 0, 1)
                if self.local_iter % 2 == 0:
                    vis_src_imd_mix_img = torch.clamp(denorm(src_imd_mixed_img, means, stds), 0, 1)
                if self.local_iter % 2 == 1:
                    vis_src_tgt_mix_img = torch.clamp(denorm(mixed_img, means, stds), 0, 1)

                for j in range(batch_size):
                    rows, cols = 3, 4
                    fig, axs = plt.subplots(
                        rows,
                        cols,
                        figsize=(3 * cols, 3 * rows),
                        gridspec_kw={
                            'hspace': 0.1,
                            'wspace': 0,
                            'top': 0.95,
                            'bottom': 0,
                            'right': 1,
                            'left': 0
                        },
                    )
                    # Source domain related
                    subplotimg(axs[0][0], vis_img[j], 'Source Image')
                    subplotimg(axs[0][1], gt_semantic_seg[j], 'Source Seg GT', cmap='cityscapes')
                    subplotimg(axs[0][2], pred_src[j], 'Pred Source', cmap='cityscapes')

                    # Intermediate domain related
                    subplotimg(axs[1][0], vis_imd_img[j], 'Intermediate (Ref) Image')
                    subplotimg(axs[1][1], pred_imd[j], 'Pred Intermediate (Ref)', cmap='cityscapes')
                    if self.local_iter % 2 == 0:
                        subplotimg(axs[1][2], vis_src_imd_mix_img[j], 'Src_Imd_Mix Image')
                        subplotimg(axs[1][3], src_imd_mixed_lbl[j], 'Src_Imd_Mix Label', cmap='cityscapes')

                    # Target domain related
                    subplotimg(axs[2][0], vis_trg_img[j], 'Target Image')
                    subplotimg(axs[2][1], pred_trg[j], 'Pred target', cmap='cityscapes')
                    if self.local_iter % 2 == 1:
                        subplotimg(axs[2][2], vis_src_tgt_mix_img[j], 'Src_Tgt_Mix Image')
                        subplotimg(axs[2][3], mixed_lbl[j], 'Src_Tgt_Mix Label', cmap='cityscapes')

                    for ax in axs.flat:
                        ax.axis('off')
                    plt.savefig(os.path.join(out_dir, f'{(self.local_iter + 1):06d}_{j}.png'))
                    plt.close()

        self.local_iter += 1
        return log_vars
