import os
import argparse

import mindspore.numpy
import yaml
from mindspore import nn
from mindspore import Tensor
from mindspore import ops
from tools.model_utils import load_visual_model, load_text_model
from mindspore import dtype as mstype
from mindspore import numpy as np
from .modules import MultiCropWrapper, MultiCropWrapper_v2, CrossEntropyLabelSmooth, \
    ClusteringTwoHead, get_params_groups
from .modules import RandomAffine

EPS = 1e-8  # small epsilon to guard divisions/logs downstream

# Command-line interface. BUG FIX: the original parser declared no arguments,
# so reading args.config_path / args.ckpt_path below always raised
# AttributeError — the module could never be run.
# NOTE(review): module-level parsing runs on *every* import of this module
# and conflicts with any other parser in the process; consider moving this
# into a main() guarded by `if __name__ == "__main__":`.
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, required=True,
                    help='path to the YAML experiment config')
parser.add_argument('--ckpt_path', type=str, required=True,
                    help='path to the pretrained model checkpoint')
args = parser.parse_args()
config_path = args.config_path
ckpt_path = args.ckpt_path
with open(config_path, 'r') as stream:
    config = yaml.safe_load(stream)


class DINO_Clustering(nn.Cell):
    """Joint DINO self-distillation + CLIP contrastive + clustering model.

    Three losses share one visual backbone:
      * DINO loss between a multi-crop student and an EMA teacher,
      * a CLIP-style image/text contrastive loss with a learnable temperature,
      * an IIC-style two-head clustering loss on affine-paired views.

    NOTE(review): this class mixes MindSpore and PyTorch APIs
    (``torch.no_grad``, ``torch.cat``, ``.cuda()``, ``load_state_dict``),
    and several names used below (``GatherLayer``, ``get_rank``,
    ``get_world_size``, ``cosine_scheduler``, ``loss_fn``,
    ``get_root_logger``, ``load_state_dict``, ``torch``) are not imported
    in this chunk — confirm they exist elsewhere or the port is incomplete.
    """

    def __init__(
            self,
            head_dino,
            head_cluster,
            dino_cfg,
            lr_cfg,
            optimize_cfg,
            wd_cfg,
            label_smooth,
            batch_size_per_gpu=None,
            epochs=None,
            pretrained=None,
            dataset=None, ):
        """Build student/teacher towers, the frozen text encoder and losses.

        Args:
            head_dino: dict — DINO head config (out_dim, use_bn_in_head,
                norm_last_layer).
            head_cluster: dict — clustering head config (out_dim_A,
                out_dim_B, num_sub_heads).
            dino_cfg: dict — DINO temperatures / crop count / EMA momentum.
            lr_cfg / optimize_cfg / wd_cfg: schedule and optimizer settings.
            label_smooth: label-smoothing factor for the CLIP loss.
            batch_size_per_gpu, epochs: sizes consumed by init_schedule().
            pretrained, dataset: accepted for interface compatibility;
                unused in this constructor.
        """
        super().__init__()
        self.student_visual = load_visual_model(config['model']['visual'], ckpt_path)
        self.teacher_visual = load_visual_model(config['model']['visual'], ckpt_path)
        self.head_dino = head_dino
        self.head_cluster = head_cluster
        self.cast = ops.Cast()
        # NOTE(review): for NCHW input, axis 2 is H (a vertical flip);
        # confirm a horizontal flip (axis=[3]) wasn't intended.
        self.flip = ops.ReverseV2(axis=[2])
        embed_dim = 512
        # multi-crop wrapper handles forward with inputs of different resolutions
        self.student = MultiCropWrapper(
            self.student_visual,
            DINOHead(embed_dim, head_dino['out_dim'], head_dino['use_bn_in_head'],
                     head_dino['norm_last_layer']
                     ))
        self.teacher = MultiCropWrapper(
            self.teacher_visual,
            DINOHead(embed_dim, head_dino['out_dim'], head_dino['use_bn_in_head']),
        )

        # ============ preparing clustering ... ============
        in_embed_dim = 768
        self.student_cluster = MultiCropWrapper_v2(self.student_visual, ClusteringTwoHead(
            in_embed_dim,
            head_cluster['out_dim_A'],  # 25
            head_cluster['out_dim_B'],  # 75
            input_sz=[224, 224],
        ))

        # ============ end clustering ... ============
        # Teacher starts as an exact copy of the student and is then updated
        # only via EMA (see _momentum_update_teacher).
        # NOTE(review): load_state_dict/state_dict/.cuda() are PyTorch APIs;
        # MindSpore Cells use load_param_into_net / parameters_dict — confirm.
        self.teacher.load_state_dict(self.student.state_dict())
        self.student = self.student.cuda()
        self.student_cluster = self.student_cluster.cuda()

        # Teacher receives no gradients.
        for p in self.teacher.parameters():
            p.requires_grad = False
        print(f"Student and Teacher are built.")

        # ============ building text network ... ============
        self.transformer = load_text_model(config['model']['text'], ckpt_path)
        for p in self.transformer.parameters():  # do not update the text transformer (frozen)
            p.requires_grad = False

        self.dino_loss = DINOLoss(
            head_dino['out_dim'],
            dino_cfg['local_crops_number'] + 2,  # total number of crops = 2 global crops + local_crops_number
            dino_cfg['warmup_teacher_temp'],
            dino_cfg['teacher_temp'],
            dino_cfg['warmup_teacher_temp_epochs'],
            epochs,
            dino_cfg['epoch_split'],
        ).cuda()
        # ============ preparing CLIP loss ... ============
        # Learnable temperature initialised to ln(1/0.07), as in CLIP.
        # NOTE(review): nn.Parameter is a PyTorch name; MindSpore exposes
        # mindspore.Parameter — confirm.
        self.logit_scale = nn.Parameter(ops.log(Tensor(1 / 0.07)))
        self.clip_loss = CrossEntropyLabelSmooth(label_smooth)

        # ============ preparing optimizer ... ============
        # NOTE(review): params_groups is computed but never handed to an
        # optimizer here, while update_lr_wd() reads self.optimizer which is
        # never assigned in this class — the optimizer must be attached
        # externally; confirm.
        params_groups = get_params_groups([self.student])

        self.image_norm = nn.Norm(axis=-1, keep_dims=True)
        self.epochs = epochs
        self.lr = lr_cfg['lr']
        self.batch_size_per_gpu = batch_size_per_gpu  # used by the linear lr scaling rule in init_schedule
        self.min_lr = lr_cfg['min_lr']
        self.warmup_epochs = lr_cfg['warmup_epochs']
        self.weight_decay = wd_cfg['weight_decay']
        self.weight_decay_end = wd_cfg['weight_decay_end']
        self.momentum_teacher = dino_cfg['momentum_teacher']

        self.freeze_last_layer = optimize_cfg['freeze_last_layer']

    def init_schedule(self, data_len):
        """Create per-iteration lr / weight-decay / EMA-momentum schedules.

        Args:
            data_len: number of iterations per epoch (also stored for
                update_it()).
        """
        self.data_len = data_len
        # ============ init schedulers ... ============
        print(self.epochs, data_len, self.warmup_epochs)
        self.lr_schedule = cosine_scheduler(
            self.lr * (self.batch_size_per_gpu * get_world_size()) / 256.,  # linear scaling rule
            self.min_lr,
            self.epochs, data_len,
            warmup_epochs=self.warmup_epochs,
        )
        self.wd_schedule = cosine_scheduler(
            self.weight_decay,
            self.weight_decay_end,
            self.epochs, data_len,
        )
        # momentum parameter is increased to 1. during training with a cosine schedule
        self.momentum_schedule = cosine_scheduler(self.momentum_teacher, 1,
                                                  self.epochs, data_len)
        print(f"Loss, optimizer and schedulers ready.")

    @property
    def dtype(self):
        # Compute dtype of the student backbone, taken from its first conv.
        return self.student.backbone.conv1.weight.dtype

    @torch.no_grad()  # NOTE(review): torch is not imported here — PyTorch remnant
    def _momentum_update_teacher(self):
        """EMA update: teacher <- m * teacher + (1 - m) * student."""
        m = self.momentum_schedule[self.it]  # momentum parameter
        for param_q, param_k in zip(self.student.parameters(), self.teacher.parameters()):
            param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)

    def encode_image(self, image):
        """Return the first visual-backbone output for *image* (cast to the
        model dtype)."""
        return self.student_visual(image.type(self.dtype))[0]

    def get_feature_2(self, image):
        # returns the 512-dim feature
        return self.student_visual.get_img_feature_2(image.type(self.dtype))

    def get_channelmask(self, image, patches_h, patches_w):
        """Return the clustering wrapper's channel mask for a patch grid."""
        return self.student_cluster.get_mask(image, patches_h, patches_w)

    def get_feature_by_mask(self, image, mask):
        """Return visual features restricted by *mask*."""
        return self.student_visual.get_img_feature_by_mask(image, mask)

    def encode_text(self, text):
        """Encode tokenized *text* with the frozen text transformer."""
        return self.transformer(text)

    def update_it(self, it):
        """Set the global iteration index from the current epoch and step.

        Reads self.dino_loss.epoch, so DINOLoss.update_epoch() must have
        been called first.
        """
        self.it = self.data_len * self.dino_loss.epoch + it

    def update_lr_wd(self):
        # update weight decay and learning rate according to their schedule
        # NOTE(review): self.optimizer is never assigned in this class —
        # it must be attached externally before calling this.
        for i, param_group in enumerate(self.optimizer.param_groups):
            param_group["lr"] = self.lr_schedule[self.it]
            if i == 0:  # only the first group is regularized
                param_group["weight_decay"] = self.wd_schedule[self.it]

    def forward(self, crops1, crops2, text):
        """Compute all training losses for one batch.

        Args:
            crops1: global-crop stack; assumed (B, K, C, H, W) before the
                transpose below — TODO confirm against the data pipeline.
            crops2: local-crop stack with 10 crops, same layout assumed.
            text: tokenized captions.

        Returns:
            dict with dino_loss, clip_loss, cluster_loss_hA/hB and their
            weighted sum under key 'loss'.
        """
        # crops K B C H W
        crops1 = crops1.transpose(1, 0, 2, 3, 4)
        crops2 = crops2.transpose(1, 0, 2, 3, 4)
        images = [crops1[0], crops1[1]]
        for i in range(10):
            images.append(crops2[i])
        images.append(crops1[2])
        # NOTE(review): a new RandomAffine is constructed on every forward —
        # consider hoisting to __init__ if construction is expensive.
        random_affine = RandomAffine(min_rot=-30, max_rot=30, min_shear=-10, max_shear=10,
                                     min_scale=0.8, max_scale=1.2)
        img2, affine1_to_2, affine2_to_1 = random_affine(crops1[-1])
        # Random flip of the affine pair; mirror the inverse affine to match.
        if np.random.rand() > 0.5:
            img2 = self.flip(img2)
            affine2_to_1[0, :] *= -1.
        image = images[-3]
        # =============== dino loss=============
        # Teacher sees only the 2 global crops; student sees all but the
        # last 3 entries of `images`.
        teacher_output = self.teacher(images[:2])
        student_output = self.student(images[:-3])
        dino_loss = self.dino_loss(student_output, teacher_output)

        # ================ clip loss ==================
        image_features = self.encode_image(image)  # [18,512]
        text_features = self.encode_text(text)  # [18,512]

        # normalized features
        image_features = image_features / self.image_norm(image_features)
        image_features = self.cast(image_features, mstype.float16)
        text_features = text_features / self.image_norm(text_features)
        text_features = self.cast(text_features, mstype.float16)
        # cosine similarity as logits
        logit_scale = ops.exp(self.logit_scale)
        # NOTE(review): torch.cat and GatherLayer are not defined/imported in
        # this chunk (PyTorch remnants); the cross-rank all-gather needs a
        # MindSpore equivalent (e.g. ops.AllGather) — confirm.
        image_gather = torch.cat(GatherLayer.apply(image_features))
        text_gather = torch.cat(GatherLayer.apply(text_features))
        # full grad backward is better than detach one-side
        # NOTE(review): transpose(3, 2, 1, 0) implies 4-D gathered features,
        # but the [18,512] notes above suggest 2-D — confirm intended rank.
        logits_per_image = ops.clip_by_value((logit_scale * ops.matmul(image_features, text_gather.
                                                                       transpose(3, 2, 1, 0))), clip_value_min=-100,
                                             clip_value_max=100)
        logits_per_text = ops.clip_by_value((logit_scale * ops.matmul(text_features, image_gather.
                                                                      transpose(3, 2, 1, 0))), clip_value_min=-100,
                                            clip_value_max=100)
        # logits_per_image = (logit_scale * image_features @ text_gather.T).clamp(min=-100, max=100)
        # logits_per_text = (logit_scale * text_features @ image_gather.T).clamp(min=-100, max=100)

        # Ground truth: each image matches the caption at its own position
        # within this rank's slice of the gathered batch.
        rank = get_rank()
        target = mindspore.numpy.arange(rank * image.shape[0], rank * image.shape[0] + image.shape[0])
        # target = torch.arange(rank * image.shape[0], rank * image.shape[0] + image.shape[0]).to(image.device)
        clip_loss = (self.clip_loss(logits_per_image, target) + self.clip_loss(logits_per_text, target)) / 2

        # ========================cluster loss========================
        two_head_loss = {}
        for head in ["A", "B"]:
            # NOTE(review): `head` is never passed into student_cluster or
            # loss_fn, so both iterations compute identical values — confirm
            # head selection happens inside the wrapper.
            x1_outs = self.student_cluster(image)
            x2_outs = self.student_cluster(img2)
            avg_loss_batch = None
            avg_loss_no_lamb_batch = None
            num_sub_heads = self.head_cluster['num_sub_heads']
            for i in range(num_sub_heads):
                # NOTE(review): loss_fn is not defined in this chunk, and
                # `all_affine2_to_1` is undefined here — probably the
                # `affine2_to_1` computed above; confirm.
                cluster_loss, loss_no_lamb = loss_fn(x1_outs[i], x2_outs[i], all_affine2_to_1=all_affine2_to_1,
                                                     lamb=1, half_T_side_dense=0, half_T_side_sparse_max=0,
                                                     half_T_side_sparse_min=0)
            # NOTE(review): this accumulation sits *outside* the sub-head
            # loop, so only the last sub-head's loss survives before the
            # division below — likely an indentation bug; confirm.
            if avg_loss_batch is None:
                avg_loss_batch = cluster_loss
                avg_loss_no_lamb_batch = loss_no_lamb
            else:
                avg_loss_batch += cluster_loss
                avg_loss_no_lamb_batch += loss_no_lamb
            avg_loss_batch /= num_sub_heads
            avg_loss_no_lamb_batch /= num_sub_heads

            two_head_loss[head] = avg_loss_batch

        return dict(dino_loss=dino_loss, clip_loss=clip_loss, cluster_loss_hA=two_head_loss["A"],
                    cluster_loss_hB=two_head_loss["B"], loss=dino_loss + clip_loss +
                                                             0.05 * two_head_loss["A"] + 0.05 * two_head_loss["B"])

    def load_pretrain(self, pretrained):
        """Load CLIP-style pretrained weights, remapping keys to this model.

        Args:
            pretrained: path to a JIT archive or a plain state-dict file.

        Raises:
            FileNotFoundError: if *pretrained* does not exist.

        NOTE(review): torch.jit.load / torch.load / load_state_dict /
        get_root_logger are not imported in this chunk — PyTorch remnants
        in an otherwise MindSpore file; confirm.
        """
        logger = get_root_logger()
        logger.info(f"Loading pretrain model from {pretrained}")
        if not os.path.exists(pretrained):
            raise FileNotFoundError(f"File '{pretrained}' not exists")
        try:
            # loading JIT archive
            jit = True
            # load the whole jit model
            model_jit = torch.jit.load(pretrained, map_location="cpu")
            # model_jit = torch.jit.load(path, map_location="cuda" if jit else "cpu")
            state_dict = model_jit.state_dict()
        except RuntimeError:
            # loading saved state dict
            if jit:
                logger.warning(
                    f"File {pretrained} is not a JIT archive. Loading as a state dict instead"
                )
                jit = False
            state_dict = torch.load(pretrained, map_location="cpu")
            if 'state_dict' in state_dict:
                state_dict = state_dict['state_dict']
        # Drop CLIP bookkeeping entries that have no matching parameters here.
        for key in ["input_resolution", "context_length", "vocab_size"]:
            if key in state_dict:
                del state_dict[key]

        # TODO: enable the remapping below when loading the ViT pretrain;
        # remove it when loading our own saved model.
        state_dict = {k.replace("visual.", "student_visual."): v for k, v in state_dict.items()}
        state_dict["transformer.positional_embedding"] = state_dict.pop("positional_embedding")
        state_dict["transformer.text_projection"] = state_dict.pop("text_projection")
        state_dict["transformer.token_embedding.weight"] = state_dict.pop("token_embedding.weight")
        state_dict["transformer.ln_final.weight"] = state_dict.pop("ln_final.weight")
        state_dict["transformer.ln_final.bias"] = state_dict.pop("ln_final.bias")

        msg = load_state_dict(self, state_dict)
        print('Pretrained weights found at {} and loaded with msg1: {}'.format(pretrained, msg))


class DINOLoss(nn.Cell):
    """DINO self-distillation loss: cross-entropy between the teacher's
    centered/sharpened softmax and the student's softmax over all crop
    pairs, plus an EMA-updated center and a warmed-up teacher temperature.

    NOTE(review): uses several PyTorch-only APIs (register_buffer,
    torch.zeros, F.softmax, torch.sum, dist.all_reduce, torch.no_grad)
    inside a MindSpore nn.Cell — confirm the port is completed elsewhere.
    """

    def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, epoch_split, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.epoch_split = epoch_split
        # Schedule indexed in units of epoch/epoch_split: linear warmup from
        # warmup_teacher_temp to teacher_temp, then constant.
        self.teacher_temp_schedule = np.concatenate((
            np.linspace(warmup_teacher_temp,
                        teacher_temp, int(warmup_teacher_temp_epochs / self.epoch_split)),
            np.ones(int(nepochs / self.epoch_split) - int(warmup_teacher_temp_epochs / self.epoch_split)) * teacher_temp
        ))
        # print ( int(warmup_teacher_temp_epochs/self.epoch_split), int(nepochs/self.epoch_split))

    def forward(self, student_output, teacher_output):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.

        NOTE(review): reads self.iter, which only exists after
        update_iter() has been called — confirm call order in the trainer.
        """
        student_out = student_output / self.student_temp
        student_out = student_out.chunk(self.ncrops)

        # teacher centering and sharpening
        # print (self.iter)
        # print (self.epoch)
        # NOTE(review): 3337 looks like a hard-coded iterations-per-schedule
        # step; it should presumably derive from data_len/epoch_split —
        # confirm.
        temp = self.teacher_temp_schedule[int(self.iter / 3337)]
        # print(temp)
        teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
        teacher_out = teacher_out.detach().chunk(2)

        # Sum cross-entropy over every (teacher crop, student crop) pair,
        # skipping the matching view, then average.
        total_loss = 0
        n_loss_terms = 0
        for iq, q in enumerate(teacher_out):
            for v in range(len(student_out)):
                if v == iq:
                    # we skip cases where student and teacher operate on the same view
                    continue
                loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
                total_loss += loss.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        self.update_center(teacher_output)
        return total_loss

    @torch.no_grad()  # NOTE(review): PyTorch decorator in a MindSpore file
    def update_center(self, teacher_output):
        """
        Update center used for teacher output.

        NOTE(review): `dist` is not imported in this chunk — confirm the
        distributed backend this should use.
        """
        # t1 = time.time()
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * dist.get_world_size())
        # print ('update center time %.4f' % (time.time() - t1))
        # ema update
        self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)

    def update_epoch(self, epoch):
        """Record the current epoch (read by DINO_Clustering.update_it)."""
        self.epoch = epoch

    def update_iter(self, iter):
        """Record the current global iteration (read in forward)."""
        self.iter = iter


class DINOHead(nn.Module):
    """DINO projection head: MLP to a bottleneck, L2-normalization, then a
    weight-normalized linear layer producing `out_dim` prototype logits.

    NOTE(review): subclasses torch's nn.Module while the rest of the file
    uses MindSpore nn.Cell, and mixes MindSpore layers (nn.Dense,
    nn.SequentialCell, ops.L2Normalize) with torch ones (nn.Linear,
    nn.utils.weight_norm, self.apply). `trunc_normal_` and `nn.init` are
    not resolvable from this chunk's imports — confirm the target
    framework for this class.
    """

    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048,
                 bottleneck_dim=256):
        super().__init__()
        # At least one layer; nlayers == 1 degenerates to a single Dense.
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            self.mlp = nn.Dense(in_dim, bottleneck_dim)
        else:
            layers = [nn.Dense(in_dim, hidden_dim)]
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
            for _ in range(nlayers - 2):
                layers.append(nn.Dense(hidden_dim, hidden_dim))
                if use_bn:
                    layers.append(nn.BatchNorm1d(hidden_dim))
                layers.append(nn.GELU())
            layers.append(nn.Dense(hidden_dim, bottleneck_dim))
            self.mlp = nn.SequentialCell(*layers)
        self.apply(self._init_weights)
        # Weight-normalized last layer; its magnitude (weight_g) is pinned to
        # 1 and optionally frozen, matching the DINO reference recipe.
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        self.normalize = ops.L2Normalize(axis=-1, epsilon=1e-12)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zero bias.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """MLP -> L2-normalize -> weight-normalized linear projection."""
        # x = self.l1(x)  # TODO: a newly added linear layer; may be incompatible with the rest
        x = self.mlp(x)
        x = self.normalize(x)
        x = self.last_layer(x)
        return x

