from mindspore import nn
from .modules import RandomAffine
import numpy as np
from mindspore import ops
import yaml
from mindspore import dtype as mstype
from .modules import MultiCropWrapper, MultiCropWrapper_v2, DINOHead, ClusteringTwoHead, DINOLoss, \
    CrossEntropyLabelSmooth, IID_segmentation_loss_uncollapsed, get_param_groups
from model.visual_encoder import VisualTransformer, get_vit_config
from .text_encoder import TextTransformer
from mindspore import Tensor, Parameter
from tools.model_utils import load_visual_model
from model_utils.config import config
from mindspore.communication import get_rank
import mindspore

# Module-level ViT configuration, resolved once at import time.
vit_config = get_vit_config()

class Net(nn.Cell):
    """Vision-language model trained with three joint objectives.

    ``construct`` returns the scalar::

        loss = 0.05 * two_head_loss + clip_loss + dino_loss

    where
      * ``dino_loss``     -- DINO self-distillation between a student and a
        teacher visual transformer over multi-crop views,
      * ``clip_loss``     -- symmetric CLIP contrastive loss between image
        and text embeddings (label-smoothed cross entropy),
      * ``two_head_loss`` -- IIC-style two-head clustering loss between an
        image and a randomly affine-transformed copy of it.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Flip along axis 2 of an NCHW batch.
        # NOTE(review): axis 2 is the height axis for NCHW input -- confirm a
        # vertical (rather than horizontal) flip is intended here.
        self.flip = ops.ReverseV2(axis=[2])
        # L2 norm over the feature (last) dimension; used to normalize both
        # image and text embeddings before the contrastive loss.
        self.image_norm = nn.Norm(axis=-1, keep_dims=True)
        embed_dim = 512  # projected output dim of the visual encoders

        # Student and teacher visual encoders share the same ViT-B/16 layout.
        self.student_visual = VisualTransformer(
            input_resolution=224,
            layers=12,
            width=768,
            patch_size=16,
            output_dim=512,
        )
        self.teacher_visual = VisualTransformer(
            input_resolution=224,
            layers=12,
            width=768,
            patch_size=16,
            output_dim=512,
        )

        # DINO wrappers: the student head keeps its last layer un-normalized
        # (norm_last_layer=False); the teacher head uses the default.
        self.student = MultiCropWrapper(
            self.student_visual,
            DINOHead(embed_dim, out_dim=65536, use_bn=False,
                     norm_last_layer=False))
        self.teacher = MultiCropWrapper(
            self.teacher_visual,
            DINOHead(embed_dim, out_dim=65536, use_bn=False),
        )

        # The clustering head consumes the transformer width (768), not the
        # projected 512-dim embedding.
        in_embed_dim = 768
        self.student_cluster = MultiCropWrapper_v2(self.student_visual, ClusteringTwoHead(
            in_embed_dim,
            out_dim_A=25,
            out_dim_B=75,
            input_sz=(224, 224),
        ))

        self.dino_loss = DINOLoss(
            out_dim=65536,
            ncrops=12,  # total number of crops = 2 global crops + 10 local crops
            warmup_teacher_temp=0.04,
            teacher_temp=0.07,
            warmup_teacher_temp_epochs=30 * 0.0482,
            nepochs=10,
            epoch_split=0.0482,
        )

        # ============ building text network ... ============
        self.transformer = TextTransformer(
            context_length=77,
            vocab_size=49408,
            width=512,
            output_dim=512,
            heads=8,
            layers=12,
        )
        # CLIP-style learnable temperature, initialized to log(1 / 0.07).
        self.logit_scale = Parameter(ops.log(Tensor((1 / 0.07))))
        self.clip_loss = CrossEntropyLabelSmooth(epsilon=0.1)
        self.loss_fn = IID_segmentation_loss_uncollapsed
        self.cast = ops.Cast()
        # Built once here instead of on every forward pass (the original
        # constructed a fresh RandomAffine inside construct()).
        self.random_affine = RandomAffine(min_rot=-30, max_rot=30,
                                          min_shear=-10, max_shear=10,
                                          min_scale=0.8, max_scale=1.2)
        # NOTE(review): 1-based rank shifts the contrastive targets by one
        # full batch even on rank 0, yet no cross-device feature gather is
        # performed in construct() -- confirm this offset is intended.
        self.rank = get_rank() + 1

    def encode_image(self, image):
        """Return the projected image embedding from the student encoder."""
        return self.student_visual(image)[0]

    def encode_text(self, text):
        """Return the text embedding from the text transformer."""
        return self.transformer(text)

    def construct(self, crops1, crops2, text):
        """Compute the combined DINO + CLIP + clustering loss.

        Args:
            crops1: global/extra crops, batched as (B, ncrops, C, H, W)
                    -- assumed layout, TODO confirm against the data pipeline.
            crops2: 10 local crops, same batched layout.
            text:   tokenized text, shape (B, context_length).

        Returns:
            Scalar loss ``0.05 * two_head_loss + clip_loss + dino_loss``.
        """
        # Put the crop dimension first: (ncrops, B, C, H, W).
        crops1 = crops1.transpose(1, 0, 2, 3, 4)
        crops2 = crops2.transpose(1, 0, 2, 3, 4)
        # images = [2 global crops, 10 local crops, extra crop, img2, affine]
        images = [crops1[0], crops1[1]]
        for i in range(10):
            images.append(crops2[i])
        images.append(crops1[2])
        img2, affine1_to_2, affine2_to_1 = self.random_affine(crops1[-1])
        if np.random.rand() > 0.5:
            # Randomly flip the transformed image; the inverse affine must be
            # mirrored accordingly.
            img2 = self.flip(img2)
            affine2_to_1[0, :] *= -1.
        images.append(img2)
        images.append(affine2_to_1)
        image = images[-3]  # crops1[2]: untransformed view for CLIP/clustering
        all_affine2_to_1 = images[-1]

        # =============== DINO loss ===============
        student_output = self.student(images[:12])
        teacher_output = self.teacher(images[:2])  # teacher sees only the 2 global crops
        dino_loss = self.dino_loss(student_output, teacher_output)

        # =============== CLIP loss ===============
        image_features = self.encode_image(image)  # [B, 512]
        text_features = self.encode_text(text)  # [B, 512]

        # Normalize features, then cast to fp16 for the similarity matmuls.
        image_features = image_features / self.image_norm(image_features)
        image_features = self.cast(image_features, mstype.float16)
        text_features = text_features / self.image_norm(text_features)
        text_features = self.cast(text_features, mstype.float16)

        # No cross-device gather is performed: logits are local-batch only.
        image_gather = image_features
        text_gather = text_features

        logit_scale = ops.exp(self.logit_scale)
        logits_per_image = ops.clip_by_value(ops.matmul(logit_scale * image_features, text_gather.T), -100, 100)
        logits_per_text = ops.clip_by_value(ops.matmul(logit_scale * text_features, image_gather.T), -100, 100)

        # TODO(review): with local [B, B] logits, targets offset by
        # self.rank * B can exceed B - 1; verify against the loss one-hot.
        target = mindspore.numpy.arange(self.rank * image.shape[0], self.rank * image.shape[0] + image.shape[0])
        clip_loss = (self.clip_loss(logits_per_image, target) + self.clip_loss(logits_per_text, target)) / 2

        # =============== clustering loss ===============
        # AMP is not applied from this point on.
        two_head_loss = 0
        for head in ['A', 'B']:
            # TODO(review): `head` is never passed to student_cluster, so both
            # iterations compute identical losses; kept as-is to preserve the
            # current training signal.
            x1_outs = self.student_cluster(image)
            x2_outs = self.student_cluster(img2)

            avg_loss_batch = None
            avg_loss_no_lamb_batch = None
            num_sub_heads = 1
            for i in range(num_sub_heads):
                cluster_loss, loss_no_lamb = self.loss_fn(x1_outs[i], x2_outs[i],
                                                          all_affine2_to_1=all_affine2_to_1,
                                                          lamb=1, half_T_side_dense=0,
                                                          half_T_side_sparse_max=0,
                                                          half_T_side_sparse_min=0)
                # Accumulate INSIDE the loop so every sub-head contributes;
                # the original accumulated only after the loop, keeping just
                # the last sub-head (identical for num_sub_heads == 1).
                if avg_loss_batch is None:
                    avg_loss_batch = cluster_loss
                    avg_loss_no_lamb_batch = loss_no_lamb
                else:
                    avg_loss_batch += cluster_loss
                    avg_loss_no_lamb_batch += loss_no_lamb
            avg_loss_batch /= num_sub_heads
            avg_loss_no_lamb_batch /= num_sub_heads

            two_head_loss += avg_loss_batch

        loss = 0.05 * two_head_loss + clip_loss + dino_loss

        return loss
