# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch
from mmocr.models.builder import LOSSES
from .ce_loss import CELoss
import numpy as np
import torch.nn.functional as F
@LOSSES.register_module()
class LanguageLoss(CELoss):
    """Loss for the language (transformer) branch.

    Computes a cross-entropy loss over character logits and, optionally, a
    CLIP-style vision-language contrastive loss between per-token image and
    text features.

    Args:
        ignore_index (int, optional): The character index to be ignored in
            loss computation.
        reduction (str): Type of reduction to apply to the output,
            should be one of the following: ("none", "mean", "sum").
        flatten (bool): Whether to flatten the vectors for loss computation.
        with_vlloss (bool): Whether to add the vision-language contrastive
            loss term (``loss_clip``).
        vl_loss_weight (float): Weight applied to the contrastive loss term.

    Warning:
        Assumes that the first input token is always `<SOS>`; it is dropped
        from the targets before loss computation.
    """

    def __init__(self,
                 ignore_index=-1,
                 reduction='sum',
                 flatten=True,
                 with_vlloss=False,
                 vl_loss_weight=1,
                 **kwargs):
        super().__init__(ignore_index, reduction)
        assert isinstance(flatten, bool)

        self.flatten = flatten
        self.ignore_index = ignore_index
        self.vl_loss = with_vlloss
        if self.vl_loss:
            # Learnable temperature, initialized to log(1/0.07) as in CLIP.
            self.logit_scale = torch.nn.Parameter(
                torch.ones([]) * np.log(1 / 0.07))
            self.vl_loss_weight = vl_loss_weight

    def format(self, outputs, targets_dict):
        """Align logits and targets for cross-entropy computation.

        Drops the leading `<SOS>` token from the padded targets and reshapes
        both tensors according to ``self.flatten``.

        Args:
            outputs (Tensor): Raw logits of shape :math:`(N, T, C)`.
            targets_dict (dict): Must contain ``padded_targets`` of shape
                :math:`(N, T)`.

        Returns:
            tuple: ``(outputs, targets)`` reshaped for ``self.loss_ce``.
        """
        targets = targets_dict['padded_targets']
        # Drop the first (<SOS>) token; outputs are assumed to already be
        # aligned with targets[:, 1:].
        targets = targets[:, 1:].contiguous()
        if self.flatten:
            outputs = outputs.view(-1, outputs.size(-1))
            targets = targets.view(-1)
        else:
            # Unflattened cross entropy expects (N, C, T).
            outputs = outputs.permute(0, 2, 1).contiguous()

        return outputs, targets

    def forward(self, lan_out, targets_dict, img_metas=None):
        """
        Args:
            lan_out (dict): Language-branch outputs. Key ``cls`` holds raw
                logits of shape :math:`(N, T, C)`. When the contrastive loss
                is enabled, also requires ``output`` (text features),
                ``feature`` (image features) and ``pad_mask``.
            targets_dict (dict): A dict with a key ``padded_targets``, which
                is a tensor of shape :math:`(N, T)`. Each element is the
                index of a character.
            img_metas (None): Unused.

        Returns:
            dict: A loss dict with the key ``loss_tf`` and, when the
            contrastive loss is enabled, ``loss_clip``.
        """
        outputs, targets = self.format(lan_out['cls'], targets_dict)
        targets = targets.to(outputs.device)
        loss_ce = self.loss_ce(outputs, targets)
        losses = dict(loss_tf=loss_ce)

        if self.vl_loss:
            text_feature = lan_out['output']
            pad_mask = lan_out['pad_mask']
            image_feature = lan_out['feature']

            # L2-normalize features so the dot products below are cosine
            # similarities.
            image_features = image_feature / image_feature.norm(
                dim=-1, keepdim=True)
            text_features = text_feature / text_feature.norm(
                dim=-1, keepdim=True)
            # Keep the temperature bounded: exp(4.6052) ~= 100, as in CLIP.
            self.logit_scale.data = torch.clamp(
                self.logit_scale.data, 0, 4.6052)
            logit_scale = self.logit_scale.exp()

            n, t, c = image_features.size()
            image_flat = image_features.view(-1, c).contiguous()
            text_flat = text_features.view(-1, c).contiguous()
            logits_per_image = logit_scale * image_flat @ text_flat.t()
            logits_per_text = logit_scale * text_flat @ image_flat.t()

            # Each of the N*T image tokens is positive-paired with the text
            # token at the same flattened position.
            label = torch.arange(n * t, device=logits_per_image.device)
            loss_image = F.cross_entropy(
                logits_per_image, label, reduction='none')
            loss_text = F.cross_entropy(
                logits_per_text, label, reduction='none')
            # NOTE(review): positions where pad_mask == 1 are the ones
            # counted in the loss below — confirm the mask marks valid
            # (non-padded) tokens rather than padding.
            loss_mask = pad_mask.view(n, t)
            loss_image = loss_image.view(n, t)
            loss_image = torch.sum(loss_image * loss_mask) / (
                torch.sum(loss_mask) + 1)
            loss_text = loss_text.view(n, t)
            loss_text = torch.sum(loss_text * loss_mask) / (
                torch.sum(loss_mask) + 1)
            loss_clip = (loss_text + loss_image) / 2
            losses['loss_clip'] = loss_clip * self.vl_loss_weight
        return losses