# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmocr.models.builder import LOSSES,build_loss
from .ce_loss import TFLoss
@LOSSES.register_module()
class MultiLingualLoss(nn.Module):
    """Multilingual recognition loss.

    Combines one per-language decoder (sequence) loss, computed by a
    ``TFLoss`` per language, with a language-classification cross-entropy
    loss over the classifier logits.

    Args:
        dec_weight (float): Weight applied to each per-language decoder
            loss. Defaults to 1.0.
        cla_weight (float): Weight applied to the language-classification
            loss. Defaults to 1.0.
        all_langs (list[str], optional): Names of the supported languages.
            Defaults to ``['arabic', 'bangla', 'chinese', 'latin', 'hindi',
            'korean', 'japanese']``.
        ignore_index (list[int], optional): Per-language ignore indices
            forwarded to ``TFLoss``; must be the same length as
            ``all_langs``. Defaults to ``[]``.
        reduction (str): Loss reduction mode ('mean', 'sum' or 'none').
            Defaults to 'mean'.

    Returns:
        ``forward`` returns a dict mapping ``loss_<lang>`` (one entry per
        language present in ``outputs['out_dec']``) and ``loss_cla`` to
        their loss tensors.
    """

    def __init__(self,
                 dec_weight=1.0,
                 cla_weight=1.0,
                 all_langs=None,
                 ignore_index=None,
                 reduction='mean',
                 **kwargs):
        super().__init__()
        assert isinstance(dec_weight, (float, int))
        assert isinstance(cla_weight, (float, int))
        # None sentinels replace mutable default arguments; the effective
        # defaults are unchanged from the original signature.
        if all_langs is None:
            all_langs = [
                'arabic', 'bangla', 'chinese', 'latin', 'hindi', 'korean',
                'japanese'
            ]
        if ignore_index is None:
            ignore_index = []
        assert len(all_langs) == len(ignore_index)
        self.dec_weight = dec_weight
        self.cla_weight = cla_weight
        self.all_langs = all_langs
        # ModuleDict registers each TFLoss as a submodule, so buffers move
        # with .to()/.cuda() and appear in state_dict (a plain dict would
        # hide them from nn.Module bookkeeping).
        self.losses = nn.ModuleDict({
            lang: TFLoss(ignore_index=ig_idx, reduction=reduction)
            for lang, ig_idx in zip(all_langs, ignore_index)
        })
        self.loss_ce = nn.CrossEntropyLoss(
            ignore_index=-1, reduction=reduction)

    def forward(self, outputs, targets_dict, lang_dict, img_metas=None):
        """Compute the multilingual losses.

        Args:
            outputs (dict): Model outputs. Expected keys: ``out_dec``
                (dict mapping language name to decoder logits) and/or
                ``out_cla`` (language-classifier logits tensor). At least
                one must be present.
            targets_dict (dict): Per-language targets, keyed like
                ``outputs['out_dec']``.
            lang_dict (list[int]): Ground-truth language index per sample.
            img_metas (list[dict], optional): Unused; kept for interface
                compatibility.

        Returns:
            dict: ``loss_<lang>`` entries weighted by ``dec_weight`` and a
            ``loss_cla`` entry weighted by ``cla_weight``.
        """
        assert 'out_cla' in outputs or 'out_dec' in outputs
        losses = {}

        # Per-language decoder losses, now actually scaled by dec_weight
        # (the original stored the weight but never applied it).
        if outputs.get('out_dec') is not None:
            for lang in self.all_langs:
                if lang in outputs['out_dec']:
                    losses['loss_%s' % lang] = self.losses[lang](
                        outputs['out_dec'][lang],
                        targets_dict[lang])['loss_ce'] * self.dec_weight

        # Language classification (CE) loss. The device is taken from the
        # classifier logits inside this branch; the original read
        # outputs['out_cla'].device unconditionally and crashed with
        # KeyError when only 'out_dec' was supplied.
        if outputs.get('out_cla') is not None:
            cla_outputs = outputs['out_cla']
            lang_targets = torch.tensor(
                lang_dict, dtype=torch.long, device=cla_outputs.device)
            losses['loss_cla'] = self.loss_ce(
                cla_outputs, lang_targets) * self.cla_weight
        return losses
