# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmocr.models.builder import LOSSES


@LOSSES.register_module()
class MultiheadLoss(nn.Module):
    """Multi-head loss combining per-language SAR decoder losses with a
    language-classification cross-entropy loss.

    Args:
        dec_weight (float): Weight applied to every per-language decoder
            loss. Defaults to 1.0.
        cla_weight (float): Weight applied to the language classifier
            loss. Defaults to 1.0.
        num_classes (list[int], optional): Number of output classes per
            language. Stored for reference only; not used in the loss
            computation here.
        num_lang (int): Number of supported languages. Defaults to 7.
        ignore_index (list[int], optional): Per-language ignore indices
            for the decoder cross-entropy losses, in ``LANGS`` order.
        ignore_first_char (bool): Stored flag; not consumed in this
            class. Defaults to False.
        reduction (str): Reduction mode for all cross-entropy losses.
            Defaults to 'mean'.

    Returns:
        dict: ``'<lang>_loss'`` for every language present in
        ``outputs['out_dec']``, plus ``'cla_loss'`` for the classifier.
    """

    # Fixed language order; must match the ordering of ``ignore_index``.
    LANGS = ('arabic', 'bangla', 'chinese', 'latin', 'hindi', 'korean',
             'japanese')

    def __init__(self,
                 dec_weight=1.0,
                 cla_weight=1.0,
                 num_classes=None,
                 num_lang=7,
                 ignore_index=None,
                 ignore_first_char=False,
                 reduction='mean',
                 **kwargs):
        assert isinstance(dec_weight, (float, int))
        assert isinstance(cla_weight, (float, int))
        super().__init__()
        self.dec_weight = dec_weight
        self.cla_weight = cla_weight
        # Avoid mutable default arguments; [] keeps the old attribute value.
        self.num_classes = [] if num_classes is None else num_classes
        self.num_lang = num_lang
        ignore_index = [] if ignore_index is None else ignore_index
        # One CE loss per language, kept as individual attributes
        # (``loss_ce_arabic`` etc.) for backward compatibility with any
        # external access.
        for lang, idx in zip(self.LANGS, ignore_index):
            setattr(
                self, f'loss_ce_{lang}',
                nn.CrossEntropyLoss(ignore_index=idx, reduction=reduction))
        # Classifier loss; -1 marks padding targets to be ignored.
        self.loss_ce = nn.CrossEntropyLoss(ignore_index=-1,
                                           reduction=reduction)
        self.ignore_first_char = ignore_first_char

    def forward(self, outputs, targets_dict, lang_dict, img_metas=None):
        """Compute the per-language decoder losses and the classifier loss.

        Args:
            outputs (dict): Model outputs with optional keys ``'out_dec'``
                (dict of per-language logits, shape (N, T, C)) and
                ``'out_cla'`` (classifier logits, shape (N, num_lang)).
            targets_dict (dict): Per-language dict containing
                ``'padded_targets'`` (N, T) index tensors.
            lang_dict (Sequence[int]): Ground-truth language label per
                sample.
            img_metas (list, optional): Unused; kept for API
                compatibility.

        Returns:
            dict[str, torch.Tensor]: Weighted loss per head.
        """
        assert 'out_cla' in outputs or 'out_dec' in outputs
        losses = {}

        # Pick the device from whichever output is available; the assert
        # above guarantees at least one of the two keys exists.
        ref = outputs.get('out_cla')
        if ref is None:
            ref = next(iter(outputs['out_dec'].values()))
        device = ref.device

        # SAR loss (decoder): one entry per language present in out_dec.
        if outputs.get('out_dec') is not None:
            for lang in self.LANGS:
                if lang not in outputs['out_dec']:
                    continue
                # Shift by one: predict token t from tokens < t (drop the
                # start token from targets, the last step from logits).
                targets = targets_dict[lang]['padded_targets']
                targets = targets[:, 1:].contiguous()
                # (N, T-1, C) -> (N, C, T-1) as required by CrossEntropyLoss.
                dec_outputs = outputs['out_dec'][lang][:, :-1, :].permute(
                    0, 2, 1).contiguous()
                loss_fn = getattr(self, f'loss_ce_{lang}')
                losses[f'{lang}_loss'] = loss_fn(
                    dec_outputs, targets.to(device)) * self.dec_weight

        # CE loss (language classifier).
        if outputs.get('out_cla') is not None:
            cla_outputs = outputs['out_cla']
            lang_targets = torch.tensor(lang_dict, dtype=torch.long,
                                        device=device)
            losses['cla_loss'] = self.loss_ce(
                cla_outputs, lang_targets) * self.cla_weight
        return losses
