# Copyright (c) OpenMMLab. All rights reserved.
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmocr.models.builder import DECODERS
from .base_decoder import BaseDecoder
from mmocr.models.builder import build_decoder


@DECODERS.register_module()
class MLTDecoder(BaseDecoder):
    """Multi-lingual text decoder with one linear head per language.

    A shared ``aligner`` (an NRTR-style transformer decoder, built from the
    ``aligner`` config) produces aligned per-step features; each language then
    owns a separate ``nn.Linear`` head (stored as the attribute
    ``<lang>_decoder``) that maps those features to character logits.

    Args:
        langs (list[str], optional): Language names; one head is created per
            entry. Defaults to the seven MLT languages.
        num_classes (int): Output vocabulary size, shared by all heads.
        start_idx (int): Index of the start-of-sequence token.
        padding_idx (int): Index of the padding token.
        max_seq_len (int): Maximum decoded sequence length.
        in_channels (int): Channel dimension of the aligned features fed to
            each language head.
        aligner (dict): Config dict for the shared aligner decoder; it is
            copied before being completed with the vocabulary parameters.
        init_cfg (dict, optional): Initialization config for ``BaseDecoder``.
    """

    # Default language set as a tuple so the class-level default is immutable.
    DEFAULT_LANGS = ('arabic', 'bangla', 'chinese', 'latin', 'hindi',
                     'korean', 'japanese')

    def __init__(self,
                 langs=None,
                 num_classes=None,
                 start_idx=None,
                 padding_idx=None,
                 max_seq_len=50,
                 in_channels=512,
                 aligner=dict(type='NRTRAligner'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        self.max_seq_len = max_seq_len
        # Copy to avoid the shared-mutable-default pitfall (the old default
        # ``langs=[...]`` list was shared across every instantiation).
        self.langs = list(langs) if langs is not None else list(
            self.DEFAULT_LANGS)
        for lang in self.langs:
            # One classification head per language. Use ``in_channels``
            # instead of the previously hard-coded 512 (default unchanged).
            setattr(self, lang + '_decoder',
                    nn.Linear(in_channels, num_classes))

        # Copy before updating so a caller-supplied (or the shared default)
        # config dict is never mutated in place.
        aligner = dict(aligner)
        aligner.update(
            num_classes=num_classes,
            start_idx=start_idx,
            padding_idx=padding_idx,
            max_seq_len=self.max_seq_len)
        self.aligner = build_decoder(aligner)

    def forward_train(self, feat, out_enc, targets_dict, img_metas, lang_dict,
                      *args, **kwargs):
        """Run the shared aligner, then every language head.

        Args:
            feat: Backbone features, forwarded to the aligner.
            out_enc: Encoder output.
            targets_dict: Target sequences used for teacher forcing.
            img_metas: Per-image meta info used by the aligner.
            lang_dict: Per-sample language indices into ``self.langs``.

        Returns:
            tuple(dict, dict): ``(output, masks)`` where ``output[lang]`` is
            that head's logits over the whole batch and ``masks[lang]`` is a
            boolean array selecting the samples of that language.
        """
        aligned_features = self.aligner.forward_train(feat, out_enc,
                                                      targets_dict, img_metas)
        lang_ids = np.array(lang_dict)
        output = {}
        masks = {}
        for i, lang in enumerate(self.langs):
            head = getattr(self, lang + '_decoder')
            output[lang] = head(aligned_features)
            masks[lang] = lang_ids == i
        return output, masks

    def forward_test(self, feat, out_enc, img_metas, out_cla, *args, **kwargs):
        """Greedy autoregressive decoding, routing each sample to the head of
        its language.

        The language per sample is taken from ``kwargs['gt_lang']`` when
        provided, otherwise argmax over the language classifier ``out_cla``.

        Args:
            feat: Backbone features (unused here; kept for API symmetry).
            out_enc: Encoder output, shape starting with the batch size.
            img_metas: Per-image meta info used for source masking.
            out_cla: Language-classification logits, ``(N, num_langs)``.

        Returns:
            tuple: ``(outputs, index)`` — per-step class probabilities of
            shape ``(N, max_seq_len, num_classes)`` and the per-sample
            language indices that were used for routing.
        """
        if 'gt_lang' in kwargs:
            # ``kwargs`` is always a dict, so no None check is needed.
            index = kwargs['gt_lang']
        else:
            # Pick the most probable language from the classifier logits.
            out = F.softmax(out_cla, dim=-1)
            _, max_idx = torch.max(out, dim=1, keepdim=False)
            index = max_idx.cpu().numpy()

        src_mask = self.aligner._get_mask(out_enc, img_metas)
        N = out_enc.size(0)
        init_target_seq = torch.full((N, self.max_seq_len + 1),
                                     self.aligner.padding_idx,
                                     device=out_enc.device,
                                     dtype=torch.long)
        # bsz * seq_len; every sequence starts with the SOS token.
        init_target_seq[:, 0] = self.aligner.start_idx

        outputs = []
        for step in range(self.max_seq_len):
            decoder_output = self.aligner._attention(
                init_target_seq, out_enc, src_mask=src_mask)
            # Per-language class probabilities at this step: bsz * num_classes.
            langs_output = [
                F.softmax(
                    getattr(self, lang + '_decoder')(
                        decoder_output[:, step, :]),
                    dim=-1) for lang in self.langs
            ]
            # Gather each sample's prediction from its own language head.
            step_result = torch.stack(
                [langs_output[index[i]][i] for i in range(N)], dim=0)
            outputs.append(step_result)
            # Feed the greedy choice back as the next input token.
            _, step_max_index = torch.max(step_result, dim=-1)
            init_target_seq[:, step + 1] = step_max_index

        outputs = torch.stack(outputs, dim=1)

        return outputs, index

