# Copyright (c) OpenMMLab. All rights reserved.
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmocr.models.builder import DECODERS
from .base_decoder import BaseDecoder
from mmocr.models.builder import build_decoder


@DECODERS.register_module()
class MultiheadDecoder(BaseDecoder):
    """Route each sample in a batch to a per-language sub-decoder.

    One sub-decoder (default ``ParallelSARDecoder``) is built per language.
    During training, samples are dispatched by their ground-truth language
    id (``lang_dict``); during testing, by the argmax of the language
    classification logits ``out_cla`` (or by ``gt_lang`` when supplied).

    Args:
        num_lang (int): Number of supported languages. Language id ``i``
            corresponds to ``LANG_NAMES[i]``.
        num_classes (list[int]): Per-language number of output classes.
        start_idx (list[int]): Per-language start-token index.
        padding_idx (list[int]): Per-language padding-token index.
        max_seq_len (int): Maximum decoding sequence length.
        decoder (dict): Base config shared by every sub-decoder. It is
            copied before the per-language fields are filled in, so the
            caller's dict is left untouched.
        init_cfg (dict, optional): Passed through to ``BaseDecoder``.
    """

    # Single source of truth for the language order: id ``i`` everywhere
    # in this class refers to ``LANG_NAMES[i]``.
    LANG_NAMES = ('arabic', 'bangla', 'chinese', 'latin', 'hindi',
                  'korean', 'japanese')

    def __init__(self,
                 num_lang=7,
                 num_classes=None,
                 start_idx=None,
                 padding_idx=None,
                 max_seq_len=50,
                 decoder=dict(type='ParallelSARDecoder'),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        self.max_seq_len = max_seq_len
        self.num_lang = num_lang

        for i in range(num_lang):
            # Shallow-copy the base config so per-language updates do not
            # mutate the shared ``decoder`` dict (the previous version
            # aliased it and updated the caller's dict in place).
            tmp_decoder = dict(decoder)
            tmp_decoder.update(
                num_classes=num_classes[i],
                start_idx=start_idx[i],
                padding_idx=padding_idx[i],
                max_seq_len=self.max_seq_len)
            self.build_language_decoder(i, tmp_decoder)

    def forward_train(self, feat, out_enc, targets_dict, img_metas,
                      lang_dict, *args, **kwargs):
        """Run each language's decoder on its own subset of the batch.

        Args:
            feat (Tensor): Backbone features, indexed along dim 0 by sample.
            out_enc (Tensor): Encoder output, indexed along dim 0 by sample.
            targets_dict (dict): Must contain ``padded_targets`` with one
                row per sample.
            img_metas (list[dict]): Per-sample meta information.
            lang_dict (Sequence[int]): Ground-truth language id per sample.

        Returns:
            tuple(dict, dict): Per-language decoder outputs and the
            matching per-language target dicts, both keyed by language
            name (see ``LANG_NAMES``).
        """
        lang_dict = np.array(lang_dict)
        output = {}
        new_targets_dict = {}
        for i in range(self.num_lang):
            lang_name = self.LANG_NAMES[i]
            index = (lang_dict == i).nonzero()[0]
            if len(index) == 0:
                # No sample of this language in the batch: run the decoder
                # on a single dummy sample anyway so all of its parameters
                # take part in the backward pass (avoids multi-GPU/DDP
                # "unused parameters" errors).
                cur_decoder = self.get_language_decoder(i)
                cur_feat = torch.ones_like(feat[0:1]) * -1.
                cur_out_enc = torch.ones_like(out_enc[0:1]) * -1.
                cur_img_metas = [img_metas[0]]
                cur_targets = torch.ones_like(
                    targets_dict['padded_targets'][0:1]) * (
                        cur_decoder.start_idx + 1)
                cur_target_dict = {'padded_targets': cur_targets.long()}
                output[lang_name] = cur_decoder(
                    cur_feat, cur_out_enc, cur_target_dict, cur_img_metas,
                    train_mode=True)
                new_targets_dict[lang_name] = cur_target_dict
                continue
            cur_feat = feat[index]
            cur_out_enc = out_enc[index]
            cur_target_dict = {
                'padded_targets': targets_dict['padded_targets'][index]
            }
            cur_img_metas = [img_metas[cc] for cc in index]
            output[lang_name] = self.get_language_decoder(i)(
                cur_feat, cur_out_enc, cur_target_dict, cur_img_metas,
                train_mode=True)
            new_targets_dict[lang_name] = cur_target_dict
        return output, new_targets_dict

    def forward_test(self, feat, out_enc, img_metas, out_cla, *args,
                     **kwargs):
        """Decode with every language head, then keep, for each sample,
        the output of the predicted (or given) language.

        Args:
            feat (Tensor): Backbone features.
            out_enc (Tensor): Encoder output, ``out_enc[i]`` per sample.
            img_metas (list[dict]): Per-sample meta information.
            out_cla (Tensor): Language classification logits of shape
                ``(N, num_lang)``.

        Keyword Args:
            gt_lang (Sequence[int], optional): Ground-truth language ids;
                when given they override the classifier prediction.

        Returns:
            tuple(list, Sequence[int]): Selected per-sample decoder
            outputs and the language indices used for the selection.
        """
        num_samples = out_enc.size(0)
        # ``kwargs`` is always a dict, so only the membership test is
        # needed (the old ``kwargs is not None`` check was dead code).
        if 'gt_lang' in kwargs:
            index = kwargs['gt_lang']
        else:
            # Most probable language per sample.
            probs = F.softmax(out_cla, dim=-1)
            index = torch.max(probs, dim=1)[1].cpu().numpy()
        # Every head decodes the full batch.
        # NOTE(review): this costs num_lang full decodes per batch; it
        # could be restricted to each head's selected samples.
        all_outputs = [
            self.get_language_decoder(i)(feat, out_enc, img_metas,
                                         train_mode=False)
            for i in range(self.num_lang)
        ]
        correct_outputs = [
            all_outputs[index[i]][i] for i in range(num_samples)
        ]
        return correct_outputs, index

    def build_language_decoder(self, lang, decoder):
        """Build and register the sub-decoder for language id ``lang``.

        The decoder is stored as attribute ``<name>_decoder`` (e.g.
        ``arabic_decoder``), matching the attribute names used by earlier
        versions so checkpoint/state_dict keys are unchanged.

        Args:
            lang (int): Language id in ``[0, len(LANG_NAMES))``.
            decoder (dict): Full config for this language's decoder.

        Raises:
            ValueError: If ``lang`` is not a valid language id.
        """
        if not 0 <= lang < len(self.LANG_NAMES):
            # The previous version printed a message and returned 0, which
            # only crashed later at the call site; fail fast instead.
            raise ValueError('Invalid language id: {}'.format(lang))
        # Assigning an nn.Module attribute registers it as a submodule.
        setattr(self, self.LANG_NAMES[lang] + '_decoder',
                build_decoder(decoder))

    def get_language_decoder(self, lang):
        """Return the sub-decoder registered for language id ``lang``.

        Args:
            lang (int): Language id in ``[0, len(LANG_NAMES))``.

        Raises:
            ValueError: If ``lang`` is not a valid language id.
        """
        if not 0 <= lang < len(self.LANG_NAMES):
            raise ValueError('Invalid language id: {}'.format(lang))
        return getattr(self, self.LANG_NAMES[lang] + '_decoder')