# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from collections import defaultdict

import torch

import mmocr.utils as utils
from mmocr.models.builder import CONVERTORS
from mmocr.models.builder import build_convertor
from mmocr.utils import list_from_file

from .attn import AttnConvertor
from .base import BaseConvertor

@CONVERTORS.register_module()
class MultiheadAttnConvertor(BaseConvertor):
    """Convert between text strings and character indices for a
    multi-head (multi-language) attention recognizer.

    One ``AttnConvertor`` is built per language listed in ``lang_file``;
    each sample is encoded/decoded with the convertor selected by its
    language index.

    Args:
        dict_type (str): Built-in dict type, passed to ``BaseConvertor``.
        dict_file (None|str): Character dict file path. The per-language
            dict files ``<language>_dict.txt`` are looked up in the same
            directory.
        lang_file (None|str): File listing one language name per line;
            line order defines the language index.
        dict_list (None|list[str]): Character list, passed to
            ``BaseConvertor``.
        with_unknown (bool): If True, map unknown characters to ``<UKN>``.
        max_seq_len (int): Maximum sequence length.
        lower (bool): If True, lower-case input strings before encoding.
        start_end_same (bool): Whether start and end tokens share an index.
    """

    # Historical attribute aliases for the convertor at each language
    # index; position i here corresponds to language index i.
    _LANG_ATTRS = ('arabic', 'bangla', 'chinese', 'latin', 'hindi',
                   'korean', 'japanese')

    def __init__(self,
                 dict_type='DICT90',
                 dict_file=None,
                 lang_file=None,
                 dict_list=None,
                 with_unknown=True,
                 max_seq_len=50,
                 lower=False,
                 start_end_same=True,
                 **kwargs):
        super().__init__(dict_type, dict_file, dict_list)
        assert isinstance(with_unknown, bool)
        assert isinstance(max_seq_len, int)
        assert isinstance(lower, bool)

        self.with_unknown = with_unknown
        self.max_seq_len = max_seq_len
        self.lower = lower
        self.start_end_same = start_end_same
        self.dict_file = dict_file

        # One language name per non-empty line; line order defines the
        # language index used throughout the model.
        self.idx2lang = []
        if lang_file is not None:
            for line in list_from_file(lang_file):
                line = line.strip()
                if line != '':
                    self.idx2lang.append(line)

        self.lang2idx = {lang: idx for idx, lang in enumerate(self.idx2lang)}
        # Fix: the original read the loop variable after the loop, which
        # raises NameError for an empty language list; len() always works.
        self.num_lang = len(self.idx2lang)

        # Per-language dict files live next to ``dict_file`` and are named
        # '<language>_dict.txt'.  Fix: use dirname() instead of replacing
        # the basename substring, which corrupted paths whose directory
        # part contained the basename.
        dict_dir = osp.dirname(self.dict_file)
        self.lang_dirs = [
            osp.join(dict_dir, lang + '_dict.txt') for lang in self.idx2lang
        ]

        # Build one attention convertor per language (generalized from the
        # original hard-coded seven).  No explicit update_dict is needed:
        # each dict is loaded during build_convertor.
        self.lang_convertors = []
        for lang_dir in self.lang_dirs:
            cfg = dict(
                type='AttnConvertor',
                dict_file=lang_dir,
                with_unknown=True,
                lower=False)
            self.lang_convertors.append(build_convertor(cfg))

        # Keep the historical per-language attribute aliases
        # (arabic_label_convertor, ...) for callers that access the
        # convertors by name.
        for attr, convertor in zip(self._LANG_ATTRS, self.lang_convertors):
            setattr(self, attr + '_label_convertor', convertor)

    def multihead_str2tensor(self, strings, languages):
        """Convert text strings to padded target tensors per language.

        Args:
            strings (list[str]): Ground-truth texts.
            languages (list[int]): Language index of each string.

        Returns:
            dict: ``targets`` (list[Tensor]) holds the raw character
            indices; ``padded_targets`` (Tensor) is the stacked
            <SOS>...<EOS> sequence padded per language.
        """
        assert utils.is_type_list(strings, str)

        tensors, padded_targets = [], []
        indexes = self.multihead_str2idx(strings, languages)
        for i, index in enumerate(indexes):
            lang_obj = self.choose_language_convertor(languages[i])

            tensor = torch.LongTensor(index)
            tensors.append(tensor)
            # Wrap the target with <SOS>/<EOS> for the attention loss.
            src_target = torch.LongTensor(tensor.size(0) + 2).fill_(0)
            src_target[0] = lang_obj.start_idx
            src_target[-1] = lang_obj.end_idx
            src_target[1:-1] = tensor
            # NOTE(review): padding uses the per-language convertor's
            # max_seq_len, not this convertor's ``self.max_seq_len`` —
            # confirm this is intended.
            padded_target = (torch.ones(lang_obj.max_seq_len) *
                             lang_obj.padding_idx).long()
            char_num = src_target.size(0)
            if char_num > lang_obj.max_seq_len:
                # Truncate over-long sequences (drops the trailing <EOS>).
                padded_target = src_target[:lang_obj.max_seq_len]
            else:
                padded_target[:char_num] = src_target
            padded_targets.append(padded_target)
        padded_targets = torch.stack(padded_targets, 0).long()

        return {'targets': tensors, 'padded_targets': padded_targets}

    def lang_str2tensor(self, strings):
        """Map language-name strings to their language indices."""
        assert utils.is_type_list(strings, str)
        return [self.lang2idx[lang] for lang in strings]

    def multihead_str2idx(self, strings, languages):
        """Convert strings to per-language character-index lists.

        Args:
            strings (list[str]): Texts to encode.
            languages (list[int]): Language index of each string.

        Returns:
            list[list[int]]: Character indices, e.g. ``[[1, 2, 3], ...]``.

        Raises:
            Exception: If a character is missing from the language dict and
                the convertor has no unknown index.
        """
        assert isinstance(strings, list)
        indexes = []
        for i, string in enumerate(strings):
            lang_obj = self.choose_language_convertor(languages[i])
            if self.lower:
                string = string.lower()
            index = []
            for char in string:
                char_idx = lang_obj.char2idx.get(char, lang_obj.unknown_idx)
                if char_idx is None:
                    raise Exception(f'Character: {char} not in dict,'
                                    f' please check gt_label and use'
                                    f' custom dict file,'
                                    f' or set "with_unknown=True"')
                index.append(char_idx)
            indexes.append(index)
        return indexes

    def multihead_tensor2idx(self, outputs, language, img_metas=None):
        """Convert output tensors to text indices and scores.

        Args:
            outputs (tensor): Model outputs with size N * T * C.
            language (list[int]): Language index of each sample.
            img_metas (list[dict]): Each dict contains one image info.

        Returns:
            indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]]
            scores (list[list[float]]): [[0.9,0.8,0.95,0.97,0.94],
                                         [0.9,0.9,0.98,0.97,0.96]]
        """
        batch_size = len(outputs)
        indexes, scores = [], []
        for idx in range(batch_size):
            lang_obj = self.choose_language_convertor(language[idx])
            # Padding positions are skipped; decoding stops at <EOS>.
            ignore_indexes = [lang_obj.padding_idx]
            seq = outputs[idx][:, :]
            max_value, max_idx = torch.max(seq, -1)
            str_index, str_score = [], []
            output_index = max_idx.cpu().detach().numpy().tolist()
            output_score = max_value.cpu().detach().numpy().tolist()
            for char_index, char_score in zip(output_index, output_score):
                if char_index in ignore_indexes:
                    continue
                if char_index == lang_obj.end_idx:
                    break
                str_index.append(char_index)
                str_score.append(char_score)

            indexes.append(str_index)
            scores.append(str_score)

        return indexes, scores

    def multihead_idx2str(self, indexes, language):
        """Convert indexes to text strings.

        Args:
            indexes (list[list[int]]): [[1,2,3,3,4], [5,4,6,3,7]].
            language (list[int]): Language index of each sample.

        Returns:
            strings (list[str]): ['hello', 'world'].
        """
        assert isinstance(indexes, list)

        strings = []
        for i, index in enumerate(indexes):
            lang_obj = self.choose_language_convertor(language[i])
            # Renamed the inner variable: the original comprehension
            # shadowed the outer loop index ``i``.
            chars = [lang_obj.idx2char[char_idx] for char_idx in index]
            strings.append(''.join(chars))

        return strings

    def choose_language_convertor(self, lang):
        """Return the ``AttnConvertor`` registered for language index
        ``lang``.

        Raises:
            ValueError: If ``lang`` is out of range.  (The original
                printed an error and returned ``0``, which only deferred
                the failure to a confusing downstream AttributeError.)
        """
        if not 0 <= lang < len(self.lang_convertors):
            raise ValueError(f'Invalid language index: {lang}')
        return self.lang_convertors[lang]

    def get_num_language(self):
        """Number of output languages."""
        return self.num_lang

    def get_num_classes(self):
        """Per-language number of output classes."""
        return [conv.num_classes() for conv in self.lang_convertors]

    def get_start_idx(self):
        """Per-language <SOS> index."""
        return [conv.start_idx for conv in self.lang_convertors]

    def get_padding_idx(self):
        """Per-language padding index."""
        return [conv.padding_idx for conv in self.lang_convertors]







