from PET.pet_config import ProjectConfig
from transformers import AutoTokenizer
from typing import Union, List
import random


class Verbalizer():
    """Maps main labels to/from their sub-label words (a PET-style verbalizer).

    The verbalizer file stores one main label per line with its comma-separated
    sub-labels, tab-delimited, e.g. ``体育\t篮球,足球,网球``.
    """

    def __init__(self, verbalizer_file: str, tokenizer, max_label_len: int):
        """
        Args:
            verbalizer_file (str): path to the verbalizer file.
            tokenizer: tokenizer used to convert between text and token ids
                (expected to provide ``pad_token_id``, ``convert_ids_to_tokens``
                and a ``__call__`` returning ``{'input_ids': ...}``).
            max_label_len (int): fixed label length; longer labels are
                truncated, shorter ones padded with the pad token id.
        """
        self.tokenizer = tokenizer
        self.max_label_len = max_label_len
        self.label_dict = self.load_label_dict(verbalizer_file)

    def load_label_dict(self, verbalizer_file: str) -> dict:
        """
        Read the local verbalizer file and build the label dict.

        Args:
            verbalizer_file (str): path to the verbalizer file.

        Returns:
            dict -> {
                '体育': ['篮球', '足球','网球', '排球',  ...],
                '酒店': ['宾馆', '旅馆', '旅店', '酒店', ...],
                ...
            }
        """
        label_dict = {}
        with open(verbalizer_file, 'r', encoding='utf8') as f:
            for line in f:
                line = line.strip()
                if not line:  # robustness: skip blank lines instead of crashing on split
                    continue
                label, sub_labels = line.split('\t')
                # dedupe sub-labels; note set() makes the order arbitrary
                label_dict[label] = list(set(sub_labels.split(',')))
        return label_dict

    def _ids_to_text(self, token_ids: list) -> str:
        """Decode a token-id list to text, dropping pad tokens (no input mutation)."""
        ids = [t for t in token_ids if t != self.tokenizer.pad_token_id]
        return ''.join(self.tokenizer.convert_ids_to_tokens(ids))

    def find_sub_labels(self, label: Union[list, str]) -> dict:
        """
        Find all sub-labels of a main label.

        Args:
            label (Union[list, str]): main label as text or as a token-id list,
                e.g. -> '体育' or [860, 5509]

        Returns:
            dict -> {
                'sub_labels': ['足球', '网球'],
                'token_ids': [[6639, 4413], [5381, 4413]]
            }

        Raises:
            ValueError: if the label is not present in the verbalizer dict.
        """
        if isinstance(label, list):  # e.g. [3717, 3362] -> '水果'
            label = self._ids_to_text(label)

        if label not in self.label_dict:
            raise ValueError(f'Label Error: {label} is not in label_dict')

        sub_labels = self.label_dict[label]
        token_ids = self.tokenizer(sub_labels, add_special_tokens=False)['input_ids']
        pad_id = self.tokenizer.pad_token_id
        # truncate to max_label_len, then right-pad with pad_token_id
        # (the pad multiplier is <= 0 after truncation, so it is a no-op then)
        token_ids = [
            ids[:self.max_label_len] + [pad_id] * (self.max_label_len - len(ids))
            for ids in token_ids
        ]
        return {'sub_labels': sub_labels, 'token_ids': token_ids}

    def batch_find_sub_labels(self, label: List[Union[list, str]]) -> list:
        """
        Find sub-labels for a batch of main labels.

        Args:
            label (List[Union[list, str]]): list of main labels,
                [[4510, 5554], [860, 5509]] or ['体育', '电脑']

        Returns:
            list -> [
                        {
                         'sub_labels': ['足球', '网球'],
                         'token_ids': [[6639, 4413], [5381, 4413]]
                        },
                        ...
                    ]
        """
        return [self.find_sub_labels(l) for l in label]

    def get_common_sub_str(self, str1: str, str2: str):
        """
        Find the longest common substring of two strings (classic DP).

        str1: abcd
        str2: abadbcdba
        return: ('bcd', 3)
        """
        lstr1, lstr2 = len(str1), len(str2)
        # DP table with one extra row/column of zeros to avoid boundary checks
        record = [[0 for i in range(lstr2 + 1)] for j in range(lstr1 + 1)]
        p = 0        # end position (exclusive) of the best match in str1
        maxNum = 0   # length of the longest match

        for i in range(lstr1):
            for j in range(lstr2):
                if str1[i] == str2[j]:
                    record[i + 1][j + 1] = record[i][j] + 1
                    if record[i + 1][j + 1] > maxNum:
                        maxNum = record[i + 1][j + 1]
                        p = i + 1

        return str1[p - maxNum:p], maxNum

    def hard_mapping(self, sub_label: str) -> str:
        """
        Hard-match a generated sub-label that is not in the verbalizer: pick the
        main label whose sub-labels have the largest total longest-common-substring
        overlap with ``sub_label``.

        Falls back to a random main label when nothing overlaps at all.

        Args:
            sub_label (str): the generated sub-label.

        Returns:
            str: the matched main label.
        """
        best_label = random.choice(list(self.label_dict.keys()))
        best_overlap = 0
        for main_label, sub_labels in self.label_dict.items():
            # sum of LCS lengths between the candidate and every sub-label
            overlap = sum(self.get_common_sub_str(sub_label, s)[1] for s in sub_labels)
            if overlap > best_overlap:
                best_overlap = overlap
                best_label = main_label
        return best_label

    def find_main_label(self, sub_label: Union[list, str], hard_mapping=True) -> dict:
        """
        Find the main label of a sub-label.

        Args:
            sub_label (Union[list, str]): sub-label as text or as a token-id
                list, e.g. -> '苹果' or [5741, 3362]
            hard_mapping (bool): whether to force-match the most similar main
                label when the generated word is not in the verbalizer.

        Returns:
            dict -> {
                'label': '水果',
                'token_ids': [3717, 3362]
            }
        """
        if isinstance(sub_label, list):  # e.g. [6205, 6163] -> '西装'
            sub_label = self._ids_to_text(sub_label)

        # '无' is the sentinel for "no exact match found"
        main_label = next(
            (label for label, subs in self.label_dict.items() if sub_label in subs),
            '无'
        )

        if hard_mapping and main_label == '无':
            main_label = self.hard_mapping(sub_label)

        return {
            'label': main_label,
            'token_ids': self.tokenizer(main_label, add_special_tokens=False)['input_ids'],
        }

    def batch_find_main_label(self, sub_label: List[Union[list, str]], hard_mapping=True) -> list:
        """
        Find main labels for a batch of sub-labels.

        Args:
            sub_label (List[Union[list, str]]): list of sub-labels,
                ['苹果', ...] or [[5741, 3362], ...]
            hard_mapping (bool): whether to force-match the most similar main
                label when a generated word is not in the verbalizer.

        Returns:
            list: [
                    {
                    'label': '水果',
                    'token_ids': [3717, 3362]
                    },
                    ...
            ]
        """
        return [self.find_main_label(l, hard_mapping) for l in sub_label]


if __name__ == '__main__':
    # Demo: build a verbalizer on top of a local BERT tokenizer and look up
    # main labels from generated sub-labels.
    config = ProjectConfig()
    config.pre_model = '../../pre_model/bert-base-chinese'
    config.verbalizer = '../data/verbalizer.txt'

    bert_tokenizer = AutoTokenizer.from_pretrained(config.pre_model)
    verbalizer = Verbalizer(
        verbalizer_file=config.verbalizer,
        tokenizer=bert_tokenizer,
        max_label_len=config.max_label_len,
    )

    # Single sub-label -> main label (text form; an id list like [6205, 6163] also works).
    print(verbalizer.find_main_label('裤裙', hard_mapping=True))

    # Batched sub-labels -> main labels.
    print(verbalizer.batch_find_main_label(['裤裙', '香蕉'], hard_mapping=True))
