# -*- coding:utf-8 -*-
import os
from typing import Union, List

from pet.model_tokenizer import ModelTokenizer
from pet.parameter_config import TrainConfig

# typing.Union expresses a value that may be any one of several types.

# Maps each super (main) label to the list of its sub labels.
# In this dataset every label currently maps to itself; extend the lists
# to introduce finer-grained sub labels (e.g. "水果" -> ["苹果", "香蕉"]).
label_mapping_dict = {
    "电脑": ["电脑"],
    "水果": ["水果"],
    "平板": ["平板"],
    "衣服": ["衣服"],
    "酒店": ["酒店"],
    "洗浴": ["洗浴"],
    "书籍": ["书籍"],
    "蒙牛": ["蒙牛"],
    "手机": ["手机"],
    "电器": ["电器"],
}


class LabelVerbalize(object):
    """
    Verbalizer that maps between super (main) labels and their sub labels.

    The mapping comes from the module-level ``label_mapping_dict``.
    Labels may be given either as text or as token-id lists; id lists are
    decoded back to text with the tokenizer (pad tokens stripped) and are
    never mutated in place.
    """

    def __init__(self, tokenizer, max_label_len: int):
        """
        Args:
            tokenizer: tokenizer used to convert between text and token ids.
            max_label_len (int): fixed label length in tokens; longer id
                lists are truncated, shorter ones are padded with the pad
                token id.
        """
        self.tokenizer = tokenizer
        self.labels_dict = label_mapping_dict
        self.max_label_len = max_label_len

    def _ids_to_label_str(self, token_ids: list) -> str:
        """Decode an id list to a label string, ignoring pad tokens.

        Works on a filtered copy so the caller's list is not modified
        (the previous implementation removed pad ids in place).
        """
        pad_token_id = self.tokenizer.pad_token_id
        real_ids = [i for i in token_ids if i != pad_token_id]
        return ''.join(self.tokenizer.convert_ids_to_tokens(real_ids))

    def find_sub_labels(self, super_label: Union[list, str]):
        """
        Find all sub labels of a super label.

        Args:
            super_label (Union[list, str]): label as text or as an id list,
                e.g. '体育' or [860, 5509]. An id-list argument is not
                mutated.

        Returns:
            dict -> {
                'sub_labels': ['足球', '网球'],
                'token_ids': [[6639, 4413], [5381, 4413]]
            }

        Raises:
            ValueError: if the label is not present in the mapping.
        """
        if isinstance(super_label, list):
            # An id list was passed in; decode it back to text first.
            label_str = self._ids_to_label_str(super_label)
        else:
            label_str = super_label
        if label_str not in self.labels_dict:
            raise ValueError(f"Label Error: '{label_str}' not in labels_dict {list(self.labels_dict.keys())}")

        sub_labels = self.labels_dict[label_str]
        pad_token_id = self.tokenizer.pad_token_id
        # [1:-1] strips the special tokens (e.g. [CLS]/[SEP]) added by the
        # tokenizer around each label; then truncate/pad to max_label_len.
        labels_ids = [ids[1:-1] for ids in self.tokenizer(sub_labels)['input_ids']]
        labels_ids = [
            ids[:self.max_label_len]
            + [pad_token_id] * max(0, self.max_label_len - len(ids))
            for ids in labels_ids
        ]
        return {'sub_labels': sub_labels, 'token_ids': labels_ids}

    def batch_find_sub_labels(self, super_labels: List[Union[list, str]]):
        """
        Find sub labels for a batch of super labels.

        Args:
            super_labels (List[Union[list, str]]): list of labels,
                [[4510, 5554], [860, 5509]] or ['体育', '电脑']

        Returns:
            list -> [
                        {
                            'sub_labels': ['笔记本', '电脑'],
                            'token_ids': [[5011, 6381, 3315], [4510, 5554]]
                        },
                        ...
                    ]
        """
        return [self.find_sub_labels(super_label) for super_label in super_labels]

    def hard_mapping(self, input_sub_label: str):
        """
        Fuzzy fallback: when a generated sub label is unknown, pick the
        super label whose sub labels share the largest total longest-
        common-substring overlap with it.

        Args:
            input_sub_label (str): the (possibly unseen) sub label.

        Returns:
            str: best matching super label. Ties go to the later entry in
                ``labels_dict`` (``>=`` comparison, matching the original
                behavior).
        """
        best_super_label = ''
        best_overlap = 0
        for super_label, sub_labels in self.labels_dict.items():
            # Total longest-common-substring length between the candidate
            # and every sub label of this super label.
            overlap = sum(
                self.get_common_sub_str(sub_label, input_sub_label)[1]
                for sub_label in sub_labels
            )
            if overlap >= best_overlap:
                best_overlap = overlap
                best_super_label = super_label
        return best_super_label

    def find_super_label(self, sub_label: Union[list, str], hard_mapping=True):
        """
        Find the super label of a sub label.

        Args:
            sub_label (Union[list, str]): sub label as text or id list,
                e.g. '苹果' or [5741, 3362]. An id-list argument is not
                mutated.
            hard_mapping (bool): when the sub label is unknown, whether to
                fall back to fuzzy matching the closest super label.

        Returns:
            dict -> {
                'label': '水果',
                'token_ids': [3717, 3362]
            }
        """
        if isinstance(sub_label, list):
            # An id list was passed in; decode it back to text first.
            sub_label = self._ids_to_label_str(sub_label)

        # Exact lookup: first super label whose sub-label list contains it.
        main_label = next(
            (super_label for super_label, sub_labels in self.labels_dict.items()
             if sub_label in sub_labels),
            '无'
        )

        if main_label == '无' and hard_mapping:
            main_label = self.hard_mapping(sub_label)

        return {
            'label': main_label,
            # [1:-1] strips the tokenizer's special tokens.
            'token_ids': self.tokenizer(main_label)['input_ids'][1:-1]
        }

    def batch_find_super_label(self, sub_labels: List[Union[list, str]], hard_mapping=True):
        """
        Find super labels for a batch of sub labels.

        Args:
            sub_labels (List[Union[list, str]]): sub labels,
                ['苹果', ...] or [[5741, 3362], ...]
            hard_mapping (bool): see :meth:`find_super_label`.

        Returns:
            list: [
                    {
                    'label': '水果',
                    'token_ids': [3717, 3362]
                    },
                    ...
            ]
        """
        return [self.find_super_label(sub_label, hard_mapping) for sub_label in sub_labels]

    @staticmethod
    def get_common_sub_str(str1: str, str2: str):
        """
        Longest common substring via dynamic programming.

        e.g. str1='abcd', str2='abcdefgh' -> ('abcd', 4)

        Returns:
            tuple: (substring, length)
        """
        lstr1, lstr2 = len(str1), len(str2)
        # DP table with one extra leading row/column of zeros so the
        # recurrence needs no boundary checks.
        record = [[0 for _ in range(lstr2 + 1)] for _ in range(lstr1 + 1)]
        p = 0        # end position (exclusive) of the best match in str1
        max_num = 0  # length of the best match

        for i in range(lstr1):
            for j in range(lstr2):
                if str1[i] == str2[j]:
                    record[i + 1][j + 1] = record[i][j] + 1
                    if record[i + 1][j + 1] > max_num:
                        max_num = record[i + 1][j + 1]
                        p = i + 1

        return str1[p - max_num:p], max_num


if __name__ == '__main__':
    # Smoke test: exercise the verbalizer in both directions.
    # (Removed an unused `from transformers import AutoTokenizer` import —
    # the tokenizer is obtained through ModelTokenizer instead.)
    config = TrainConfig()
    tokenizer = ModelTokenizer(config.tokenizer_path).get_tokenizer()
    verbalize = LabelVerbalize(
        tokenizer=tokenizer,
        max_label_len=2
    )

    # Batch lookup of sub labels from id lists and from text.
    super_labels1 = [[4510, 5554], [6132, 3302]]
    ret1 = verbalize.batch_find_sub_labels(super_labels1)
    print(ret1)
    super_labels2 = ['电脑', '衣服']
    ret2 = verbalize.batch_find_sub_labels(super_labels2)
    print(ret2)

    # Single lookups from text and from an id list.
    super_label1 = '衣服'
    ret3 = verbalize.find_sub_labels(super_label1)
    print(ret3)

    super_label2 = [4510, 5554]
    ret4 = verbalize.find_sub_labels(super_label2)
    print(ret4)

    # Reverse direction: sub label -> super label.
    sub_labels = ['电脑', '衣服']
    ret5 = verbalize.batch_find_super_label(sub_labels)
    print(ret5)
