from collections import defaultdict
from genericpath import isfile
import os
import re
from typing import List
import torch
import jieba
from word_vec_encoders.word_vec_encoder_base import WordVecEncoderBase

class KnowlegeInjectionWordVecEncoder(WordVecEncoderBase):
    """Encode each character of a sentence by the words / domain entities it may belong to.

    Loads domain lexicons (one ``.tsv`` file per entity label under
    ``./data/knowlege``; each line is a tab-separated synonym group) plus a
    general lexicon via jieba, and produces per-character vectors of size
    ``num_labels * 2``: the first ``num_labels`` dimensions mark entity heads,
    the last ``num_labels`` mark entity tails.
    """

    def __init__(self) -> None:
        knowlege_path = './data/knowlege'
        # label_index -> list of synonym groups (one group per lexicon line)
        self.lexicon_by_label_index = defaultdict(list)
        # label_index -> flat list of every lexicon entry for that label
        self.flat_lexicon_by_label_index = defaultdict(list)
        # word -> label_index (a later file overwrites an earlier duplicate)
        self.flat_lexicon_by_word = dict()
        file_names = [
            file_name
            for file_name in os.listdir(knowlege_path)
            if os.path.isfile(os.path.join(knowlege_path, file_name)) and file_name.endswith('.tsv')
        ]
        self.known_labels = []
        for file_name in file_names:
            label_name = os.path.splitext(file_name)[0]  # label = file name without extension
            if label_name not in self.known_labels:
                self.known_labels.append(label_name)
            label_index = self.known_labels.index(label_name)
            # BUG FIX: the original leaked the file handle; close it deterministically.
            with open(os.path.join(knowlege_path, file_name), encoding='utf-8') as lexicon_file:
                for line in lexicon_file:
                    synonym_list = []
                    for item in line.strip().split('\t'):
                        # Strip fullwidth and halfwidth parenthesised annotations.
                        item = re.sub(r'（.*）', '', item)
                        item = re.sub(r'\(.*\)', '', item)
                        # Comma-separated items are re-joined in reverse order.
                        if ',' in item:
                            subitems = item.split(',')
                            subitems.reverse()
                            item = ''.join(subitem.strip() for subitem in subitems)
                        if item == '':
                            continue
                        synonym_list.append(item)
                        self.flat_lexicon_by_label_index[label_index].append(item)
                        self.flat_lexicon_by_word[item] = label_index
                    self.lexicon_by_label_index[label_index].append(synonym_list)
        self.known_labels.append('一般词汇')  # reserve the last label for general vocabulary
        self.num_labels = len(self.known_labels)
        # PERF: index lexicon entries by their first character so that
        # sentence_to_tensor only scans candidates that can start at a position,
        # instead of the whole flat lexicon for every character.
        self._lexicon_by_first_char = defaultdict(list)
        for word, label_index in self.flat_lexicon_by_word.items():
            self._lexicon_by_first_char[word[0]].append((word, label_index))
        super().__init__('knowlege', self.num_labels * 2)
        print('领域知识统计:')
        # BUG FIX: the original wrote `for label_index, label_index in zip(...)`,
        # binding the same name twice; it only worked because both dicts share
        # identical insertion-ordered keys. Iterate one dict's keys instead.
        for label_index in self.lexicon_by_label_index:
            print(f'{self.known_labels[label_index]:<10}{len(self.lexicon_by_label_index[label_index]):>5}群{len(self.flat_lexicon_by_label_index[label_index]):>7}项')

    # Class-level cache shared by all instances: sentence -> encoded tensor.
    _sentence_to_tensor_cache: dict = dict()

    def sentence_to_tensor(self, sentence: str) -> torch.Tensor:
        """Encode *sentence* as a ``(len(sentence), num_labels * 2)`` tensor.

        Column ``label_index`` marks an entity head at that character; column
        ``label_index + num_labels`` marks an entity tail. Values are scaled
        into ``0..1``. Results are memoized per sentence.
        """
        cache = KnowlegeInjectionWordVecEncoder._sentence_to_tensor_cache
        if sentence in cache:
            return cache[sentence]
        matches_by_pos_and_len = dict()
        # Candidate general words (jieba 'search' mode yields overlapping spans).
        for word, start, end in jieba.tokenize(sentence, mode='search'):
            matches_by_pos_and_len[(start, end - start)] = self.num_labels - 1  # last label = general vocabulary
        # Domain terms; a term overrides a general word at the same (pos, len).
        sentence_len = len(sentence)
        for i in range(sentence_len):
            for word, label_index in self._lexicon_by_first_char.get(sentence[i], ()):
                word_len = len(word)
                # BUG FIX: the original compared `sentence[i:word_len]`, which
                # is the wrong span for every i > 0; compare sentence[i:i+word_len].
                if i + word_len <= sentence_len and sentence[i:i + word_len] == word:
                    matches_by_pos_and_len[(i, word_len)] = label_index
        # Entity-information tensor: vector size is twice the label count.
        entity_info_tensor = torch.zeros((sentence_len, self.num_labels * 2))
        for (pos, length), label_index in matches_by_pos_and_len.items():
            # Entity head.
            entity_info_tensor[pos, label_index] += 1
            # Entity tail characters (shifted into the second half of the vector).
            if length > 1:
                entity_info_tensor[pos + 1:pos + length, label_index + self.num_labels] += 1
        # Scale values into 0..1. BUG FIX: the original divided unconditionally,
        # producing NaNs when no match was found and raising on an empty sentence.
        peak = entity_info_tensor.max().item() if entity_info_tensor.numel() > 0 else 0.0
        if peak > 0:
            entity_info_tensor /= peak
        cache[sentence] = entity_info_tensor
        return entity_info_tensor

    def collate_batch_tensor(self, batch_sentences: List[str]):
        """Collate a batch of sentences via the cached BERT-style base helper."""
        return self.batch_str_to_input_tensor_bert_style_cached(batch_sentences)

    def forward(self, input_tensor: torch.Tensor, batch_size: int):
        """Identity forward pass: the knowledge tensor is used as-is."""
        return input_tensor

    def empty_chars_tensor(self, num_chars: int) -> torch.Tensor:
        """Return an all-zero placeholder tensor for *num_chars* characters."""
        return torch.zeros((num_chars, self.embedding_dim))