## Preprocessing

import jieba.posseg as pseg
# import word2vec as w2v
# from bert_serving.client import BertClient
import torch
import torch.nn as nn

from gL import GlobalVar


def get_bert_encode_for_single(text):
    """Encode a Chinese text with the bert-chinese model held in GlobalVar.

    :param text: the text to encode
    :return: the hidden state at the first token position, as a plain list of floats
    """
    model = GlobalVar.model
    tokenizer = GlobalVar.tokenizer
    # Map each character to its vocabulary id.
    # NOTE(review): tokenizer.encode adds the start/end markers (101/102);
    # the original comment said these should be stripped with [1:-1], but the
    # code never does so — confirm whether keeping them is intentional.
    token_ids = tokenizer.encode(text)
    # Wrap the id list into a batch of one.
    batch = torch.tensor([token_ids])
    # Pure inference: disable gradient tracking.
    with torch.no_grad():
        hidden = model(batch)[0]
    # [0][0] -> first sample, first token position of the model's first output.
    return hidden[0][0].tolist()


## Load an embedding file and return a dict: key = word, value = its word vector.
def load_word2vec_object(source_data_path):
    """Load a word2vec-style text file into a {word: vector} dict.

    The first line is treated as a header and skipped. Every following line
    has the form "word v1 v2 ... vn" separated by single spaces.

    :param source_data_path: path to the utf-8 embedding text file
    :return: dict mapping each word to a list of floats
    """
    source_data_dict = {}
    # Use a context manager so the file is always closed
    # (the original opened it and never closed it — a handle leak).
    with open(source_data_path, 'r', encoding='utf-8') as source_data:
        for i, line in enumerate(source_data):
            if i == 0:
                # Skip the header line (typically "<count> <dimension>").
                continue
            parts = line.split(' ')
            word = parts[0]
            # NOTE(review): [1:-1] drops the final component of every line.
            # If lines end in "...vn\n" this loses one real dimension —
            # kept as-is to preserve existing behavior; confirm intent.
            source_data_dict[word] = [float(x) for x in parts[1:-1]]

    return source_data_dict



class NLUER:
    import os
    # Project root (two directory levels above this file); anchors the data path.
    root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    # word -> embedding vector, loaded once when the class is defined.
    word_vecs = load_word2vec_object(root_path + r'/data/chinese_wiki_embeding20000.txt')

    def __init__(self, sentence):
        self.sentence = sentence
        self.sentence_vec = []  # sentence-level BERT vector, filled by nlu()
        self.bigwords = []      # per-token Bigword objects, filled by nlu()

    def nlu(self):
        """Segment the sentence with jieba, attach a POS tag and a word vector
        to each token, then compute the sentence-level BERT encoding."""
        for token, tag in pseg.cut(self.sentence):
            bw = Bigword(token)
            bw.pos = tag
            # Out-of-vocabulary words fall back to a 300-dim zero vector.
            bw.word_vec = NLUER.word_vecs.get(token, [0] * 300)
            self.bigwords.append(bw)
        self.sentence_vec = get_bert_encode_for_single(self.sentence)

    # NOTE(review): an earlier variant used bert-serving (install the server
    # and start it first); the current code path goes through GlobalVar instead.


class Bigword:
    """Container for one segmented token and its annotations."""

    def __init__(self, ori_word):
        self.ori_word = ori_word  # the token exactly as segmented
        self.word = ori_word      # reserved for a normalized form of the token
        self.word_vec = None      # embedding vector, e.g. [1, 2, 0.5, 4.2, ...]
        self.pos = None           # part-of-speech tag


if __name__ == '__main__':
    # Smoke test: run the full pipeline on a sample sentence and print the
    # length of the resulting sentence vector.
    nlu = NLUER('今天连不上网络')
    nlu.nlu()
    # for word in nlu.bigwords:
    #     print(word.word, word.word_vec[:15], word.pos)
    # nlu.get_sentence_vec()
    print(len(nlu.sentence_vec))


