import re

import gensim
import jieba.analyse
from gensim.similarities import WmdSimilarity


def wf(dict, all_question_file, stop_words_file=r'D:/11\weixin\static\stopwords.txt'):
    """
    Tokenize every question in *all_question_file* into keyword lists.

    :param dict: path of a user dictionary loaded into jieba
                 (name kept for backward compatibility; it shadows the builtin).
    :param all_question_file: UTF-8 text file, one question per line.
    :param stop_words_file: UTF-8 stop-word list, one word per line
                            (default kept from the original hard-coded path).
    :return: list of keyword lists, one entry per question line.
    """
    jieba.load_userdict(dict)
    # Strip line endings so membership tests actually match: readlines()
    # keeps the trailing '\n', which made the original filter a no-op.
    # A set also makes each per-word lookup O(1) instead of O(n).
    with open(stop_words_file, 'r', encoding='utf-8') as stop_fp:
        stop_words = {line.strip() for line in stop_fp}
    words = []
    with open(all_question_file, 'r', encoding='utf-8') as fp:
        for line in fp:
            # Remove punctuation/whitespace before keyword extraction.
            cleaned = re.sub(r'\W', '', line)
            keywords = jieba.analyse.extract_tags(cleaned, topK=6)
            # Keep at most 6 keywords that are neither digits nor stop words.
            words.append([w for w in keywords if not w.isdigit() and w not in stop_words])
    return words


def split_question(question, stop_words_file=r'D:/11\weixin\static\stopwords.txt'):
    """
    Tokenize a single target question into keywords.

    :param question: the question text to tokenize.
    :param stop_words_file: UTF-8 stop-word list, one word per line
                            (default kept from the original hard-coded path).
    :return: list of at most 6 keywords, digits and stop words removed.
    """
    # Strip line endings so membership tests actually match: readlines()
    # keeps the trailing '\n', which made the original filter a no-op.
    with open(stop_words_file, 'r', encoding='utf-8') as stop_fp:
        stop_words = {line.strip() for line in stop_fp}
    keywords = jieba.analyse.extract_tags(question, topK=6)
    return [w for w in keywords if not w.isdigit() and w not in stop_words]


def get_wmd_distance(split_allquestions_words, model_file=r'D:/11\weixin\static\MyModel'):
    """
    Build a Word Mover's Distance similarity index over the tokenized corpus.

    :param split_allquestions_words: list of token lists (one per question),
        e.g. the output of ``wf``.
    :param model_file: path of a saved gensim Word2Vec model
        (default kept from the original hard-coded path).
    :return: a ``WmdSimilarity`` index; querying it yields the 5 best matches.
    """
    model = gensim.models.Word2Vec.load(model_file)
    # num_best=5: each query returns only the top-5 most similar questions.
    return WmdSimilarity(split_allquestions_words, model, num_best=5)




















