import jieba
from datasketch import MinHash, MinHashLSHForest
import re
import random

def get_sentences_list(file):
    """Read a UTF-8 text file, split it into sentence fragments on Chinese
    punctuation, and tokenize each fragment with jieba, dropping stopwords.

    Parameters
    ----------
    file : str
        Path to the UTF-8 encoded text file to process.

    Returns
    -------
    list[list[str]]
        One token list per sentence fragment. Empty fragments produced by
        the split yield empty lists, preserving positional indices.
    """
    with open(file, 'r', encoding='utf-8') as f:
        text = f.read()
    text = text.replace('\n', '')      # drop newlines
    text = text.replace('\u200b', '')  # drop zero-width spaces (U+200B)
    # Split the text into sentence fragments on Chinese punctuation marks.
    sentences = re.split('[，。？！：“]', text)

    # Load stopwords into a set: membership is tested once per token below,
    # so O(1) set lookup beats the O(n) scan a list would cost. The set also
    # deduplicates lines for free (order never mattered for membership).
    with open('./stopword.txt', 'r', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}

    # Tokenize each fragment, filtering out stopwords.
    return [
        [word for word in jieba.cut(sentence.strip()) if word not in stopwords]
        for sentence in sentences
    ]

sentences = get_sentences_list('./weibos.txt')
forest = MinHashLSHForest()


def _minhash(words):
    """Build a MinHash signature from an iterable of token strings."""
    m = MinHash()
    for word in words:
        m.update(word.encode('utf-8'))
    return m


# Compute each sentence's MinHash and add it to the LSH Forest, keyed by
# its index so query results can be mapped back to the sentence tokens.
for index, sentence in enumerate(sentences):
    forest.add(index, _minhash(sentence))
forest.index()  # must be called before querying the forest

# Pre-segmented query tokens; an alternative segmentation would be
# ['关于', '里皮', '的', '辞职'].
query = ['里','皮', '的', '辞职']

# Retrieve the top-k approximate nearest neighbours of the query.
top_k = 3
result = forest.query(_minhash(query), top_k)
print('top 3邻居', result)
for i in result:
    print(sentences[i])