from gensim.models import KeyedVectors
import json
from collections import OrderedDict
from annoy import AnnoyIndex

# Load pretrained word vectors stored in word2vec text format.
# limit=4000000 caps how many vocabulary entries are read from disk.
# Raw string avoids the invalid '\8' / '\D' escape sequences of the
# original mixed-backslash literal (same resulting path, no warnings).
tc_wv_model = KeyedVectors.load_word2vec_format(
    r'C:\Users\86176\Desktop\changshi.txt', limit=4000000, binary=False)

# L2-normalize the vectors in place: saves memory and still allows
# most_similar() queries.
# NOTE(review): init_sims() and .vocab are deprecated/removed in gensim 4.x
# (use fill_norms()/get_normed_vectors() and key_to_index) — confirm the
# installed gensim version before upgrading.
tc_wv_model.init_sims(replace=True)

# Alternative: memory-map a previously saved KeyedVectors for low RAM usage:
# tc_wv_model = KeyedVectors.load(r'C:\Users\86176\Desktop\ChineseEmbedding.bin', mmap='r')

# Build a word -> integer-id mapping and save it as JSON, so the Annoy
# index can later be loaded offline without re-reading the model.
# Insertion order matches the item ids used when populating the index.
word_index = OrderedDict(
    (key, counter) for counter, key in enumerate(tc_wv_model.vocab.keys()))

# encoding='utf-8' keeps the open() portable across Windows locales;
# json.dump escapes non-ASCII keys by default, so file content is unchanged.
# Raw string avoids the invalid '\8' escape of the original literal.
with open(r'C:\Users\86176\annoy检索\tc_word_index.json', 'w',
          encoding='utf-8') as fp:
    json.dump(word_index, fp)

# Build an Annoy index over the word vectors (roughly 8.8M entries).
# Vector dimensionality is 200.  The metric must be given explicitly on
# Annoy >= 1.16; 'angular' (cosine-like) matches the L2-normalized vectors
# produced by init_sims(replace=True) above.
tc_index = AnnoyIndex(200, 'angular')

# Item ids follow vocabulary iteration order, matching word_index.
for item_id, key in enumerate(tc_wv_model.vocab.keys()):
    tc_index.add_item(item_id, tc_wv_model[key])

# n_trees trades build time and index size against query accuracy;
# 10 is a modest setting.
tc_index.build(10)

# Persist the index so it can be (mmap-)loaded later without rebuilding.
# Raw string avoids the invalid '\8' escape of the original literal.
tc_index.save(r'C:\Users\86176\annoy检索\tc_index_build10.index')

# Reverse mapping (id -> word) for translating Annoy item ids back to words.
reverse_word_index = {idx: word for word, idx in word_index.items()}

# Query the 11 nearest neighbours of '台风' ("typhoon"; the query word
# itself is returned first) and write one word per line.
# encoding='utf-8' prevents UnicodeEncodeError when writing Chinese text
# under a non-UTF-8 Windows locale code page.
filename = r'C:\Users\86176\Desktop\输出.txt'
with open(filename, 'w', encoding='utf-8') as file_object:
    for item in tc_index.get_nns_by_item(word_index[u'台风'], 11):
        file_object.write(reverse_word_index[item] + '\n')
