#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   predict.py
@Time    :   2021/11/09 10:43:45
@Author  :   Yaadon 
'''

# here put the import lib
from data.loader import load_text8,data_preprocess,build_dict
import paddle
import numpy as np


def get_similar_tokens(query_token, word2id_dict, id2word_dict, k, embed):
    """Find, print, and return the k words most similar to *query_token*.

    Similarity is cosine distance between the query word's embedding row
    and every row of the embedding table, so the query word itself is
    normally the top hit (cosine == 1 with itself).

    Args:
        query_token: word to look up; must be a key of ``word2id_dict``.
        word2id_dict: mapping word -> row index into the embedding table.
        id2word_dict: inverse mapping, row index -> word.
        k: number of nearest neighbours to report (clamped to vocab size).
        embed: embedding parameter with a ``.numpy()`` method (e.g. a
            paddle tensor) of shape (vocab_size, embedding_size).

    Returns:
        List of the top-k words, most similar first.

    Raises:
        KeyError: if ``query_token`` is not in ``word2id_dict``.
    """
    W = embed.numpy()
    x = W[word2id_dict[query_token]]
    # Cosine similarity of x against every row of W; the 1e-9 term keeps
    # the denominator nonzero for all-zero embedding rows.
    cos = np.dot(W, x) / np.sqrt(np.sum(W * W, axis=1) * np.sum(x * x) + 1e-9)
    flat = cos.flatten()
    # Guard: np.argpartition raises ValueError when k exceeds the array size.
    k = min(k, flat.size)
    # argpartition selects the top-k in O(n); then sort just those k
    # scores in descending order.
    indices = np.argpartition(flat, -k)[-k:]
    indices = indices[np.argsort(-flat[indices])]
    similar = [id2word_dict[i] for i in indices]
    for token in similar:
        print('for word %s, the similar word is %s' % (query_token, str(token)))
    return similar


if __name__ == '__main__':
    EMBEDDING_SIZE = 200          # dimensionality of the trained word vectors
    TOP_K = 5                     # number of neighbours to report
    PARAMS_FILE_PATH = 'nlp_epoch13'  # saved checkpoint from training

    # Rebuild the vocabulary with the same pipeline used at training time so
    # word ids line up with rows of the saved embedding table.
    corpus = data_preprocess(load_text8())
    word2id_freq, word2id_dict, id2word_dict = build_dict(corpus)
    vocab_size = len(word2id_freq)

    # model = SkipGram(vocab_size, EMBEDDING_SIZE)

    # Querying nearest neighbours only needs the embedding matrix, so we read
    # it straight out of the checkpoint's state dict instead of instantiating
    # the model.
    param_dict = paddle.load(PARAMS_FILE_PATH)
    # model.load_dict(param_dict)
    # print(param_dict['embedding.weight'])

    get_similar_tokens('apple', word2id_dict, id2word_dict, TOP_K,
                       param_dict['embedding.weight'])