import gensim
# Hard-coded local path to a plain-text corpus (one sentence per line).
text_file_name =r'C:\Users\Aiomi\Desktop\wikipedia_smaller\wikipedia.txt'
# Stream sentences lazily from disk; limit=10000 caps how many lines are used.
sentences = gensim.models.word2vec.LineSentence(text_file_name,limit=10000)

# Train a small Word2Vec model on the streamed corpus.
simple_model = gensim.models.Word2Vec(
    sentences=sentences,
    vector_size=10,  # dimensionality of the learned embeddings (tiny, for demo)
    window=5,        # context window size on each side of the target word
    min_count=5,     # discard words occurring fewer than 5 times
    workers=2        # number of training worker threads
)
# KeyedVectors holding the trained word embeddings (lookup, similarity, etc.).
word_vectors=simple_model.wv
#print(word_vectors)

# Optional: persist the trained model and export its vectors.
# simple_model.save("word2vec.model")
# new_model = gensim.models.Word2Vec.load("word2vec.model")
# new_model.wv.save_word2vec_format('word2vec.vector')
# new_model.wv.save_word2vec_format('word2vec.bin')

# Optional: reload just the vectors. Note that load_word2vec_format is a
# classmethod of the KeyedVectors class, not of the lowercase module:
# from gensim.models import KeyedVectors
# model = KeyedVectors.load_word2vec_format('word2vec.vector')



def analogy(model,x1,x2,y1):
    """Solve the analogy "x1 is to x2 as y1 is to ?" via vector arithmetic.

    Queries the model for words closest to (x2 - x1 + y1) and returns the
    single best-matching word.
    """
    candidates = model.most_similar(positive=[y1,x2],negative=[x1])
    best_word, _score = candidates[0]
    return best_word

import gensim.downloader
# Download (or load from local cache) pretrained 50-dim GloVe vectors.
gensim_builtin_glove_model = gensim.downloader.load("glove-wiki-gigaword-50")
# NOTE(review): with analogy(model,x1,x2,y1) this computes x2 + y1 - x1 =
# king + man - woman; the classic "queen" analogy is ('man','king','woman') —
# verify the intended argument order.
print(analogy(gensim_builtin_glove_model,'woman','king','man'))

# Cosine similarity of a related pair (cat/dog) vs. a less related pair (cat/hero).
print(gensim_builtin_glove_model.similarity('cat','dog'),gensim_builtin_glove_model.similarity('cat','hero'))
# Nearest neighbours of "mother" in the embedding space.
print(gensim_builtin_glove_model.most_similar('mother'))

import numpy as np
import matplotlib.pyplot as plt
# %config InlineBackend.figure_format = 'svg'
plt.style.use('bmh')

from sklearn.decomposition import  TruncatedSVD
# TSNE and MDS are imported but not used in this chunk — presumably kept for
# alternative projections; verify before removing.
from sklearn.manifold import  TSNE,MDS
def plot_embedding_pca(embedding_model,words):
    """Project word vectors to 2-D via truncated SVD and scatter-plot them.

    The vectors are mean-centred first, which makes the truncated SVD
    equivalent to PCA. Each point is labelled with its word, slightly
    offset to the right of the marker.

    Args:
        embedding_model: object supporting vectorised lookup
            ``embedding_model[words]`` -> array of shape (len(words), dim),
            e.g. a gensim KeyedVectors instance.
        words: sequence of words to plot; all must be in the vocabulary.
    """
    vectors = embedding_model[words]
    # Centre so the truncated SVD acts as PCA; use a new array instead of
    # the original in-place `-=`, which mutated the looked-up vectors.
    vectors = vectors - vectors.mean(axis=0)
    # random_state pinned: the default randomized solver is otherwise
    # non-deterministic, so the plot changed from run to run.
    twodim = TruncatedSVD(n_components=2, random_state=0).fit_transform(vectors)
    plt.figure(figsize=(5, 5))
    plt.scatter(twodim[:, 0], twodim[:, 1], edgecolors='k', c='r')
    for word, (x, y) in zip(words, twodim):
        plt.text(x + 0.02, y, word)  # small offset keeps label off the marker
    plt.axis('off')

# Gendered/royalty word pairs — expected to show parallel gender/role
# structure in the 2-D projection.
words = ['man','boy','king','boyfriend','prince','woman','girl','queen','girlfriend','princess']
plot_embedding_pca(gensim_builtin_glove_model,words)
plt.show()