import inspect
import os

import pandas as pd
from gensim.models import Word2Vec


class Node2Vec:
    """Train, persist, and reload skip-gram Word2Vec embeddings for node
    sequences (random walks), caching the trained model on disk.

    Works with both gensim < 4.0 and gensim >= 4.0, which renamed the
    ``size``/``iter`` constructor arguments to ``vector_size``/``epochs``
    and replaced ``wv.vocab`` with ``wv.key_to_index``.
    """

    def __init__(self, word2vec_save_basepath, word2vec_save_name="word2vec.bin"):
        """Remember where the trained model is (or will be) stored.

        Args:
            word2vec_save_basepath: Directory for the saved model file.
            word2vec_save_name: File name of the saved model.
        """
        self.word2vec_save_basepath = word2vec_save_basepath
        self.word2vec_path = os.path.join(self.word2vec_save_basepath, word2vec_save_name)

    def train(self, sentences, embed_size=128, window_size=7, workers=3, iter=10, **kwargs):
        """Train a skip-gram Word2Vec model on ``sentences`` and save it.

        Skipped (with a message) if a saved model already exists at
        ``self.word2vec_path`` — delete that file to retrain.

        Args:
            sentences: Iterable of token lists (e.g. random walks).
            embed_size: Dimensionality of the embedding vectors.
            window_size: Context window size.
            workers: Number of worker threads.
            iter: Number of training epochs. (Name shadows the ``iter``
                builtin, but is kept for backward compatibility with
                callers passing it by keyword.)
            **kwargs: Extra keyword arguments forwarded to ``Word2Vec``;
                ``min_count`` defaults to 0 unless overridden.
        """
        if os.path.exists(self.word2vec_path):
            print("already exists ,not train or delete the model first")
            return

        kwargs["sentences"] = sentences
        kwargs["min_count"] = kwargs.get("min_count", 0)
        kwargs["sg"] = 1  # skip-gram, as node2vec prescribes
        kwargs["hs"] = 0  # node2vec not use Hierarchical Softmax
        kwargs["workers"] = workers
        kwargs["window"] = window_size

        # gensim >= 4.0 renamed size -> vector_size and iter -> epochs;
        # inspect the constructor to pick the names this install accepts.
        accepted = inspect.signature(Word2Vec.__init__).parameters
        if "vector_size" in accepted:
            kwargs["vector_size"] = embed_size
            kwargs["epochs"] = iter
        else:
            kwargs["size"] = embed_size
            kwargs["iter"] = iter

        print("Learning embedding vectors...")
        model = Word2Vec(**kwargs)
        print("Learning embedding vectors done!")

        model.save(self.word2vec_path)

    def get_embeddings(self):
        """Load the saved model and return a dict mapping each vocabulary
        key to its embedding vector (a numpy array).

        Returns an empty dict (with a message) if no model has been
        trained yet.
        """
        if not os.path.exists(self.word2vec_path):
            print("model not train")
            return {}

        model = Word2Vec.load(self.word2vec_path)

        # gensim >= 4.0 exposes the vocabulary as wv.key_to_index;
        # older versions used wv.vocab. Both iterate over the keys.
        wv = model.wv
        vocab = getattr(wv, "key_to_index", None)
        if vocab is None:
            vocab = wv.vocab

        self._embeddings = {word: wv[word] for word in vocab}
        return self._embeddings
