#!pip install networkx node2vec gensim


import networkx as nx
from node2vec import Node2Vec
from gensim.models import Word2Vec
# from gensim.models import
# Embed edges using Hadamard method
from node2vec.edges import HadamardEmbedder


def save_word2vec_status(nodes, edges):
    """Build a graph from *nodes*/*edges*, train node2vec embeddings on it,
    and persist both the raw embeddings and the full model to disk.

    Args:
        nodes: iterable of hashable node identifiers
            (forwarded to ``Graph.add_nodes_from``).
        edges: iterable of ``(u, v)`` pairs
            (forwarded to ``Graph.add_edges_from``).

    Side effects:
        Writes ``node2vec_embeddings.bin`` (word2vec vector format) and
        ``node2vec_model.bin`` (full gensim model, reloadable with
        ``Word2Vec.load``) into the current working directory.

    Returns:
        The fitted gensim Word2Vec model, so callers can use it directly
        without re-reading it from disk. (Previously the model was
        discarded and the function returned None — returning it is
        backward-compatible.)
    """
    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)

    node2vec = Node2Vec(
        graph,
        dimensions=64,   # embedding dimensionality (library default: 128)
        walk_length=20,  # nodes per random walk (library default: 80)
        num_walks=200,   # walks started from each node (library default: 10)
        workers=8,       # CPU workers used for walk generation
        p=1,             # return parameter of the biased walk
        q=0.1,           # in-out parameter; q < 1 biases walks outward (DFS-like)
    )
    model = node2vec.fit(window=2, min_count=1, batch_words=8)

    # Save raw embeddings (word2vec format) for lightweight later lookups.
    model.wv.save_word2vec_format('node2vec_embeddings.bin')
    # Save the complete model so get_vector_for_word() can reload it.
    model.save('node2vec_model.bin')
    return model
def get_vector_for_word(text):
    """Load the node2vec model saved by ``save_word2vec_status`` and return
    the embedding vector for *text*.

    Args:
        text: node key to look up (e.g. an IP string such as '10.29.130.21').

    Returns:
        The embedding vector (numpy array) stored for *text*.

    Raises:
        KeyError: if *text* is not in the trained vocabulary.
        FileNotFoundError: if ``node2vec_model.bin`` does not exist yet
            (run ``save_word2vec_status`` first).
    """
    # Reload the full model persisted via model.save().
    # NOTE(review): this re-reads the file on every call — consider caching
    # the loaded model at module level if this function is called in a loop.
    model = Word2Vec.load('node2vec_model.bin')
    embeddings = model.wv

    # BUG FIX: gensim's KeyedVectors.most_similar() has no `ignore_missing`
    # keyword (that parameter belongs to get_mean_vector), so the original
    # call raised TypeError on every invocation. The lookup is diagnostic
    # only, so guard it instead of letting an out-of-vocabulary key abort.
    try:
        most_similar_nodes = embeddings.most_similar(text, topn=2)
    except KeyError:
        most_similar_nodes = []
    print("################", most_similar_nodes)

    # Embedding lookup for the requested node key.
    node_embedding = embeddings[text]
    return node_embedding



# Hadamard product — edge-embedding example (intentionally left commented out)
# edges_embs = HadamardEmbedder(keyed_vectors=model.wv)
#
# # Look for embeddings on the fly - here we pass normal tuples
# # print(edges_embs[("A环境", '10.10.112.67')])
# ''' OUTPUT
# array([ 5.75068220e-03, -1.10937878e-02,  3.76693785e-01,  2.69105062e-02,
#        ... ... ....
#        ..................................................................],
#       dtype=float32)
# '''
# # Get all edges in a separate KeyedVectors instance - use with caution could be huge for big networks
# edges_kv = edges_embs.as_keyed_vectors()
# # Look for most similar edges - this time tuples must be sorted and as str
#
#
# edges_kv.most_similar(str(("10.10.112.59", "A环境")))
# # Save embeddings for later use
# edges_kv.save_word2vec_format('node2vec_edge_embeddings.bin')

# model.load()

