from py2neo import Graph, Node, Relationship
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
import time

# -------------------------- Database configuration --------------------------
# Replace with your own Neo4j connection details.
# SECURITY NOTE(review): credentials are hardcoded in source — consider
# loading them from environment variables instead.
NEO4J_URI = "neo4j://localhost:7687"
NEO4J_USER = "neo4j"
NEO4J_PASSWORD = "053116wj"

# -------------------------- Model loading --------------------------
# Load an efficient Chinese text-embedding model; downloaded automatically
# from Hugging Face on first use.
print("🚀 正在加载 Sentence Transformer 模型...")
model = SentenceTransformer('moka-ai/m3e-base')
print("✅ 模型加载成功！")

# -------------------------- Database connection --------------------------
# Fail fast: if Neo4j is unreachable there is nothing useful to do.
try:
    graph = Graph(NEO4J_URI, auth=(NEO4J_USER, NEO4J_PASSWORD))
    print("✅ 成功连接到图数据库！")
except Exception as e:
    print(f"❌ 连接数据库失败：{e}")
    exit()


# -------------------------- 核心操作：批量更新节点和关系 --------------------------

def embed_and_update_nodes():
    """Generate and store an embedding vector for every named node.

    Selects all nodes that have a ``name`` property but no ``embedding``
    property yet, encodes the names in batches with the module-level
    sentence-transformer ``model``, and writes each vector back to the
    node's ``embedding`` property. Because already-embedded nodes are
    skipped, the function is safe to re-run after an interruption.
    """
    print("\n🔍 正在获取所有需要嵌入的节点...")
    # Fetch every node that has a 'name' but no embedding yet.
    query_nodes = """
    MATCH (n)
    WHERE n.name IS NOT NULL AND n.embedding IS NULL
    RETURN n.name AS name, id(n) AS id
    """

    node_names_to_embed = graph.run(query_nodes).data()
    total_nodes = len(node_names_to_embed)
    print(f"找到 {total_nodes} 个需要处理的节点。")

    if total_nodes == 0:
        print("所有节点都已包含嵌入向量，无需更新。")
        return

    # Parameterized update statement, hoisted out of the loop: passing the
    # id as $node_id (instead of f-string interpolation) avoids injection
    # pitfalls and lets Neo4j cache a single query plan for all updates.
    update_cypher = """
    MATCH (n)
    WHERE id(n) = $node_id
    SET n.embedding = $embedding
    """

    # Encode in batches — one model call per batch is far faster than one
    # call per node.
    batch_size = 32
    for i in tqdm(range(0, total_nodes, batch_size), desc="生成并更新节点向量中"):
        batch_items = node_names_to_embed[i:i + batch_size]
        batch_names = [d['name'] for d in batch_items]
        embeddings = model.encode(batch_names, convert_to_tensor=False).tolist()

        # One transaction per batch: bounded memory, far fewer commits
        # than one-per-node.
        tx = graph.begin()
        for item, embedding in zip(batch_items, embeddings):
            tx.run(update_cypher, node_id=item['id'], embedding=embedding)
        graph.commit(tx)

    print("\n🎉 所有节点的嵌入向量已成功写入！")


def embed_and_update_relationships():
    """Generate and store an embedding vector for every relationship.

    Selects all relationships between two named nodes that have no
    ``embedding`` property yet, builds a Chinese sentence of the form
    "<start>的<type>是<end>" for each, encodes these sentences in batches,
    and writes each vector back to the relationship's ``embedding``
    property. Already-embedded relationships are skipped, so the function
    is safe to re-run.
    """
    print("\n🔍 正在获取所有需要嵌入的关系...")
    # Fetch relationships whose endpoints are named and which lack an
    # embedding.
    query_rels = """
    MATCH (s)-[r]->(t)
    WHERE s.name IS NOT NULL AND t.name IS NOT NULL AND r.embedding IS NULL
    RETURN s.name AS start_name, type(r) AS rel_type, t.name AS end_name, id(r) AS rel_id
    """

    relations_to_embed = graph.run(query_rels).data()
    total_relations = len(relations_to_embed)
    print(f"找到 {total_relations} 个需要处理的关系。")

    if total_relations == 0:
        print("所有关系都已包含嵌入向量，无需更新。")
        return

    # Parameterized update statement, hoisted out of the loop: passing the
    # id as $rel_id (instead of f-string interpolation) avoids injection
    # pitfalls and lets Neo4j cache a single query plan for all updates.
    update_cypher = """
    MATCH ()-[r]->()
    WHERE id(r) = $rel_id
    SET r.embedding = $embedding
    """

    batch_size = 32
    for i in tqdm(range(0, total_relations, batch_size), desc="生成并更新关系向量中"):
        batch_items = relations_to_embed[i:i + batch_size]
        # Build a textual description of each relationship, e.g.
        # "蔗糖的优点是高甜度" (start-name + relation-type + end-name).
        batch_text = [f"{item['start_name']}的{item['rel_type']}是{item['end_name']}" for item in batch_items]
        embeddings = model.encode(batch_text, convert_to_tensor=False).tolist()

        # One transaction per batch: bounded memory, far fewer commits
        # than one-per-relationship.
        tx = graph.begin()
        for item, embedding in zip(batch_items, embeddings):
            tx.run(update_cypher, rel_id=item['rel_id'], embedding=embedding)
        graph.commit(tx)

    print("\n🎉 所有关系的嵌入向量已成功写入！")


if __name__ == "__main__":
    # Use time.perf_counter() for elapsed-time measurement: it is a
    # monotonic, high-resolution clock, whereas time.time() can jump if
    # the system clock is adjusted mid-run.
    start_time = time.perf_counter()
    embed_and_update_nodes()
    embed_and_update_relationships()
    end_time = time.perf_counter()
    print(f"\n总耗时：{end_time - start_time:.2f} 秒")