# from neo4j import GraphDatabase
# import json
# from collections import defaultdict
#
# class Neo4jImporter:
#     def __init__(self):
#         self.driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "12345678"))
#
#     def close(self):
#         self.driver.close()
#
#     def import_entities(self, entity_file):
#         with open(entity_file, 'r', encoding='utf-8') as f:
#             entities = json.load(f)
#
#         with self.driver.session() as session:
#             query = """
#             UNWIND $batch AS entity
#             MERGE (n {name: entity.n.properties.name})
#             SET n += entity.n.properties,
#                 n.labels = entity.n.labels
#             """
#             batch_size = 100
#             for i in range(0, len(entities), batch_size):
#                 batch = entities[i:i + batch_size]
#                 session.run(query, batch=batch)
#             print(f"实体导入完成，共 {len(entities)} 个")
#
#     def import_relations(self, relation_file):
#         bidirectional = {
#             "妻子": "丈夫",
#             "徒弟": "师父",
#             "敌对": "敌对"
#         }
#
#         with open(relation_file, 'r', encoding='utf-8') as f:
#             relations = [line.strip().strip('()').split(',') for line in f]
#
#         all_relations = []
#         for parts in relations:
#             if len(parts) != 3:
#                 print(f"忽略无效行: {parts}")
#                 continue
#             head, rel_type, tail = parts
#             head = head.strip()
#             rel_type = rel_type.strip()
#             tail = tail.strip()
#             all_relations.append((head, rel_type, tail))
#             if rel_type in bidirectional:
#                 reverse_rel = bidirectional[rel_type]
#                 all_relations.append((tail, reverse_rel, head))
#
#         rel_groups = defaultdict(list)
#         for rel in all_relations:
#             rel_groups[rel[1]].append(rel)
#
#         with self.driver.session() as session:
#             batch_size = 100
#             for rel_type, group in rel_groups.items():
#                 rel_type_escaped = rel_type.replace('`', '')  # 转义反引号
#                 query = f"""
#                 UNWIND $batch AS rel
#                 MERGE (h {{name: rel.head}})
#                 MERGE (t {{name: rel.tail}})
#                 MERGE (h)-[:`{rel_type_escaped}`]->(t)
#                 """
#                 data_batch = [{'head': r[0], 'tail': r[2]} for r in group]
#                 for i in range(0, len(data_batch), batch_size):
#                     batch = data_batch[i:i + batch_size]
#                     session.run(query, batch=batch)
#             print(f"关系导入完成，共 {len(all_relations)} 条")
#
# if __name__ == "__main__":
#     importer = Neo4jImporter()
#     try:
#         importer.import_entities("data.json")
#         importer.import_relations("triplet.txt")
#     finally:
#         importer.close()
from neo4j import GraphDatabase
import json
from collections import defaultdict
class Neo4jImporter:
    """Imports entities and relations from local files into a Neo4j graph.

    Entities come from a JSON export (a list of ``{"n": {"labels": [...],
    "properties": {...}}}`` records); relations come from a text file of
    ``(head, type, tail)`` triples, one per line.
    """

    def __init__(self, uri="bolt://47.115.226.128:7687", auth=("neo4j", "12345678")):
        """Open a driver to *uri* with *auth* (defaults preserve old behavior).

        SECURITY NOTE(review): the default credentials/public IP are
        inherited from the original code; pass uri/auth explicitly (or load
        them from env/config) in real deployments.
        """
        # self.driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "12345678"))
        self.driver = GraphDatabase.driver(uri, auth=auth)

    def close(self):
        """Release the driver's connection pool."""
        self.driver.close()

    @staticmethod
    def _label_string(labels):
        """Build a Cypher label fragment like ``:`人物`:`主角``` from *labels*.

        Backticks are stripped from each label before quoting — consistent
        with the relation-type escaping below — otherwise a label containing
        "`" would break out of the quoted identifier (Cypher injection).
        Returns "" for an empty label list (label-less node).
        """
        if not labels:
            return ""
        return ":" + ":".join(f"`{label.replace('`', '')}`" for label in labels)

    def import_entities(self, entity_file):
        """Load entities from *entity_file* (JSON) and MERGE them by name.

        Entities are grouped by their label tuple because Cypher cannot
        parameterize labels; each group shares one query with the labels
        inlined, submitted in batches of 100.
        """
        with open(entity_file, 'r', encoding='utf-8') as f:
            entities = json.load(f)

        # 按标签分组（处理多标签和空标签情况）
        label_groups = defaultdict(list)
        for entity in entities:
            label_groups[tuple(entity["n"]["labels"])].append(entity)

        with self.driver.session() as session:
            batch_size = 100
            for labels, group in label_groups.items():
                label_str = self._label_string(labels)
                query = f"""
                UNWIND $batch AS entity
                MERGE (n{label_str} {{name: entity.n.properties.name}})
                SET n += entity.n.properties
                """
                # 分批次提交 — bounds the size of each transaction.
                for i in range(0, len(group), batch_size):
                    session.run(query, batch=group[i:i + batch_size])
            print(f"✅ 实体导入完成 | 总数: {len(entities)} | 标签组: {len(label_groups)}")

    @staticmethod
    def _parse_triples(lines, bidirectional):
        """Parse ``(head, type, tail)`` lines, expanding bidirectional types.

        Blank or malformed lines (not exactly three comma-separated fields)
        are skipped — best-effort import. For each triple whose type is a key
        of *bidirectional*, the reverse triple ``(tail, mapped_type, head)``
        is appended as well. Returns a list of (head, rel_type, tail) tuples.
        """
        all_relations = []
        for line in lines:
            clean_line = line.strip().strip('()')
            if not clean_line:
                continue
            parts = [p.strip() for p in clean_line.split(',')]
            if len(parts) != 3:
                continue
            head, rel_type, tail = parts
            all_relations.append((head, rel_type, tail))
            if rel_type in bidirectional:
                all_relations.append((tail, bidirectional[rel_type], head))
        return all_relations

    def import_relations(self, relation_file):
        """Load triples from *relation_file* and MERGE the relationships.

        Relations are grouped by type because Cypher cannot parameterize a
        relationship type; each type gets its own query with the (escaped)
        type inlined, submitted in batches of 100. Endpoint nodes are MERGEd
        by name only, so previously-unseen names create label-less nodes.
        """
        bidirectional = {
            "妻子": "丈夫",
            "徒弟": "师父",
            "敌对": "敌对"
        }
        # 数据清洗和预处理 + 生成双向关系
        with open(relation_file, 'r', encoding='utf-8') as f:
            all_relations = self._parse_triples(f, bidirectional)

        # 按关系类型分组处理
        rel_groups = defaultdict(list)
        for head, rel_type, tail in all_relations:
            rel_groups[rel_type].append((head, tail))

        with self.driver.session() as session:
            batch_size = 100
            for rel_type, pairs in rel_groups.items():
                # Strip backticks so the type cannot escape its quoting
                # (same policy as _label_string).
                escaped_rel_type = rel_type.replace('`', '')
                query = f"""
                UNWIND $batch AS rel
                MERGE (h {{name: rel.head}})
                MERGE (t {{name: rel.tail}})
                MERGE (h)-[:`{escaped_rel_type}`]->(t)
                """
                data_batch = [{'head': h, 'tail': t} for h, t in pairs]
                # 批量提交
                for i in range(0, len(data_batch), batch_size):
                    session.run(query, batch=data_batch[i:i + batch_size])
            print(f"✅ 关系导入完成 | 总数: {len(all_relations)} | 关系类型: {len(rel_groups)}")

if __name__ == "__main__":
    # Run the two import phases in order; always close the driver,
    # even when an import step fails.
    loader = Neo4jImporter()
    try:
        for task, path in (
            (loader.import_entities, "data.json"),
            (loader.import_relations, "triplet.txt"),
        ):
            task(path)
    except Exception as e:
        print(f"❌ 导入失败: {str(e)}")
    finally:
        loader.close()