#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
超高性能Neo4j导入工具
功能：专为大数据量设计的极速导入方案
特点：
1. 预处理数据去重
2. 最小化数据库操作
3. 优化的批量大小
4. 内存友好的流式处理
"""

import hashlib
import re
import sys
import time
from collections import defaultdict
from pathlib import Path

import jieba
from neo4j import GraphDatabase


class UltraFastImporter:
    """Batched Neo4j importer for ``->``-delimited decision-path text files.

    Intended workflow:
        1. (optional) ``clear_database()``
        2. ``preprocess_data(folder)``  -- parse and collect everything in memory
        3. ``ultra_fast_import(data)``  -- one transaction, UNWIND batches

    ``self.driver`` is ``None`` when the connection failed; every public
    method checks it before talking to the database.
    """

    def __init__(self, uri="bolt://192.168.1.169:7687", user="neo4j", password="Pa@ssw0rd"):
        # Connection errors are swallowed and recorded as driver=None so the
        # CLI can print a friendly message instead of crashing on startup.
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
            print(f"✅ 成功连接到Neo4j数据库: {uri}")
        except Exception as e:
            print(f"❌ 连接Neo4j失败: {e}")
            self.driver = None

    def close(self):
        """Release the underlying driver. Safe to call when never connected."""
        if self.driver:
            self.driver.close()

    def clear_database(self):
        """Delete every node and relationship in the database.

        Returns:
            bool: True on success, False when there is no connection.
        """
        if not self.driver:
            return False

        print("🗑️  清空数据库...")
        with self.driver.session() as session:
            session.run("MATCH (n) DETACH DELETE n")
            print("✅ 数据库已清空")
            return True

    def create_optimized_indexes(self):
        """Create one index per property the relationship phase MATCHes on.

        Individual index failures (e.g. syntax differences on older Neo4j
        versions) are reported as warnings but do not abort the import.

        Returns:
            bool: True when the index pass ran, False without a connection.
        """
        print("🔧 创建优化索引...")

        if not self.driver:
            return False

        with self.driver.session() as session:
            basic_indexes = [
                "CREATE INDEX file_name_idx IF NOT EXISTS FOR (f:File) ON (f.name)",
                "CREATE INDEX product_code_idx IF NOT EXISTS FOR (p:Product) ON (p.code)",
                "CREATE INDEX record_id_idx IF NOT EXISTS FOR (r:Record) ON (r.id)",
                "CREATE INDEX record_filename_idx IF NOT EXISTS FOR (r:Record) ON (r.filename)",
                "CREATE INDEX record_product_idx IF NOT EXISTS FOR (r:Record) ON (r.product_code)",
                "CREATE INDEX node_id_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.id)",
                "CREATE INDEX node_filename_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.filename)",
                "CREATE INDEX node_product_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.product_code)",
                "CREATE INDEX line_id_idx IF NOT EXISTS FOR (l:LineRecord) ON (l.id)",
                "CREATE INDEX line_filename_idx IF NOT EXISTS FOR (l:LineRecord) ON (l.filename)",
            ]

            for index_query in basic_indexes:
                try:
                    session.run(index_query)
                except Exception as e:
                    print(f"⚠️ 索引创建警告: {e}")

        print("✅ 索引创建完成")
        return True

    def preprocess_data(self, folder_path):
        """Parse every ``*.txt`` file under *folder_path* into flat lists.

        Args:
            folder_path: directory containing the knowledge text files.

        Returns:
            dict with keys ``files``, ``products``, ``records``,
            ``line_records`` and ``decision_nodes``, or None when the
            folder does not exist.
        """
        print(f"📁 预处理数据: {folder_path}")

        folder_path = Path(folder_path)
        if not folder_path.exists():
            print(f"❌ 文件夹 {folder_path} 不存在")
            return None

        # files: de-duplicated names; products: code -> cargo number
        # (on duplicate product codes the last occurrence wins).
        files_data = set()
        products_data = {}
        records_data = []
        line_records_data = []
        decision_nodes_data = []

        txt_files = list(folder_path.glob("*.txt"))
        print(f"📊 处理 {len(txt_files)} 个文件...")

        processed_lines = 0

        for file_path in txt_files:
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()

                files_data.add(file_path.name)

                for line_num, line in enumerate(lines, 1):
                    line = line.strip()
                    # Only decision-path lines ("A -> B -> ...") are imported.
                    if line and '->' in line:
                        parsed = self._parse_line(file_path.name, line_num, line)
                        if parsed:
                            products_data[parsed['product']] = parsed['cargo_number']

                            # Record id is unique per (file, line) pair.
                            record_id = f"{parsed['filename']}_{parsed['line_number']}"
                            records_data.append({
                                'id': record_id,
                                'content': parsed['final_content'],
                                'full_path': parsed['original_content'],
                                'filename': parsed['filename'],
                                'line_number': parsed['line_number'],
                                'product_code': parsed['product'],
                                'cargo_number': parsed['cargo_number']
                            })

                            line_records_data.append({
                                'id': record_id,
                                'filename': parsed['filename'],
                                'line_number': parsed['line_number'],
                                'content': parsed['original_content']
                            })

                            for node in parsed['nodes']:
                                # FIX: ids previously used the builtin hash(),
                                # which is randomized per process
                                # (PYTHONHASHSEED), so ids changed between
                                # runs; the id also omitted the filename, so
                                # equal content at the same product/line/level
                                # in different files collided and the MATCH in
                                # the relationship phase fanned out duplicate
                                # INCLUDES edges. A deterministic content
                                # digest keyed by filename fixes both.
                                digest = hashlib.md5(
                                    node['content'].encode('utf-8')).hexdigest()[:12]
                                node_id = (
                                    f"{parsed['filename']}_{parsed['product']}_"
                                    f"{parsed['line_number']}_{node['level']}_{digest}"
                                )
                                decision_nodes_data.append({
                                    'id': node_id,
                                    'content': node['content'],
                                    'type': node['type'],
                                    'level': node['level'],
                                    'product_code': parsed['product'],
                                    'keywords': node['keywords'],
                                    'filename': parsed['filename'],
                                    'line_number': parsed['line_number'],
                                    'full_path': parsed['original_content'],
                                    'record_id': record_id
                                })

                            processed_lines += 1

            except Exception as e:
                print(f"❌ 处理文件失败 {file_path}: {e}")

        print(f"✅ 预处理完成: {processed_lines} 行数据")
        print(f"📊 统计: {len(files_data)} 文件, {len(products_data)} 产品, "
              f"{len(records_data)} 记录, {len(decision_nodes_data)} 决策节点")

        return {
            'files': list(files_data),
            'products': products_data,
            'records': records_data,
            'line_records': line_records_data,
            'decision_nodes': decision_nodes_data
        }

    def _parse_line(self, filename, line_number, content):
        """Parse one decision line into product, cargo number and path nodes.

        Returns:
            dict with the parsed fields, or None when the line has no leading
            product code or fewer than two ``->`` segments.
        """
        # Product code: 1-3 capital letters plus 3-4 digits at line start.
        product_match = re.search(r'^([A-Z]{1,3}\d{3,4})', content)
        if not product_match:
            return None

        product = product_match.group(1)

        # Optional cargo number tag; fall back to the product code itself.
        cargo_match = re.search(r'\[货号:([^\]]+)\]', content)
        cargo_number = cargo_match.group(1) if cargo_match else product

        # Split the decision path on '->'.
        parts = [part.strip() for part in content.split('->')]
        if len(parts) < 2:
            return None

        # Strip the cargo tag from the final (result) segment.
        last_part = parts[-1]
        last_part = re.sub(r'\s*\[货号:[^\]]+\]', '', last_part).strip()
        parts[-1] = last_part

        # One node per segment; every segment but the last is a condition.
        nodes = []
        for i, part in enumerate(parts):
            if part:
                # Keywords: jieba tokens longer than one character.
                keywords = list(jieba.cut(part))
                keywords = [k.strip() for k in keywords if len(k.strip()) > 1]

                nodes.append({
                    'content': part,
                    'type': 'condition' if i < len(parts) - 1 else 'result',
                    'level': i,
                    'keywords': keywords
                })

        return {
            'filename': filename,
            'line_number': line_number,
            'original_content': content,
            'final_content': parts[-1] if parts else '',
            'product': product,
            'cargo_number': cargo_number,
            'nodes': nodes
        }

    def ultra_fast_import(self, preprocessed_data, batch_size=2000):
        """Import the preprocessed data inside a single transaction.

        Nodes are created first (in UNWIND batches of *batch_size*), then the
        relationships are wired up via indexed MATCHes.

        Args:
            preprocessed_data: the dict produced by :meth:`preprocess_data`.
            batch_size: rows per UNWIND statement.

        Returns:
            bool: True on success, False without a connection or data.
        """
        if not self.driver or not preprocessed_data:
            print("❌ 数据库连接不可用或无数据")
            return False

        print(f"🚀 开始超高速导入...")
        start_time = time.time()

        # Indexes first so the MATCH-heavy relationship phase stays fast.
        self.create_optimized_indexes()

        with self.driver.session() as session:
            with session.begin_transaction() as tx:
                # 1. File nodes (already de-duplicated during preprocessing).
                print("📁 创建文件节点...")
                files_data = [{'name': f} for f in preprocessed_data['files']]
                tx.run("""
                    UNWIND $files AS file
                    CREATE (f:File {name: file.name, timestamp: datetime()})
                """, files=files_data)

                # 2. Product nodes (one per unique product code).
                print("📦 创建产品节点...")
                products_data = [{'code': code, 'cargo_number': cargo}
                               for code, cargo in preprocessed_data['products'].items()]
                tx.run("""
                    UNWIND $products AS product
                    CREATE (p:Product {
                        code: product.code,
                        cargo_number: product.cargo_number,
                        timestamp: datetime()
                    })
                """, products=products_data)

                # 3-5. Batched node creation.
                records = preprocessed_data['records']
                self._batch_create_records(tx, records, batch_size)

                line_records = preprocessed_data['line_records']
                self._batch_create_line_records(tx, line_records, batch_size)

                decision_nodes = preprocessed_data['decision_nodes']
                self._batch_create_decision_nodes(tx, decision_nodes, batch_size)

                # 6. Relationships last, once every node exists.
                print("🔗 创建关系...")
                self._create_all_relationships(tx, preprocessed_data)

                tx.commit()

        total_time = time.time() - start_time
        total_records = len(preprocessed_data['records'])
        # FIX: guard the rate against division by zero on an instant import.
        rate = total_records / total_time if total_time > 0 else float('inf')

        print(f"✅ 超高速导入完成！")
        print(f"⏱️  总耗时: {total_time:.2f}秒")
        print(f"🚀 平均速度: {rate:.1f} 条/秒")

        return True

    def _batch_create_records(self, tx, records, batch_size):
        """Create :Record nodes in UNWIND batches inside transaction *tx*."""
        print(f"📝 创建 {len(records)} 个记录节点...")

        for i in range(0, len(records), batch_size):
            batch = records[i:i+batch_size]
            tx.run("""
                UNWIND $records AS record
                CREATE (r:Record {
                    id: record.id,
                    content: record.content,
                    full_path: record.full_path,
                    filename: record.filename,
                    line_number: record.line_number,
                    product_code: record.product_code,
                    cargo_number: record.cargo_number,
                    timestamp: datetime()
                })
            """, records=batch)

    def _batch_create_line_records(self, tx, line_records, batch_size):
        """Create :LineRecord nodes in UNWIND batches inside transaction *tx*."""
        print(f"📄 创建 {len(line_records)} 个行记录节点...")

        for i in range(0, len(line_records), batch_size):
            batch = line_records[i:i+batch_size]
            tx.run("""
                UNWIND $line_records AS line_record
                CREATE (l:LineRecord {
                    id: line_record.id,
                    filename: line_record.filename,
                    line_number: line_record.line_number,
                    content: line_record.content,
                    timestamp: datetime()
                })
            """, line_records=batch)

    def _batch_create_decision_nodes(self, tx, decision_nodes, batch_size):
        """Create :DecisionNode nodes in UNWIND batches inside transaction *tx*."""
        print(f"🌳 创建 {len(decision_nodes)} 个决策节点...")

        for i in range(0, len(decision_nodes), batch_size):
            batch = decision_nodes[i:i+batch_size]
            tx.run("""
                UNWIND $nodes AS node
                CREATE (n:DecisionNode {
                    id: node.id,
                    content: node.content,
                    type: node.type,
                    level: node.level,
                    product_code: node.product_code,
                    keywords: node.keywords,
                    filename: node.filename,
                    line_number: node.line_number,
                    full_path: node.full_path,
                    timestamp: datetime()
                })
            """, nodes=batch)

    def _create_all_relationships(self, tx, preprocessed_data):
        """Wire File->Record, File->LineRecord and Record->DecisionNode edges.

        Relies on the indexes from :meth:`create_optimized_indexes` for the
        MATCH lookups.
        """
        file_record_relations = [
            {'filename': record['filename'], 'record_id': record['id']}
            for record in preprocessed_data['records']
        ]

        file_line_relations = [
            {'filename': line_record['filename'], 'line_id': line_record['id']}
            for line_record in preprocessed_data['line_records']
        ]

        record_node_relations = [
            {'record_id': node['record_id'], 'node_id': node['id']}
            for node in preprocessed_data['decision_nodes']
        ]

        print("🔗 创建文件-记录关系...")
        tx.run("""
            UNWIND $relations AS rel
            MATCH (f:File {name: rel.filename})
            MATCH (r:Record {id: rel.record_id})
            CREATE (f)-[:CONTAINS]->(r)
        """, relations=file_record_relations)

        print("🔗 创建文件-行记录关系...")
        tx.run("""
            UNWIND $relations AS rel
            MATCH (f:File {name: rel.filename})
            MATCH (l:LineRecord {id: rel.line_id})
            CREATE (f)-[:CONTAINS]->(l)
        """, relations=file_line_relations)

        print("🔗 创建记录-节点关系...")
        tx.run("""
            UNWIND $relations AS rel
            MATCH (r:Record {id: rel.record_id})
            MATCH (n:DecisionNode {id: rel.node_id})
            CREATE (r)-[:INCLUDES]->(n)
        """, relations=record_node_relations)


def main():
    """Command-line entry point.

    Connects with the default credentials, optionally wipes the database
    (automatically when invoked with ``--auto-clear``, otherwise after a
    yes/no prompt), then preprocesses the knowledge folder and imports it.
    """
    print("⚡ 超高性能Neo4j导入工具")
    print("=" * 50)

    # Fixed configuration for this tool.
    source_dir = "/Users/daijunxiong/Downloads/knowledge_txt"
    chunk_size = 2000  # larger batches keep round-trips to a minimum
    wipe_first = sys.argv[1:2] == ["--auto-clear"]

    print(f"📁 目录路径: {source_dir}")
    print(f"🎯 连接数据库: bolt://192.168.1.169:7687")
    print(f"📦 批量大小: {chunk_size}")

    loader = UltraFastImporter()

    if not loader.driver:
        print("❌ 无法连接到Neo4j数据库")
        return

    try:
        # Wipe automatically, or only on an explicit 'y' from the prompt.
        if wipe_first:
            print("🗑️  自动清空数据库...")
            loader.clear_database()
        elif input("是否清空现有数据库？(y/N): ").strip().lower() == 'y':
            loader.clear_database()

        print(f"\n🔄 预处理数据...")
        dataset = loader.preprocess_data(source_dir)

        if not (dataset and dataset['records']):
            print("❌ 没有找到有效数据")
            return

        print(f"\n⚡ 开始超高速导入...")
        if loader.ultra_fast_import(dataset, chunk_size):
            print("\n🎉 导入完成！")
            print(f"📊 统计信息:")
            print(f"   - 处理文件数: {len(dataset['files'])}")
            print(f"   - 导入记录数: {len(dataset['records'])}")
            print(f"   - 产品数: {len(dataset['products'])}")
            print(f"   - 决策节点数: {len(dataset['decision_nodes'])}")

            print("\n💡 现在您可以使用以下命令进行查询:")
            print("   python test.bak 'PK511无产物'")
            print("   python test.bak")

    finally:
        loader.close()


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
