#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
高性能Neo4j知识库导入工具
功能：使用批量导入和事务优化，大幅提升导入速度
适配数据库：bolt://192.168.1.169:7687
"""

import hashlib
import re
import sys
import time
from datetime import datetime
from pathlib import Path

import jieba
from neo4j import GraphDatabase


class FastNeo4jImporter:
    """High-performance bulk importer for decision-tree text files into Neo4j.

    Each input line of the form ``<PRODUCT> cond -> ... -> result [货号:X]``
    is parsed into File / Product / Record / LineRecord / DecisionNode nodes
    and bulk-loaded with UNWIND-based Cypher statements inside batched
    transactions.
    """

    def __init__(self, uri="bolt://192.168.1.169:7687", user="neo4j", password="Pa@ssw0rd"):
        """Create the driver and verify connectivity.

        On any failure ``self.driver`` is left as None; all public methods
        check it before touching the database.
        NOTE(review): credentials are hard-coded defaults — consider reading
        them from environment variables in production.
        """
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
            # Driver construction is lazy and rarely raises; force a
            # round-trip so connection errors surface here, not on first use.
            # (Guarded with hasattr for very old driver versions.)
            if hasattr(self.driver, "verify_connectivity"):
                self.driver.verify_connectivity()
            print(f"✅ 成功连接到Neo4j数据库: {uri}")
        except Exception as e:
            print(f"❌ 连接Neo4j失败: {e}")
            self.driver = None

    def close(self):
        """Release the underlying driver (safe to call when never connected)."""
        if self.driver:
            self.driver.close()

    def clear_database(self):
        """Delete every node and relationship. Returns True on success."""
        if not self.driver:
            return False

        print("🗑️  清空数据库...")
        with self.driver.session() as session:
            # DETACH DELETE drops relationships together with their nodes.
            session.run("MATCH (n) DETACH DELETE n")
            print("✅ 数据库已清空")
            return True

    def create_indexes(self):
        """Create lookup and fulltext indexes used by the import queries."""
        print("🔧 创建性能优化索引...")

        if not self.driver:
            return False

        with self.driver.session() as session:
            indexes = [
                "CREATE INDEX IF NOT EXISTS FOR (f:File) ON (f.name)",
                "CREATE INDEX IF NOT EXISTS FOR (p:Product) ON (p.code)",
                "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.id)",
                "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.filename)",
                "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.product_code)",
                "CREATE INDEX IF NOT EXISTS FOR (n:DecisionNode) ON (n.id)",
                "CREATE INDEX IF NOT EXISTS FOR (n:DecisionNode) ON (n.filename)",
                "CREATE INDEX IF NOT EXISTS FOR (n:DecisionNode) ON (n.product_code)",
                "CREATE INDEX IF NOT EXISTS FOR (l:LineRecord) ON (l.id)",
                "CREATE INDEX IF NOT EXISTS FOR (l:LineRecord) ON (l.filename)",
                # Fulltext search indexes (Neo4j 4.x+ syntax).
                "CREATE FULLTEXT INDEX record_content_index IF NOT EXISTS FOR (r:Record) ON EACH [r.full_path, r.content]",
                "CREATE FULLTEXT INDEX node_content_index IF NOT EXISTS FOR (n:DecisionNode) ON EACH [n.content, n.full_path]",
            ]

            for index_query in indexes:
                try:
                    session.run(index_query)
                except Exception as e:
                    # Index may already exist with another definition, or the
                    # server version may not support this syntax — warn only.
                    print(f"⚠️ 索引创建警告: {e}")

        print("✅ 索引创建完成")
        return True

    def scan_txt_files(self, folder_path):
        """Collect decision-tree lines from every ``*.txt`` file in a folder.

        Returns a list of dicts with ``filename``, 1-based ``line_number``
        and stripped ``content``; only lines containing ``->`` are kept.
        """
        folder_path = Path(folder_path)
        if not folder_path.exists():
            print(f"❌ 文件夹 {folder_path} 不存在")
            return []

        all_lines = []
        # Sort so the import order is deterministic across runs/platforms
        # (glob order is filesystem-dependent).
        txt_files = sorted(folder_path.glob("*.txt"))

        print(f"📁 找到 {len(txt_files)} 个txt文件")

        for file_path in txt_files:
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()

                for line_num, line in enumerate(lines, 1):
                    line = line.strip()
                    if line and '->' in line:  # keep only decision-tree lines
                        all_lines.append({
                            'filename': file_path.name,
                            'line_number': line_num,
                            'content': line
                        })

            except Exception as e:
                print(f"❌ 读取文件失败 {file_path}: {e}")

        print(f"📊 总共找到 {len(all_lines)} 行决策树数据")
        return all_lines

    def parse_decision_tree_line(self, line_data):
        """Parse one decision-tree line into structured record data.

        Returns None when the line lacks a leading product code or has fewer
        than two ``->`` segments.
        """
        content = line_data['content']

        # Product code: 1-3 uppercase letters followed by 3-4 digits.
        product_match = re.search(r'^([A-Z]{1,3}\d{3,4})', content)
        if not product_match:
            return None

        product = product_match.group(1)

        # Cargo number, e.g. "[货号:X1]"; falls back to the product code.
        cargo_match = re.search(r'\[货号:([^\]]+)\]', content)
        cargo_number = cargo_match.group(1) if cargo_match else product

        # Split the decision path on "->".
        parts = [part.strip() for part in content.split('->')]
        if len(parts) < 2:
            return None

        # Strip the cargo tag from the final (result) segment.
        last_part = parts[-1]
        last_part = re.sub(r'\s*\[货号:[^\]]+\]', '', last_part).strip()
        parts[-1] = last_part

        # Build decision nodes: last segment is the result, others conditions.
        nodes = []
        for i, part in enumerate(parts):
            if part:
                # Keyword extraction via jieba; keep tokens longer than one char.
                keywords = list(jieba.cut(part))
                keywords = [k.strip() for k in keywords if len(k.strip()) > 1]

                nodes.append({
                    'content': part,
                    'type': 'condition' if i < len(parts) - 1 else 'result',
                    'level': i,
                    'keywords': keywords
                })

        return {
            'filename': line_data['filename'],
            'line_number': line_data['line_number'],
            'original_content': content,
            'final_content': parts[-1] if parts else '',
            'product': product,
            'cargo_number': cargo_number,
            'nodes': nodes
        }

    @staticmethod
    def _node_id(product, line_number, level, content):
        """Deterministic id for a DecisionNode.

        Replaces builtin hash(), which is randomized per process
        (PYTHONHASHSEED) and therefore produced different node ids on every
        run, breaking re-imports and cross-run reproducibility.
        """
        digest = hashlib.md5(content.encode('utf-8')).hexdigest()[:16]
        return f"{product}_{line_number}_{level}_{digest}"

    def batch_import_data(self, parsed_data_list, batch_size=1000):
        """Import parsed records in batches.

        Returns True when at least one batch was committed, False otherwise
        (the original returned True even if every batch failed).
        """
        if not self.driver or not parsed_data_list:
            print("❌ 数据库连接不可用或无数据")
            return False

        print(f"🚀 开始批量导入 {len(parsed_data_list)} 条记录...")
        start_time = time.time()

        # Pre-create indexes so the MATCHes in relationship creation are fast.
        self.create_indexes()

        total_batches = (len(parsed_data_list) + batch_size - 1) // batch_size
        imported_count = 0

        for batch_idx in range(total_batches):
            batch_start_time = time.time()
            start_idx = batch_idx * batch_size
            end_idx = min(start_idx + batch_size, len(parsed_data_list))
            batch_data = parsed_data_list[start_idx:end_idx]

            try:
                # One explicit transaction per batch: all-or-nothing per batch.
                with self.driver.session() as session:
                    with session.begin_transaction() as tx:
                        self._super_batch_import(tx, batch_data)
                        tx.commit()

                imported_count += len(batch_data)
                batch_time = time.time() - batch_start_time

                print(f"📊 批次 {batch_idx + 1}/{total_batches} 完成 "
                      f"({imported_count}/{len(parsed_data_list)} 条记录, "
                      f"耗时: {batch_time:.2f}秒)")

            except Exception as e:
                # A failed batch is skipped; the rest keep importing.
                print(f"❌ 批次 {batch_idx + 1} 导入失败: {e}")
                continue

        total_time = time.time() - start_time
        print(f"✅ 导入完成！总耗时: {total_time:.2f}秒")
        # Guard against a zero elapsed time on very small imports.
        rate = imported_count / total_time if total_time > 0 else float(imported_count)
        print(f"📈 平均速度: {rate:.1f} 条/秒")

        return imported_count > 0

    def _super_batch_import(self, tx, batch_data):
        """Load one batch inside a single transaction: nodes, then relations."""
        # Collect de-duplicated file/product keys and per-line node payloads.
        files_set = set()
        products_set = set()
        records_data = []
        line_records_data = []
        decision_nodes_data = []

        for data in batch_data:
            files_set.add(data['filename'])
            products_set.add((data['product'], data['cargo_number']))

            # Record payload — id is unique per (file, line).
            record_id = f"{data['filename']}_{data['line_number']}"
            records_data.append({
                'id': record_id,
                'content': data['final_content'],
                'full_path': data['original_content'],
                'filename': data['filename'],
                'line_number': data['line_number'],
                'product_code': data['product'],
                'cargo_number': data['cargo_number']
            })

            # LineRecord payload — raw line text, same id as the Record.
            line_records_data.append({
                'id': record_id,
                'filename': data['filename'],
                'line_number': data['line_number'],
                'content': data['original_content']
            })

            # DecisionNode payloads — deterministic ids via _node_id.
            for node in data['nodes']:
                node_id = self._node_id(data['product'], data['line_number'],
                                        node['level'], node['content'])
                decision_nodes_data.append({
                    'id': node_id,
                    'content': node['content'],
                    'type': node['type'],
                    'level': node['level'],
                    'product_code': data['product'],
                    'keywords': node['keywords'],
                    'filename': data['filename'],
                    'line_number': data['line_number'],
                    'full_path': data['original_content']
                })

        # Nodes first so the relationship MATCHes below can find them.
        self._batch_create_all_nodes(tx, files_set, products_set, records_data,
                                   line_records_data, decision_nodes_data)

        self._batch_create_all_relationships(tx, batch_data)

    def _batch_create_all_nodes(self, tx, files_set, products_set, records_data,
                               line_records_data, decision_nodes_data):
        """Create all node types for one batch with UNWIND statements."""
        # File nodes — MERGE since the same file spans many batches.
        if files_set:
            files_data = [{'name': f} for f in files_set]
            tx.run("""
                UNWIND $files AS file
                MERGE (f:File {name: file.name})
                SET f.timestamp = datetime()
            """, files=files_data)

        # Product nodes — MERGE on code for the same reason.
        if products_set:
            products_data = [{'code': p[0], 'cargo_number': p[1]} for p in products_set]
            tx.run("""
                UNWIND $products AS product
                MERGE (p:Product {code: product.code})
                SET p.cargo_number = product.cargo_number,
                    p.timestamp = datetime()
            """, products=products_data)

        # Record nodes — CREATE (ids are unique within one import run).
        if records_data:
            tx.run("""
                UNWIND $records AS record
                CREATE (r:Record {
                    id: record.id,
                    content: record.content,
                    full_path: record.full_path,
                    filename: record.filename,
                    line_number: record.line_number,
                    product_code: record.product_code,
                    cargo_number: record.cargo_number,
                    timestamp: datetime()
                })
            """, records=records_data)

        # LineRecord nodes.
        if line_records_data:
            tx.run("""
                UNWIND $line_records AS line_record
                CREATE (l:LineRecord {
                    id: line_record.id,
                    filename: line_record.filename,
                    line_number: line_record.line_number,
                    content: line_record.content,
                    timestamp: datetime()
                })
            """, line_records=line_records_data)

        # DecisionNode nodes.
        if decision_nodes_data:
            tx.run("""
                UNWIND $nodes AS node
                CREATE (n:DecisionNode {
                    id: node.id,
                    content: node.content,
                    type: node.type,
                    level: node.level,
                    product_code: node.product_code,
                    keywords: node.keywords,
                    filename: node.filename,
                    line_number: node.line_number,
                    full_path: node.full_path,
                    timestamp: datetime()
                })
            """, nodes=decision_nodes_data)

    def _batch_create_all_relationships(self, tx, batch_data):
        """Create File->Record, File->LineRecord and Record->DecisionNode edges."""
        # Assemble relationship payloads first, then one UNWIND per edge type.
        file_record_relations = []
        file_line_relations = []
        record_node_relations = []

        for data in batch_data:
            record_id = f"{data['filename']}_{data['line_number']}"

            # File -> Record
            file_record_relations.append({
                'filename': data['filename'],
                'record_id': record_id
            })

            # File -> LineRecord (same id as the Record)
            file_line_relations.append({
                'filename': data['filename'],
                'line_id': record_id
            })

            # Record -> DecisionNode — ids MUST match _super_batch_import,
            # which is why both paths share the _node_id helper.
            node_ids = [self._node_id(data['product'], data['line_number'],
                                      node['level'], node['content'])
                       for node in data['nodes']]

            for node_id in node_ids:
                record_node_relations.append({
                    'record_id': record_id,
                    'node_id': node_id
                })

        if file_record_relations:
            tx.run("""
                UNWIND $relations AS rel
                MATCH (f:File {name: rel.filename})
                MATCH (r:Record {id: rel.record_id})
                CREATE (f)-[:CONTAINS]->(r)
            """, relations=file_record_relations)

        if file_line_relations:
            tx.run("""
                UNWIND $relations AS rel
                MATCH (f:File {name: rel.filename})
                MATCH (l:LineRecord {id: rel.line_id})
                CREATE (f)-[:CONTAINS]->(l)
            """, relations=file_line_relations)

        if record_node_relations:
            tx.run("""
                UNWIND $relations AS rel
                MATCH (r:Record {id: rel.record_id})
                MATCH (n:DecisionNode {id: rel.node_id})
                CREATE (r)-[:INCLUDES]->(n)
            """, relations=record_node_relations)


def main():
    """Entry point: scan, parse and bulk-import decision-tree txt files.

    Pass ``--auto-clear`` as the first CLI argument to wipe the database
    without the interactive prompt.
    """
    print("🚀 高性能Neo4j知识库导入工具")
    print("=" * 50)

    # Configuration.
    folder_path = "/Users/daijunxiong/Downloads/knowledge_txt"
    batch_size = 1000  # tune for memory vs. round-trip trade-off
    auto_clear = len(sys.argv) > 1 and sys.argv[1] == "--auto-clear"

    print(f"📁 目录路径: {folder_path}")
    print(f"🎯 连接数据库: bolt://192.168.1.169:7687")
    print(f"📦 批量大小: {batch_size}")

    importer = FastNeo4jImporter()

    if not importer.driver:
        print("❌ 无法连接到Neo4j数据库")
        return

    try:
        # Wipe automatically (--auto-clear) or ask interactively.
        if auto_clear:
            print("🗑️  自动清空数据库...")
            importer.clear_database()
        else:
            try:
                choice = input("是否清空现有数据库？(y/N): ").strip().lower()
            except EOFError:
                # Non-interactive run (no stdin): default to "no",
                # matching the prompt's (y/N) default.
                choice = 'n'
            if choice == 'y':
                importer.clear_database()

        # Scan txt files.
        print(f"\n📁 扫描文件夹: {folder_path}")
        all_lines = importer.scan_txt_files(folder_path)

        if not all_lines:
            print("❌ 没有找到决策树数据")
            return

        # Parse the decision-tree lines.
        print("\n🔄 解析决策树数据...")
        parsed_data = []
        parse_start_time = time.time()

        for line_data in all_lines:
            parsed = importer.parse_decision_tree_line(line_data)
            if parsed:
                parsed_data.append(parsed)

        parse_time = time.time() - parse_start_time
        print(f"✅ 解析完成，共 {len(parsed_data)} 条有效记录 (耗时: {parse_time:.2f}秒)")

        if not parsed_data:
            print("❌ 没有有效的决策树数据")
            return

        # Bulk import into Neo4j.
        print(f"\n💾 开始高性能批量导入...")
        success = importer.batch_import_data(parsed_data, batch_size)

        if success:
            print("\n🎉 导入完成！")
            print(f"📊 统计信息:")
            print(f"   - 处理文件数: {len(set(d['filename'] for d in parsed_data))}")
            print(f"   - 导入记录数: {len(parsed_data)}")
            print(f"   - 产品数: {len(set(d['product'] for d in parsed_data))}")
            print(f"   - 决策节点数: {sum(len(d['nodes']) for d in parsed_data)}")

            print("\n💡 现在您可以使用以下命令进行查询:")
            print("   python test.bak 'PK511无产物'")
            print("   python test.bak")

    finally:
        # Always release the driver, even on early return or error.
        importer.close()


# Run the importer only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
