#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Neo4j知识库导入工具 - 适用于192.168.1.169数据库
功能：扫描指定路径下的txt文件，将决策树数据导入Neo4j图数据库
特点：Record的content是最后一个->右边的内容
"""

import hashlib
import os
import re
import sys
from datetime import datetime
from pathlib import Path

import jieba
from neo4j import GraphDatabase


class Neo4jKnowledgeImporter:
    def __init__(self, uri="bolt://192.168.1.169:7687", user="neo4j", password="Pa@ssw0rd"):
        """Open a driver to the target Neo4j instance.

        On any failure the error is reported and ``self.driver`` stays
        ``None`` so callers can detect the dead connection before use.
        """
        self.driver = None
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
        except Exception as e:
            print(f"❌ 连接Neo4j失败: {e}")
        else:
            print(f"✅ 成功连接到Neo4j数据库: {uri}")
    
    def close(self):
        """Release the underlying Neo4j driver, if one was ever created."""
        driver = self.driver
        if driver:
            driver.close()
    
    def clear_database(self):
        """Delete every node and relationship in the database.

        Returns True on success, False when no live driver is available.
        """
        if not self.driver:
            return False

        with self.driver.session() as session:
            session.run("MATCH (n) DETACH DELETE n")
        print("✅ 数据库已清空")
        return True
    
    def scan_txt_files(self, folder_path):
        """扫描文件夹下的所有txt文件"""
        folder_path = Path(folder_path)
        if not folder_path.exists():
            print(f"❌ 文件夹 {folder_path} 不存在")
            return []
        
        txt_files = list(folder_path.glob("*.txt"))
        print(f"📁 找到 {len(txt_files)} 个txt文件")
        
        all_lines = []
        for txt_file in txt_files:
            try:
                # 尝试UTF-8编码
                with open(txt_file, 'r', encoding='utf-8') as file:
                    lines = file.readlines()
                
                file_lines = []
                for line_num, line in enumerate(lines, 1):
                    line_content = line.strip()
                    if line_content and ' -> ' in line_content:  # 只处理决策树格式的行
                        file_lines.append({
                            'filename': txt_file.name,
                            'filepath': str(txt_file),
                            'line_number': line_num,
                            'content': line_content
                        })
                
                all_lines.extend(file_lines)
                print(f"✅ {txt_file.name}: {len(file_lines)} 行决策树数据")
                
            except UnicodeDecodeError:
                # 尝试GBK编码
                try:
                    with open(txt_file, 'r', encoding='gbk') as file:
                        lines = file.readlines()
                    
                    file_lines = []
                    for line_num, line in enumerate(lines, 1):
                        line_content = line.strip()
                        if line_content and ' -> ' in line_content:
                            file_lines.append({
                                'filename': txt_file.name,
                                'filepath': str(txt_file),
                                'line_number': line_num,
                                'content': line_content
                            })
                    
                    all_lines.extend(file_lines)
                    print(f"✅ {txt_file.name} (GBK): {len(file_lines)} 行决策树数据")
                    
                except Exception as e:
                    print(f"❌ 读取文件失败: {txt_file.name} - {e}")
            
            except Exception as e:
                print(f"❌ 读取文件失败: {txt_file.name} - {e}")
        
        print(f"📊 总计读取 {len(all_lines)} 行决策树数据")
        return all_lines
    
    def parse_decision_tree_line(self, line_data):
        """Parse one arrow-separated decision-tree line into a structured dict.

        Splits on ``' -> '``, extracts the product code and the optional
        ``[货号:...]`` tag, classifies every path segment, and returns a dict
        whose ``final_content`` is the text to the right of the last arrow.
        Returns None for lines without at least one arrow.
        """
        content = line_data['content']
        parts = content.split(' -> ')
        if len(parts) < 2:
            return None

        # Product code like "PK511"; otherwise fall back to the first segment.
        product_match = re.search(r'([A-Z]{1,3}\d{3,4})', parts[0])
        product = product_match.group(1) if product_match else parts[0].strip()

        # Optional cargo-number tag anywhere on the line; defaults to product.
        cargo_match = re.search(r'\[货号:([^\]]+)\]', content)
        cargo_number = cargo_match.group(1) if cargo_match else product

        # Remove the cargo tag from the last segment to get the final answer.
        final_part = parts[-1]
        if cargo_match:
            final_part = final_part.replace(cargo_match.group(0), '').strip()

        # Build the node path; the product segment (parts[0]) is skipped and
        # levels keep counting even when a cleaned segment turns out empty.
        nodes = []
        for level, raw_part in enumerate(parts[1:], 1):
            cleaned = re.sub(r'\[货号:[^\]]+\]', '', raw_part).strip()
            if not cleaned:
                continue
            nodes.append({
                'level': level,
                'content': cleaned,
                'type': self._classify_node_type(cleaned, level),
                'keywords': ' '.join(jieba.cut(cleaned)),
            })

        return {
            'product': product,
            'cargo_number': cargo_number,
            'nodes': nodes,
            'final_content': final_part,  # 最后一个->右边的内容
            'filename': line_data['filename'],
            'line_number': line_data['line_number'],
            'original_content': content,
        }
    
    def _classify_node_type(self, content, level):
        """Classify a decision-tree segment by its text and path depth.

        Priority order matters: question markers win over solution keywords,
        which win over yes/no answers; only then does depth decide between
        'problem' (level 1) and 'category' (level 2), with conditional
        phrasing and a generic 'step' as the final fallbacks.
        """
        if any(mark in content for mark in ('？', '?')):
            return 'question'
        if any(word in content for word in ('建议', '推荐', '应该')):
            return 'solution'
        if content in ('是', '否', 'Y', 'N', 'yes', 'no'):
            return 'answer'
        if level == 1:
            return 'problem'
        if level == 2:
            return 'category'
        if '则' in content or '如果' in content:
            return 'condition'
        return 'step'
    
    def import_to_neo4j(self, parsed_data_list):
        """Import parsed decision trees in 500-record transactions.

        Returns False when no driver is available; otherwise True, even if
        individual batches failed (each failure is reported on its own).
        """
        if not self.driver:
            print("❌ 数据库连接不可用")
            return False

        print("🚀 开始批量导入...")

        # Indexes first so the MERGE statements below stay fast.
        self._create_indexes_before_import()

        batch_size = 500
        total_batches = (len(parsed_data_list) + batch_size - 1) // batch_size
        imported_count = 0

        for batch_idx in range(total_batches):
            start = batch_idx * batch_size
            batch_data = parsed_data_list[start:start + batch_size]

            try:
                with self.driver.session() as session:
                    with session.begin_transaction() as tx:
                        self._batch_import_decision_trees(tx, batch_data)
                        tx.commit()
            except Exception as e:
                print(f"❌ 批次 {batch_idx + 1} 导入失败: {e}")
                continue

            imported_count += len(batch_data)
            print(f"📊 已导入批次 {batch_idx + 1}/{total_batches} ({imported_count}/{len(parsed_data_list)} 条记录)")

        print(f"✅ 成功导入 {imported_count} 条决策树")
        return True

    def _create_indexes_before_import(self):
        """Create idempotent lookup indexes before the batch import runs."""
        print("🔧 创建性能优化索引...")

        index_statements = (
            "CREATE INDEX IF NOT EXISTS FOR (f:File) ON (f.name)",
            "CREATE INDEX IF NOT EXISTS FOR (p:Product) ON (p.code)",
            "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.id)",
            "CREATE INDEX IF NOT EXISTS FOR (n:DecisionNode) ON (n.id)",
            "CREATE INDEX IF NOT EXISTS FOR (l:LineRecord) ON (l.id)",
            "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.filename)",
            "CREATE INDEX IF NOT EXISTS FOR (n:DecisionNode) ON (n.filename)",
            "CREATE INDEX IF NOT EXISTS FOR (r:Record) ON (r.product_code)",
        )

        with self.driver.session() as session:
            for statement in index_statements:
                try:
                    session.run(statement)
                except Exception as e:
                    # Index creation is best-effort; warn and keep going.
                    print(f"⚠️ 索引创建警告: {e}")

        print("✅ 索引创建完成")

    def _batch_import_decision_trees(self, tx, batch_data):
        """Collect payloads for one batch and import them via UNWIND queries.

        Builds the File/Product/Record/LineRecord/DecisionNode parameter
        lists plus the relationship lists for the whole batch, then delegates
        to the ``_batch_create_*`` helpers so each node type is created with
        a single Cypher statement instead of one query per record.
        """
        files_data = []
        products_data = []
        records_data = []
        line_records_data = []
        decision_nodes_data = []
        file_relations = []
        record_relations = []

        # One timestamp for the whole batch; cheaper and more consistent
        # than calling datetime.now() per record.
        now = datetime.now().isoformat()

        for data in batch_data:
            files_data.append({
                'name': data['filename'],
                'timestamp': now,
            })

            products_data.append({
                'code': data['product'],
                'cargo_number': data['cargo_number'],
                'timestamp': now,
            })

            # Record 和 LineRecord 共用同一个自然键 (文件名_行号)。
            record_id = f"{data['filename']}_{data['line_number']}"
            records_data.append({
                'id': record_id,
                'content': data['final_content'],
                'full_path': data['original_content'],
                'filename': data['filename'],
                'line_number': data['line_number'],
                'product_code': data['product'],
                'cargo_number': data['cargo_number'],
                'timestamp': now,
            })

            line_records_data.append({
                'id': record_id,
                'filename': data['filename'],
                'line_number': data['line_number'],
                'content': data['original_content'],
                'timestamp': now,
            })

            node_ids = []
            for node in data['nodes']:
                # Stable content digest instead of built-in hash(): str hashes
                # are salted per process (PYTHONHASHSEED), so hash()-based
                # MERGE ids would change on every run and duplicate nodes
                # when the same data is re-imported.
                digest = hashlib.md5(node['content'].encode('utf-8')).hexdigest()
                node_id = f"{data['product']}_{data['line_number']}_{node['level']}_{digest}"
                node_ids.append(node_id)
                decision_nodes_data.append({
                    'id': node_id,
                    'content': node['content'],
                    'type': node['type'],
                    'level': node['level'],
                    'product_code': data['product'],
                    'keywords': node['keywords'],
                    'filename': data['filename'],
                    'line_number': data['line_number'],
                    'full_path': data['original_content'],
                })

            file_relations.extend([
                {'filename': data['filename'], 'record_id': record_id},
                {'filename': data['filename'], 'line_id': record_id},
            ])
            record_relations.append({
                'record_id': record_id,
                'nodes': node_ids,
            })

        # 批量创建节点
        self._batch_create_files(tx, files_data)
        self._batch_create_products(tx, products_data)
        self._batch_create_records(tx, records_data)
        self._batch_create_line_records(tx, line_records_data)
        self._batch_create_decision_nodes(tx, decision_nodes_data)

        # 批量创建关系
        self._batch_create_relationships(tx, file_relations, record_relations)

    def _batch_create_files(self, tx, files_data):
        """MERGE one File node per distinct filename in *files_data*."""
        if not files_data:
            return

        # Last entry per name wins, mirroring repeated SET semantics.
        deduped = list({entry['name']: entry for entry in files_data}.values())

        tx.run("""
            UNWIND $files AS file
            MERGE (f:File {name: file.name})
            SET f.timestamp = file.timestamp
        """, files=deduped)

    def _batch_create_products(self, tx, products_data):
        """MERGE one Product node per distinct product code."""
        if not products_data:
            return

        # Collapse duplicates by code; the last occurrence wins.
        deduped = list({entry['code']: entry for entry in products_data}.values())

        tx.run("""
            UNWIND $products AS product
            MERGE (p:Product {code: product.code})
            SET p.cargo_number = product.cargo_number,
                p.timestamp = product.timestamp
        """, products=deduped)

    def _batch_create_records(self, tx, records_data):
        """MERGE Record nodes keyed by id and refresh all their properties."""
        if not records_data:
            return

        query = """
            UNWIND $records AS record
            MERGE (r:Record {id: record.id})
            SET r.content = record.content,
                r.full_path = record.full_path,
                r.filename = record.filename,
                r.line_number = record.line_number,
                r.product_code = record.product_code,
                r.cargo_number = record.cargo_number,
                r.timestamp = record.timestamp
        """
        tx.run(query, records=records_data)

    def _batch_create_line_records(self, tx, line_records_data):
        """MERGE LineRecord nodes (raw line text) keyed by id."""
        if not line_records_data:
            return

        query = """
            UNWIND $line_records AS line_record
            MERGE (l:LineRecord {id: line_record.id})
            SET l.filename = line_record.filename,
                l.line_number = line_record.line_number,
                l.content = line_record.content,
                l.timestamp = line_record.timestamp
        """
        tx.run(query, line_records=line_records_data)

    def _batch_create_decision_nodes(self, tx, decision_nodes_data):
        """MERGE DecisionNode entries keyed by id with their full payload."""
        if not decision_nodes_data:
            return

        query = """
            UNWIND $nodes AS node
            MERGE (n:DecisionNode {id: node.id})
            SET n.content = node.content,
                n.type = node.type,
                n.level = node.level,
                n.product_code = node.product_code,
                n.keywords = node.keywords,
                n.filename = node.filename,
                n.line_number = node.line_number,
                n.full_path = node.full_path
        """
        tx.run(query, nodes=decision_nodes_data)

    def _batch_create_relationships(self, tx, file_relations, record_relations):
        """Create File-[:CONTAINS]->Record/LineRecord and Record-[:INCLUDES]->DecisionNode edges."""
        # File -> Record containment.
        record_file_relations = [rel for rel in file_relations if 'record_id' in rel]
        if record_file_relations:
            tx.run("""
                UNWIND $relations AS rel
                MATCH (f:File {name: rel.filename})
                MATCH (r:Record {id: rel.record_id})
                MERGE (f)-[:CONTAINS]->(r)
            """, relations=record_file_relations)

        # File -> LineRecord containment.
        line_file_relations = [rel for rel in file_relations if 'line_id' in rel]
        if line_file_relations:
            tx.run("""
                UNWIND $relations AS rel
                MATCH (f:File {name: rel.filename})
                MATCH (l:LineRecord {id: rel.line_id})
                MERGE (f)-[:CONTAINS]->(l)
            """, relations=line_file_relations)

        # Record -> DecisionNode membership, one statement per record.
        for relation in record_relations:
            if not relation['nodes']:
                continue
            tx.run("""
                MATCH (r:Record {id: $record_id})
                UNWIND $node_ids AS node_id
                MATCH (n:DecisionNode {id: node_id})
                MERGE (r)-[:INCLUDES]->(n)
            """, record_id=relation['record_id'], node_ids=relation['nodes'])
    
    def _import_decision_tree(self, session, data):
        """Import one parsed decision-tree line (non-batched variant).

        Creates the Product, File, Record and DecisionNode nodes for a single
        parsed line, then wires LEADS_TO (node chain), HAS_DECISION_TREE
        (product -> first node), CONTAINS (file -> record) and INCLUDES
        (record -> nodes) relationships.
        """

        def node_key(node):
            # Deterministic digest instead of built-in hash(): str hashes are
            # salted per process (PYTHONHASHSEED), so hash()-based MERGE ids
            # would differ on every run and duplicate nodes on re-import.
            digest = hashlib.md5(node['content'].encode('utf-8')).hexdigest()
            return f"{data['product']}_{data['line_number']}_{node['level']}_{digest}"

        # 1. 创建产品节点
        session.run("""
            MERGE (p:Product {code: $product})
            SET p.cargo_number = $cargo_number,
                p.name = $product
        """, product=data['product'], cargo_number=data['cargo_number'])

        # 2. 创建文件节点
        session.run("""
            MERGE (f:File {name: $filename})
            SET f.path = $filepath
        """, filename=data['filename'], filepath=data.get('filepath', ''))

        # 3. 创建Record节点 - content是最后一个->右边的内容
        record_id = f"{data['filename']}_{data['line_number']}"
        session.run("""
            MERGE (r:Record {id: $record_id})
            SET r.content = $content,
                r.full_path = $full_path,
                r.filename = $filename,
                r.line_number = $line_number,
                r.product_code = $product_code,
                r.cargo_number = $cargo_number,
                r.timestamp = $timestamp
        """,
        record_id=record_id,
        content=data['final_content'],  # 最后一个->右边的内容
        full_path=data['original_content'],
        filename=data['filename'],
        line_number=data['line_number'],
        product_code=data['product'],
        cargo_number=data['cargo_number'],
        timestamp=datetime.now().isoformat()
        )

        # 4. 创建决策节点
        for node in data['nodes']:
            session.run("""
                MERGE (n:DecisionNode {id: $node_id})
                SET n.content = $content,
                    n.type = $type,
                    n.level = $level,
                    n.product_code = $product_code,
                    n.keywords = $keywords,
                    n.filename = $filename,
                    n.line_number = $line_number
            """,
            node_id=node_key(node),
            content=node['content'],
            type=node['type'],
            level=node['level'],
            product_code=data['product'],
            keywords=node['keywords'],
            filename=data['filename'],
            line_number=data['line_number']
            )

        # 5. 建立节点之间的LEADS_TO关系 (consecutive pairs along the path)
        for current_node, next_node in zip(data['nodes'], data['nodes'][1:]):
            session.run("""
                MATCH (current:DecisionNode {id: $current_id})
                MATCH (next:DecisionNode {id: $next_id})
                MERGE (current)-[:LEADS_TO {
                    filename: $filename,
                    line_number: $line_number
                }]->(next)
            """,
            current_id=node_key(current_node),
            next_id=node_key(next_node),
            filename=data['filename'],
            line_number=data['line_number']
            )

        # 6. 建立产品到第一个决策节点的关系
        if data['nodes']:
            session.run("""
                MATCH (p:Product {code: $product})
                MATCH (n:DecisionNode {id: $node_id})
                MERGE (p)-[:HAS_DECISION_TREE {
                    filename: $filename,
                    line_number: $line_number
                }]->(n)
            """,
            product=data['product'],
            node_id=node_key(data['nodes'][0]),
            filename=data['filename'],
            line_number=data['line_number']
            )

        # 7. 建立文件到Record的关系
        session.run("""
            MATCH (f:File {name: $filename})
            MATCH (r:Record {id: $record_id})
            MERGE (f)-[:CONTAINS]->(r)
        """, filename=data['filename'], record_id=record_id)

        # 8. 建立Record到决策节点的关系
        for node in data['nodes']:
            session.run("""
                MATCH (r:Record {id: $record_id})
                MATCH (n:DecisionNode {id: $node_id})
                MERGE (r)-[:INCLUDES]->(n)
            """, record_id=record_id, node_id=node_key(node))
    
    def create_indexes(self):
        """Create named lookup indexes after import; no-op without a driver."""
        if not self.driver:
            return

        index_statements = (
            "CREATE INDEX product_code_idx IF NOT EXISTS FOR (p:Product) ON (p.code)",
            "CREATE INDEX decision_node_id_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.id)",
            "CREATE INDEX decision_node_content_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.content)",
            "CREATE INDEX decision_node_type_idx IF NOT EXISTS FOR (n:DecisionNode) ON (n.type)",
            "CREATE INDEX record_id_idx IF NOT EXISTS FOR (r:Record) ON (r.id)",
            "CREATE INDEX record_content_idx IF NOT EXISTS FOR (r:Record) ON (r.content)",
            "CREATE INDEX file_name_idx IF NOT EXISTS FOR (f:File) ON (f.name)",
        )

        with self.driver.session() as session:
            for statement in index_statements:
                try:
                    session.run(statement)
                except Exception as e:
                    print(f"⚠️  索引创建失败: {e}")
                else:
                    print("✅ 索引创建成功")
    
    def get_statistics(self):
        """Print node and relationship counts for the imported graph."""
        if not self.driver:
            return

        # Label/relationship count queries, keyed by stat name (insertion
        # order preserved, so queries run in the same order as before).
        count_queries = {
            'decision_nodes': "MATCH (n:DecisionNode) RETURN count(n) as count",
            'records': "MATCH (r:Record) RETURN count(r) as count",
            'products': "MATCH (p:Product) RETURN count(p) as count",
            'files': "MATCH (f:File) RETURN count(f) as count",
            'decision_relations': "MATCH ()-[r:LEADS_TO]->() RETURN count(r) as count",
        }

        with self.driver.session() as session:
            stats = {
                name: session.run(query).single()['count']
                for name, query in count_queries.items()
            }

        print(f"\n📊 导入统计信息:")
        print(f"   - 决策节点数: {stats['decision_nodes']}")
        print(f"   - 记录数: {stats['records']}")
        print(f"   - 产品数: {stats['products']}")
        print(f"   - 文件数: {stats['files']}")
        print(f"   - 决策关系数: {stats['decision_relations']}")


def main():
    """CLI entry point: scan txt files, parse decision trees, import to Neo4j."""
    print("🚀 Neo4j知识库导入工具")
    print("=" * 50)

    # Fixed source directory; "--auto-clear" skips the interactive prompt.
    folder_path = "/Users/daijunxiong/Downloads/knowledge_txt"
    auto_clear = len(sys.argv) > 1 and sys.argv[1] == "--auto-clear"

    print(f"📁 目录路径: {folder_path}")
    print(f"🎯 连接数据库: bolt://192.168.1.169:7687")

    importer = Neo4jKnowledgeImporter()
    if not importer.driver:
        print("❌ 无法连接到Neo4j数据库")
        return

    try:
        # Clear the database automatically or after confirmation.
        if auto_clear:
            print("🗑️  自动清空数据库...")
            importer.clear_database()
        elif input("是否清空现有数据库？(y/N): ").strip().lower() == 'y':
            importer.clear_database()

        print(f"\n📁 扫描文件夹: {folder_path}")
        all_lines = importer.scan_txt_files(folder_path)
        if not all_lines:
            print("❌ 没有找到决策树数据")
            return

        print("\n🔄 解析决策树数据...")
        candidates = (importer.parse_decision_tree_line(line_data) for line_data in all_lines)
        parsed_data = [parsed for parsed in candidates if parsed]
        print(f"✅ 解析完成，共 {len(parsed_data)} 条决策树")

        print("\n💾 导入决策树到Neo4j...")
        if importer.import_to_neo4j(parsed_data):
            print("\n🔧 创建索引...")
            importer.create_indexes()

            importer.get_statistics()

            print("\n🎉 导入完成！")
            print("\n💡 现在您可以使用Cypher查询决策树:")
            print("   MATCH (p:Product {code: 'PK511'})-[:HAS_DECISION_TREE*1..10]->(n)")
            print("   RETURN p, n")
            print("\n   MATCH (r:Record) WHERE r.content CONTAINS '建议'")
            print("   RETURN r.product_code, r.content")
    finally:
        importer.close()


# Script entry point: runs the full scan -> parse -> import pipeline.
if __name__ == "__main__":
    main()
