#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
树形结构Neo4j导入工具
功能：将决策链路构建为真正的树形结构，避免数据冗余
特点：
1. 树节点：决策路径中的每个步骤
2. 叶子节点：最终的解决方案或结果
3. 无冗余：每个节点只存储自己的内容
4. 高效查询：支持从根到叶的路径查询
"""

import hashlib
import re
import sys
import time
from collections import defaultdict
from pathlib import Path

import jieba
import numpy as np
from neo4j import GraphDatabase

# 向量化相关导入
try:
    import requests
    import json
    OLLAMA_AVAILABLE = True
except ImportError:
    OLLAMA_AVAILABLE = False
    print("⚠️  requests未安装，将跳过向量化功能")
    print("   安装命令: pip install requests")


class TreeNeo4jImporter:
    """Import decision-chain txt files into Neo4j as a genuine tree.

    TreeNode nodes hold the intermediate decision steps, LeafNode nodes hold
    the final solution/result, and LEADS_TO / RESULTS_IN relationships link
    them so root-to-leaf paths can be queried without storing redundant
    content. Node texts are optionally vectorized via an Ollama embedding
    model (or a local hash-based fallback vector).
    """

    def __init__(self, uri="bolt://localhost:7687", user="neo4j", password="changsha2025", enable_vectorization=True):
        """Open the Neo4j driver and probe the Ollama embedding service.

        On Neo4j connection failure ``self.driver`` is left as None (callers
        must check it before importing); on Ollama failure vectorization is
        disabled and the import proceeds without vectors.
        """
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))
            print(f"✅ 成功连接到Neo4j数据库: {uri}")
        except Exception as e:
            print(f"❌ 连接Neo4j失败: {e}")
            self.driver = None

        # Ollama vectorization is only attempted when requests is installed.
        self.enable_vectorization = enable_vectorization and OLLAMA_AVAILABLE
        self.ollama_url = "http://192.168.31.63:11434"
        # Preferred embedding model; _test_ollama_connection may substitute
        # another installed model when this one is unavailable.
        self.embedding_model = "mxbai-embed-large"

        if self.enable_vectorization:
            try:
                print("🔄 正在测试Ollama连接...")
                self._test_ollama_connection()
                print("✅ Ollama向量化服务连接成功")
            except Exception as e:
                print(f"⚠️  Ollama连接失败: {e}")
                print("   请确保Ollama服务正在运行: ollama serve")
                print(f"   请确保已安装嵌入模型: ollama pull {self.embedding_model}")
                self.enable_vectorization = False

    def close(self):
        """Close the Neo4j driver if it was successfully opened."""
        if self.driver:
            self.driver.close()

    @staticmethod
    def _stable_hash(text):
        """Return a deterministic 48-bit integer hash of *text*.

        The built-in hash() is salted per process (PYTHONHASHSEED), which
        would make node IDs differ from run to run; md5 keeps them stable
        so repeated imports produce the same node IDs.
        """
        return int(hashlib.md5(text.encode('utf-8')).hexdigest()[:12], 16)

    def _test_ollama_connection(self):
        """Verify Ollama is reachable and select a usable model.

        Preference order: the configured embedding model, then any installed
        model whose name contains 'embed', then the first installed model
        (flagged via ``use_generate_api`` so the local hash-based vector is
        used instead of the embeddings API). Raises when the service is
        unreachable or no model is installed.
        """
        try:
            response = requests.get(f"{self.ollama_url}/api/tags", timeout=5)
            if response.status_code == 200:
                models = response.json().get('models', [])
                model_names = [model['name'] for model in models]

                if self.embedding_model not in model_names:
                    # Fall back to any other dedicated embedding model.
                    available_embed_models = [name for name in model_names if 'embed' in name.lower()]
                    if available_embed_models:
                        self.embedding_model = available_embed_models[0]
                        print(f"🔄 使用可用的嵌入模型: {self.embedding_model}")
                    else:
                        # No embedding model at all: use a generic model and
                        # the local hash-based vectorizer.
                        if model_names:
                            self.embedding_model = model_names[0]
                            self.use_generate_api = True
                            print(f"🔄 使用通用模型进行向量化: {self.embedding_model}")
                        else:
                            raise Exception("未找到任何可用模型")
                else:
                    self.use_generate_api = False  # dedicated embeddings API is available

                return True
            else:
                raise Exception(f"Ollama服务响应异常: {response.status_code}")
        except requests.exceptions.RequestException as e:
            raise Exception(f"无法连接到Ollama服务: {e}")

    def vectorize_text(self, text):
        """Return an embedding vector (list of floats) for *text*, or None.

        Uses the Ollama embeddings API when a dedicated model was found,
        otherwise the local hash-based vector. Returns None when
        vectorization is disabled or any step fails (errors are printed,
        never raised).
        """
        if not self.enable_vectorization:
            return None

        try:
            if hasattr(self, 'use_generate_api') and self.use_generate_api:
                # No real embedding model installed: build a cheap local vector.
                return self._create_simple_vector(text)
            else:
                payload = {
                    "model": self.embedding_model,
                    "prompt": text
                }

                response = requests.post(
                    f"{self.ollama_url}/api/embeddings",
                    json=payload,
                    timeout=30
                )

                if response.status_code == 200:
                    result = response.json()
                    embedding = result.get('embedding')
                    if embedding:
                        return embedding
                    else:
                        print(f"⚠️  Ollama返回的响应中没有embedding字段")
                        return None
                else:
                    print(f"⚠️  Ollama API调用失败: {response.status_code}")
                    return None

        except Exception as e:
            print(f"⚠️  文本向量化失败: {e}")
            return None

    def _create_simple_vector(self, text):
        """Build a crude 384-dimensional vector from md5 hashes of tokens.

        Each of the first 100 jieba tokens bumps three hash-derived
        positions with decaying weights (1.0 / 0.5 / 0.25); the vector is
        then max-normalized. Fallback used only when no real embedding
        model is available. Returns None on failure.
        """
        try:
            words = list(jieba.cut(text))

            dim = 384
            vector = [0.0] * dim

            # Cap token count so pathological lines stay cheap.
            for word in words[:100]:
                hash_val = int(hashlib.md5(word.encode('utf-8')).hexdigest(), 16)
                pos1 = hash_val % dim
                pos2 = (hash_val // dim) % dim
                pos3 = (hash_val // (dim * dim)) % dim

                # Spread each token over three positions with decaying weight.
                vector[pos1] += 1.0
                vector[pos2] += 0.5
                vector[pos3] += 0.25

            # Max-normalize; compute the max once and guard the all-zero case.
            max_val = max(vector)
            if max_val <= 0:
                max_val = 1.0
            vector = [v / max_val for v in vector]

            return vector

        except Exception as e:
            print(f"⚠️  简单向量化失败: {e}")
            return None

    def clear_database(self):
        """Delete every node and relationship. Returns False without a driver."""
        if not self.driver:
            return False

        print("🗑️  清空数据库...")
        with self.driver.session() as session:
            session.run("MATCH (n) DETACH DELETE n")
            print("✅ 数据库已清空")
            return True

    def create_tree_indexes(self):
        """Create the indexes that speed up tree queries (idempotent)."""
        print("🔧 创建树形结构索引...")

        if not self.driver:
            return False

        with self.driver.session() as session:
            indexes = [
                # Core TreeNode indexes
                "CREATE INDEX tree_node_id_idx IF NOT EXISTS FOR (n:TreeNode) ON (n.id)",
                "CREATE INDEX tree_node_content_idx IF NOT EXISTS FOR (n:TreeNode) ON (n.content)",
                "CREATE INDEX tree_node_product_idx IF NOT EXISTS FOR (n:TreeNode) ON (n.product_code)",
                "CREATE INDEX tree_node_level_idx IF NOT EXISTS FOR (n:TreeNode) ON (n.level)",

                # LeafNode indexes
                "CREATE INDEX leaf_node_id_idx IF NOT EXISTS FOR (l:LeafNode) ON (l.id)",
                "CREATE INDEX leaf_node_content_idx IF NOT EXISTS FOR (l:LeafNode) ON (l.content)",
                "CREATE INDEX leaf_node_product_idx IF NOT EXISTS FOR (l:LeafNode) ON (l.product_code)",

                # Product and File indexes
                "CREATE INDEX product_code_idx IF NOT EXISTS FOR (p:Product) ON (p.code)",
                "CREATE INDEX file_name_idx IF NOT EXISTS FOR (f:File) ON (f.name)",

                # Composite index for the common (product, level) lookup
                "CREATE INDEX tree_node_product_level_idx IF NOT EXISTS FOR (n:TreeNode) ON (n.product_code, n.level)",
            ]

            for index_query in indexes:
                try:
                    session.run(index_query)
                except Exception as e:
                    # Index creation is best-effort; an existing/conflicting
                    # index must not abort the import.
                    print(f"⚠️ 索引创建警告: {e}")

        print("✅ 树形索引创建完成")
        return True

    def scan_txt_files(self, folder_path):
        """Collect decision-chain lines from every *.txt file in a folder.

        Only non-empty lines containing '->' are kept. Returns a list of
        dicts with 'filename', 'line_number' (1-based) and 'content'
        (stripped); empty list when the folder does not exist.
        """
        folder_path = Path(folder_path)
        if not folder_path.exists():
            print(f"❌ 文件夹 {folder_path} 不存在")
            return []

        all_lines = []
        txt_files = list(folder_path.glob("*.txt"))

        print(f"📁 找到 {len(txt_files)} 个txt文件")

        for file_path in txt_files:
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()

                for line_num, line in enumerate(lines, 1):
                    line = line.strip()
                    if line and '->' in line:  # keep only decision-chain lines
                        all_lines.append({
                            'filename': file_path.name,
                            'line_number': line_num,
                            'content': line
                        })

            except Exception as e:
                # One unreadable file must not abort the whole scan.
                print(f"❌ 读取文件失败 {file_path}: {e}")

        print(f"📊 总共找到 {len(all_lines)} 行决策树数据")
        return all_lines

    def parse_decision_tree_to_tree(self, line_data):
        """Parse one decision-chain line into tree nodes plus a leaf node.

        Expects a line like ``TD504 -> step -> ... -> solution [货号:X]``.
        Returns None when no product code is found or the chain has fewer
        than two '->' segments; otherwise a dict with the product code,
        cargo number, intermediate 'tree_nodes' and the final 'leaf_node'.
        """
        content = line_data['content']

        # Product code: 1-3 uppercase letters followed by 3-4 digits, at the start.
        product_match = re.search(r'^([A-Z]{1,3}\d{3,4})', content)
        if not product_match:
            return None

        product = product_match.group(1)

        # Cargo number tag; defaults to the product code when absent.
        cargo_match = re.search(r'\[货号:([^\]]+)\]', content)
        cargo_number = cargo_match.group(1) if cargo_match else product

        # Split the decision path on '->'.
        parts = [part.strip() for part in content.split('->')]
        if len(parts) < 2:
            return None

        # Strip the cargo tag from the final segment.
        last_part = parts[-1]
        last_part = re.sub(r'\s*\[货号:[^\]]+\]', '', last_part).strip()
        parts[-1] = last_part

        # All segments except the last become intermediate tree nodes.
        tree_nodes = []
        for i, part in enumerate(parts[:-1]):
            if part:
                keywords = list(jieba.cut(part))
                keywords = [k.strip() for k in keywords if len(k.strip()) > 1]

                tree_nodes.append({
                    'content': part,
                    'level': i,
                    'keywords': keywords,
                    'node_type': self._determine_node_type(part, i)
                })

        # The last segment is the leaf (solution) node.
        leaf_content = parts[-1] if parts else ''
        leaf_keywords = list(jieba.cut(leaf_content))
        leaf_keywords = [k.strip() for k in leaf_keywords if len(k.strip()) > 1]

        return {
            'filename': line_data['filename'],
            'line_number': line_data['line_number'],
            'original_content': content,
            'product': product,
            'cargo_number': cargo_number,
            'tree_nodes': tree_nodes,
            'leaf_node': {
                'content': leaf_content,
                'keywords': leaf_keywords,
                'node_type': 'solution'
            }
        }

    def _determine_node_type(self, content, level):
        """Classify a decision step by its text and depth in the chain.

        Level 0 is always 'product'; otherwise keyword heuristics pick
        'question', 'category', 'parameter' or the default 'step'.
        """
        if level == 0:
            return 'product'
        elif '?' in content or '是否' in content:
            return 'question'
        elif any(word in content for word in ['问题', '排查', '检查', '异常']):
            return 'category'
        elif any(word in content for word in ['投入', '浓度', '体系', '温度']):
            return 'parameter'
        else:
            return 'step'

    def build_tree_structure(self, parsed_data_list):
        """Deduplicate parsed lines into tree/leaf nodes and relationships.

        Tree nodes are shared across lines (keyed by product, level and
        content) so common path prefixes are stored once; leaf nodes stay
        unique per original line. Relationships are deduplicated as well so
        repeated prefixes do not create duplicate LEADS_TO edges.
        """
        print("🌳 构建树形结构...")

        unique_tree_nodes = {}   # (product, level, content) -> node dict
        unique_leaf_nodes = {}   # (product, content, line_number) -> leaf dict
        tree_relationships = []  # parent/child relationship dicts
        seen_relationships = set()  # (parent_id, child_id, type) guard

        def add_relationship(parent_id, child_id, rel_type):
            # Shared path prefixes would otherwise emit the same edge once
            # per input line, creating redundant relationships in Neo4j.
            key = (parent_id, child_id, rel_type)
            if key not in seen_relationships:
                seen_relationships.add(key)
                tree_relationships.append({
                    'parent_id': parent_id,
                    'child_id': child_id,
                    'relationship_type': rel_type
                })

        for data in parsed_data_list:
            product = data['product']

            # Intermediate tree nodes, chained parent -> child.
            prev_node_id = None
            for tree_node in data['tree_nodes']:
                node_key = (product, tree_node['level'], tree_node['content'])
                # Deterministic ID (built-in hash() is salted per process).
                node_id = f"tree_{product}_{tree_node['level']}_{self._stable_hash(tree_node['content'])}"

                if node_key not in unique_tree_nodes:
                    # Vectorize each unique node content exactly once.
                    content_vector = self.vectorize_text(tree_node['content'])

                    unique_tree_nodes[node_key] = {
                        'id': node_id,
                        'content': tree_node['content'],
                        'level': tree_node['level'],
                        'node_type': tree_node['node_type'],
                        'keywords': tree_node['keywords'],
                        'product_code': product,
                        'filename': data['filename'],
                        'line_number': data['line_number'],
                        'content_vector': content_vector
                    }

                if prev_node_id:
                    add_relationship(prev_node_id, node_id, 'LEADS_TO')

                prev_node_id = node_id

            # Leaf node: keyed on line number too, so every original line
            # keeps its own solution node.
            leaf_key = (product, data['leaf_node']['content'], data['line_number'])
            leaf_id = f"leaf_{product}_{data['line_number']}_{self._stable_hash(data['leaf_node']['content'])}"

            leaf_content_vector = self.vectorize_text(data['leaf_node']['content'])

            unique_leaf_nodes[leaf_key] = {
                'id': leaf_id,
                'content': data['leaf_node']['content'],
                'node_type': data['leaf_node']['node_type'],
                'keywords': data['leaf_node']['keywords'],
                'product_code': product,
                'filename': data['filename'],
                'line_number': data['line_number'],
                'content_vector': leaf_content_vector
            }

            # Connect the last tree node of the chain to its leaf.
            if prev_node_id:
                add_relationship(prev_node_id, leaf_id, 'RESULTS_IN')

        print(f"📊 构建完成: {len(unique_tree_nodes)} 个树节点, {len(unique_leaf_nodes)} 个叶子节点, {len(tree_relationships)} 个关系")

        return {
            'tree_nodes': list(unique_tree_nodes.values()),
            'leaf_nodes': list(unique_leaf_nodes.values()),
            'relationships': tree_relationships
        }

    def import_tree_structure(self, tree_data, batch_size=1000):
        """Write the built tree structure to Neo4j in one transaction.

        Creates Product, TreeNode and LeafNode nodes in batches of
        ``batch_size``, then the LEADS_TO / RESULTS_IN edges and the
        Product-[:HAS_ROOT]->TreeNode links. Returns True on success,
        False when there is no driver or no data.
        """
        if not self.driver or not tree_data:
            print("❌ 数据库连接不可用或无数据")
            return False

        print(f"🚀 开始导入树形结构...")
        start_time = time.time()

        # Indexes first so the MATCH-heavy relationship phase is fast.
        self.create_tree_indexes()

        with self.driver.session() as session:
            with session.begin_transaction() as tx:
                # 1. Product nodes
                print("📦 创建产品节点...")
                products = set(node['product_code'] for node in tree_data['tree_nodes'])
                products_data = [{'code': product} for product in products]
                tx.run("""
                    UNWIND $products AS product
                    CREATE (p:Product {
                        code: product.code,
                        timestamp: datetime()
                    })
                """, products=products_data)

                # 2. Tree nodes, batched
                print(f"🌳 创建 {len(tree_data['tree_nodes'])} 个树节点...")
                for i in range(0, len(tree_data['tree_nodes']), batch_size):
                    batch = tree_data['tree_nodes'][i:i+batch_size]
                    tx.run("""
                        UNWIND $nodes AS node
                        CREATE (n:TreeNode {
                            id: node.id,
                            content: node.content,
                            level: node.level,
                            node_type: node.node_type,
                            keywords: node.keywords,
                            product_code: node.product_code,
                            filename: node.filename,
                            line_number: node.line_number,
                            content_vector: node.content_vector,
                            timestamp: datetime()
                        })
                    """, nodes=batch)

                # 3. Leaf nodes, batched (content_vector was previously
                # computed but dropped here — now persisted like TreeNode).
                print(f"🍃 创建 {len(tree_data['leaf_nodes'])} 个叶子节点...")
                for i in range(0, len(tree_data['leaf_nodes']), batch_size):
                    batch = tree_data['leaf_nodes'][i:i+batch_size]
                    tx.run("""
                        UNWIND $nodes AS node
                        CREATE (l:LeafNode {
                            id: node.id,
                            content: node.content,
                            node_type: node.node_type,
                            keywords: node.keywords,
                            product_code: node.product_code,
                            filename: node.filename,
                            line_number: node.line_number,
                            content_vector: node.content_vector,
                            timestamp: datetime()
                        })
                    """, nodes=batch)

                # 4. Relationships, batched and split by type
                print(f"🔗 创建 {len(tree_data['relationships'])} 个关系...")
                for i in range(0, len(tree_data['relationships']), batch_size):
                    batch = tree_data['relationships'][i:i+batch_size]

                    # TreeNode -> TreeNode edges
                    leads_to_relations = [r for r in batch if r['relationship_type'] == 'LEADS_TO']
                    if leads_to_relations:
                        tx.run("""
                            UNWIND $relations AS rel
                            MATCH (parent:TreeNode {id: rel.parent_id})
                            MATCH (child:TreeNode {id: rel.child_id})
                            CREATE (parent)-[:LEADS_TO]->(child)
                        """, relations=leads_to_relations)

                    # TreeNode -> LeafNode edges
                    results_in_relations = [r for r in batch if r['relationship_type'] == 'RESULTS_IN']
                    if results_in_relations:
                        tx.run("""
                            UNWIND $relations AS rel
                            MATCH (parent:TreeNode {id: rel.parent_id})
                            MATCH (child:LeafNode {id: rel.child_id})
                            CREATE (parent)-[:RESULTS_IN]->(child)
                        """, relations=results_in_relations)

                # 5. Product -> root (level 0) links
                print("🔗 创建产品到根节点关系...")
                root_nodes = [node for node in tree_data['tree_nodes'] if node['level'] == 0]
                root_relations = [{'product_code': node['product_code'], 'root_id': node['id']} for node in root_nodes]

                tx.run("""
                    UNWIND $relations AS rel
                    MATCH (p:Product {code: rel.product_code})
                    MATCH (root:TreeNode {id: rel.root_id})
                    CREATE (p)-[:HAS_ROOT]->(root)
                """, relations=root_relations)

                tx.commit()

        total_time = time.time() - start_time
        total_nodes = len(tree_data['tree_nodes']) + len(tree_data['leaf_nodes'])
        # Guard against a ~0s elapsed time on tiny imports.
        rate = total_nodes / total_time if total_time > 0 else float(total_nodes)

        print(f"✅ 树形结构导入完成！")
        print(f"⏱️  总耗时: {total_time:.2f}秒")
        print(f"🚀 平均速度: {rate:.1f} 节点/秒")

        return True


def main():
    """CLI entry point: scan txt files, parse decision chains, import to Neo4j.

    Pass ``--auto-clear`` as the first argument to wipe the database
    without the interactive confirmation prompt.
    """
    print("🌳 树形结构Neo4j导入工具")
    print("=" * 50)

    # Configuration
    folder_path = "/Users/daijunxiong/Downloads/knowledge_txt"
    batch_size = 1000
    auto_clear = len(sys.argv) > 1 and sys.argv[1] == "--auto-clear"

    print(f"📁 目录路径: {folder_path}")
    # Report the importer's actual default URI (the previous message named
    # an unrelated host, bolt://192.168.1.169:7687).
    print(f"🎯 连接数据库: bolt://localhost:7687")
    print(f"📦 批量大小: {batch_size}")

    # Build the importer (connects on construction).
    importer = TreeNeo4jImporter()

    if not importer.driver:
        print("❌ 无法连接到Neo4j数据库")
        return

    try:
        # Clear the database automatically or after confirmation.
        if auto_clear:
            print("🗑️  自动清空数据库...")
            importer.clear_database()
        else:
            choice = input("是否清空现有数据库？(y/N): ").strip().lower()
            if choice == 'y':
                importer.clear_database()

        # Scan the folder for decision-chain lines.
        print(f"\n📁 扫描文件夹: {folder_path}")
        all_lines = importer.scan_txt_files(folder_path)

        if not all_lines:
            print("❌ 没有找到决策树数据")
            return

        # Parse each line into a tree-structured record.
        print("\n🔄 解析决策树为树形结构...")
        parsed_data = []
        parse_start_time = time.time()

        for line_data in all_lines:
            parsed = importer.parse_decision_tree_to_tree(line_data)
            if parsed:
                parsed_data.append(parsed)

        parse_time = time.time() - parse_start_time
        print(f"✅ 解析完成，共 {len(parsed_data)} 条有效记录 (耗时: {parse_time:.2f}秒)")

        if not parsed_data:
            print("❌ 没有有效的决策树数据")
            return

        # Deduplicate into nodes + relationships.
        tree_data = importer.build_tree_structure(parsed_data)

        # Import everything into Neo4j.
        print(f"\n🌳 开始导入树形结构...")
        success = importer.import_tree_structure(tree_data, batch_size)

        if success:
            print("\n🎉 导入完成！")
            print(f"📊 统计信息:")
            print(f"   - 处理文件数: {len(set(d['filename'] for d in parsed_data))}")
            print(f"   - 原始记录数: {len(parsed_data)}")
            print(f"   - 树节点数: {len(tree_data['tree_nodes'])}")
            print(f"   - 叶子节点数: {len(tree_data['leaf_nodes'])}")
            print(f"   - 关系数: {len(tree_data['relationships'])}")
            print(f"   - 产品数: {len(set(node['product_code'] for node in tree_data['tree_nodes']))}")

            print("\n💡 现在您可以使用树形查询工具进行查询:")
            print("   python tree_structure_neo4j/tree_query_tool.py 'TD504投入量'")
            print("   python tree_structure_neo4j/tree_query_tool.py")

    finally:
        # Always release the driver, even on mid-run failure.
        importer.close()


if __name__ == "__main__":
    main()
