# -*- coding: utf-8 -*-
"""
知识图谱检索器
基于知识图谱的检索方法
"""

import hashlib
import json
import os
import re
from collections import defaultdict, deque
from typing import Dict, List, Any, Set, Tuple, Optional

from loguru import logger

from .base_retriever import BaseRetriever, RetrievalResult
from ..ollama_client import ollama_client


class KnowledgeNode:
    """A single node of the knowledge graph.

    A node carries a unique id, a type tag (e.g. "question", "answer",
    "entity"), a free-form property mapping, and outgoing typed relations
    toward other nodes.
    """

    def __init__(self, node_id: str, node_type: str, properties: Dict[str, Any] = None):
        self.id = node_id
        self.type = node_type
        self.properties = properties if properties else {}
        # relation type -> list of {'target': node_id, 'properties': {...}} edges
        self.relations = defaultdict(list)

    def add_relation(self, relation_type: str, target_node_id: str, properties: Dict = None):
        """Append an outgoing edge of `relation_type` toward `target_node_id`."""
        edge = {
            'target': target_node_id,
            'properties': properties if properties else {}
        }
        self.relations[relation_type].append(edge)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the node (including its relations) into a plain dict."""
        return {
            'id': self.id,
            'type': self.type,
            'properties': self.properties,
            'relations': dict(self.relations),
        }


class KnowledgeGraph:
    """In-memory knowledge graph with type/property indexes and path search."""

    def __init__(self):
        self.nodes = {}  # node id -> KnowledgeNode
        self.type_index = defaultdict(list)  # node type -> list of node ids
        self.property_index = defaultdict(list)  # lowercased string property value -> node ids

    def add_node(self, node: "KnowledgeNode") -> None:
        """Register a node and index it by type and by its string property values."""
        self.nodes[node.id] = node
        self.type_index[node.type].append(node.id)

        # Index every string-valued property (case-insensitive, exact value).
        for key, value in node.properties.items():
            if isinstance(value, str):
                self.property_index[value.lower()].append(node.id)

    def get_node(self, node_id: str) -> Optional["KnowledgeNode"]:
        """Return the node with the given id, or None if unknown."""
        return self.nodes.get(node_id)

    def find_nodes_by_type(self, node_type: str) -> List["KnowledgeNode"]:
        """Return all nodes of the given type, in insertion order."""
        return [self.nodes[node_id] for node_id in self.type_index.get(node_type, [])]

    def find_nodes_by_property(self, property_value: str) -> List["KnowledgeNode"]:
        """Return all nodes with a string property equal (case-insensitive) to property_value."""
        return [self.nodes[node_id] for node_id in self.property_index.get(property_value.lower(), [])]

    def find_path(self, start_node_id: str, end_node_id: str, max_depth: int = 3) -> List[List[str]]:
        """
        Enumerate all simple paths from start to end with at most max_depth nodes.

        Bug fix: the previous implementation kept a single global visited set,
        which blocked any later path reusing an intermediate node that an
        earlier path had already expanded — so "all paths" silently missed
        valid routes. Cycle prevention is now per-path (target not in path).

        Returns:
            List[List[str]]: each path as a list of node ids, start to end.
        """
        if start_node_id not in self.nodes or end_node_id not in self.nodes:
            return []

        paths = []
        queue = deque([(start_node_id, [start_node_id])])

        while queue:
            current_id, path = queue.popleft()

            # Depth limit counts nodes in the path, matching the old semantics.
            if len(path) > max_depth:
                continue

            if current_id == end_node_id:
                paths.append(path)
                continue

            current_node = self.nodes.get(current_id)
            if current_node is None:
                # Dangling relation target: skip instead of raising KeyError.
                continue

            for relation_type, targets in current_node.relations.items():
                for target_info in targets:
                    target_id = target_info['target']
                    if target_id not in path:  # per-path cycle guard
                        queue.append((target_id, path + [target_id]))

        return paths

    def get_neighbors(self, node_id: str, relation_types: List[str] = None) -> List["KnowledgeNode"]:
        """
        Return nodes reachable in one hop from node_id.

        Args:
            node_id: source node id.
            relation_types: when given, only follow these relation types.
        """
        node = self.nodes.get(node_id)
        if node is None:
            return []

        neighbors = []
        for relation_type, targets in node.relations.items():
            if relation_types is not None and relation_type not in relation_types:
                continue
            for target_info in targets:
                target = self.nodes.get(target_info['target'])
                if target is not None:
                    neighbors.append(target)

        return neighbors


class KnowledgeGraphRetriever(BaseRetriever):
    """Knowledge-graph based retriever.

    Builds a graph of question/answer/category nodes plus entity and concept
    nodes extracted from the training data, then answers queries by matching
    query entities/concepts against the graph, optionally expanding candidates
    via relation-based reasoning, scoring the candidates, and falling back to
    the LLM when graph confidence is low.
    """

    # Per-node-type scoring weight, hoisted out of the per-node scoring loop.
    _TYPE_WEIGHTS = {
        'question': 1.0,
        'answer': 0.8,
        'concept': 0.6,
        'entity': 0.4,
        'category': 0.3
    }

    def __init__(self, config: Dict[str, Any] = None):
        """
        Initialize the knowledge-graph retriever.

        Args:
            config: optional overrides — max_search_depth, min_confidence,
                use_reasoning, cache_path.
        """
        super().__init__("KnowledgeGraphRetriever", config)

        # Configuration parameters
        self.max_search_depth = self.config.get("max_search_depth", 3)
        self.min_confidence = self.config.get("min_confidence", 0.4)
        self.use_reasoning = self.config.get("use_reasoning", True)
        self.cache_path = self.config.get("cache_path", "data/knowledge_graph.json")

        # The graph plus display names for node types (kept for compatibility).
        self.knowledge_graph = KnowledgeGraph()
        self.entity_types = {
            'question': '问题',
            'answer': '答案',
            'concept': '概念',
            'entity': '实体',
            'category': '分类'
        }

        logger.info("🕸️ 知识图谱检索器初始化完成")

    @staticmethod
    def _stable_id(prefix: str, text: str) -> str:
        """
        Build a deterministic node id for an extracted entity/concept.

        The original code used ``hash(text) % 100000``; Python's string hash
        is randomized per process (PYTHONHASHSEED), so ids differed between
        runs and the JSON cache no longer matched freshly extracted text.
        A truncated MD5 digest is stable across processes and far less
        collision-prone than a 100000-bucket space.
        """
        digest = hashlib.md5(text.encode('utf-8')).hexdigest()[:10]
        return f"{prefix}_{digest}"

    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Initialize the retriever: load the cached graph or build a new one.

        Args:
            data: training items (dicts with 'id', 'question', 'answer',
                'category', 'keywords', ...).

        Returns:
            bool: True on success, False on any exception.
        """
        try:
            logger.info(f"📊 开始初始化知识图谱检索器，数据量: {len(data)}")

            # Prefer the cached graph when available.
            if self._load_cache():
                logger.info("✅ 从缓存加载知识图谱")
                self.is_initialized = True
                return True

            self._build_knowledge_graph(data)
            self._save_cache()

            self.is_initialized = True
            logger.info("✅ 知识图谱检索器初始化成功")
            return True

        except Exception as e:
            logger.error(f"❌ 知识图谱检索器初始化异常: {e}")
            return False

    def _build_knowledge_graph(self, data: List[Dict[str, Any]]) -> None:
        """
        Build the knowledge graph in three passes over the data:
        base nodes, extracted entities/concepts, then cross-item relations.

        Args:
            data: training items.
        """
        logger.info("🔧 构建知识图谱...")

        # Resolve one stable doc id per item up front. The previous code
        # recomputed the fallback id from the *current* node count inside
        # each pass, so items without an 'id' got a different id per pass
        # and their entity/concept/similarity relations were silently lost.
        doc_ids = [str(item.get('id', idx)) for idx, item in enumerate(data)]

        # Pass 1: question/answer/category nodes.
        for item, doc_id in zip(data, doc_ids):
            self._create_basic_nodes(item, doc_id)

        # Pass 2: entity and concept nodes.
        for item, doc_id in zip(data, doc_ids):
            self._extract_entities_and_concepts(item, doc_id)

        # Pass 3: keyword-based similarity relations.
        for item, doc_id in zip(data, doc_ids):
            self._create_relationships(item, doc_id)

        logger.info(f"✅ 知识图谱构建完成，节点数: {len(self.knowledge_graph.nodes)}")

    def _create_basic_nodes(self, item: Dict[str, Any], doc_id: str = None) -> None:
        """
        Create the base nodes (question, answer, category) for one item.

        Args:
            item: data item.
            doc_id: stable document id; when omitted, falls back to the
                legacy derivation for backward compatibility.
        """
        if doc_id is None:
            doc_id = str(item.get('id', len(self.knowledge_graph.nodes)))

        # Question node
        question_node = KnowledgeNode(
            node_id=f"q_{doc_id}",
            node_type="question",
            properties={
                'text': item['question'],
                'doc_id': doc_id,
                'difficulty': item.get('difficulty', 'medium'),
                'type': item.get('type', 'general')
            }
        )
        self.knowledge_graph.add_node(question_node)

        # Answer node, linked both ways to its question.
        if item.get('answer'):
            answer_node = KnowledgeNode(
                node_id=f"a_{doc_id}",
                node_type="answer",
                properties={
                    'text': item['answer'],
                    'doc_id': doc_id
                }
            )
            self.knowledge_graph.add_node(answer_node)

            question_node.add_relation('has_answer', f"a_{doc_id}")
            answer_node.add_relation('answers', f"q_{doc_id}")

        # Category node (shared across items), linked both ways.
        if item.get('category'):
            category_id = f"cat_{item['category']}"
            if category_id not in self.knowledge_graph.nodes:
                category_node = KnowledgeNode(
                    node_id=category_id,
                    node_type="category",
                    properties={
                        'name': str(item['category']),
                        'category_id': item['category']
                    }
                )
                self.knowledge_graph.add_node(category_node)

            question_node.add_relation('belongs_to', category_id)
            self.knowledge_graph.get_node(category_id).add_relation('contains', f"q_{doc_id}")

    def _extract_entities_and_concepts(self, item: Dict[str, Any], doc_id: str = None) -> None:
        """
        Extract entity and concept nodes from one item's text and link them
        to the item's question node.

        Args:
            item: data item.
            doc_id: stable document id; when omitted, falls back to the
                legacy derivation for backward compatibility.
        """
        if doc_id is None:
            doc_id = str(item.get('id', len(self.knowledge_graph.nodes)))
        text = f"{item['question']} {item.get('answer', '')}"

        # Hoisted out of the loops: the question node is the same for every
        # entity/concept of this item.
        question_node = self.knowledge_graph.get_node(f"q_{doc_id}")

        # Entities
        for entity in self._extract_entities(text):
            entity_id = self._stable_id('ent', entity)

            if entity_id not in self.knowledge_graph.nodes:
                self.knowledge_graph.add_node(KnowledgeNode(
                    node_id=entity_id,
                    node_type="entity",
                    properties={
                        'name': entity,
                        'text': entity
                    }
                ))

            if question_node:
                question_node.add_relation('mentions', entity_id)
                self.knowledge_graph.get_node(entity_id).add_relation('mentioned_in', f"q_{doc_id}")

        # Concepts
        for concept in self._extract_concepts(text):
            concept_id = self._stable_id('con', concept)

            if concept_id not in self.knowledge_graph.nodes:
                self.knowledge_graph.add_node(KnowledgeNode(
                    node_id=concept_id,
                    node_type="concept",
                    properties={
                        'name': concept,
                        'text': concept
                    }
                ))

            if question_node:
                question_node.add_relation('involves', concept_id)
                self.knowledge_graph.get_node(concept_id).add_relation('involved_in', f"q_{doc_id}")

    def _extract_entities(self, text: str) -> List[str]:
        """
        Extract entity strings from text via regex heuristics.

        Args:
            text: input text.

        Returns:
            List[str]: deduplicated entities (order not guaranteed).
        """
        entities = []

        # Proper nouns: runs of capitalized Latin words.
        entities.extend(re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', text))

        # Chinese named entities ending in a place/organization suffix.
        entities.extend(re.findall(r'[\u4e00-\u9fff]{2,6}(?:市|省|县|区|公司|大学|学院|医院|银行)', text))

        # Numbers followed by a unit word.
        entities.extend(re.findall(r'\d+(?:\.\d+)?\s*(?:万|千|百|亿|元|米|公里|年|月|日|小时|分钟)', text))

        return list(set(entities))

    def _extract_concepts(self, text: str) -> List[str]:
        """
        Extract concept strings from text using predefined vocabulary
        patterns plus a technical-term suffix heuristic.

        Args:
            text: input text.

        Returns:
            List[str]: deduplicated concepts (order not guaranteed).
        """
        concepts = []

        # Predefined domain vocabularies.
        concept_patterns = [
            r'机器学习|深度学习|人工智能|神经网络|算法',
            r'数据库|系统|网络|服务器|软件|硬件',
            r'管理|运营|营销|财务|人力资源',
            r'教育|培训|学习|知识|技能',
            r'健康|医疗|治疗|诊断|药物',
            r'环境|生态|污染|保护|可持续'
        ]
        for pattern in concept_patterns:
            concepts.extend(re.findall(pattern, text))

        # Technical terms: Chinese noun phrases ending with a tech suffix.
        concepts.extend(re.findall(r'[\u4e00-\u9fff]{2,4}(?:技术|方法|系统|平台|框架|模型)', text))

        return list(set(concepts))

    def _create_relationships(self, item: Dict[str, Any], doc_id: str = None) -> None:
        """
        Link this item's question to other questions sharing a keyword
        with 'similar_to' relations.

        Args:
            item: data item.
            doc_id: stable document id; when omitted, falls back to the
                legacy derivation for backward compatibility.
        """
        if not item.get('keywords'):
            return
        if doc_id is None:
            doc_id = str(item.get('id'))

        question_node = self.knowledge_graph.get_node(f"q_{doc_id}")
        if question_node is None:
            return

        for keyword in item['keywords']:
            # Other questions indexed under the same keyword value.
            for similar_node in self.knowledge_graph.find_nodes_by_property(keyword):
                if similar_node.type == 'question' and similar_node.id != question_node.id:
                    question_node.add_relation('similar_to', similar_node.id)

    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Run knowledge-graph retrieval for a query.

        Pipeline: entity/concept extraction -> graph search -> optional
        relation-based reasoning -> node scoring -> answer generation.

        Args:
            question: query text.
            top_k: number of documents to return.

        Returns:
            RetrievalResult: answer, confidence, timing and retrieved docs.
        """
        if not self.is_initialized:
            logger.error("❌ 知识图谱检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)

        try:
            def _retrieve_internal():
                # 1. Entity/concept extraction (done once; also reused in metadata
                #    below — the old code re-ran both extractions just for metadata).
                query_entities = self._extract_entities(question)
                query_concepts = self._extract_concepts(question)

                # 2. Graph search
                candidate_nodes = self._graph_search(query_entities, query_concepts)

                # 3. Path-based reasoning expansion
                if self.use_reasoning:
                    candidate_nodes.extend(self._reasoning_search(candidate_nodes, question))

                # 4. Score and rank candidate nodes
                scored_nodes = self._score_nodes(candidate_nodes, question, query_entities, query_concepts)

                # 5. Build document results from the top nodes
                retrieved_docs = self._build_retrieval_results(scored_nodes[:top_k])

                # 6. Generate the final answer
                answer, confidence = self._generate_answer(question, retrieved_docs)

                return answer, confidence, retrieved_docs, query_entities, query_concepts

            result_data, response_time = self._measure_time(_retrieve_internal)
            answer, confidence, retrieved_docs, query_entities, query_concepts = result_data

            if answer:
                self.success_count += 1

            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=retrieved_docs,
                metadata={
                    'method': 'knowledge_graph',
                    'num_retrieved': len(retrieved_docs),
                    'query_entities': query_entities,
                    'query_concepts': query_concepts
                }
            )

        except Exception as e:
            logger.error(f"❌ 知识图谱检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)

    def _graph_search(self, query_entities: List[str], query_concepts: List[str]) -> List[KnowledgeNode]:
        """
        Find candidate nodes matching the query's entities/concepts, then
        expand by one hop of neighbors.

        Args:
            query_entities: entities extracted from the query.
            query_concepts: concepts extracted from the query.

        Returns:
            List[KnowledgeNode]: deduplicated candidate nodes.
        """
        candidate_nodes = []

        for entity in query_entities:
            candidate_nodes.extend(self.knowledge_graph.find_nodes_by_property(entity))

        for concept in query_concepts:
            candidate_nodes.extend(self.knowledge_graph.find_nodes_by_property(concept))

        # One-hop expansion of the direct matches.
        extended_nodes = []
        for node in candidate_nodes:
            extended_nodes.extend(self.knowledge_graph.get_neighbors(node.id))
        candidate_nodes.extend(extended_nodes)

        # Deduplicate by node id, keeping the last occurrence (dict semantics).
        unique_nodes = {node.id: node for node in candidate_nodes}
        return list(unique_nodes.values())

    def _reasoning_search(self, candidate_nodes: List[KnowledgeNode], question: str) -> List[KnowledgeNode]:
        """
        Expand candidates via relation-based reasoning: similar questions
        and same-category questions.

        Args:
            candidate_nodes: current candidates.
            question: query text (unused here, kept for interface stability).

        Returns:
            List[KnowledgeNode]: additional nodes found via reasoning.
        """
        reasoning_nodes = []

        question_nodes = [node for node in candidate_nodes if node.type == 'question']

        for q_node in question_nodes:
            # Questions explicitly marked similar via shared keywords.
            reasoning_nodes.extend(self.knowledge_graph.get_neighbors(q_node.id, ['similar_to']))

            # Questions in the same category.
            for cat in self.knowledge_graph.get_neighbors(q_node.id, ['belongs_to']):
                reasoning_nodes.extend(self.knowledge_graph.get_neighbors(cat.id, ['contains']))

        return reasoning_nodes

    def _score_nodes(self,
                    nodes: List[KnowledgeNode],
                    question: str,
                    query_entities: List[str],
                    query_concepts: List[str]) -> List[Tuple[KnowledgeNode, float]]:
        """
        Score candidate nodes by type weight, text similarity, entity/concept
        matches, and relation richness; return them sorted descending.

        Args:
            nodes: candidate nodes.
            question: query text.
            query_entities: entities extracted from the query.
            query_concepts: concepts extracted from the query.

        Returns:
            List[Tuple[KnowledgeNode, float]]: (node, score) sorted by score.
        """
        scored_nodes = []

        for node in nodes:
            # Base weight by node type (class-level table, not rebuilt per node).
            score = self._TYPE_WEIGHTS.get(node.type, 0.1)

            node_text = node.properties.get('text', '')
            if node_text:
                score += self._calculate_text_similarity(question, node_text) * 0.5

            node_text_lower = node_text.lower()

            # Entity matches
            for entity in query_entities:
                if entity.lower() in node_text_lower:
                    score += 0.3

            # Concept matches
            for concept in query_concepts:
                if concept.lower() in node_text_lower:
                    score += 0.2

            # Bonus for well-connected nodes, capped at 0.5.
            score += min(len(node.relations) * 0.1, 0.5)

            scored_nodes.append((node, score))

        scored_nodes.sort(key=lambda x: x[1], reverse=True)
        return scored_nodes

    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """
        Jaccard similarity over whitespace-separated lowercase tokens.

        Note: whitespace tokenization is a weak signal for Chinese text
        (no spaces between words); kept for parity with the original.

        Args:
            text1: first text.
            text2: second text.

        Returns:
            float: similarity in [0, 1].
        """
        words1 = set(text1.lower().split())
        words2 = set(text2.lower().split())

        if not words1 or not words2:
            return 0.0

        intersection = words1 & words2
        union = words1 | words2
        return len(intersection) / len(union) if union else 0.0

    def _build_retrieval_results(self, scored_nodes: List[Tuple[KnowledgeNode, float]]) -> List[Dict]:
        """
        Turn scored question nodes into document dicts (question + linked
        answer + metadata). Non-question nodes are skipped.

        Args:
            scored_nodes: (node, score) pairs, already ranked.

        Returns:
            List[Dict]: retrieval result documents.
        """
        results = []

        for node, score in scored_nodes:
            if node.type != 'question':
                continue

            # Follow the has_answer relation to fetch the answer text.
            answer_nodes = self.knowledge_graph.get_neighbors(node.id, ['has_answer'])
            answer = answer_nodes[0].properties.get('text', '') if answer_nodes else ''

            results.append({
                'id': node.properties.get('doc_id'),
                'question': node.properties.get('text', ''),
                'answer': answer,
                'score': score,
                'node_type': node.type,
                'metadata': {
                    'node_id': node.id,
                    'properties': node.properties,
                    'relations': dict(node.relations)
                }
            })

        return results

    def _generate_answer(self, question: str, retrieved_docs: List[Dict]) -> Tuple[str, float]:
        """
        Produce the final answer: best retrieved answer, with LLM fallback
        (no docs) or LLM enhancement (low confidence).

        Args:
            question: query text.
            retrieved_docs: ranked retrieval documents.

        Returns:
            Tuple[str, float]: (answer, confidence in [0, 1]).
        """
        if not retrieved_docs:
            # Nothing retrieved: ask the LLM directly, with low confidence.
            answer = ollama_client.answer_question(question)
            return answer, 0.3

        best_doc = retrieved_docs[0]
        answer = best_doc.get('answer', '')
        confidence = min(best_doc.get('score', 0.0), 1.0)

        # Low confidence: let the LLM answer with the top docs as context,
        # and prefer its answer only when it is more substantial.
        if confidence < self.min_confidence:
            context = "\n".join([doc.get('answer', '') for doc in retrieved_docs[:3]])
            llm_answer = ollama_client.answer_question(question, context)

            if llm_answer and len(llm_answer) > len(answer):
                answer = llm_answer
                confidence = min(confidence + 0.3, 1.0)

        return answer, confidence

    def _load_cache(self) -> bool:
        """
        Load the knowledge graph from the JSON cache file, if present.

        Returns:
            bool: True when the cache was loaded successfully.
        """
        try:
            if not os.path.exists(self.cache_path):
                return False

            with open(self.cache_path, 'r', encoding='utf-8') as f:
                cache_data = json.load(f)

            # Rebuild every node and its relations from the serialized form.
            for node_data in cache_data.get('nodes', []):
                node = KnowledgeNode(
                    node_id=node_data['id'],
                    node_type=node_data['type'],
                    properties=node_data['properties']
                )

                for relation_type, targets in node_data['relations'].items():
                    for target_info in targets:
                        if isinstance(target_info, dict):
                            node.add_relation(relation_type, target_info['target'], target_info.get('properties'))
                        else:
                            # Legacy cache format: bare target-id strings.
                            node.add_relation(relation_type, target_info)

                self.knowledge_graph.add_node(node)

            return True

        except Exception as e:
            logger.warning(f"⚠️ 加载知识图谱缓存失败: {e}")
            return False

    def _save_cache(self) -> None:
        """Persist the knowledge graph as JSON under self.cache_path (best-effort)."""
        try:
            os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)

            cache_data = {
                'nodes': [node.to_dict() for node in self.knowledge_graph.nodes.values()]
            }

            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)

            logger.info(f"💾 知识图谱缓存已保存到 {self.cache_path}")

        except Exception as e:
            logger.warning(f"⚠️ 保存知识图谱缓存失败: {e}")


if __name__ == "__main__":
    # Smoke test: build a tiny graph from two QA items and run one query.
    demo_config = {
        "max_search_depth": 3,
        "use_reasoning": True,
        "min_confidence": 0.4
    }
    kg_retriever = KnowledgeGraphRetriever(demo_config)

    sample_items = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": "AI",
            "keywords": ["机器学习", "人工智能", "算法"]
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": "AI",
            "keywords": ["深度学习", "神经网络", "学习"]
        }
    ]

    # Only query when initialization succeeds.
    if kg_retriever.initialize(sample_items):
        result = kg_retriever.retrieve("机器学习是什么？")
        print(f"检索结果: {result.to_dict()}")

        stats = kg_retriever.get_performance_stats()
        print(f"性能统计: {stats}")