"""知识处理器 - 知识文档处理和检索"""

import asyncio
import hashlib
import re
from typing import Dict, Any, List, Optional, Union, Tuple
from datetime import datetime, timedelta
from loguru import logger

from ..database.manager import DatabaseManager

# 数据库客户端
from ..database.mongodb_client import MongoDBClient
from ..database.neo4j_client import Neo4jClient

# 可选导入ChromaDBClient
try:
    from ..database.chromadb_client import ChromaDBClient
    CHROMADB_AVAILABLE = True
except ImportError:
    ChromaDBClient = None
    CHROMADB_AVAILABLE = False

# 新增的处理工具
from ..utils.document_processor import DocumentProcessor
from ..utils.text_splitter import SemanticTextSplitter
from ..utils.vectorizer import DocumentVectorizer, create_embedding_model, create_vector_store
from ..utils.knowledge_graph_builder import KnowledgeGraphBuilder, create_knowledge_graph_builder


class KnowledgeProcessor:
    """知识处理器 - 处理知识文档的上传、存储和检索"""
    
    def __init__(self, db_manager: DatabaseManager, config: Dict[str, Any]):
        """Create the knowledge processor (call ``initialize()`` before use).

        Args:
            db_manager: Shared database manager providing the storage clients.
            config: Full application config; only the ``knowledge`` section is read here.
        """
        self.db_manager = db_manager
        self.config = config
        self.knowledge_config = config.get('knowledge', {})

        # Processing parameters, with built-in fallbacks.
        kc = self.knowledge_config
        self.chunk_size = kc.get('chunk_size', 1000)          # size of each document chunk
        self.chunk_overlap = kc.get('chunk_overlap', 200)     # characters shared between chunks
        self.max_document_size = kc.get('max_document_size', 10 * 1024 * 1024)  # 10MB cap
        self.similarity_threshold = kc.get('similarity_threshold', 0.6)
        self.max_documents_per_user = kc.get('max_documents_per_user', 1000)

        # Lifecycle flag, flipped by initialize()/shutdown().
        self.is_initialized = False

        # Processing components, created later in _initialize_processors().
        self.document_processor = None
        self.text_splitter = None
        self.vectorizer = None
        self.knowledge_graph_builder = None

        logger.info("知识处理器已创建")
    
    @property
    def initialized(self) -> bool:
        """Backward-compatibility alias mapping to ``is_initialized``."""
        return self.is_initialized
    
    @initialized.setter
    def initialized(self, value: bool):
        # Coerce to bool so legacy callers can assign any truthy value.
        self.is_initialized = bool(value)
    
    async def initialize(self):
        """Initialize the knowledge processor.

        Acquires database clients from the manager (MongoDB required;
        ChromaDB and Neo4j optional — missing optional backends are set
        to None rather than failing) and builds the processing components.
        Repeated calls are no-ops.

        Raises:
            Exception: re-raised if database setup or component init fails.
        """
        if self.is_initialized:
            logger.warning("知识处理器已初始化")
            return
        
        try:
            # Make sure the underlying database connections are up first.
            if not self.db_manager.is_initialized:
                await self.db_manager.initialize()
            
            # MongoDB is mandatory for document storage.
            self.mongodb_client = self.db_manager.mongodb_client
            
            # ChromaDB (vector store) is optional; degrade gracefully.
            if CHROMADB_AVAILABLE:
                try:
                    self.chromadb_client = self.db_manager.chromadb_client
                except RuntimeError as e:
                    logger.warning(f"ChromaDB不可用: {e}")
                    self.chromadb_client = None
            else:
                self.chromadb_client = None
                logger.info("ChromaDB不可用，跳过初始化")
            
            # Neo4j (knowledge graph) is optional and can be disabled in config.
            neo4j_enabled = self.config.get('databases', {}).get('neo4j', {}).get('enabled', True)
            if not neo4j_enabled:
                self.neo4j_client = None
                logger.info("Neo4j已在配置中禁用，跳过初始化")
            else:
                try:
                    self.neo4j_client = self.db_manager.neo4j_client
                except RuntimeError as e:
                    logger.warning(f"Neo4j不可用: {e}")
                    self.neo4j_client = None
            
            # Build document processor, splitter, vectorizer and graph builder.
            await self._initialize_processors()
            
            self.is_initialized = True
            logger.info("知识处理器初始化完成")
            
        except Exception as e:
            logger.error(f"知识处理器初始化失败: {e}")
            raise
    
    async def _initialize_processors(self):
        """Create the text-processing components used by the RAG pipeline.

        Instantiates the document processor, the semantic text splitter
        (configured with this instance's chunk size/overlap), the
        vectorizer (embedding model + vector store) and the knowledge
        graph builder, then restores persisted vectors from ChromaDB.

        Raises:
            Exception: re-raised if any component fails to initialize.
        """
        try:
            # Document pre-processing (content extraction/cleanup).
            self.document_processor = DocumentProcessor()
            
            # Semantic chunking tuned for Chinese text.
            self.text_splitter = SemanticTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap,
                language='chinese'
            )
            
            # Vectorizer backing the RAG retrieval pipeline.
            embedding_model = create_embedding_model()
            vector_store = create_vector_store()
            self.vectorizer = DocumentVectorizer(
                embedding_model=embedding_model,
                vector_store=vector_store
            )
            
            # Entity/relation extraction for the knowledge graph.
            self.knowledge_graph_builder = create_knowledge_graph_builder()
            
            # Reload previously stored vectors from ChromaDB into memory.
            await self._restore_vectors_from_chromadb()
            
            logger.info("处理组件初始化完成")
            
        except Exception as e:
            logger.error(f"处理组件初始化失败: {e}")
            raise
    
    async def shutdown(self):
        """Shut down the knowledge processor.

        Clears the initialized flag; logs a warning and returns when the
        processor was never initialized.
        """
        if not self.is_initialized:
            logger.warning("知识处理器未初始化")
            return

        try:
            self.is_initialized = False
            logger.info("知识处理器已关闭")
        except Exception as e:
            logger.error(f"关闭知识处理器失败: {e}")
    
    async def create_user_context(self, user_id: str) -> Dict[str, Any]:
        """为用户创建知识上下文"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            # 获取用户文档统计
            user_docs = await self.db_manager.mongodb_client.get_user_documents(user_id)
            
            # 获取最近上传的文档
            recent_docs = await self.get_recent_documents(user_id, limit=5)
            
            context = {
                'user_id': user_id,
                'total_documents': len(user_docs),
                'recent_documents': recent_docs,
                'created_at': datetime.utcnow().isoformat()
            }
            
            logger.debug(f"创建用户知识上下文: {user_id}")
            return context
            
        except Exception as e:
            logger.error(f"创建用户知识上下文失败: {e}")
            raise
    
    async def upload_document(self, content: str, title: str, 
                            user_id: Optional[str] = None,
                            metadata: Optional[Dict[str, Any]] = None) -> str:
        """上传知识文档"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            # 检查文档大小
            if len(content.encode('utf-8')) > self.max_document_size:
                raise ValueError(f"文档大小超过限制: {self.max_document_size} 字节")
            
            import uuid
            document_id = str(uuid.uuid4())
            timestamp = datetime.utcnow()
            
            # 准备文档数据
            document_data = {
                'document_id': document_id,
                'title': title,
                'content': content,
                'user_id': user_id,
                'created_at': timestamp,
                'updated_at': timestamp,
                'metadata': metadata or {},
                'status': 'processing'
            }
            
            # 1. 存储原始文档到MongoDB
            await self.db_manager.mongodb_client.store_document(document_data)
            
            # 2. 处理文档（分块、向量化等）
            await self._process_document(document_id, content, title, user_id, metadata)
            
            # 3. 更新文档状态
            await self.db_manager.mongodb_client.update_document(
                document_id, {'status': 'completed', 'processed_at': datetime.utcnow()}
            )
            
            logger.info(f"上传文档完成: {document_id} - {title}")
            return {
                "success": True,
                "document_id": document_id,
                "title": title,
                "message": "文档上传成功"
            }
            
        except Exception as e:
            logger.error(f"上传文档失败: {e}")
            # 更新文档状态为失败
            if 'document_id' in locals():
                await self.db_manager.mongodb_client.update_document(
                    document_id, {'status': 'failed', 'error': str(e)}
                )
            raise
    
    async def _process_document(self, document_id: str, content: str, title: str,
                              user_id: Optional[str], metadata: Optional[Dict[str, Any]]):
        """Process a document: preprocess, chunk, store vectors, build graph.

        Args:
            document_id: ID of the already-stored document.
            content: Raw document text.
            title: Document title (propagated into chunk metadata).
            user_id: Owner; graph nodes are only built when present.
            metadata: Extra metadata merged into every chunk's metadata.

        Raises:
            Exception: re-raised so the caller can mark the document failed.
        """
        try:
            # 1. Optional content preprocessing.
            if self.document_processor:
                processed_result = await self.document_processor.process_document(content)
                content = processed_result.get('content', content)
            
            # 2. Chunking: semantic splitter when available, else fallback.
            if self.text_splitter:
                chunk_objects = await self.text_splitter.split_text(
                    content,
                    metadata={'document_id': document_id, 'title': title, 'user_id': user_id}
                )
                chunks = [chunk['content'] for chunk in chunk_objects]
            else:
                chunks = await self._chunk_document(content)
            
            # 3. Embedding is handled inside ChromaDB on store; no separate call.
            
            # 4. Store every chunk in ChromaDB. ChromaDB is an optional
            #    backend (see initialize()), so skip gracefully when absent —
            #    previously a missing client raised and failed the upload.
            chromadb_client = getattr(self, 'chromadb_client', None)
            if chromadb_client is not None:
                chunk_tasks = []
                for i, chunk in enumerate(chunks):
                    chunk_id = f"{document_id}_chunk_{i}"
                    chunk_metadata = {
                        'document_id': document_id,
                        'title': title,
                        'user_id': user_id,
                        'chunk_index': i,
                        'total_chunks': len(chunks),
                        'created_at': datetime.utcnow().isoformat()
                    }
                    
                    if metadata:
                        chunk_metadata.update(metadata)
                    
                    chunk_tasks.append(
                        chromadb_client.store_document(chunk_id, chunk, chunk_metadata)
                    )
                
                # Store all chunks concurrently.
                await asyncio.gather(*chunk_tasks)
            else:
                logger.warning(f"ChromaDB不可用，跳过分块向量存储: {document_id}")
            
            # 5. Build graph nodes only for user-owned documents.
            if user_id:
                await self._create_knowledge_graph_nodes(
                    document_id, title, content, user_id, chunks
                )
            
            logger.debug(f"文档处理完成: {document_id} - {len(chunks)} 个分块")
            
        except Exception as e:
            logger.error(f"处理文档失败: {e}")
            raise
    
    async def _chunk_document(self, content: str) -> List[str]:
        """将文档分块"""
        try:
            chunks = []
            
            # 按段落分割
            paragraphs = content.split('\n\n')
            
            current_chunk = ""
            for paragraph in paragraphs:
                # 如果当前分块加上新段落超过大小限制
                if len(current_chunk) + len(paragraph) > self.chunk_size:
                    if current_chunk:
                        chunks.append(current_chunk.strip())
                        
                        # 保留重叠部分
                        if len(current_chunk) > self.chunk_overlap:
                            overlap_start = len(current_chunk) - self.chunk_overlap
                            current_chunk = current_chunk[overlap_start:] + "\n\n" + paragraph
                        else:
                            current_chunk = paragraph
                    else:
                        # 单个段落就超过大小限制，需要进一步分割
                        sub_chunks = await self._split_large_paragraph(paragraph)
                        chunks.extend(sub_chunks[:-1])  # 除了最后一个
                        current_chunk = sub_chunks[-1] if sub_chunks else ""
                else:
                    if current_chunk:
                        current_chunk += "\n\n" + paragraph
                    else:
                        current_chunk = paragraph
            
            # 添加最后一个分块
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            
            return chunks
            
        except Exception as e:
            logger.error(f"文档分块失败: {e}")
            return [content]  # 返回原始内容作为单个分块
    
    async def _split_large_paragraph(self, paragraph: str) -> List[str]:
        """分割大段落"""
        try:
            chunks = []
            sentences = paragraph.split('。')  # 按句号分割
            
            current_chunk = ""
            for sentence in sentences:
                if len(current_chunk) + len(sentence) > self.chunk_size:
                    if current_chunk:
                        chunks.append(current_chunk.strip() + '。')
                        current_chunk = sentence
                    else:
                        # 单个句子就超过限制，强制分割
                        words = sentence.split()
                        for i in range(0, len(words), self.chunk_size // 10):
                            chunk_words = words[i:i + self.chunk_size // 10]
                            chunks.append(' '.join(chunk_words))
                        current_chunk = ""
                else:
                    if current_chunk:
                        current_chunk += '。' + sentence
                    else:
                        current_chunk = sentence
            
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            
            return chunks
            
        except Exception as e:
            logger.error(f"分割大段落失败: {e}")
            return [paragraph]
    
    async def _create_knowledge_graph_nodes(self, document_id: str, title: str, 
                                          content: str, user_id: str, chunks: List[str]):
        """Create Neo4j graph nodes and relationships for a new document.

        Ensures the user node exists, creates the document node, extracts
        entities/relations (via the graph builder when available, otherwise
        a simple concept extractor), and links the document to similar
        existing documents found via ``search_knowledge``.

        Errors are logged and swallowed: graph construction is best-effort
        and must not fail the document upload.

        NOTE(review): this accesses ``self.db_manager.neo4j_client``
        directly — when Neo4j is disabled that may be None and every call
        here fails (caught by the except below).
        """
        try:
            # Make sure the owning user node exists before linking to it.
            await self.db_manager.neo4j_client.create_user_node(user_id)
            
            # Create the document node itself.
            await self.db_manager.neo4j_client.create_knowledge_node(
                document_id, title, 'document', user_id
            )
            
            # Preferred path: full entity/relation extraction.
            if self.knowledge_graph_builder:
                knowledge_graph = await self.knowledge_graph_builder.build_knowledge_graph(
                    content, document_id
                )
                
                # One node per extracted entity.
                for entity in knowledge_graph.entities:
                    # NOTE(review): hash() is salted per interpreter run, so
                    # these IDs are not stable across restarts, and mod 1e6
                    # makes collisions possible.
                    entity_id = f"entity_{hash(entity.name) % 1000000}"
                    entity_properties = {
                        'name': entity.name,
                        'type': entity.entity_type,
                        'confidence': entity.confidence,
                        'context': entity.context
                    }
                    await self.db_manager.neo4j_client.create_concept_node(entity_id, entity_properties)
                    
                    # Link the document to the entity it contains.
                    await self.db_manager.neo4j_client.create_relationship(
                        document_id, 'Document', entity_id, 'Entity', 'CONTAINS_ENTITY', 
                        {'confidence': entity.confidence}
                    )
                
                # Entity-to-entity relations.
                for relation in knowledge_graph.relations:
                    # IDs derived the same way as above so subjects/objects
                    # line up with the entity nodes just created.
                    subject_id = f"entity_{hash(relation.subject) % 1000000}"
                    object_id = f"entity_{hash(relation.object) % 1000000}"
                    
                    await self.db_manager.neo4j_client.create_relationship(
                        subject_id, 'Entity', object_id, 'Entity', relation.predicate,
                        {
                            'confidence': relation.confidence,
                            'context': relation.context,
                            'document_id': document_id
                        }
                    )
            else:
                # Fallback: lightweight keyword/concept extraction.
                concepts = await self._extract_document_concepts(content)
                
                # One node + CONTAINS edge per concept.
                for concept in concepts:
                    concept_id = f"concept_{hash(concept) % 1000000}"
                    concept_data = {'name': concept, 'type': 'extracted_concept'}
                    await self.db_manager.neo4j_client.create_concept_node(concept_id, concept_data)
                    await self.db_manager.neo4j_client.create_relationship(
                        document_id, 'Document', concept_id, 'Concept', 'CONTAINS', {'strength': 1.0}
                    )
            
            # Link this document to similar existing documents.
            similar_docs = await self.search_knowledge(
                title + " " + content[:500], user_id, limit=3
            )
            
            for similar_doc in similar_docs:
                if (similar_doc.get('document_id') != document_id and 
                    similar_doc.get('similarity', 0) > self.similarity_threshold):
                    
                    await self.db_manager.neo4j_client.create_relationship(
                        document_id, 'Document', similar_doc['document_id'], 'Document', 'SIMILAR_TO',
                        {'similarity': similar_doc['similarity']}
                    )
            
        except Exception as e:
            logger.error(f"创建知识图谱节点失败: {e}")
    
    async def _extract_document_concepts(self, content: str) -> List[str]:
        """从文档中提取关键概念"""
        try:
            # 简化的概念提取逻辑
            import re
            
            # 提取可能的概念（大写开头的词组、专业术语等）
            concepts = set()
            
            # 提取大写开头的词组
            capitalized_words = re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', content)
            concepts.update(capitalized_words)
            
            # 提取中文专业术语（简化版本）
            chinese_terms = re.findall(r'[\u4e00-\u9fff]{2,8}', content)
            # 过滤常见词汇
            stop_words = {'这个', '那个', '可以', '应该', '需要', '进行', '实现', '使用', '通过', '具有'}
            chinese_terms = [term for term in chinese_terms if term not in stop_words and len(term) >= 2]
            concepts.update(chinese_terms)
            
            # 限制概念数量
            return list(concepts)[:20]
            
        except Exception as e:
            logger.error(f"提取文档概念失败: {e}")
            return []
    
    async def search_knowledge(self, query: str, user_id: Optional[str] = None,
                             limit: int = 5, threshold: float = 0.7, 
                             use_rag_pipeline: bool = True) -> List[Dict[str, Any]]:
        """搜索知识库 - 支持RAG检索管道和传统GraphRAG混合检索"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            if use_rag_pipeline:
                # 使用完整的RAG检索管道
                results = await self._rag_pipeline(query, user_id)
                logger.debug(f"RAG管道搜索完成: 查询='{query[:50]}...', 结果数={len(results) if isinstance(results, list) else 'n/a'}")
                # 若RAG检索结果为空，自动回退到传统GraphRAG检索
                if results:
                    return results
                logger.debug("RAG管道返回空结果，回退至传统GraphRAG检索")
                return await self._legacy_search_knowledge(query, user_id, limit, threshold)
            else:
                # 使用传统的GraphRAG混合检索（保持向后兼容）
                return await self._legacy_search_knowledge(query, user_id, limit, threshold)
            
        except Exception as e:
            logger.error(f"搜索知识库失败: {e}")
            return []
    
    async def _legacy_search_knowledge(self, query: str, user_id: Optional[str] = None,
                                     limit: int = 5, threshold: float = 0.7) -> List[Dict[str, Any]]:
        """Legacy GraphRAG hybrid retrieval (kept for backward compatibility).

        Combines vector similarity search (in-process vectorizer, or
        ChromaDB as a fallback) with graph relevance from Neo4j, merges
        the per-document scores, enriches results with the stored MongoDB
        record and returns the top ``limit`` documents by combined score.

        Args:
            query: Free-text search query.
            user_id: Filters ChromaDB results / scopes graph search when set.
            limit: Maximum number of documents returned.
            threshold: Similarity threshold passed to the vectorizer.

        Returns:
            Result dicts sorted by combined score; [] on error.
        """
        try:
            # GraphRAG hybrid search: vector search + graph search,
            # merged per document_id.
            all_results = {}
            
            # 1. Vector search via the in-process vectorizer (preferred).
            if self.vectorizer:
                vector_results = await self.vectorizer.search_similar_chunks(
                    query, limit * 2, threshold
                )
                
                # Fold chunk hits into per-document buckets.
                for result in vector_results:
                    doc_id = result.get('document_id')
                    if doc_id:
                        if doc_id not in all_results:
                            all_results[doc_id] = {
                                'document_id': doc_id,
                                'chunks': [],
                                'vector_score': 0,
                                'graph_score': 0
                            }
                        
                        chunk_data = {
                            'content': result['content'],
                            'similarity': result['similarity'],
                            'chunk_id': result['chunk_id'],
                            'metadata': result['metadata']
                        }
                        all_results[doc_id]['chunks'].append(chunk_data)
                        # Document score = best chunk similarity.
                        all_results[doc_id]['vector_score'] = max(
                            all_results[doc_id]['vector_score'],
                            result['similarity']
                        )
            
            # 2. ChromaDB vector search (fallback when no vectorizer).
            elif self.db_manager.chromadb_client is not None:
                where_filter = {'user_id': user_id} if user_id else None
                vector_results = await self.db_manager.chromadb_client.search_documents(
                    'documents', query, n_results=limit * 2, where=where_filter
                )
                
                # ChromaDB returns parallel arrays; walk them together.
                docs = vector_results.get('documents', [])
                metas = vector_results.get('metadatas', [])
                ids = vector_results.get('ids', [])
                dists = vector_results.get('distances', [])
                
                for i, doc in enumerate(docs):
                    metadata = metas[i] if i < len(metas) else {}
                    doc_id = metadata.get('document_id')
                    # Convert distance to similarity (1 - distance).
                    similarity = 1 - dists[i] if i < len(dists) else 0.0
                    
                    if doc_id:
                        if doc_id not in all_results:
                            all_results[doc_id] = {
                                'document_id': doc_id,
                                'chunks': [],
                                'vector_score': 0,
                                'graph_score': 0
                            }
                        
                        chunk_result = {
                            'content': doc,
                            'similarity': similarity,
                            'metadata': metadata,
                            'chunk_id': ids[i] if i < len(ids) else None
                        }
                        all_results[doc_id]['chunks'].append(chunk_result)
                        all_results[doc_id]['vector_score'] = max(
                            all_results[doc_id]['vector_score'],
                            similarity
                        )
            
            # 3. Graph search (Neo4j) based on user context / entity relations.
            graph_results = []
            if user_id and self.db_manager.neo4j_client is not None:
                graph_results = await self.db_manager.neo4j_client.find_related_knowledge(
                    query, user_id, limit=limit
                )
                
                # Attach graph relevance to the matching documents.
                for result in graph_results:
                    doc_id = result.get('document_id')
                    if doc_id:
                        if doc_id not in all_results:
                            all_results[doc_id] = {
                                'document_id': doc_id,
                                'chunks': [],
                                'vector_score': 0,
                                'graph_score': 0
                            }
                        
                        all_results[doc_id]['graph_score'] = result.get('relevance', 0)
            
            # 4. Load full document records and compute combined scores.
            final_results = []
            for doc_id, result in all_results.items():
                try:
                    # Fetch the stored document record.
                    doc_info = await self.db_manager.mongodb_client.get_document(doc_id)
                    if not doc_info:
                        continue
                    
                    # Weighted blend of vector and graph relevance.
                    vector_score = result['vector_score']
                    graph_score = result['graph_score']
                    combined_score = vector_score * 0.7 + graph_score * 0.3  # graph carries 30% weight
                    
                    # Keep only the most relevant chunks.
                    best_chunks = sorted(
                        result['chunks'],
                        key=lambda x: x.get('similarity', 0),
                        reverse=True
                    )[:3]  # at most 3 chunks
                    
                    final_result = {
                        'document_id': doc_id,
                        'title': doc_info.get('title', ''),
                        'user_id': doc_info.get('user_id'),
                        'created_at': doc_info.get('created_at'),
                        'similarity': combined_score,
                        'vector_score': vector_score,
                        'graph_score': graph_score,
                        'relevant_chunks': best_chunks,
                        'metadata': doc_info.get('metadata', {})
                    }
                    
                    final_results.append(final_result)
                    
                except Exception as e:
                    # Skip documents whose record can't be loaded.
                    logger.error(f"处理搜索结果失败: {doc_id} - {e}")
                    continue
            
            # 5. Sort by combined score, best first.
            final_results.sort(key=lambda x: x['similarity'], reverse=True)
            
            logger.debug(f"GraphRAG搜索完成: 查询='{query[:50]}...', 结果数={len(final_results)}")
            return final_results[:limit]
            
        except Exception as e:
            logger.error(f"搜索知识库失败: {e}")
            return []
    
    async def get_document(self, document_id: str) -> Optional[Dict[str, Any]]:
        """获取文档信息"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            return await self.db_manager.mongodb_client.get_document(document_id)
            
        except Exception as e:
            logger.error(f"获取文档失败: {e}")
            return None
    
    async def get_recent_documents(self, user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
        """获取用户最近的文档"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            return await self.db_manager.mongodb_client.get_user_documents(
                user_id, limit=limit, sort_by='created_at'
            )
            
        except Exception as e:
            logger.error(f"获取最近文档失败: {e}")
            return []
    
    async def delete_document(self, document_id: str, user_id: Optional[str] = None) -> bool:
        """删除文档"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            # 获取文档信息
            doc_info = await self.db_manager.mongodb_client.get_document(document_id)
            if not doc_info:
                logger.warning(f"文档不存在: {document_id}")
                return False
            
            # 检查权限
            if user_id and doc_info.get('user_id') != user_id:
                logger.warning(f"用户无权删除文档: {user_id} -> {document_id}")
                return False
            
            # 从各个存储层删除
            tasks = [
                self.db_manager.mongodb_client.delete_document(document_id),
                self.db_manager.chromadb_client.delete_document(document_id),
            ]
            
            # 如果有用户ID，也从图谱中删除
            if doc_info.get('user_id'):
                tasks.append(
                    self.db_manager.neo4j_client.delete_knowledge_node(document_id)
                )
            
            results = await asyncio.gather(*tasks, return_exceptions=True)
            
            # 检查删除结果
            success = all(not isinstance(result, Exception) for result in results)
            
            if success:
                logger.info(f"删除文档: {document_id}")
            else:
                logger.warning(f"删除文档部分失败: {document_id}")
            
            return success
            
        except Exception as e:
            logger.error(f"删除文档失败: {e}")
            return False
    
    async def update_document(self, document_id: str, updates: Dict[str, Any],
                            user_id: Optional[str] = None) -> bool:
        """更新文档"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            # 检查文档存在性和权限
            doc_info = await self.db_manager.mongodb_client.get_document(document_id)
            if not doc_info:
                return False
            
            if user_id and doc_info.get('user_id') != user_id:
                return False
            
            # 更新文档
            updates['updated_at'] = datetime.utcnow()
            success = await self.db_manager.mongodb_client.update_document(
                document_id, updates
            )
            
            # 如果更新了内容，需要重新处理
            if 'content' in updates and success:
                await self._reprocess_document(document_id, updates)
            
            return success
            
        except Exception as e:
            logger.error(f"更新文档失败: {e}")
            return False
    
    async def _reprocess_document(self, document_id: str, updates: Dict[str, Any]):
        """Re-chunk and re-index a document after its content changed.

        Deletes the old chunk vectors (only when ChromaDB is available)
        and runs the full processing pipeline again on the stored content.
        Errors are logged and swallowed: reprocessing is best-effort.

        Args:
            document_id: ID of the document to rebuild.
            updates: The update payload that triggered reprocessing
                (currently not read here; the stored record is reloaded).
        """
        try:
            # Fetch the freshly-updated document record.
            doc_info = await self.db_manager.mongodb_client.get_document(document_id)
            if not doc_info:
                return
            
            # Drop stale chunks/vectors; skip when the vector store is absent
            # (ChromaDB is optional — previously a missing client raised here).
            chromadb_client = getattr(self, 'chromadb_client', None)
            if chromadb_client is not None:
                await chromadb_client.delete_document(document_id)
            
            # Re-run chunking/vectorization/graph building.
            await self._process_document(
                document_id,
                doc_info['content'],
                doc_info['title'],
                doc_info.get('user_id'),
                doc_info.get('metadata')
            )
            
            logger.info(f"重新处理文档完成: {document_id}")
            
        except Exception as e:
            logger.error(f"重新处理文档失败: {e}")
    
    async def get_user_context(self, user_id: str) -> Dict[str, Any]:
        """获取用户知识上下文"""
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")
        
        try:
            # 获取用户文档
            user_docs = await self.db_manager.mongodb_client.get_user_documents(user_id)
            
            # 获取文档统计
            total_docs = len(user_docs)
            recent_docs = [doc for doc in user_docs if 
                          (datetime.utcnow() - doc.get('created_at', datetime.min)).days <= 7]
            
            # 获取知识图谱统计
            graph_stats = await self.db_manager.neo4j_client.get_user_knowledge_stats(user_id)
            
            return {
                'user_id': user_id,
                'total_documents': total_docs,
                'recent_documents_count': len(recent_docs),
                'recent_documents': recent_docs[:5],  # 最近5个
                'graph_stats': graph_stats,
                'context_created_at': datetime.utcnow().isoformat()
            }
            
        except Exception as e:
            logger.error(f"获取用户知识上下文失败: {e}")
            return {'user_id': user_id, 'error': str(e)}
    
    async def cleanup_old_documents(self, days_threshold: int = 90) -> int:
        """
        Delete documents older than the given age threshold.

        Args:
            days_threshold: Age in days; documents created before this cutoff
                are removed.

        Returns:
            Number of documents successfully deleted; 0 on failure.

        Raises:
            RuntimeError: If the processor has not been initialized.
        """
        if not self.is_initialized:
            raise RuntimeError("知识处理器未初始化")

        try:
            cutoff = datetime.utcnow() - timedelta(days=days_threshold)

            # Fetch candidates older than the cutoff, then delete one by one
            # so per-document failures don't abort the whole sweep.
            stale_docs = await self.db_manager.mongodb_client.get_documents_before_date(
                cutoff
            )

            removed = 0
            for stale in stale_docs:
                doc_id = stale.get('document_id')
                if not doc_id:
                    continue
                if await self.delete_document(doc_id):
                    removed += 1

            logger.info(f"清理旧文档完成: 删除了 {removed} 个文档")
            return removed

        except Exception as e:
            logger.error(f"清理旧文档失败: {e}")
            return 0
    
    async def get_stats(self) -> Dict[str, Any]:
        """获取知识处理器统计信息"""
        try:
            if not self.is_initialized:
                return {'status': 'not_initialized'}
            
            # 获取文档统计
            doc_stats = await self.db_manager.mongodb_client.get_document_stats()
            
            # 获取向量统计
            vector_stats = await self.db_manager.chromadb_client.get_document_stats()
            
            # 获取图谱统计
            graph_stats = await self.db_manager.neo4j_client.get_knowledge_stats()
            
            return {
                'status': 'initialized',
                'config': {
                    'chunk_size': self.chunk_size,
                    'chunk_overlap': self.chunk_overlap,
                    'max_document_size': self.max_document_size,
                    'similarity_threshold': self.similarity_threshold,
                    'max_documents_per_user': self.max_documents_per_user
                },
                'document_stats': doc_stats,
                'vector_stats': vector_stats,
                'graph_stats': graph_stats
            }
            
        except Exception as e:
            logger.error(f"获取知识处理器统计失败: {e}")
            return {'error': str(e)}
    
    async def _rag_pipeline(self, query: str, user_id: Optional[str] = None,
                          top_k: int = 10, threshold: float = 0.7) -> List[Dict[str, Any]]:
        """
        RAG retrieval pipeline: retrieve, enhance, contextualize.

        Steps:
            1. Query preprocessing and intent analysis
            2. Multi-strategy retrieval (vector + graph + keyword)
            3. Re-ranking and enhancement
            4. Context building (rank, citation, usage hint)

        Args:
            query: User query text.
            user_id: Optional user ID for personalized retrieval.
            top_k: Number of results to return.
            threshold: Similarity threshold for vector retrieval.

        Returns:
            Enhanced retrieval results; empty list on failure.
        """
        try:
            # Per-query diagnostics belong at DEBUG level — previously these
            # were emitted at INFO and flooded production logs on every query.
            logger.debug(f"开始RAG管道处理，查询: {query}")
            logger.debug(f"vectorizer状态: {self.vectorizer is not None}")
            if self.vectorizer:
                logger.debug(f"vector_store类型: {type(self.vectorizer.vector_store)}")
                if hasattr(self.vectorizer.vector_store, 'vectors'):
                    logger.debug(f"当前向量数量: {len(self.vectorizer.vector_store.vectors)}")

            # 1. Query understanding and preprocessing
            processed_query = await self._preprocess_query(query)

            # 2. Multi-strategy retrieval
            retrieval_results = await self._multi_strategy_retrieval(
                processed_query, user_id, top_k, threshold
            )

            # 3. Re-ranking and enhancement
            enhanced_results = await self._rerank_and_enhance(
                retrieval_results, processed_query, user_id
            )

            # 4. Context building
            contextualized_results = await self._build_context(
                enhanced_results, user_id
            )

            return contextualized_results

        except Exception as e:
            logger.error(f"RAG检索管道失败: {e}")
            return []
    
    async def _preprocess_query(self, query: str) -> Dict[str, Any]:
        """
        查询预处理和理解
        
        Args:
            query: 原始查询
            
        Returns:
            处理后的查询信息
        """
        # 基础文本清理
        cleaned_query = query.strip().lower()
        
        # 提取关键词（简单实现）
        keywords = [word for word in cleaned_query.split() if len(word) > 2]
        
        # 查询意图分析（简化版）
        intent = 'search'  # 默认为搜索意图
        if any(word in cleaned_query for word in ['如何', '怎么', 'how']):
            intent = 'how_to'
        elif any(word in cleaned_query for word in ['什么', '是什么', 'what']):
            intent = 'definition'
        elif any(word in cleaned_query for word in ['为什么', 'why']):
            intent = 'explanation'
        
        return {
            'original': query,
            'cleaned': cleaned_query,
            'keywords': keywords,
            'intent': intent,
            'query_type': 'semantic'  # 语义查询
        }
    
    async def _multi_strategy_retrieval(self, processed_query: Dict[str, Any],
                                      user_id: Optional[str], top_k: int,
                                      threshold: float) -> List[Dict[str, Any]]:
        """
        Multi-strategy retrieval: vector + graph + keyword search.

        Results from every strategy are pooled into one flat list; each entry
        is tagged with its 'retrieval_method' and a 'base_score' consumed by
        the later re-ranking step. No deduplication happens here.

        Args:
            processed_query: Preprocessed query (uses 'original' and 'keywords').
            user_id: Optional user ID (enables graph retrieval and Chroma filtering).
            top_k: Number of results requested per strategy.
            threshold: Similarity threshold for vector retrieval.

        Returns:
            Combined retrieval results from all strategies.
        """
        all_results = []
        query_text = processed_query['original']
        
        # 1. Semantic vector retrieval
        if self.vectorizer:
            # If the in-memory vector store is empty, attempt to restore its
            # contents from ChromaDB before searching.
            vector_count = len(self.vectorizer.vector_store.vectors) if hasattr(self.vectorizer.vector_store, 'vectors') else 0
            logger.info(f"当前内存向量存储中有 {vector_count} 个向量")
            
            if hasattr(self.vectorizer.vector_store, 'vectors') and not self.vectorizer.vector_store.vectors:
                logger.info("内存向量存储为空，尝试从ChromaDB恢复向量")
                await self._restore_vectors_from_chromadb()
            
            # Over-fetch (top_k * 2) so re-ranking has extra candidates.
            vector_results = await self.vectorizer.search_similar_chunks(
                query_text, top_k * 2, threshold
            )
            
            for result in vector_results:
                all_results.append({
                    **result,
                    'retrieval_method': 'vector',
                    'base_score': result['similarity']
                })
            
            # Fallback: when in-memory vectors yield too few results, query
            # ChromaDB directly as a supplement.
            try:
                need_chroma_fallback = len(vector_results) < top_k
            except Exception:
                need_chroma_fallback = True
            
            if need_chroma_fallback and getattr(self.db_manager, 'chromadb_client', None):
                try:
                    where_filter = {'user_id': user_id} if user_id else None
                    chroma_res = await self.db_manager.chromadb_client.search_documents(
                        'documents', query_text, n_results=top_k * 2, where=where_filter
                    )
                    # Chroma returns parallel lists; index defensively since
                    # list lengths may differ across fields.
                    for i, doc in enumerate(chroma_res.get('documents', []) or []):
                        meta = (chroma_res.get('metadatas', []) or [])[i] if i < len(chroma_res.get('metadatas', [])) else {}
                        distance = (chroma_res.get('distances', []) or [])[i] if i < len(chroma_res.get('distances', [])) else 1.0
                        vid = (chroma_res.get('ids', []) or [])[i] if i < len(chroma_res.get('ids', [])) else None
                        # Convert Chroma distance to a similarity in [0, 1].
                        similarity = 1 - float(distance) if distance is not None else 0.0
                        all_results.append({
                            'content': doc,
                            'vector_id': vid,
                            'document_id': meta.get('document_id', vid),
                            'chunk_id': meta.get('chunk_id') or meta.get('chunk_index'),
                            'similarity': similarity,
                            'metadata': meta,
                            'retrieval_method': 'chroma',
                            'base_score': similarity
                        })
                except Exception as e:
                    logger.warning(f"ChromaDB回退检索失败: {e}")
        
        # 2. Graph relation retrieval (requires a Neo4j client and a user)
        if self.db_manager.neo4j_client is not None and user_id:
            graph_results = await self.db_manager.neo4j_client.find_related_knowledge(
                query_text, user_id, limit=top_k
            )
            
            for result in graph_results:
                all_results.append({
                    **result,
                    'retrieval_method': 'graph',
                    'base_score': result.get('relevance', 0.5)
                })
        
        # 3. Keyword matching retrieval (supplementary)
        if processed_query['keywords']:
            keyword_results = await self._keyword_search(
                processed_query['keywords'], top_k
            )
            
            for result in keyword_results:
                all_results.append({
                    **result,
                    'retrieval_method': 'keyword',
                    'base_score': result.get('match_score', 0.3)
                })
        
        return all_results
    
    async def _restore_vectors_from_chromadb(self):
        """
        Restore vectors from ChromaDB into the in-memory vector store.

        Best-effort: every failure path logs and returns instead of raising.
        Entries whose embedding is missing are re-encoded with the local
        embedding model when one is available, otherwise skipped.
        """
        try:
            if not self.db_manager.chromadb_client or not hasattr(self.db_manager.chromadb_client, '_connected') or not self.db_manager.chromadb_client._connected:
                logger.warning("ChromaDB客户端未连接，无法恢复向量")
                return
            
            # Extra robustness: the underlying client may be None (e.g. a
            # missing dependency marked "connected" to avoid reconnect loops).
            chroma_client = self.db_manager.chromadb_client
            if not getattr(chroma_client, 'client', None):
                logger.warning("ChromaDB客户端底层实例不存在（client=None），跳过向量恢复")
                return
            
            # Fetch the 'documents' collection via get_collection
            collection = await self.db_manager.chromadb_client.get_collection('documents')
            if not collection:
                logger.warning("documents集合不存在，无法恢复向量")
                return
            
            # Pull all stored entries, including embeddings and metadata
            all_data = collection.get(include=['embeddings', 'metadatas', 'documents'])
            
            if not all_data['ids']:
                logger.info("ChromaDB中没有文档向量数据")
                return
            
            # Rebuild vectors plus per-vector metadata for the memory store
            vectors = []
            metadatas = []
            
            for i, (doc_id, embedding, metadata, document) in enumerate(zip(
                all_data['ids'], all_data['embeddings'], all_data['metadatas'], all_data['documents']
            )):
                try:
                    # The embedding may be None (e.g. the collection does not
                    # persist embeddings); re-encode with the local model then.
                    if embedding is None and self.vectorizer and getattr(self.vectorizer, 'embedding_model', None):
                        embedding = self.vectorizer.embedding_model.encode(document)
                    
                    if embedding is not None:
                        import numpy as np
                        vector = np.array(embedding)
                        vectors.append(vector)
                        
                        # Rebuild the metadata entry the memory store expects
                        meta = {
                            'vector_id': doc_id,
                            'document_id': metadata.get('document_id', doc_id) if metadata else doc_id,
                            'chunk_id': (metadata or {}).get('chunk_id', f'chunk_{i}'),
                            'model_name': (metadata or {}).get('model_name', 'unknown'),
                            'dimension': len(embedding),
                            'text_hash': (metadata or {}).get('text_hash', ''),
                            'content': document or (metadata or {}).get('content', ''),
                            'chunk_metadata': metadata or {}
                        }
                        metadatas.append(meta)
                except Exception as ie:
                    # Skip individual bad entries but keep restoring the rest
                    logger.warning(f"恢复单条向量失败[{doc_id}]: {ie}")
            
            if vectors:
                # Bulk-add restored vectors to the in-memory store
                await self.vectorizer.vector_store.add_vectors(vectors, metadatas)
                logger.info(f"从ChromaDB恢复了 {len(vectors)} 个向量到内存存储")
            else:
                logger.warning("ChromaDB中没有有效的向量数据")
                
        except Exception as e:
            logger.error(f"从ChromaDB恢复向量失败: {e}")
    
    async def _rerank_and_enhance(self, results: List[Dict[str, Any]],
                                processed_query: Dict[str, Any],
                                user_id: Optional[str]) -> List[Dict[str, Any]]:
        """
        结果重排序和增强
        
        Args:
            results: 原始检索结果
            processed_query: 处理后的查询
            user_id: 用户ID
            
        Returns:
            重排序和增强后的结果
        """
        enhanced_results = []
        
        for result in results:
            # 计算增强得分
            enhanced_score = await self._calculate_enhanced_score(
                result, processed_query, user_id
            )
            
            # 添加上下文信息
            context_info = await self._get_result_context(
                result, user_id
            )
            
            enhanced_result = {
                **result,
                'enhanced_score': enhanced_score,
                'context': context_info,
                'relevance_factors': {
                    'semantic_similarity': result.get('base_score', 0),
                    'user_preference': context_info.get('user_relevance', 0),
                    'recency': context_info.get('recency_score', 0),
                    'authority': context_info.get('authority_score', 0)
                }
            }
            
            enhanced_results.append(enhanced_result)
        
        # 按增强得分排序
        enhanced_results.sort(key=lambda x: x['enhanced_score'], reverse=True)
        
        return enhanced_results
    
    async def _calculate_enhanced_score(self, result: Dict[str, Any],
                                      processed_query: Dict[str, Any],
                                      user_id: Optional[str]) -> float:
        """
        计算增强得分
        
        Args:
            result: 检索结果
            processed_query: 处理后的查询
            user_id: 用户ID
            
        Returns:
            增强得分
        """
        base_score = result.get('base_score', 0)
        method = result.get('retrieval_method', 'unknown')
        
        # 方法权重
        method_weights = {
            'vector': 0.6,
            'graph': 0.3,
            'keyword': 0.1
        }
        
        # 基础得分加权
        weighted_score = base_score * method_weights.get(method, 0.1)
        
        # 查询意图匹配加分
        intent_bonus = 0
        if processed_query['intent'] == 'how_to' and '步骤' in result.get('content', ''):
            intent_bonus = 0.1
        elif processed_query['intent'] == 'definition' and '定义' in result.get('content', ''):
            intent_bonus = 0.1
        
        # 用户个性化加分（如果有用户历史）
        user_bonus = 0
        if user_id and result.get('document_id'):
            # 简化的用户偏好计算
            user_bonus = 0.05  # 基础用户相关性加分
        
        # 文档新鲜度加分
        recency_bonus = 0
        if result.get('metadata', {}).get('created_at'):
            # 简化的新鲜度计算
            recency_bonus = 0.02
        
        final_score = weighted_score + intent_bonus + user_bonus + recency_bonus
        return min(final_score, 1.0)  # 限制在1.0以内
    
    async def _get_result_context(self, result: Dict[str, Any],
                                user_id: Optional[str]) -> Dict[str, Any]:
        """
        获取结果上下文信息
        
        Args:
            result: 检索结果
            user_id: 用户ID
            
        Returns:
            上下文信息
        """
        context = {
            'user_relevance': 0.5,  # 默认用户相关性
            'recency_score': 0.5,   # 默认新鲜度
            'authority_score': 0.5, # 默认权威性
            'related_documents': []  # 相关文档
        }
        
        # 如果有文档ID，获取相关信息
        if result.get('document_id'):
            try:
                # 获取文档元数据
                doc_info = await self.get_document(result['document_id'])
                if doc_info:
                    context['document_title'] = doc_info.get('title', '')
                    context['document_type'] = doc_info.get('file_type', '')
                    context['created_at'] = doc_info.get('created_at')
                
                # 查找相关文档（通过图谱）
                if self.db_manager.neo4j_client is not None and user_id:
                    related_docs = await self.db_manager.neo4j_client.find_related_documents(
                        result['document_id'], limit=3
                    )
                    context['related_documents'] = related_docs
                    
            except Exception as e:
                logger.warning(f"获取结果上下文失败: {e}")
        
        return context
    
    async def _build_context(self, results: List[Dict[str, Any]],
                           user_id: Optional[str]) -> List[Dict[str, Any]]:
        """
        构建最终上下文
        
        Args:
            results: 增强后的结果
            user_id: 用户ID
            
        Returns:
            带有完整上下文的结果
        """
        contextualized = []
        
        for i, result in enumerate(results):
            # 添加排名信息
            result['rank'] = i + 1
            result['confidence'] = result.get('enhanced_score', 0)
            result['score'] = result.get('enhanced_score', 0)  # 添加score字段以兼容测试
            
            # 构建引用信息
            citation = {
                'document_id': result.get('document_id'),
                'chunk_id': result.get('chunk_id'),
                'title': result.get('context', {}).get('document_title', ''),
                'source': result.get('retrieval_method', 'unknown')
            }
            result['citation'] = citation
            
            # 添加使用建议
            usage_hint = self._generate_usage_hint(result)
            result['usage_hint'] = usage_hint
            
            contextualized.append(result)
        
        return contextualized
    
    def _generate_usage_hint(self, result: Dict[str, Any]) -> str:
        """
        生成使用建议
        
        Args:
            result: 检索结果
            
        Returns:
            使用建议文本
        """
        confidence = result.get('confidence', 0)
        method = result.get('retrieval_method', 'unknown')
        
        if confidence > 0.8:
            return "高度相关，建议优先参考"
        elif confidence > 0.6:
            return "相关性较高，可作为参考"
        elif confidence > 0.4:
            return "部分相关，建议结合其他信息"
        else:
            return "相关性较低，仅供参考"
    
    async def _keyword_search(self, keywords: List[str], top_k: int) -> List[Dict[str, Any]]:
        """
        关键词搜索（补充检索方法）
        
        Args:
            keywords: 关键词列表
            top_k: 结果数量
            
        Returns:
            关键词匹配结果
        """
        try:
            client = getattr(self.db_manager, 'mongodb_client', None)
            if not client:
                return []

            # 优先使用抽象方法以适配异步/同步实现和测试桩
            if hasattr(client, 'search_documents'):
                try:
                    possible = client.search_documents(keywords, top_k)
                    # 支持协程或同步返回
                    if asyncio.iscoroutine(possible):
                        return await possible
                    return possible or []
                except Exception as e:
                    logger.error(f"关键词搜索失败(search_documents): {e}")
                    return []

            # 回退：无法使用抽象方法时，放弃直接访问底层 driver，避免未 await 的 Mock 警告
            # 如需底层集合检索，应在具体 mongodb_client 实现内封装。
            return []
            
        except Exception as e:
            logger.error(f"关键词搜索失败: {e}")
            
        return []
    
    async def health_check(self) -> Dict[str, Any]:
        """健康检查"""
        try:
            # 检查初始化状态
            initialized = self.is_initialized
            
            # 检查数据库连接
            db_health = await self.db_manager.health_check()
            db_healthy = all(db_health.values())
            
            overall_healthy = initialized and db_healthy
            
            return {
                'healthy': overall_healthy,
                'status': 'healthy' if overall_healthy else 'unhealthy',
                'checks': {
                    'initialized': initialized,
                    'database': db_healthy
                },
                'database_health': db_health
            }
            
        except Exception as e:
            logger.error(f"知识处理器健康检查失败: {e}")
            return {
                'healthy': False,
                'status': 'error',
                'error': str(e)
            }