"""
RAG控制器 - 统一管理LightRAG引擎的所有组件
提供完整的RAG流程：检索、重排、生成
"""

import asyncio
from typing import List, Dict, Any, Optional, Union
from datetime import datetime

from ..core.interfaces import Document, SearchResult
from ..core.events import EventEmitter
from ..core.container import container

from .ingestion.pipeline import IngestionPipeline
from .retrieval.hybrid_retriever import HybridRetriever
from .reranking.reranker import HybridReranker
from .generation.generator import RAGGenerator


class RAGController(EventEmitter):
    """RAG controller -- unified facade over the LightRAG engine.

    Owns the four pipeline components (ingestion, retrieval, reranking,
    generation) and runs the complete RAG flow for a query:
    retrieve -> rerank -> generate.  Components are created lazily by
    ``initialize()``, which every public entry point invokes on demand,
    so callers never need to initialize explicitly.
    """

    # Retrieval backends used whenever the caller does not pass any.
    # Kept as a tuple so the shared default cannot be mutated; call
    # sites copy it into a fresh list per request.
    _DEFAULT_RETRIEVAL_MODES = ("vector", "keyword", "graph")

    # Upper bound on concurrent queries in batch_process_queries().
    _BATCH_CONCURRENCY = 5

    def __init__(self):
        super().__init__()
        # All components stay None until initialize() runs.
        self.ingestion_pipeline = None
        self.retriever = None
        self.reranker = None
        self.generator = None
        self._initialized = False

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _preview(text: str, limit: int = 200) -> str:
        """Return *text* truncated to *limit* chars, with '...' appended when cut."""
        return text[:limit] + "..." if len(text) > limit else text

    @classmethod
    def _result_summaries(cls, results) -> List[Dict[str, Any]]:
        """Project search results into JSON-friendly summary dicts.

        Each entry exposes the document id, relevance score/type and a
        200-character content preview.
        """
        return [
            {
                "document_id": r.document.id,
                "score": r.score,
                "relevance_type": r.relevance_type,
                "content_preview": cls._preview(r.document.content),
            }
            for r in results
        ]

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    async def initialize(self):
        """Create and initialize all pipeline components (idempotent).

        Raises:
            Exception: re-raises any component initialization failure
                after reporting it through ``emit_error``.
        """
        if self._initialized:
            return

        try:
            self.ingestion_pipeline = IngestionPipeline()
            self.retriever = HybridRetriever()
            self.reranker = HybridReranker()
            self.generator = RAGGenerator()

            # Initialize every component concurrently; fail fast if any raises.
            await asyncio.gather(
                self.ingestion_pipeline.initialize(),
                self.retriever.initialize(),
                self.reranker.initialize(),
                self.generator.initialize()
            )

            self._initialized = True

            await self.emit("rag_controller_initialized", {
                "components": ["ingestion", "retrieval", "reranking", "generation"]
            })

        except Exception as e:
            await self.emit_error("rag_controller_initialization", e)
            raise

    # ------------------------------------------------------------------
    # Query processing
    # ------------------------------------------------------------------

    async def process_query(
        self,
        query: str,
        retrieval_modes: Optional[List[str]] = None,
        top_k: int = 10,
        rerank_top_k: int = 5,
        use_llm_rerank: bool = False,
        template_name: str = "general_qa",
        metadata_filter: Optional[Dict[str, Any]] = None,
        return_intermediate: bool = False
    ) -> Dict[str, Any]:
        """Run the full RAG flow: retrieve, rerank, generate.

        Args:
            query: User question.
            retrieval_modes: Backends to query; defaults to
                ``_DEFAULT_RETRIEVAL_MODES`` when None.
            top_k: Number of candidates to retrieve.
            rerank_top_k: Number of candidates kept after reranking.
            use_llm_rerank: Whether the reranker should use an LLM pass.
            template_name: Prompt template used by the generator.
            metadata_filter: Optional retrieval-time metadata filter.
            return_intermediate: Include per-stage result previews in
                the response when True.

        Returns:
            Dict with the answer, per-stage timings and metadata; on
            failure a dict carrying ``error`` and a placeholder answer.
        """
        if not self._initialized:
            await self.initialize()

        try:
            start_time = datetime.now()
            # Resolve the mode list once; the same list is passed to the
            # retriever and echoed back in the response metadata.
            modes = retrieval_modes or list(self._DEFAULT_RETRIEVAL_MODES)

            await self.emit("rag_query_started", {
                "query": self._preview(query, 100),
                "top_k": top_k,
                "rerank_top_k": rerank_top_k
            })

            # 1. Retrieval stage
            retrieval_start = datetime.now()
            search_results = await self.retriever.retrieve(
                query=query,
                top_k=top_k,
                retrieval_modes=modes,
                metadata_filter=metadata_filter
            )
            retrieval_time = (datetime.now() - retrieval_start).total_seconds()

            if not search_results:
                # Nothing retrieved: short-circuit with a canned answer
                # instead of running rerank/generation on empty context.
                return {
                    "query": query,
                    "answer": "抱歉，没有找到相关信息来回答您的问题。",
                    "search_results": [],
                    "reranked_results": [],
                    "processing_time": {
                        "total": (datetime.now() - start_time).total_seconds(),
                        "retrieval": retrieval_time,
                        "reranking": 0.0,
                        "generation": 0.0
                    }
                }

            # 2. Reranking stage
            rerank_start = datetime.now()
            reranked_results = await self.reranker.rerank(
                query=query,
                results=search_results,
                top_k=rerank_top_k,
                use_llm=use_llm_rerank
            )
            rerank_time = (datetime.now() - rerank_start).total_seconds()

            # 3. Generation stage
            generation_start = datetime.now()
            answer = await self.generator.generate(
                query=query,
                context=reranked_results,
                template_name=template_name
            )
            generation_time = (datetime.now() - generation_start).total_seconds()

            total_time = (datetime.now() - start_time).total_seconds()

            # Assemble the response envelope.
            response = {
                "query": query,
                "answer": answer,
                "processing_time": {
                    "total": total_time,
                    "retrieval": retrieval_time,
                    "reranking": rerank_time,
                    "generation": generation_time
                },
                "metadata": {
                    "retrieval_modes": modes,
                    "template_name": template_name,
                    "use_llm_rerank": use_llm_rerank,
                    "processed_at": datetime.now().isoformat()
                }
            }

            # Optionally expose per-stage previews for debugging/explanation.
            if return_intermediate:
                response["search_results"] = self._result_summaries(search_results)
                response["reranked_results"] = self._result_summaries(reranked_results)

            await self.emit("rag_query_completed", {
                "query": self._preview(query, 100),
                "processing_time": total_time,
                "results_count": len(reranked_results)
            })

            return response

        except Exception as e:
            # Degrade gracefully: report the error, return an error payload
            # shaped like a normal response so callers need no special casing.
            await self.emit_error("process_query", e)
            return {
                "query": query,
                "answer": f"处理查询时发生错误：{str(e)}",
                "error": str(e),
                "processing_time": {"total": 0, "retrieval": 0, "reranking": 0, "generation": 0}
            }

    async def process_structured_query(
        self,
        query: str,
        retrieval_modes: Optional[List[str]] = None,
        top_k: int = 10,
        rerank_top_k: int = 5
    ) -> Dict[str, Any]:
        """Retrieve + rerank, then produce a structured analysis.

        Returns the generator's structured analysis dict, or an
        ``error`` dict when nothing was retrieved or a stage failed.
        """
        if not self._initialized:
            await self.initialize()

        try:
            modes = retrieval_modes or list(self._DEFAULT_RETRIEVAL_MODES)
            search_results = await self.retriever.retrieve(
                query=query,
                top_k=top_k,
                retrieval_modes=modes
            )

            if not search_results:
                return {
                    "error": "No relevant documents found",
                    "query": query,
                    "generated_at": datetime.now().isoformat()
                }

            reranked_results = await self.reranker.rerank(
                query=query,
                results=search_results,
                top_k=rerank_top_k
            )

            # Delegate the structured formatting to the generator.
            return await self.generator.generate_structured_analysis(
                query, reranked_results
            )

        except Exception as e:
            await self.emit_error("process_structured_query", e)
            return {
                "error": str(e),
                "query": query,
                "generated_at": datetime.now().isoformat()
            }

    async def batch_process_queries(
        self,
        queries: List[str],
        **kwargs
    ) -> List[Dict[str, Any]]:
        """Process many queries concurrently, preserving input order.

        Args:
            queries: Questions to answer.
            **kwargs: Forwarded verbatim to :meth:`process_query`.

        Returns:
            One result dict per query; failed queries yield an
            ``error`` entry instead of aborting the whole batch.
        """
        if not self._initialized:
            await self.initialize()

        try:
            await self.emit("batch_query_started", {
                "query_count": len(queries)
            })

            # Bound concurrency so a large batch cannot overwhelm the
            # underlying components.
            semaphore = asyncio.Semaphore(self._BATCH_CONCURRENCY)

            async def process_single_query(query):
                async with semaphore:
                    return await self.process_query(query, **kwargs)

            tasks = [process_single_query(query) for query in queries]
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # Convert any raised exceptions into error payloads in place.
            processed_results = []
            for i, result in enumerate(results):
                if isinstance(result, Exception):
                    processed_results.append({
                        "query": queries[i],
                        "error": str(result),
                        "answer": f"处理查询时发生错误：{str(result)}"
                    })
                else:
                    processed_results.append(result)

            await self.emit("batch_query_completed", {
                "query_count": len(queries),
                "successful_queries": sum(1 for r in processed_results if "error" not in r)
            })

            return processed_results

        except Exception as e:
            await self.emit_error("batch_process_queries", e)
            return [
                {
                    "query": query,
                    "error": str(e),
                    "answer": f"批量处理查询时发生错误：{str(e)}"
                } for query in queries
            ]

    # ------------------------------------------------------------------
    # Document management
    # ------------------------------------------------------------------

    async def add_documents(self, documents: Union[List[Document], List[str]]):
        """Ingest documents, given either file paths or Document objects.

        The list's first element decides the interpretation: strings are
        treated as file paths, anything else as Document objects.

        Raises:
            Exception: re-raises ingestion/indexing failures after
                reporting them through ``emit_error``.
        """
        if not self._initialized:
            await self.initialize()

        try:
            # Guard: the type sniff below would raise IndexError on [].
            if not documents:
                return

            if isinstance(documents[0], str):
                # File-path list: hand each file to the ingestion pipeline.
                # NOTE(review): this branch does not call
                # add_documents_to_index -- confirm process_file() indexes
                # internally, otherwise these files are never retrievable.
                for file_path in documents:
                    await self.ingestion_pipeline.process_file(file_path)
            else:
                # Document objects: process, then refresh the retriever index.
                processed_docs = await self.ingestion_pipeline.batch_process_documents(documents)
                await self.retriever.add_documents_to_index(processed_docs)

            await self.emit("documents_added", {
                "document_count": len(documents)
            })

        except Exception as e:
            await self.emit_error("add_documents", e)
            raise

    # ------------------------------------------------------------------
    # Introspection
    # ------------------------------------------------------------------

    async def get_system_status(self) -> Dict[str, Any]:
        """Return a snapshot of initialization state and component presence."""
        try:
            status = {
                "initialized": self._initialized,
                "components": {
                    "ingestion_pipeline": self.ingestion_pipeline is not None,
                    "retriever": self.retriever is not None,
                    "reranker": self.reranker is not None,
                    "generator": self.generator is not None
                },
                "timestamp": datetime.now().isoformat()
            }

            # Detailed stats are only available once components exist.
            if self._initialized:
                status["pipeline_stats"] = await self.ingestion_pipeline.get_pipeline_stats()

            return status

        except Exception as e:
            await self.emit_error("get_system_status", e)
            return {"error": str(e), "initialized": False}

    async def explain_query_processing(
        self,
        query: str,
        result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Build a human-readable breakdown of a prior process_query() result.

        Args:
            query: The original query text.
            result: A response dict returned by :meth:`process_query`
                (ideally produced with ``return_intermediate=True`` so the
                retrieval/reranking sections can be populated).
        """
        try:
            explanation = {
                "query": query,
                "processing_breakdown": result.get("processing_time", {}),
                "components_used": [
                    "retrieval", "reranking", "generation"
                ]
            }

            # Intermediate results are only present when the original call
            # requested them; enrich the explanation opportunistically.
            if "search_results" in result:
                explanation["retrieval_analysis"] = {
                    "total_retrieved": len(result["search_results"]),
                    "retrieval_types": list(set(r["relevance_type"] for r in result["search_results"]))
                }

            if "reranked_results" in result:
                reranked = result["reranked_results"]
                explanation["reranking_analysis"] = {
                    "final_count": len(reranked),
                    "score_range": {
                        "min": min(r["score"] for r in reranked) if reranked else 0,
                        "max": max(r["score"] for r in reranked) if reranked else 0
                    }
                }

            return explanation

        except Exception as e:
            await self.emit_error("explain_query_processing", e)
            return {"error": str(e)}

    async def cleanup(self):
        """Release resources held by the pipeline components.

        NOTE(review): only the ingestion pipeline exposes cleanup here;
        retriever/reranker/generator apparently hold no resources needing
        release -- confirm against their implementations.
        """
        try:
            if self.ingestion_pipeline:
                await self.ingestion_pipeline.cleanup()

            await self.emit("rag_controller_cleanup_completed", {})

        except Exception as e:
            await self.emit_error("rag_controller_cleanup", e)