"""
生成器 - 基于检索结果生成回答
支持可解释的生成和证据溯源
"""

import asyncio
from typing import List, Dict, Any, Optional
import json
import re
from datetime import datetime

from ...core.interfaces import GenerationInterface, SearchResult
from ...core.events import EventEmitter
from ...core.config import config


class RAGGenerator(GenerationInterface, EventEmitter):
    """Retrieval-augmented (RAG) answer generator.

    Builds a prompt from retrieved ``SearchResult`` context, calls an OpenAI
    chat model, and post-processes the reply so that source citations of the
    form ``[Source: doc_id]`` stay traceable back to the retrieved documents.

    The client is created lazily: public entry points call ``initialize()``
    on first use, so constructing the object performs no I/O.
    """

    def __init__(self):
        super().__init__()
        self.client: Optional[Any] = None       # AsyncOpenAI client, created in initialize()
        self.model_name: Optional[str] = None   # chat model name resolved from config
        self._initialized: bool = False

        # Built-in prompt templates, keyed by template name.
        # These are user-facing runtime strings and are kept verbatim.
        self.default_templates: Dict[str, str] = {
            "resume_analysis": """基于以下简历信息，请提供详细的分析报告：

查询：{query}

相关信息：
{context}

请从以下方面进行分析：
1. 候选人基本信息
2. 工作经验评估
3. 技能匹配度
4. 教育背景
5. 优势和不足
6. 推荐理由

注意：请在每个分析点后标注信息来源 [Source: 文档ID]，确保可追溯性。

分析报告：""",

            "general_qa": """基于以下上下文信息回答问题：

问题：{query}

上下文：
{context}

要求：
1. 基于提供的上下文信息回答
2. 如果上下文不足以回答问题，请明确说明
3. 在回答中标注信息来源 [Source: 文档ID]
4. 保持客观和准确

回答：""",

            "entity_qa": """基于以下实体相关信息回答问题：

问题：{query}

实体信息：
{context}

请基于实体关系和属性信息提供准确回答，并标注信息来源。

回答："""
        }

    async def initialize(self):
        """Create the AsyncOpenAI client from configuration (idempotent).

        Raises:
            ValueError: if no OpenAI API key is configured.
            Exception: re-raised after emitting an error event when client
                construction fails for any other reason.
        """
        if self._initialized:
            return

        try:
            from openai import AsyncOpenAI

            # Resolve LLM settings from configuration.
            llm_config = config.get("lightrag_engine.llm", {})
            self.model_name = llm_config.get("model", "gpt-3.5-turbo")

            api_key = config.get("external_services.openai.api_key")
            if not api_key:
                raise ValueError("OpenAI API key not found")

            self.client = AsyncOpenAI(api_key=api_key)
            self._initialized = True

            await self.emit("rag_generator_initialized", {
                "model": self.model_name
            })

        except Exception as e:
            await self.emit_error("rag_generator_initialization", e)
            raise

    async def generate(
        self,
        query: str,
        context: List[SearchResult],
        template: Optional[str] = None,
        template_name: str = "general_qa",
        **kwargs
    ) -> str:
        """Generate an answer for ``query`` grounded in ``context``.

        Args:
            query: User question.
            context: Retrieved documents, highest relevance first.
            template: Explicit prompt template with ``{query}``/``{context}``
                placeholders; overrides ``template_name`` when given.
            template_name: Key into ``default_templates`` (falls back to
                ``general_qa`` for unknown names).
            **kwargs: Extra placeholder values forwarded to ``str.format``.

        Returns:
            The post-processed answer, or a Chinese error string on failure
            (this method never raises).
        """
        if not self._initialized:
            await self.initialize()

        try:
            await self.emit("generation_started", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "context_count": len(context),
                "template_name": template_name
            })

            # Render retrieved documents into a single prompt-ready string.
            formatted_context = self._format_context(context)

            # Pick a built-in template unless the caller supplied one.
            if template is None:
                template = self.default_templates.get(template_name, self.default_templates["general_qa"])

            prompt = template.format(
                query=query,
                context=formatted_context,
                **kwargs
            )

            response = await self._call_llm(prompt)

            # BUGFIX: _post_process_response is a coroutine (it awaits
            # emit_error on failure), so it must itself be awaited here.
            processed_response = await self._post_process_response(response, context)

            await self.emit("generation_completed", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "response_length": len(processed_response),
                "context_used": len(context)
            })

            return processed_response

        except Exception as e:
            await self.emit_error("generate", e)
            return f"生成回答时发生错误：{str(e)}"

    def _format_context(self, context: List[SearchResult]) -> str:
        """Format retrieved results into the prompt's context section.

        Each entry shows document id, relevance score, truncated content and
        a whitelisted subset of metadata so prompts stay bounded in size.
        """
        formatted_parts = []

        for i, result in enumerate(context):
            doc = result.document

            # Truncate long documents to keep the prompt within budget.
            content = doc.content[:800] + "..." if len(doc.content) > 800 else doc.content

            context_item = f"""
[文档{i+1}] ID: {doc.id}
相关度: {result.score:.3f}
内容: {content}
"""

            # Surface only metadata keys that help the model cite sources;
            # for "entities" show up to five entity names.
            if doc.metadata:
                metadata_info = []
                for key, value in doc.metadata.items():
                    if key in ["file_name", "source_path", "entities", "parser"]:
                        if key == "entities" and isinstance(value, list):
                            entities = [entity.get("properties", {}).get("name", "") for entity in value[:5]]
                            metadata_info.append(f"{key}: {', '.join(filter(None, entities))}")
                        else:
                            metadata_info.append(f"{key}: {value}")

                if metadata_info:
                    context_item += f"元数据: {'; '.join(metadata_info)}\n"

            formatted_parts.append(context_item)

        return "\n".join(formatted_parts)

    async def _call_llm(self, prompt: str) -> str:
        """Send ``prompt`` to the chat model and return its text reply.

        Uses a low temperature for deterministic, analysis-style output.
        Emits an error event and re-raises on API failure.
        """
        try:
            response = await self.client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {"role": "system", "content": "你是一个专业的HR分析师，擅长简历分析和人才评估。请基于提供的信息进行客观、专业的分析。"},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.1,
                max_tokens=2000
            )

            return response.choices[0].message.content

        except Exception as e:
            await self.emit_error("llm_call", e)
            raise

    async def _post_process_response(self, response: str, context: List[SearchResult]) -> str:
        """Normalize source citations and append a generation timestamp.

        BUGFIX: this was previously a plain ``def`` containing ``await``,
        which is a SyntaxError; it is now a coroutine awaited by generate().
        On any post-processing failure the raw response is returned unchanged.
        """
        try:
            # Document ids available for citation validation; kept for the
            # more thorough source-checking logic below (not yet implemented).
            doc_ids = [result.document.id for result in context]

            def fix_source_reference(match):
                source_text = match.group(1)
                # More sophisticated validation against doc_ids could go here.
                return f"[Source: {source_text}]"

            # Normalize whitespace inside [Source: ...] citations.
            response = re.sub(r'\[Source:\s*([^\]]+)\]', fix_source_reference, response)

            # Append a generation timestamp footer.
            response += f"\n\n---\n生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"

            return response

        except Exception as e:
            await self.emit_error("post_process_response", e)
            return response

    async def generate_structured_analysis(
        self,
        query: str,
        context: List[SearchResult]
    ) -> Dict[str, Any]:
        """Generate a structured (JSON) resume analysis.

        Returns the parsed JSON enriched with ``generated_at``, ``query`` and
        ``context_count``; on parse failure or any error, returns a dict with
        an ``error`` key instead of raising.
        """
        if not self._initialized:
            await self.initialize()

        try:
            structured_prompt = f"""基于以下信息，生成结构化的简历分析，以JSON格式返回：

查询：{query}

上下文：
{self._format_context(context)}

请返回以下JSON结构：
{{
    "candidate_summary": {{
        "name": "候选人姓名",
        "current_position": "当前职位",
        "experience_years": "工作年限",
        "key_skills": ["技能1", "技能2"],
        "education": "教育背景"
    }},
    "strengths": ["优势1", "优势2"],
    "weaknesses": ["不足1", "不足2"],
    "match_score": 85,
    "recommendations": ["推荐1", "推荐2"],
    "evidence_sources": ["文档ID1", "文档ID2"]
}}

只返回JSON，不要其他内容："""

            response = await self._call_llm(structured_prompt)

            # Chat models often wrap JSON in markdown code fences despite
            # instructions; strip them before parsing.
            cleaned = response.strip()
            if cleaned.startswith("```"):
                cleaned = re.sub(r'^```(?:json)?\s*', '', cleaned)
                cleaned = re.sub(r'\s*```$', '', cleaned)

            try:
                structured_data = json.loads(cleaned)

                # Enrich with provenance fields for downstream consumers.
                structured_data["generated_at"] = datetime.now().isoformat()
                structured_data["query"] = query
                structured_data["context_count"] = len(context)

                return structured_data

            except json.JSONDecodeError:
                return {
                    "error": "Failed to parse structured response",
                    "raw_response": response,
                    "generated_at": datetime.now().isoformat()
                }

        except Exception as e:
            await self.emit_error("generate_structured_analysis", e)
            return {
                "error": str(e),
                "generated_at": datetime.now().isoformat()
            }

    async def generate_comparative_analysis(
        self,
        candidates: List[Dict[str, Any]],
        job_requirements: str
    ) -> str:
        """Generate a comparison of multiple candidates against a job spec.

        Args:
            candidates: Dicts with optional ``query``, ``analysis`` and
                ``match_score`` keys (one per candidate).
            job_requirements: Free-text description of the position.

        Returns:
            The comparison text, or a Chinese error string on failure.
        """
        # Consistency fix: lazily initialize like the other entry points,
        # otherwise _call_llm hits a None client.
        if not self._initialized:
            await self.initialize()

        try:
            candidates_info = []
            for i, candidate in enumerate(candidates):
                candidates_info.append(f"""
候选人{i+1}:
- 查询: {candidate.get('query', '')}
- 分析结果: {candidate.get('analysis', '')}
- 匹配分数: {candidate.get('match_score', 'N/A')}
""")

            prompt = f"""请对以下候选人进行比较分析：

职位要求：{job_requirements}

候选人信息：
{chr(10).join(candidates_info)}

请从以下角度进行比较：
1. 技能匹配度对比
2. 经验相关性对比
3. 发展潜力评估
4. 综合排名推荐

比较分析："""

            response = await self._call_llm(prompt)

            await self.emit("comparative_analysis_generated", {
                "candidates_count": len(candidates),
                "job_requirements_length": len(job_requirements)
            })

            return response

        except Exception as e:
            await self.emit_error("generate_comparative_analysis", e)
            return f"生成比较分析时发生错误：{str(e)}"

    async def explain_generation(self, query: str, response: str, context: List[SearchResult]) -> Dict[str, Any]:
        """Explain how an answer was produced (context stats, citations).

        Returns a dict with a response summary, context statistics, the set
        of ``[Source: ...]`` references found in the answer, and a list of
        human-readable generation factors; ``{"error": ...}`` on failure.
        """
        try:
            explanation = {
                "query": query,
                "response_summary": response[:200] + "..." if len(response) > 200 else response,
                "context_analysis": {
                    "total_documents": len(context),
                    # Guard against empty context to avoid ZeroDivisionError.
                    "avg_relevance_score": sum(r.score for r in context) / len(context) if context else 0,
                    "document_types": {}
                },
                "generation_factors": []
            }

            # Histogram of relevance types across the context documents.
            for result in context:
                relevance_type = result.relevance_type
                explanation["context_analysis"]["document_types"][relevance_type] = \
                    explanation["context_analysis"]["document_types"].get(relevance_type, 0) + 1

            # Unique source ids cited in the generated answer.
            source_references = re.findall(r'\[Source:\s*([^\]]+)\]', response)
            explanation["source_references"] = list(set(source_references))

            # Qualitative factors that likely shaped the answer.
            if len(context) > 3:
                explanation["generation_factors"].append("丰富的上下文信息")
            if any(r.score > 0.8 for r in context):
                explanation["generation_factors"].append("高相关度文档")
            if len(source_references) > 0:
                explanation["generation_factors"].append("包含源引用")

            return explanation

        except Exception as e:
            await self.emit_error("explain_generation", e)
            return {"error": str(e)}

    def add_custom_template(self, name: str, template: str) -> None:
        """Register (or overwrite) a prompt template under ``name``."""
        self.default_templates[name] = template

    def get_available_templates(self) -> List[str]:
        """Return the names of all registered prompt templates."""
        return list(self.default_templates.keys())