#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
实时处理模块：接口 -> 混合检索 -> 提示词组装 -> LLM -> 返回结果
"""

import json
import logging
from typing import List, Dict, Any, Optional
import sys
import os
sys.path.append(os.path.dirname(__file__))
from hybrid_retrieval import HybridRetrieval
import requests
import os

# Configure logging for this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class RealtimeProcessor:
    """Real-time question processor.

    Pipeline: hybrid retrieval -> prompt assembly -> LLM call -> structured
    result dict. Subclasses may override :meth:`call_llm` to swap the backend.
    """

    def __init__(self,
                 collection_name: str = "nature_human_relationship",
                 es_index_name: str = "nature_documents",
                 llm_api_url: str = "http://localhost:8000/airesumores/chat/",
                 llm_model: str = "qwen-plus"):
        """
        Initialize the real-time processor.

        Args:
            collection_name: ChromaDB collection name.
            es_index_name: Elasticsearch index name.
            llm_api_url: LLM HTTP API endpoint.
            llm_model: LLM model identifier sent with each request.
        """
        self.collection_name = collection_name
        self.es_index_name = es_index_name
        self.llm_api_url = llm_api_url
        self.llm_model = llm_model

        # Hybrid retriever (vector store + keyword index).
        self.retrieval = HybridRetrieval(
            collection_name=collection_name,
            es_index_name=es_index_name
        )

        # System prompt template (user-facing Chinese text, kept verbatim).
        # Filled via str.format with {context} and {question}.
        self.system_prompt_template = """你是一个专业的生态学和环境科学专家，专门回答关于人与自然关系的问题。

请基于以下检索到的相关文档内容，回答用户的问题。如果文档内容不足以回答问题，请明确说明。

要求：
1. 回答要准确、专业、有深度
2. 引用具体的文档内容作为支撑
3. 如果涉及多个方面，请分点阐述
4. 保持客观、科学的立场
5. 回答要简洁明了，避免冗长

相关文档内容：
{context}

用户问题：{question}

请基于以上文档内容回答用户的问题："""

    def retrieve_context(self, question: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """
        Step 1: hybrid retrieval of context documents relevant to the question.

        Args:
            question: User question.
            top_k: Number of documents to retrieve.

        Returns:
            List[Dict]: Retrieved documents; empty list on any retrieval error
            (callers treat an empty list as "no context found").
        """
        # Lazy %-style args avoid formatting work when the level is disabled.
        logger.info("开始检索相关上下文，问题: %s", question)

        try:
            results = self.retrieval.hybrid_search(question, top_k)
            logger.info("检索到 %d 个相关文档", len(results))
            return results

        except Exception as e:
            # Best-effort: swallow retrieval failures and report no context.
            logger.error("检索上下文失败: %s", e)
            return []

    def assemble_prompt(self, question: str, context_docs: List[Dict[str, Any]]) -> str:
        """
        Step 2: assemble the full LLM prompt from question and context docs.

        Args:
            question: User question.
            context_docs: Retrieved documents; each must carry 'score' and 'text'.

        Returns:
            str: Assembled prompt, or "" on failure (sentinel checked by
            process_question).
        """
        logger.info("开始组装提示词")

        try:
            # Render each document as a numbered snippet with its score.
            context_parts = []
            for i, doc in enumerate(context_docs, 1):
                context_part = f"""
文档片段 {i} (相关度: {doc['score']:.3f}):
{doc['text']}
"""
                context_parts.append(context_part)

            context = "\n".join(context_parts)

            prompt = self.system_prompt_template.format(
                context=context,
                question=question
            )

            logger.info("提示词组装完成，长度: %d 字符", len(prompt))
            return prompt

        except Exception as e:
            logger.error("组装提示词失败: %s", e)
            return ""

    def call_llm(self, prompt: str) -> Optional[str]:
        """
        Step 3: call the LLM HTTP API to generate an answer.

        Args:
            prompt: Fully assembled prompt.

        Returns:
            Optional[str]: Generated answer, or None on timeout / HTTP error /
            any other failure.
        """
        logger.info("开始调用LLM生成回答")

        try:
            request_data = {
                "question": prompt,
                "model": self.llm_model
            }

            response = requests.post(
                self.llm_api_url,
                json=request_data,
                headers={"Content-Type": "application/json"},
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                # Accept either response schema: {"answer": ...} or {"response": ...}.
                answer = result.get("answer", result.get("response", ""))
                logger.info("LLM调用成功")
                return answer
            else:
                logger.error("LLM API调用失败，状态码: %s", response.status_code)
                return None

        except requests.exceptions.Timeout:
            logger.error("LLM API调用超时")
            return None
        except Exception as e:
            logger.error("LLM调用失败: %s", e)
            return None

    def process_question(self, question: str, top_k: int = 5) -> Dict[str, Any]:
        """
        Run the full pipeline for one user question.

        Args:
            question: User question.
            top_k: Number of documents to retrieve.

        Returns:
            Dict: On success: status, question, answer, truncated context docs,
            retrieval count and model. On failure: status "error" with a
            user-facing fallback answer.
        """
        logger.info("=" * 50)
        logger.info("开始处理用户问题: %s", question)
        logger.info("=" * 50)

        try:
            # Step 1: retrieve relevant context.
            context_docs = self.retrieve_context(question, top_k)

            if not context_docs:
                return {
                    "status": "error",
                    "message": "未找到相关文档内容",
                    "question": question,
                    "answer": "抱歉，我无法找到与您问题相关的文档内容。请尝试重新表述您的问题。"
                }

            # Step 2: assemble the prompt ("" signals assembly failure).
            prompt = self.assemble_prompt(question, context_docs)

            if not prompt:
                return {
                    "status": "error",
                    "message": "提示词组装失败",
                    "question": question,
                    "answer": "抱歉，系统处理出现问题，请稍后重试。"
                }

            # Step 3: call the LLM (None or "" treated as failure).
            answer = self.call_llm(prompt)

            if not answer:
                return {
                    "status": "error",
                    "message": "LLM调用失败",
                    "question": question,
                    "answer": "抱歉，AI服务暂时不可用，请稍后重试。"
                }

            # Build the success payload; context texts truncated to 200 chars.
            result = {
                "status": "success",
                "question": question,
                "answer": answer,
                "context_docs": [
                    {
                        "id": doc["id"],
                        "text": (doc["text"][:200] + "...") if len(doc["text"]) > 200 else doc["text"],
                        "score": doc["score"],
                        "retrieval_types": doc["retrieval_types"]
                    }
                    for doc in context_docs
                ],
                "retrieval_count": len(context_docs),
                "model": self.llm_model
            }

            logger.info("=" * 50)
            logger.info("问题处理完成")
            logger.info("=" * 50)

            return result

        except Exception as e:
            logger.error("处理问题失败: %s", e)
            return {
                "status": "error",
                "message": f"处理失败: {str(e)}",
                "question": question,
                "answer": "抱歉，系统出现错误，请稍后重试。"
            }

    def batch_process_questions(self, questions: List[str], top_k: int = 5) -> List[Dict[str, Any]]:
        """
        Process several questions sequentially.

        Args:
            questions: Questions to process.
            top_k: Number of documents retrieved per question.

        Returns:
            List[Dict]: One process_question result per input, same order.
        """
        logger.info("开始批量处理 %d 个问题", len(questions))

        results = []
        for i, question in enumerate(questions, 1):
            logger.info("处理第 %d/%d 个问题", i, len(questions))
            result = self.process_question(question, top_k)
            results.append(result)

        logger.info("批量处理完成")
        return results


class RealLLMProcessor(RealtimeProcessor):
    """Processor that calls a real LLM (Qwen via DashScope's OpenAI-compatible API)."""

    def call_llm(self, prompt: str) -> Optional[str]:
        """
        Step 3 override: generate the answer with a real LLM.

        Args:
            prompt: Fully assembled prompt.

        Returns:
            Optional[str]: Generated answer; on missing API key or API failure
            a user-facing Chinese fallback message is returned instead of None
            (NOTE(review): differs from the base-class contract — callers see
            status "success" with the fallback text as answer).
        """
        logger.info("调用真实LLM生成回答")

        try:
            # Imported lazily so the module loads even without the openai package.
            import os
            from openai import OpenAI

            api_key = os.getenv("DASHSCOPE_API_KEY")
            if not api_key:
                logger.error("未设置DASHSCOPE_API_KEY环境变量")
                return "抱歉，无法调用LLM服务，请检查API配置。"

            client = OpenAI(
                api_key=api_key,
                base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
            )

            # Fix: honor the configured model instead of hard-coding "qwen-plus";
            # backward-compatible because "qwen-plus" is the constructor default.
            response = client.chat.completions.create(
                model=self.llm_model,
                messages=[
                    {"role": "system", "content": "你是一个专业的生态学和环境科学专家，专门回答关于人与自然关系的问题。请基于提供的文档内容，给出准确、专业、有深度的回答。"},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=2000
            )

            answer = response.choices[0].message.content
            logger.info("LLM回答生成成功，长度: %d 字符", len(answer))
            return answer

        except Exception as e:
            logger.error("LLM调用失败: %s", e)
            return f"抱歉，LLM服务调用失败: {str(e)}"


def main():
    """Smoke-test the pipeline end to end with a handful of sample questions."""
    # Exercise the real-LLM backend.
    processor = RealLLMProcessor()

    sample_questions = [
        "人与自然的关系是什么？",
        "什么是生态危机？",
        "如何实现可持续发展？",
        "工业革命对自然环境有什么影响？"
    ]

    for q in sample_questions:
        print(f"\n问题: {q}")
        print("=" * 60)

        outcome = processor.process_question(q)

        print(f"状态: {outcome['status']}")
        print(f"回答: {outcome['answer']}")
        print(f"检索到 {outcome.get('retrieval_count', 0)} 个相关文档")

        docs = outcome.get('context_docs')
        if docs:
            print("\n相关文档:")
            for idx, snippet in enumerate(docs, 1):
                print(f"{idx}. 分数: {snippet['score']:.3f}")
                print(f"   文本: {snippet['text']}")
                print()


if __name__ == "__main__":
    main()
