"""
AI大模型服务模块
处理与大模型相关的所有逻辑，包括初始化、生成等
"""

import asyncio
import json
import logging
from typing import Any, AsyncGenerator, Dict, Optional

from langchain.chains import RetrievalQA
from langchain_community.llms import Tongyi
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.prompts import PromptTemplate

# 使用绝对导入，只在backend目录设置__init__.py
from backend.config.constants import Config, PromptTemplates
from backend.services.knowledge_service import knowledge_service
from backend.services.bailian_knowledge_service import bailian_knowledge_service

logger = logging.getLogger(__name__)


class StreamingCallbackHandler(BaseCallbackHandler):
    """流式输出回调处理器"""
    
    def __init__(self, queue: asyncio.Queue):
        self.queue = queue
        
    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """处理新的token"""
        asyncio.create_task(self.queue.put(token))
        
    def on_llm_end(self, response, **kwargs) -> None:
        """处理LLM结束"""
        asyncio.create_task(self.queue.put("[DONE]"))


class AIService:
    """AI large-model service.

    Wraps the Tongyi LLM with prompt templating, optional retrieval
    augmentation (Alibaba Bailian cloud KB or a local vector store) and
    both synchronous and streaming question generation.
    """

    def __init__(self):
        # Credentials/model name come from project config; a missing key
        # fails fast inside _initialize().
        self.api_key = Config.DASHSCOPE_API_KEY
        self.model = Config.DEFAULT_MODEL
        self.llm = None
        self.prompt_template = None
        self.chain = None
        self.rag_chain = None
        self._initialize()

    def _initialize(self):
        """Initialize the LLM, the prompt template and the generation chain.

        Raises:
            ValueError: if the DASHSCOPE_API_KEY environment variable is unset.
        """
        if not self.api_key:
            logger.error("未设置DASHSCOPE_API_KEY环境变量")
            raise ValueError("请设置DASHSCOPE_API_KEY环境变量")

        try:
            # Plain (non-streaming) LLM used by the synchronous path.
            self.llm = Tongyi(
                model=self.model,
                dashscope_api_key=self.api_key
            )

            self.prompt_template = PromptTemplate.from_template(
                PromptTemplates.QUESTION_GENERATION_TEMPLATE
            )

            # LCEL pipeline: prompt -> LLM.
            self.chain = self.prompt_template | self.llm

            # Best effort: RAG is optional, failures are logged, not raised.
            self._initialize_rag_chain()

            logger.info("成功初始化AI服务: %s", self.model)

        except Exception as e:
            logger.error("初始化AI服务失败: %s", e)
            raise

    def _initialize_rag_chain(self):
        """Build the local RetrievalQA chain when the local KB is available.

        Any failure is logged and swallowed: retrieval augmentation is an
        optional enhancement, not a hard dependency.
        """
        try:
            # Ensure the local vector store exists before probing it.
            knowledge_service.create_vector_store()

            if not knowledge_service.is_initialized():
                logger.warning("本地知识库服务未初始化")
                return

            retriever = knowledge_service.get_retriever()
            if not retriever:
                logger.warning("无法获取本地知识库检索器")
                return

            self.rag_chain = RetrievalQA.from_chain_type(
                llm=self.llm,
                chain_type="stuff",
                retriever=retriever,
                return_source_documents=True
            )
            logger.info("成功初始化本地RAG链")
        except Exception as e:
            logger.error("初始化本地RAG链失败: %s", e)

    @staticmethod
    def _build_retrieval_query(params: Dict[str, Any]) -> str:
        """Compose the knowledge-retrieval query from generation parameters.

        Extracted so the Bailian and local-KB paths share one query string
        (it was previously duplicated).
        """
        return (
            f"生成{params.get('grade', '')}{params.get('subject', '')}试卷，"
            f"知识点：{params.get('knowledge_points', '')}"
        )

    def _get_knowledge_context(self, params: Dict[str, Any]) -> str:
        """Return retrieved knowledge text, preferring the Bailian cloud KB.

        Falls back to the local vector store, then to a default instruction
        telling the model to rely on its own knowledge.
        """
        query = self._build_retrieval_query(params)

        # Prefer the Alibaba Bailian cloud knowledge base when available.
        if bailian_knowledge_service.is_initialized():
            # Per the official example, no top_k argument is passed.
            relevant_docs = bailian_knowledge_service.retrieve_knowledge(query)
            if relevant_docs:
                logger.info("使用阿里百炼云知识库增强生成，检索到%d个相关文档", len(relevant_docs))
                return "\n".join(relevant_docs)

        # Fall back to the local vector-store knowledge base.
        if knowledge_service.is_initialized():
            relevant_docs = knowledge_service.search_knowledge(query)
            if relevant_docs:
                logger.info("使用本地知识库增强生成，检索到%d个相关文档", len(relevant_docs))
                return "\n".join(doc.page_content for doc in relevant_docs)

        # No knowledge base available: default instruction.
        return "请根据你的专业知识生成题目。"

    def _prepare_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *params* ready for the generation chain.

        Normalizes a list of knowledge points into a comma-joined string and
        injects the retrieved knowledge context. Works on a copy so the
        caller's dict is never mutated (the previous code modified it in
        place, a shared-argument mutation bug).
        """
        prepared = dict(params)
        if isinstance(prepared.get('knowledge_points'), list):
            prepared['knowledge_points'] = ", ".join(prepared['knowledge_points'])
        prepared['knowledge_context_section'] = self._get_knowledge_context(prepared)
        return prepared

    def generate_questions(self, params: Dict[str, Any]) -> str:
        """Generate questions synchronously; returns the raw LLM output.

        Raises:
            Exception: re-raises any failure from the underlying chain.
        """
        try:
            logger.info("开始生成题目: %s", params)

            prepared = self._prepare_params(params)

            result = self.chain.invoke(prepared)
            logger.info("题目生成成功")
            return result

        except Exception as e:
            logger.error("生成题目失败: %s", e)
            raise

    async def generate_questions_stream(self, params: Dict[str, Any]) -> AsyncGenerator[str, None]:
        """Generate questions as a stream of JSON event strings.

        Each yielded string is a JSON object with a 'type' field:
        'start', then 'token' chunks, then 'success' (parsed JSON) or 'raw'
        (unparsed text), then 'done'; or 'error' on total failure. If the
        streaming call fails, degrades to the synchronous path.
        """
        try:
            logger.info("开始流式生成题目: %s", params)

            prepared = self._prepare_params(params)

            # Dedicated streaming LLM instance.
            # NOTE(review): model is intentionally hard-coded here instead of
            # using self.model — kept as-is per the original stability note.
            streaming_llm = Tongyi(
                model="qwen-turbo",  # 使用qwen-turbo确保稳定性
                dashscope_api_key=self.api_key,
                streaming=True,
                temperature=0.7
            )

            streaming_chain = self.prompt_template | streaming_llm

            # Accumulates the full text so the final JSON can be parsed.
            accumulated_content = ""

            yield json.dumps({'type': 'start', 'message': '开始生成试题...'}, ensure_ascii=False)

            try:
                async for chunk in streaming_chain.astream(prepared):
                    if chunk:
                        accumulated_content += chunk
                        yield json.dumps({'type': 'token', 'content': chunk}, ensure_ascii=False)
                        # Small pacing delay to smooth client-side rendering.
                        await asyncio.sleep(0.02)

                logger.info("累积的流式内容: %s", accumulated_content)

                parsed_result = self._parse_json_result(accumulated_content)
                if parsed_result:
                    yield json.dumps({'type': 'success', 'data': parsed_result}, ensure_ascii=False)
                else:
                    # Could not parse JSON; hand the raw text to the client.
                    yield json.dumps({'type': 'raw', 'content': accumulated_content}, ensure_ascii=False)

            except Exception as stream_error:
                logger.error("流式调用失败: %s", stream_error)
                # Degrade to the regular (non-streaming) mode.
                yield json.dumps({'type': 'token', 'content': '\n⚠️ 流式调用失败，切换到常规模式...\n'}, ensure_ascii=False)

                result = self.generate_questions(params)
                logger.info("常规模式生成的内容: %s", result)

                parsed_result = self._parse_json_result(result)
                if parsed_result:
                    yield json.dumps({'type': 'success', 'data': parsed_result}, ensure_ascii=False)
                else:
                    yield json.dumps({'type': 'raw', 'content': result}, ensure_ascii=False)

            yield json.dumps({'type': 'done', 'message': '试题生成完成'}, ensure_ascii=False)
            logger.info("流式题目生成成功")

        except Exception as e:
            logger.error("流式生成题目失败: %s", e)
            yield json.dumps({'type': 'error', 'message': f'生成试题失败: {str(e)}'}, ensure_ascii=False)

    def _parse_json_result(self, content: str) -> Optional[Dict[str, Any]]:
        """Extract the outermost brace span of *content* and parse it as JSON.

        Returns the parsed dict, or None when no valid JSON object is found.
        (The annotation was previously ``dict`` even though None is a
        legitimate return value.)
        """
        logger.info("原始AI生成内容: %s", content)

        # Strip any leading/trailing non-JSON chatter the model emitted.
        json_start = content.find('{')
        json_end = content.rfind('}') + 1

        if json_start != -1 and json_end > json_start:
            try:
                return json.loads(content[json_start:json_end])
            except json.JSONDecodeError as e:
                logger.warning("JSON解析失败: %s", e)

        return None

    def validate_api_key(self) -> bool:
        """Return True if an API key is configured (no remote validation)."""
        return bool(self.api_key)

    def get_model_info(self) -> Dict[str, Any]:
        """Return the model name plus boolean feature flags.

        (Annotation fixed from Dict[str, str]: three values are bool.)
        """
        return {
            "model": self.model,
            "api_key_configured": bool(self.api_key),
            "local_rag_enabled": knowledge_service.is_initialized(),
            "bailian_rag_enabled": bailian_knowledge_service.is_initialized()
        }


# Module-level singleton AI service instance shared by importers.
# NOTE(review): constructed at import time — importing this module raises
# ValueError when DASHSCOPE_API_KEY is not configured (see _initialize).
ai_service = AIService()