import asyncio
import logging
import queue
import threading
from typing import Any, AsyncGenerator, Dict, List

import Agently
from sqlalchemy.orm import Session

from app import models
from app.config import settings
from app.rag_service import rag_service

# Module-level logger shared by all code in this file.
logger = logging.getLogger(__name__)


class LLMService:
    """Wrapper around the Agently SDK.

    Builds agent instances from persisted ``models.Agent`` configuration and
    streams model replies to async callers, bridging Agently's synchronous,
    callback-based API onto the asyncio event loop via a worker thread and a
    thread-safe queue.
    """

    # Qwen (DashScope) exposes an OpenAI-compatible endpoint at this URL.
    QWEN_COMPAT_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"

    def __init__(self):
        self.agent_factory = Agently.AgentFactory()
        # Default factory-level settings: Qwen through the OpenAI-compatible
        # interface, using the app-wide credentials.
        self.agent_factory\
            .set_settings("model.OpenAI.auth", {"api_key": settings.QWEN_API_KEY})\
            .set_settings("model.OpenAI.url", self.QWEN_COMPAT_URL)\
            .set_settings("model.OpenAI.options", {"model": settings.QWEN_MODEL})

    def create_agent(self, agent_config: models.Agent):
        """Create a configured Agently agent from a persisted Agent record.

        Args:
            agent_config: ORM row carrying the model reference, sampling
                ``params``, optional ``role`` dict and optional ``tools``.

        Returns:
            A ready-to-use Agently agent instance.
        """
        agent_instance = self.agent_factory.create_agent()

        model = agent_config.model

        # Sampling options with sensible defaults when params are absent.
        params = agent_config.params if agent_config.params else {}
        options = {
            "model": model.name,
            "temperature": params.get("temperature", 0.7),
            "max_tokens": params.get("max_tokens", 2000),
            "top_p": params.get("top_p", 0.9),
        }

        # Both supported providers go through Agently's OpenAI client;
        # Qwen only differs in needing its compatible-mode base URL.
        if model.provider in ("qwen", "openai"):
            if model.api_key:
                agent_instance.set_settings("model.OpenAI.auth", {"api_key": model.api_key})
            if model.provider == "qwen":
                agent_instance.set_settings("model.OpenAI.url", self.QWEN_COMPAT_URL)
            agent_instance.use_model("OpenAI")
            agent_instance.set_settings("model.OpenAI.options", options)

        # Persona: role is expected to be a dict (anything else is ignored).
        if agent_config.role:
            role_data = agent_config.role if isinstance(agent_config.role, dict) else {}
            role_name = role_data.get("role_name", "AI助手")
            role_background = role_data.get("role_background", "")
            personality = role_data.get("personality", "")

            # Build the full role description from the optional pieces.
            role_description = []
            if role_background:
                role_description.append(role_background)
            if personality:
                role_description.append(f"性格特点：{personality}")

            if role_description:
                agent_instance.set_role(
                    role_name,
                    "\n".join(role_description)
                )
            else:
                # Fall back to a minimal default role.
                agent_instance.set_role(role_name, f"你是{agent_config.name}")

        # Tools are only logged for now.
        if agent_config.tools:
            for tool in agent_config.tools:
                # TODO: implement tool invocation — register different tools
                # based on tool.type / tool.config.
                logger.info(f"加载工具: {tool.name}")

        return agent_instance

    async def generate_streaming(
        self,
        agent_instance,
        agent_config: models.Agent,
        messages: List[models.Message],
        prompt: str,
        db: Session
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Stream a model reply as ``{"delta", "content"}`` chunks.

        The blocking Agently call runs on a daemon worker thread; its delta
        callbacks are relayed through a ``queue.Queue`` so the asyncio event
        loop is never blocked. A final chunk carries ``"done": True`` together
        with either the full content or an error message.

        Args:
            agent_instance: Agent created by :meth:`create_agent`.
            agent_config: The Agent record (for RAG knowledge-base lookup).
            messages: Prior session messages used as chat history.
            prompt: The current user question.
            db: Session used for RAG retrieval.
        """
        full_content = ""
        chunk_queue: queue.Queue = queue.Queue()
        finished = threading.Event()
        error_msg = None

        def on_delta(data):
            """Streaming callback — invoked by Agently on the worker thread."""
            nonlocal full_content
            try:
                if data:
                    delta = ""
                    if isinstance(data, dict):
                        delta = data.get("delta", "") or data.get("content", "") or str(data)
                    elif isinstance(data, str):
                        delta = data

                    if delta:
                        full_content += delta
                        chunk_queue.put({
                            "delta": delta,
                            "content": full_content
                        })
            except Exception as e:
                logger.error(f"on_delta 回调错误: {e}")

        def run_agently():
            """Worker-thread body: RAG retrieval, history, then generation."""
            nonlocal error_msg
            try:
                logger.info(f"开始生成回复，历史消息数: {len(messages)}")

                # 1. RAG retrieval (only if a knowledge base is configured).
                rag_context = ""
                if agent_config.rag_kb_id:
                    try:
                        logger.info(f"Agent 配置了 RAG 知识库: {agent_config.rag_kb_id}")
                        kb = db.query(models.KnowledgeBase).filter(
                            models.KnowledgeBase.id == agent_config.rag_kb_id
                        ).first()

                        if kb:
                            config = kb.config or {}
                            top_k = config.get("top_k", 3)
                            threshold = config.get("threshold", 0.5)

                            similar_chunks = rag_service.search_similar_chunks(
                                db=db,
                                kb_id=agent_config.rag_kb_id,
                                query=prompt,
                                top_k=top_k,
                                threshold=threshold
                            )

                            if similar_chunks:
                                logger.info(f"检索到 {len(similar_chunks)} 个相关文档块")
                                rag_context = "\n\n参考资料：\n"
                                for i, chunk in enumerate(similar_chunks, 1):
                                    rag_context += f"\n[{i}] {chunk['content']}\n"
                                rag_context += "\n请基于以上参考资料回答用户的问题。\n"
                            else:
                                logger.info("未检索到相关文档")
                    except Exception as e:
                        # RAG is best-effort: fall through to plain generation.
                        logger.error(f"RAG 检索失败: {e}")

                # 2. Conversation memory: only the 10 most recent messages.
                chat_history = []
                for msg in messages[-10:]:
                    if msg.role == "user":
                        chat_history.append({"role": "user", "content": msg.content})
                    elif msg.role == "assistant":
                        chat_history.append({"role": "assistant", "content": msg.content})

                if chat_history:
                    logger.info(f"添加 {len(chat_history)} 条历史消息到上下文")
                    agent_instance.chat_history(chat_history)

                # 3. Combine the RAG context with the user question.
                final_prompt = prompt
                if rag_context:
                    final_prompt = rag_context + "\n用户问题：" + prompt

                # 4. Run the generation; deltas arrive via on_delta.
                result = agent_instance.input(final_prompt).on_delta(on_delta).start()
                logger.info(f"Agently 完成，结果类型: {type(result)}")
            except Exception as e:
                logger.error(f"Agently 生成失败: {e}", exc_info=True)
                error_msg = str(e)
            finally:
                finished.set()

        try:
            # Agently's client is synchronous — run it off the event loop.
            # daemon=True so a hung generation cannot block process shutdown.
            thread = threading.Thread(target=run_agently, daemon=True)
            thread.start()

            # Relay chunks as they arrive. Use get_nowait(): a blocking
            # get(timeout=...) here would stall the whole asyncio event loop.
            while not finished.is_set() or not chunk_queue.empty():
                try:
                    chunk = chunk_queue.get_nowait()
                except queue.Empty:
                    await asyncio.sleep(0.01)
                    continue
                yield chunk
                await asyncio.sleep(0)  # let other tasks run between chunks

            # Bounded join; the daemon flag covers a worker that outlives it.
            thread.join(timeout=1)

            # Emit the terminal chunk: error, success, or empty response.
            if error_msg:
                yield {
                    "delta": f"生成失败: {error_msg}",
                    "content": f"生成失败: {error_msg}",
                    "done": True
                }
            elif full_content:
                yield {
                    "delta": "",
                    "content": full_content,
                    "done": True
                }
            else:
                logger.error("Agently 没有返回任何响应")
                yield {
                    "delta": "抱歉，AI 服务暂时无法响应，请稍后重试。",
                    "content": "抱歉，AI 服务暂时无法响应，请稍后重试。",
                    "done": True
                }

        except Exception as e:
            logger.error(f"流式生成失败: {type(e).__name__}: {e}", exc_info=True)
            error_msg = f"生成失败: {str(e)}"
            yield {
                "delta": error_msg,
                "content": error_msg,
                "done": True
            }


# Shared module-level instance, used by generate_response_stream below.
llm_service = LLMService()


async def generate_response_stream(
    agent: models.Agent,
    messages: List[models.Message],
    prompt: str,
    db: Session,
    session_id: int
) -> AsyncGenerator[Dict[str, Any], None]:
    """Stream an assistant reply for *prompt* and persist both sides.

    Saves the user message, then relays per-token chunks of the form
    ``{"delta", "msg_id"}``; once generation reports done, stores the
    assistant reply and yields a final ``{"message": {...}}`` payload.
    On failure the error text is saved as the assistant reply and the
    exception is re-raised.
    """
    runtime_agent = llm_service.create_agent(agent)

    # Persist the user's message first so streamed chunks can carry its id.
    user_msg = models.Message(
        session_id=session_id,
        role="user",
        content=prompt
    )
    db.add(user_msg)
    db.commit()

    accumulated = ""
    reply_msg = None

    try:
        stream = llm_service.generate_streaming(runtime_agent, agent, messages, prompt, db)
        async for chunk in stream:
            accumulated = chunk.get("content", "")

            # Forward the incremental piece to the client.
            yield {
                "delta": chunk.get("delta", ""),
                "msg_id": user_msg.id
            }

            if not chunk.get("done"):
                continue

            # Terminal chunk: store the full reply and emit its metadata.
            reply_msg = models.Message(
                session_id=user_msg.session_id,
                role="assistant",
                content=accumulated,
                token_usage={"prompt": 0, "completion": 0}  # TODO: compute real token usage
            )
            db.add(reply_msg)
            db.commit()

            yield {
                "message": {
                    "id": reply_msg.id,
                    "content": accumulated,
                    "token_usage": reply_msg.token_usage
                }
            }

    except Exception as e:
        logger.error(f"生成失败: {e}")
        # Record the failure as the assistant's reply so the session stays consistent.
        if not reply_msg:
            reply_msg = models.Message(
                session_id=user_msg.session_id,
                role="assistant",
                content=f"生成失败: {str(e)}"
            )
            db.add(reply_msg)
            db.commit()
        raise
