import asyncio
from fastapi import FastAPI
from fastapi import APIRouter
from fastapi.requests import Request
from fastapi.responses import StreamingResponse
from typing import AsyncGenerator, Dict, List
import json

from common.logger import logger
from agents.class_agent import LangGraphMCPAgent

chat_router = APIRouter()

# First obtain the response, then stream it back to the client as SSE chunks.
async def chat_stream(
        messages: List[Dict[str, str]], 
        sessionid: str, 
        app: FastAPI,
        model_name: str,
        is_thinking: bool, 
        online_search: bool
    ) -> AsyncGenerator[str, None]:
    """Stream a chat reply as OpenAI-style Server-Sent Events.

    Runs a ``LangGraphMCPAgent`` over ``messages`` and yields SSE chunks of
    the form ``data: {"choices": [{"delta": ...}]}\\n\\n``. A terminating
    ``data: [DONE]\\n\\n`` sentinel is emitted when the model reports
    ``finish_reason == "stop"``, or after an error chunk on failure.

    Args:
        messages: Chat history, e.g. ``[{"role": "user", "content": "..."}]``.
        sessionid: Conversation id, used as the LangGraph ``thread_id``.
        app: FastAPI application instance, forwarded to the agent.
        model_name: Name of the model to run.
        is_thinking: Enable the model's "thinking" mode.
        online_search: Enable online-search tooling.

    Yields:
        str: SSE-formatted lines (``data: ...\\n\\n``).
    """
    logger.info(f"开始流式聊天，消息数量: {len(messages)}")

    # Keep the latest reasoning text around so the error chunk can include it.
    reasoning_content = ""

    try:
        start_time = asyncio.get_event_loop().time()
        logger.info("开始调用LangGraph处理请求")
        lg = LangGraphMCPAgent(
            model_name=model_name, 
            is_thinking=is_thinking, 
            online_search=online_search, 
            app=app
            )
        graph = await lg.initialize()
        # thread_id ties LangGraph's checkpointed state to this session.
        config = {"configurable": {"thread_id": sessionid}}
        logger.info(config)
        async for event in graph.astream(
            {"messages": messages},
            config,
            stream_mode="messages"
        ):
            # In stream_mode="messages", each event is (message_chunk, metadata);
            # only the chunk is needed here.
            ai_message = event[0]

            # Safely pull the optional reasoning trace off the chunk.
            reasoning_content = ai_message.additional_kwargs.get("reasoning_content", "")
            response = ai_message.content

            # NOTE(review): chunks longer than 15 chars are blanked —
            # presumably to suppress tool-call/aggregate echoes in the
            # token stream; confirm the intent with the original author.
            if len(response) > 15:
                response = ""

            # OpenAI-compatible delta chunk.
            data = {
                    "choices": [
                        {
                            "delta": {
                                "content": response,
                                "reasoning_content": reasoning_content
                            }
                        }
                    ]
                }
            yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"

            # BUGFIX: response_metadata is a dict, so the original
            # `response_metadata == "stop"` comparison was always False and
            # [DONE] was never sent on normal completion. Check the
            # finish_reason key instead, and emit [DONE] *after* the final
            # delta so it is the last event the client sees.
            if ai_message.response_metadata.get("finish_reason") == "stop":
                yield "data: [DONE]\n\n"
        # BUGFIX: compute end_time after the loop so it is always bound,
        # even when the stream yields no events (previously it was only
        # assigned inside the loop body).
        end_time = asyncio.get_event_loop().time()
        logger.info(f"LangGraph 执行完成，耗时: {end_time - start_time:.2f}秒")
    except Exception as e:
        # Surface the failure to the client as a normal delta chunk,
        # then terminate the stream cleanly.
        error_data = {
            "choices": [
                {
                    "delta": {
                        "content": f"抱歉，发生了错误: {str(e)}",
                        "reasoning_content": reasoning_content
                    }
                }
            ]
        }
        logger.error(f"流式聊天出错: {str(e)}", exc_info=True)
        yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
        yield "data: [DONE]\n\n"




@chat_router.post("/completions", summary="聊天", description="聊天接口")
async def completions(request: Request):
    """Chat-completion endpoint returning an SSE stream.

    Parses the JSON body, validates the ``messages`` payload, optionally
    injects the Redis-resolved user id into the first message (best-effort),
    and returns a ``StreamingResponse`` produced by :func:`chat_stream`.
    On failure returns ``{"error": ...}`` as plain JSON.

    Args:
        request: Incoming FastAPI request; auth token is read from the
            ``token`` header, parameters from the JSON body.

    Returns:
        StreamingResponse on success, or a dict with an ``error`` key.
    """
    try:
        token = request.headers.get("token")
        logger.info(f"token: {token}")
        body = await request.json()

        model_name = body.get("model", "qwen3")
        is_thinking = body.get("is_thinking", False)
        messages = body.get("messages", [])
        sessionid = body.get("sessionid", "")
        # NOTE(review): requestid and stream are accepted but currently unused.
        requestid = body.get("requestid", "")
        online_search = body.get("online_search", False)
        stream = body.get("stream", True)  # 默认使用流式响应
        logger.info(f"用户输入的内容{messages}")

        app = request.app

        # BUGFIX: validate the payload BEFORE dereferencing messages[0] in
        # the personalization block below; the original only checked
        # afterwards, so malformed payloads first raised inside the Redis
        # try/except and produced a misleading error log.
        if not messages or not isinstance(messages, list):
            logger.warning("消息格式不正确")
            return {"error": "消息格式不正确"}

        # Best-effort personalization: append the authenticated user's id
        # to the first message. Any failure is logged, never fatal.
        try:
            content = messages[0]["content"]
            redis = request.app.state.redis
            user_data = await redis.get(token)
            user_data = json.loads(user_data)
            logger.info(f"user_data: {user_data}")
            username = user_data["username"]
            replace_content = content + f";当前用户id为:{username};"
            messages[0]["content"] = replace_content
            logger.info(f"重新赋值后的内容{messages}")
        except Exception as e:
            logger.error(f"从Redis获取数据失败: {str(e)}", exc_info=True)

        # Log the tail of the conversation for debugging (truncated).
        last_msg = messages[-1]
        logger.info(f"最后一条消息: {last_msg.get('role', 'unknown')}: {last_msg.get('content', '')[:50]}...")

        # Return the SSE stream; X-Accel-Buffering disables proxy buffering
        # (nginx) so chunks reach the client immediately.
        return StreamingResponse(
            chat_stream(messages, sessionid, app, model_name, is_thinking, online_search), 
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no",
                "Content-Type": "text/event-stream; charset=utf-8"
            }
        )
    except Exception as e:
        logger.error(f"处理聊天请求时出错: {str(e)}", exc_info=True)
        return {"error": f"处理请求时出错: {str(e)}"}