import datetime
import json
from typing import List

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from myAgent.AgentTools import use_agent_tools
from llm.llmChain import streamingAnswer
from router import UniversalDoc, CommonTools
from router import agent  # 导入agent路由
from router import mcpRouter  # 导入MCP工具路由

app = FastAPI(title="WendAIWeb API")

# Register feature routers with the application (order preserved).
for feature_router in (
    UniversalDoc.router,  # document configuration routes
    agent.router,         # agent routes
    mcpRouter.router,     # MCP tool routes
    CommonTools.router,   # common tools routes
):
    app.include_router(feature_router)

# CORS configuration: wide open for development; restrict origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Data models (request/response schemas)
class ChatMessage(BaseModel):
    """Incoming chat request payload.

    Attributes:
        message: the user's chat message text.
        tools: nested lists carrying tool IDs, workflow IDs and MCP tool IDs.
        session_id: conversation identifier; defaults to "default".
    """
    message: str
    # Default is an empty list; the previous tuple default `()` did not match
    # the declared List type and (since pydantic does not validate defaults by
    # default) would surface as a tuple at runtime. pydantic deep-copies field
    # defaults per instance, so a mutable default is safe here.
    tools: List[List[str]] = []
    session_id: str = "default"


class ChatResponse(BaseModel):
    """Outgoing chat response payload.

    Attributes:
        response: the assistant's reply text.
        is_complete: whether the reply is final.
        missing_fields: names of required fields the request lacked, if any.
    """
    response: str
    is_complete: bool = True
    # Empty list default instead of the previous tuple `()`, which did not
    # match the declared List type (pydantic deep-copies defaults, so this
    # mutable default is safe).
    missing_fields: List[str] = []


# Streaming AI reply handler
def _stream_event(event_type: str, content: str) -> str:
    """Serialize one stream event ({"type","content","timestamp"}) to JSON."""
    return json.dumps({
        "type": event_type,
        "content": content,
        "timestamp": datetime.datetime.now().isoformat()
    }, ensure_ascii=False)


async def get_streaming_ai_response(message: str, tools: List[List[str]], session_id: str = "default"):
    """Async generator yielding JSON-encoded stream events for a chat message.

    Args:
        message: the user's chat message; yields an error event if empty.
        tools: nested lists of [tool IDs, workflow IDs, MCP tool IDs].
        session_id: conversation identifier; defaults to "default".

    Yields:
        JSON strings shaped like {"type", "content", "timestamp"}. All output
        uses ensure_ascii=False consistently (the original omitted it on the
        first two paths, producing escaped \\uXXXX text only there).
    """
    if not message:
        yield _stream_event("message", "消息不能为空")
        return

    # Plain Q&A when no tools were supplied or every tool list is empty.
    # NOTE(review): fewer than 3 sub-lists is also treated as "no tools" —
    # presumably the client always sends exactly three lists; confirm.
    if not tools or len(tools) < 3 or all(not tool_list for tool_list in tools):
        async for chunk in streamingAnswer(message, session_id):
            yield _stream_event("message", chunk)
        return

    try:
        async for chunk in use_agent_tools(message, tools, session_id):
            try:
                if isinstance(chunk, str):
                    # Agent chunks are expected to already be JSON text.
                    data = json.loads(chunk)
                else:
                    data = {
                        "type": "message",
                        "content": str(chunk),
                        "timestamp": datetime.datetime.now().isoformat()
                    }

                if not isinstance(data, dict):
                    # Robustness: valid JSON that is not an object (a bare
                    # string, number or list) is wrapped instead of raising
                    # on the key assignments below.
                    data = {"content": data}

                # Backfill required fields the agent may have omitted.
                data.setdefault("type", "message")
                data.setdefault("timestamp", datetime.datetime.now().isoformat())

                # Normalize content to a string so the client can render it.
                if "content" in data:
                    if isinstance(data["content"], (list, dict)):
                        data["content"] = json.dumps(data["content"], ensure_ascii=False)
                    else:
                        data["content"] = str(data["content"])

                yield json.dumps(data, ensure_ascii=False)
            except json.JSONDecodeError:
                # Non-JSON chunk: wrap it in the standard event envelope.
                yield _stream_event("message", str(chunk))
            except Exception as e:
                # Any other per-chunk processing failure becomes an error event.
                yield _stream_event("error", f"数据处理失败: {str(e)}")
    except Exception as e:
        # Errors raised by the tool pipeline itself.
        yield _stream_event("error", f"工具处理失败: {str(e)}")


@app.post("/api/chat/stream")
async def chat_stream(message: ChatMessage):
    """流式响应API端点"""
    # 使用消息内容生成会话ID，这里简单使用UUID生成
    session_id = message.session_id if hasattr(message, "session_id") else "default"

    return StreamingResponse(
        get_streaming_ai_response(message.message, message.tools, session_id),
        media_type="application/json",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # 禁用Nginx缓冲
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'POST, OPTIONS',
            'Access-Control-Allow-Headers': 'Content-Type, Accept',
        }
    )


# Routes
@app.get("/")
async def root():
    return {"message": "欢迎使用WendAIWeb API"}
