import asyncio
import json
import logging
from enum import Enum
from functools import wraps
from typing import List, Dict, Optional

import requests
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

# Configure root logging: timestamped, named, leveled records to stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)


class ModelStatus(Enum):
    """Lifecycle states of the backing LLM as reported by the server."""
    READY = "ready"      # model answered the smoke-test chat request in initialize_model()
    LOADING = "loading"  # initial state set in AgentServer.__init__
    ERROR = "error"      # Ollama unreachable, model missing, or the test request failed


class AgentTask(BaseModel):
    """A single unit of work submitted to the model via /v1/agent/workflow."""
    task_type: str                        # free-form label; rendered into the prompt
    parameters: Dict                      # task arguments; JSON-serialized into the prompt
    context: Optional[List[Dict]] = None  # prior chat messages ({"role": ..., "content": ...} dicts)


class AgentWorkflow(BaseModel):
    """An ordered batch of tasks, optionally executed multiple times."""
    tasks: List[AgentTask]   # executed in order on each iteration
    max_iterations: int = 1  # number of passes over the task list


class AgentServer:
    """FastAPI front-end for a local Ollama chat model.

    Exposes a small agent API:
      * GET  /v1/model/status    -- model/health information
      * POST /v1/agent/workflow  -- execute an AgentWorkflow against the model
      * POST /v1/agent/tool_call -- invoke a registered tool by name
      * GET  /v1/agent/tools     -- list registered tools
    """

    # Timeout (seconds) applied to chat calls to Ollama so a dead service
    # cannot hang a request handler forever.
    REQUEST_TIMEOUT = 30

    def __init__(self, model_name: str = "qwen3:8b", ollama_base_url: str = "http://localhost:11434"):
        self.model_name = model_name
        self.ollama_base_url = ollama_base_url
        self.app = FastAPI()
        self.model_status = ModelStatus.LOADING  # flips to READY in initialize_model()
        self.tools: Dict[str, Dict] = {}         # name -> {"func": ..., "description": ...}
        self.setup_routes()
        self.setup_middleware()
        self.register_builtin_tools()

    def setup_middleware(self):
        """Install CORS middleware on the app."""
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],  # NOTE(review): wide open -- restrict origins in production
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

    def _chat_payload(self, messages: List[Dict]) -> Dict:
        """Build the JSON body shared by every Ollama /api/chat request."""
        return {
            "model": self.model_name,
            "messages": messages,
            "stream": False,
            "options": {
                "temperature": 0.7,
                "top_p": 0.9,
                "num_ctx": 4096
            }
        }

    async def initialize_model(self):
        """Verify the Ollama service, model availability, and responsiveness.

        Sets ``self.model_status`` to READY on success and ERROR on failure.

        Raises:
            Exception: if the service is unreachable, the model is not
                installed, or the smoke-test chat request fails.
        """
        try:
            # Check that the Ollama service is running.  The explicit timeout
            # fixes a potential indefinite hang when the host is unreachable.
            response = requests.get(f"{self.ollama_base_url}/api/tags", timeout=10)
            if response.status_code != 200:
                self.model_status = ModelStatus.ERROR
                raise Exception("Ollama 服务未运行")

            # Check that the requested model has been pulled.
            models = response.json().get("models", [])
            model_exists = any(model["name"] == self.model_name for model in models)

            if not model_exists:
                self.model_status = ModelStatus.ERROR
                raise Exception(f"模型 {self.model_name} 未找到，请确保模型已正确安装。\n"
                                f"可以使用命令: ollama pull qwen3:8b 来安装模型")

            # Smoke-test: send one trivial chat message to verify the model
            # actually answers.
            test_response = requests.post(
                f"{self.ollama_base_url}/api/chat",
                json=self._chat_payload([{"role": "user", "content": "你好"}]),
                timeout=self.REQUEST_TIMEOUT
            )

            if test_response.status_code != 200:
                self.model_status = ModelStatus.ERROR
                raise Exception(f"模型测试失败: {test_response.text}")

            self.model_status = ModelStatus.READY
            logger.info(f"模型 {self.model_name} 初始化成功，状态: {self.model_status.value}")

        except requests.exceptions.ConnectionError:
            self.model_status = ModelStatus.ERROR
            logger.error("无法连接到 Ollama 服务")
            raise Exception("无法连接到 Ollama 服务，请确保服务正在运行")
        except Exception as e:
            self.model_status = ModelStatus.ERROR
            logger.error(f"模型初始化失败: {str(e)}")
            raise

    def tool(self, name=None, description=None):
        """Decorator that registers a function as an agent tool.

        Args:
            name: registry key; defaults to the function's ``__name__``.
            description: human-readable summary; defaults to the docstring.
        """

        def decorator(func):
            tool_name = name or func.__name__
            self.tools[tool_name] = {"func": func, "description": description or func.__doc__}
            # Registration is this decorator's only job, so the function is
            # returned unwrapped (the previous no-op wrapper only added an
            # extra call frame).
            return func

        return decorator

    def register_builtin_tools(self):
        """Register the built-in demo tools: add, multiply, and a fake search."""

        @self.tool(name="add", description="两个整数相加")
        def add(a: int, b: int) -> int:
            return a + b

        @self.tool(name="multiply", description="两个整数相乘")
        def multiply(a: int, b: int) -> int:
            return a * b

        @self.tool(name="my_search_tool", description="专门搜索互联网中的内容")
        def my_search(query: str) -> str:
            """Demo-only internet search; swap in a real search API here."""
            if "python" in query.lower():
                return "Python 是一种流行的编程语言。"
            return "未找到相关内容。"

    def call_tool(self, tool_name, **kwargs):
        """Invoke a registered tool by name with keyword arguments.

        Raises:
            Exception: if no tool with this name has been registered.
        """
        tool = self.tools.get(tool_name)
        if not tool:
            raise Exception(f"工具 {tool_name} 未注册")
        return tool["func"](**kwargs)

    def setup_routes(self):
        """Register all HTTP routes on the FastAPI app."""

        @self.app.get("/v1/model/status")
        async def get_model_status():
            """Report model name, status, and static generation settings."""
            return {
                "model": self.model_name,
                "status": self.model_status.value,
                "ollama_url": self.ollama_base_url,
                "model_info": {
                    "name": "Qwen3-8B",
                    "type": "chat",
                    "context_length": 4096,
                    "temperature": 0.7,
                    "top_p": 0.9
                }
            }

        @self.app.post("/v1/agent/workflow")
        async def execute_workflow(workflow: AgentWorkflow):
            """Run every task in the workflow ``max_iterations`` times."""
            if self.model_status != ModelStatus.READY:
                raise HTTPException(
                    status_code=503,
                    detail=f"模型未就绪，当前状态: {self.model_status.value}"
                )

            try:
                results = []
                for _ in range(workflow.max_iterations):
                    for task in workflow.tasks:
                        # Render the task into a prompt.
                        prompt = self._build_task_prompt(task)

                        # Ask the model to handle it.
                        response = await self._process_task(prompt, task.context)

                        # Normalize the raw model output.
                        result = self._parse_task_response(response, task)
                        results.append(result)

                        # Feed the result back into the task's context so that
                        # later iterations see earlier answers.
                        if task.context is None:
                            task.context = []
                        task.context.append({
                            "role": "assistant",
                            "content": json.dumps(result, ensure_ascii=False)
                        })

                return {"results": results}
            except Exception as e:
                logger.error(f"工作流执行错误: {str(e)}")
                raise HTTPException(status_code=500, detail=str(e))

        @self.app.post("/v1/agent/tool_call")
        async def tool_call(data: dict):
            """Invoke a registered tool.  Errors are reported in the response
            body (not as HTTP errors) to preserve the existing client contract."""
            tool_name = data.get("tool_name")
            params = data.get("params", {})
            try:
                result = self.call_tool(tool_name, **params)
                return {"result": result}
            except Exception as e:
                return {"error": str(e)}

        @self.app.get("/v1/agent/tools")
        async def list_tools():
            """List every registered tool's metadata, omitting the callable."""
            return {
                "tools": [
                    {"name": name, **{key: value for key, value in meta.items() if key != 'func'}}
                    for name, meta in self.tools.items()
                ]
            }

    def _build_task_prompt(self, task: AgentTask) -> str:
        """Render a task (type, parameters, optional context) into one prompt string."""
        prompt = f"执行任务类型: {task.task_type}\n"
        prompt += f"参数: {json.dumps(task.parameters, ensure_ascii=False)}\n"
        if task.context:
            prompt += "上下文:\n"
            for ctx in task.context:
                prompt += f"{ctx['role']}: {ctx['content']}\n"
        prompt += "请根据以上信息执行任务，并以JSON格式返回结果。"
        return prompt

    async def _process_task(self, prompt: str, context: Optional[List[Dict]] = None) -> str:
        """Send one prompt (plus optional chat history) to the model.

        The blocking ``requests.post`` is pushed onto the default executor so
        it does not stall the asyncio event loop serving other requests (the
        original called it directly inside the coroutine).

        Returns:
            The assistant message content, or "" when absent.

        Raises:
            Exception: on timeout, transport failure, or a non-200 response.
        """
        messages = []
        if context:
            messages.extend(context)
        messages.append({"role": "user", "content": prompt})

        try:
            loop = asyncio.get_running_loop()
            response = await loop.run_in_executor(
                None,
                lambda: requests.post(
                    f"{self.ollama_base_url}/api/chat",
                    json=self._chat_payload(messages),
                    timeout=self.REQUEST_TIMEOUT
                )
            )

            if response.status_code != 200:
                raise Exception(f"模型请求失败: {response.text}")

            result = response.json()
            return result.get("message", {}).get("content", "")

        except requests.exceptions.Timeout:
            logger.error("模型请求超时")
            raise Exception("模型响应超时，请稍后重试")
        except requests.exceptions.RequestException as e:
            logger.error(f"请求错误: {str(e)}")
            raise Exception(f"任务处理失败: {str(e)}")
        except Exception as e:
            logger.error(f"处理错误: {str(e)}")
            raise Exception(f"任务处理失败: {str(e)}")

    def _parse_task_response(self, response: str, task: AgentTask) -> Dict:
        """Wrap the model's raw reply in a result dict.

        JSON replies are parsed and tagged "success"; anything else is passed
        through verbatim under ``raw_response`` with a "warning" status.
        """
        try:
            result = json.loads(response)
            return {
                "task_type": task.task_type,
                "status": "success",
                "result": result
            }
        except json.JSONDecodeError:
            logger.warning(f"响应不是JSON格式: {response}")
            return {
                "task_type": task.task_type,
                "status": "warning",
                "result": {"raw_response": response}
            }

if __name__ == "__main__":
    import uvicorn

    # Build the server and make sure the local model is reachable before
    # accepting any traffic.
    agent_server = AgentServer(model_name="qwen3:8b")
    asyncio.run(agent_server.initialize_model())

    # Hand the FastAPI app to uvicorn.
    uvicorn.run(agent_server.app, host="0.0.0.0", port=8000)
