from typing import Optional, Dict, Any
import os
import uvicorn
from fastapi import FastAPI, HTTPException, Body, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
import threading
import time
import base64
from loguru import logger
from utils.logger import Logger
from utils.constants import AgentType, AGENT_MAX_CONCURRENT
from states.agent_state import AgentState
from middleware.redis_adapter import get_redis_adapter


# ===================== Image-handling utilities =====================
class ImageProcessor:
    """Stateless helpers for converting images to the Base64 form GPT-4V expects."""

    @staticmethod
    def image_to_base64(image_path: str) -> str:
        """Read the file at *image_path* and return its bytes as a Base64 string.

        GPT-4V's ``image_url`` content type requires Base64-encoded image data.
        """
        with open(image_path, "rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")

    @staticmethod
    def save_uploaded_image(file: UploadFile, save_dir: str) -> str:
        """Persist an uploaded image under *save_dir* and return the saved path.

        Fixes two defects in the original:
        - ``UploadFile.read()`` is a coroutine; calling it from sync code wrote
          a coroutine object instead of bytes. Use the synchronous underlying
          file handle ``file.file`` instead.
        - The client-supplied filename is reduced to its basename so a crafted
          name (e.g. ``../../etc/x``) cannot escape *save_dir*.
        """
        os.makedirs(save_dir, exist_ok=True)
        file_path = os.path.join(save_dir, os.path.basename(file.filename))
        with open(file_path, "wb") as f:
            f.write(file.file.read())
        return file_path


# ===================== Multi-modal Agent core service =====================
class MultiModalAgentService:
    """FastAPI-based agent that answers questions about images via GPT-4V.

    On construction it registers itself in Redis through ``AgentState``,
    starts a daemon heartbeat thread (10 s interval), and builds the FastAPI
    app exposing ``/health`` and ``/predict``. Call :meth:`run` to serve.
    """

    def __init__(self):
        # Environment-variable configuration.
        self.host = os.getenv("MULTI_MODAL_HOST", "0.0.0.0")
        self.port = int(os.getenv("MULTI_MODAL_PORT", 8005))  # dedicated port for this agent
        self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
        self.image_storage_path = os.path.join(os.getcwd(), "apps", "multi_modal", "uploaded_images")

        # Core components.
        self.redis_client = get_redis_adapter()  # Redis adapter; supports cluster mode
        self.logger = Logger.get_logger(agent_name="multi_modal_agent", task_id="none")
        self.image_processor = ImageProcessor()
        # Extensions accepted on upload; also used to derive the data-URL MIME subtype.
        self.supported_image_formats = ["png", "jpg", "jpeg", "gif", "bmp"]

        # Register this instance in the shared AgentState registry.
        self.instance_id = f"multi_modal_{self.host}:{self.port}"
        self.agent_state = AgentState(
            agent_type=AgentType.MULTI_MODAL.value,
            instance_id=self.instance_id,
            redis_client=self.redis_client,
            host_ip=self.host,
            port=self.port
        )

        # Start the background heartbeat before serving traffic.
        self._start_heartbeat()

        # Build the FastAPI application.
        self.app = self._init_fastapi()
        self.logger.success(
            f"多模态Agent启动完成 | 实例ID：{self.instance_id} | 地址：http://{self.host}:{self.port} | 图片存储路径：{self.image_storage_path}")

    def _init_fastapi(self) -> FastAPI:
        """Create the FastAPI app with CORS middleware and the two endpoints."""
        app = FastAPI(title="Multi Modal Agent", version="v1.0")

        # CORS middleware — wide open; presumably fronted by an internal gateway.
        app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        # Health-check endpoint: reports live AgentState metrics.
        @app.get("/health")
        async def health_check():
            self.logger.debug(f"健康检查请求 | 实例：{self.instance_id}")
            return {
                "status": self.agent_state.status,
                "instance_id": self.instance_id,
                "cpu_usage": self.agent_state.cpu_usage,
                "mem_usage": self.agent_state.mem_usage,
                "current_tasks": self.agent_state.current_tasks,
                "supported_image_formats": self.supported_image_formats
            }

        # Core multi-modal QA endpoint (image + text).
        @app.post("/predict")
        async def predict(
                question: str = Body(..., description="针对图片的问题"),
                image: Optional[UploadFile] = File(None, description="图片文件（可选，支持png/jpg等）"),
                image_base64: Optional[str] = Body(None, description="图片Base64编码（二选一，优先级高于上传文件）"),
                task_id: Optional[str] = Body("none", description="任务ID")
        ):
            task_logger = Logger.update_task_id(self.logger, task_id)
            task_logger.info(f"收到多模态问答请求 | 问题：{question[:30]}...")
            try:
                # Resolve the image: Base64 payload takes priority over an uploaded file.
                image_b64 = None
                # Default MIME subtype; a raw Base64 payload carries no extension,
                # so "png" is kept for that path (matches the original behavior).
                image_mime = "png"
                if image_base64:
                    image_b64 = image_base64
                    task_logger.debug("使用传入的Base64图片")
                elif image:
                    file_ext = os.path.splitext(image.filename)[1].lower().lstrip(".")
                    if file_ext not in self.supported_image_formats:
                        raise ValueError(f"不支持的图片格式：{file_ext}，支持格式：{self.supported_image_formats}")
                    # Save the upload, then convert it to Base64.
                    image_path = self.image_processor.save_uploaded_image(image, self.image_storage_path)
                    image_b64 = self.image_processor.image_to_base64(image_path)
                    # Fix: the data URL previously hardcoded image/png for every
                    # upload; derive the MIME subtype from the validated extension
                    # instead ("jpg" files use the registered subtype "jpeg").
                    image_mime = "jpeg" if file_ext == "jpg" else file_ext
                    task_logger.debug(f"上传图片并转Base64成功 | 文件名：{image.filename}")
                else:
                    raise ValueError("必须传入图片（上传文件或Base64编码二选一）")

                # Call GPT-4V to produce the multi-modal answer.
                # NOTE(review): this is a blocking client call inside an async
                # handler — it stalls the event loop for the request duration.
                from openai import OpenAI
                client = OpenAI(api_key=self.openai_api_key)
                response = client.chat.completions.create(
                    model="gpt-4-vision-preview",
                    messages=[
                        {
                            "role": "user",
                            "content": [
                                {"type": "text", "text": question},
                                {
                                    "type": "image_url",
                                    "image_url": {
                                        "url": f"data:image/{image_mime};base64,{image_b64}",
                                        "detail": "auto"
                                    }
                                }
                            ]
                        }
                    ],
                    max_tokens=1000,
                    temperature=0.3
                )
                # message.content may be None (e.g. content filter); guard before strip().
                answer = (response.choices[0].message.content or "").strip()

                self.agent_state.update_task_metrics(success=True)
                task_logger.info(f"多模态问答处理成功 | 答案：{answer[:30]}...")
                return {
                    "success": True,
                    "data": {
                        "question": question,
                        "answer": answer,
                        "instance_id": self.instance_id,
                        "image_info": "Base64图片" if image_base64 else image.filename
                    },
                    "error_code": 0,
                    "error_msg": ""
                }
            except Exception as e:
                # Any failure (validation or upstream) is reported as HTTP 500
                # with the service's structured error envelope.
                error_msg = str(e)
                self.agent_state.update_task_metrics(success=False)
                task_logger.error(f"多模态问答处理失败：{error_msg}")
                raise HTTPException(
                    status_code=500,
                    detail={"success": False, "error_code": 2002, "error_msg": error_msg}
                )

        return app

    def _start_heartbeat(self):
        """Start a daemon thread that pushes AgentState heartbeats every 10 s."""

        def heartbeat():
            while True:
                try:
                    self.agent_state.send_heartbeat()
                    self.logger.debug(
                        f"心跳同步成功 | 状态：{self.agent_state.status} | 任务数：{self.agent_state.current_tasks}")
                except Exception as e:
                    # Best-effort: log and keep the loop alive so a transient
                    # Redis outage doesn't kill the heartbeat permanently.
                    self.logger.error(f"心跳同步失败：{str(e)}")
                time.sleep(10)

        thread = threading.Thread(target=heartbeat, daemon=True)
        thread.start()
        self.logger.info("心跳线程启动成功")

    def run(self):
        """Serve the FastAPI app with uvicorn (blocking)."""
        uvicorn.run(app=self.app, host=self.host, port=self.port, log_level="warning")


# Entry point
def main():
    """Build the multi-modal agent service and block on its HTTP server."""
    MultiModalAgentService().run()


if __name__ == "__main__":
    main()