#!/usr/bin/env python3
"""
多种模型客户端支持
支持本地vLLM、云端API、Ollama等多种推理服务
"""

import base64
import json
import logging
import asyncio
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
import aiohttp
from openai import OpenAI, AsyncOpenAI
import requests

logger = logging.getLogger(__name__)

class BaseModelClient(ABC):
    """Abstract base class for vision-language model clients.

    Subclasses implement :meth:`analyze_image` against a concrete inference
    backend (local vLLM, Ollama, cloud APIs, ...); the shared prompt
    construction and response parsing live here.
    """

    def __init__(self, config: Dict[str, Any]):
        """Store backend configuration.

        Args:
            config: Backend settings; recognized keys are ``model_name``
                (default ``Qwen/Qwen2.5-VL-7B-Instruct``) and
                ``max_tokens`` (default 1024).
        """
        self.config = config
        self.model_name = config.get("model_name", "Qwen/Qwen2.5-VL-7B-Instruct")
        self.max_tokens = config.get("max_tokens", 1024)

    @abstractmethod
    async def analyze_image(self, image_path: str, query: str) -> Tuple[bool, str, int]:
        """Analyze the image at ``image_path`` for the object named by ``query``.

        Returns:
            Tuple of ``(is_match, description, confidence)`` where
            ``confidence`` is on a 0-10 scale.
        """

    def encode_image(self, image_path: str) -> str:
        """Return the file at ``image_path`` as a base64-encoded ASCII string."""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    def build_prompt(self, query: str) -> str:
        """Build the structured detection prompt for ``query``."""
        return f"""Please analyze the image and answer the following questions:
1. Is there a {query} in the image?
2. If yes, describe its appearance and location in the image in detail.
3. If no, describe what you see in the image instead.
4. On a scale of 1-10, how confident are you in your answer?

Please structure your response as follows:
Answer: [YES/NO]
Description: [Your detailed description]
Confidence: [1-10]"""

    def parse_response(self, response_text: str) -> Tuple[bool, str, int]:
        """Parse a structured model response into ``(is_match, description, confidence)``.

        Tolerates common model deviations from the template: echoed
        brackets around the answer (``[YES]``) and ``n/10``-style
        confidence values. Confidence is clamped to 0-10 and defaults
        to 10 when missing or unparseable (preserving prior behavior).
        A match requires an affirmative answer with confidence >= 7.
        """
        answer = None
        description = None
        confidence = 10  # default when the model omits the confidence line

        for raw_line in response_text.strip().split('\n'):
            line = raw_line.strip()
            lower = line.lower()
            if lower.startswith('answer:'):
                # Models sometimes echo the template brackets, e.g. "[YES]".
                answer = line.split(':', 1)[1].strip().strip('[]').upper()
            elif any(lower.startswith(prefix) for prefix in
                     ('description:', 'reasoning:', 'alternative description:')):
                description = line.split(':', 1)[1].strip()
            elif lower.startswith('confidence:'):
                # Accept both "8" and "8/10"-style values.
                value = line.split(':', 1)[1].strip().split('/', 1)[0].strip()
                try:
                    confidence = max(0, min(10, int(value)))
                except ValueError:
                    confidence = 10

        is_match = answer == "YES" and confidence >= 7
        return is_match, description or "No description available", confidence

class VLLMClient(BaseModelClient):
    """Client for a locally hosted, OpenAI-compatible vLLM endpoint."""

    def __init__(self, config: Dict[str, Any]):
        """Create the async OpenAI-compatible client for the vLLM server.

        Recognized ``config`` keys: ``base_url`` (default
        ``http://localhost:8001/v1``) and ``api_key`` (default ``test``).
        """
        super().__init__(config)
        self.base_url = config.get("base_url", "http://localhost:8001/v1")
        self.api_key = config.get("api_key", "test")

        try:
            self.client = AsyncOpenAI(
                api_key=self.api_key,
                base_url=self.base_url,
            )
            logger.info(f"vLLM客户端初始化成功: {self.base_url}")
        except Exception as e:
            logger.error(f"vLLM客户端初始化失败: {str(e)}")
            raise

    async def analyze_image(self, image_path: str, query: str) -> Tuple[bool, str, int]:
        """Run the detection prompt for ``query`` against the vLLM server.

        Returns ``(False, error message, 0)`` on any failure.
        """
        try:
            image_b64 = self.encode_image(image_path)
            user_content = [
                {"type": "text", "text": self.build_prompt(query)},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"},
                },
            ]

            completion = await self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": user_content}],
                max_tokens=self.max_tokens,
            )

            return self.parse_response(completion.choices[0].message.content)

        except Exception as e:
            logger.error(f"vLLM分析失败: {str(e)}")
            return False, f"Error: {str(e)}", 0

class OllamaClient(BaseModelClient):
    """Client for a local Ollama server's ``/api/generate`` endpoint."""

    def __init__(self, config: Dict[str, Any]):
        """Record the Ollama endpoint and model.

        Recognized ``config`` keys: ``base_url`` (default
        ``http://localhost:11434``) and ``model_name`` (default ``qwen2-vl``).
        """
        super().__init__(config)
        self.base_url = config.get("base_url", "http://localhost:11434")
        self.model_name = config.get("model_name", "qwen2-vl")

        logger.info(f"Ollama客户端初始化: {self.base_url}")

    async def analyze_image(self, image_path: str, query: str) -> Tuple[bool, str, int]:
        """POST the prompt plus image to Ollama and parse the reply.

        Returns ``(False, error message, 0)`` on any failure, including
        non-200 HTTP responses.
        """
        try:
            request_body = {
                "model": self.model_name,
                "prompt": self.build_prompt(query),
                "images": [self.encode_image(image_path)],
                "stream": False,
            }
            endpoint = f"{self.base_url}/api/generate"
            # Generous timeout: local multimodal generation can be slow.
            timeout = aiohttp.ClientTimeout(total=300)

            async with aiohttp.ClientSession() as session:
                async with session.post(endpoint, json=request_body, timeout=timeout) as resp:
                    if resp.status != 200:
                        error_msg = f"Ollama API错误: {resp.status}"
                        logger.error(error_msg)
                        return False, error_msg, 0
                    body = await resp.json()
                    return self.parse_response(body.get("response", ""))

        except Exception as e:
            logger.error(f"Ollama分析失败: {str(e)}")
            return False, f"Error: {str(e)}", 0

class CloudAPIClient(BaseModelClient):
    """Cloud API client (OpenAI, Azure, Alibaba and other compatible services).

    All supported providers here speak the OpenAI chat-completions
    message format, so a single request shape is used for every provider.
    """

    def __init__(self, config: Dict[str, Any]):
        """Create the async client for the configured cloud provider.

        Recognized ``config`` keys: ``base_url``, ``api_key`` (required)
        and ``provider`` (default ``openai``; one of openai/azure/alibaba).

        Raises:
            ValueError: If ``api_key`` is missing.
        """
        super().__init__(config)
        self.base_url = config.get("base_url")
        self.api_key = config.get("api_key")
        self.provider = config.get("provider", "openai")  # openai, azure, alibaba

        if not self.api_key:
            raise ValueError("云端API需要提供api_key")

        try:
            self.client = AsyncOpenAI(
                base_url=self.base_url,
                api_key=self.api_key
            )
            logger.info(f"云端API客户端初始化成功: {self.provider}")
        except Exception as e:
            logger.error(f"云端API客户端初始化失败: {str(e)}")
            raise

    async def analyze_image(self, image_path: str, query: str) -> Tuple[bool, str, int]:
        """Run the detection prompt for ``query`` via the cloud API.

        Returns ``(False, error message, 0)`` on any failure.
        """
        try:
            base64_image = self.encode_image(image_path)
            prompt = self.build_prompt(query)

            # NOTE: a former per-provider branch (alibaba vs. the rest)
            # built byte-identical payloads, so the duplication was
            # removed; every provider uses the OpenAI-compatible
            # multimodal message format below.
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}",
                            },
                        },
                    ],
                }
            ]

            response = await self.client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                max_tokens=self.max_tokens,
            )

            response_text = response.choices[0].message.content
            return self.parse_response(response_text)

        except Exception as e:
            logger.error(f"云端API分析失败: {str(e)}")
            return False, f"Error: {str(e)}", 0

class ModelClientFactory:
    """Factory for constructing model clients by backend name."""

    @staticmethod
    def create_client(client_type: str, config: Dict[str, Any]) -> BaseModelClient:
        """Instantiate the client matching ``client_type`` (case-insensitive).

        Cloud variants (``cloud``/``openai``/``azure``/``alibaba``) also
        record the provider name into ``config``.

        Raises:
            ValueError: If ``client_type`` is not a supported backend.
        """
        kind = client_type.lower()
        if kind == "vllm":
            return VLLMClient(config)
        if kind == "ollama":
            return OllamaClient(config)
        if kind in ("cloud", "openai", "azure", "alibaba"):
            config["provider"] = kind
            return CloudAPIClient(config)
        raise ValueError(f"不支持的客户端类型: {client_type}")

    @staticmethod
    def get_available_clients() -> Dict[str, str]:
        """Return a map of supported client keys to human-readable descriptions."""
        return {
            "vllm": "本地vLLM服务",
            "ollama": "Ollama本地服务",
            "openai": "OpenAI API",
            "azure": "Azure OpenAI",
            "alibaba": "阿里云通义千问",
            "cloud": "通用云端API",
        }

# 测试函数
async def test_client(client: BaseModelClient, test_image: str = "test.jpg"):
    """Smoke-test ``client`` by running one "person" analysis on ``test_image``.

    Logs a warning and returns early when the image file is missing;
    prints the match/description/confidence result otherwise.
    """
    # BUG FIX: ``os`` was previously imported only inside the
    # ``if __name__ == "__main__"`` guard, so calling this coroutine from
    # an importing module raised NameError. Import it locally instead.
    import os

    if not os.path.exists(test_image):
        logger.warning(f"测试图片不存在: {test_image}")
        return

    try:
        is_match, description, confidence = await client.analyze_image(
            test_image, "person"
        )

        print(f"测试结果:")
        print(f"  匹配: {is_match}")
        print(f"  描述: {description}")
        print(f"  置信度: {confidence}/10")

    except Exception as e:
        print(f"测试失败: {str(e)}")

if __name__ == "__main__":
    import os
    
    print("模型客户端测试")
    print("=" * 30)
    
    # 显示可用客户端
    clients = ModelClientFactory.get_available_clients()
    print("可用客户端:")
    for key, desc in clients.items():
        print(f"  {key}: {desc}")
    
    # 测试vLLM客户端
    print("\n测试vLLM客户端...")
    try:
        vllm_config = {
            "base_url": "http://localhost:8001/v1",
            "api_key": "test",
            "model_name": "Qwen/Qwen2.5-VL-7B-Instruct"
        }
        vllm_client = ModelClientFactory.create_client("vllm", vllm_config)
        asyncio.run(test_client(vllm_client))
    except Exception as e:
        print(f"vLLM客户端测试失败: {str(e)}")
    
    print("\n✅ 模型客户端模块加载完成")
