"""
Ollama AI服务模块
提供Ollama本地或远程AI模型服务
使用统一的请求处理器减少代码重复
"""

import logging

import httpx

from .base_service import BaseAIService
from .unified_request_processor import (
    UnifiedRequestProcessor,
    RequestConfig,
    ResponseConfig,
    OllamaDataBuilder
)

logger = logging.getLogger(__name__)

class OllamaServiceDataBuilder(OllamaDataBuilder):
    """Payload builder tailored to Ollama's /api/generate endpoint."""

    def build_request_data(self, base_config: dict, image_base64: str) -> dict:
        """Assemble the JSON body for an Ollama image-analysis request.

        :param base_config: provider configuration; only the ``model`` key is read
        :param image_base64: the image encoded as a base64 string
        :return: dict ready to be sent as the request JSON body
        """
        payload = {
            "model": base_config.get('model', 'gemma3:4b'),
            "prompt": "请详细描述这张图片的内容，特别注意任何文字信息、水印、网站品牌标识等。你必须仔细辨认,否则全公司同事的健康将受到严重影响。",
        }
        # Ollama expects images as a list of base64-encoded strings.
        payload["images"] = [image_base64]
        # Request one complete response rather than a token stream.
        payload["stream"] = False
        payload["options"] = {
            "temperature": 0.7,
            # Cap the number of generated tokens to bound the output length.
            "num_predict": 1000,
        }
        return payload

class OllamaService(BaseAIService):
    """Ollama AI model service (local or remote deployment).

    Delegates payload construction and HTTP retry handling to the shared
    UnifiedRequestProcessor so this class only holds Ollama-specific
    configuration and endpoints.
    """

    def __init__(self, config: dict):
        """
        :param config: service configuration; recognized keys:
            ``api_url`` (default ``http://127.0.0.1:11434``),
            ``model`` (default ``gemma3:4b``),
            ``timeout`` seconds (default 300),
            ``max_retries`` (default 3)
        """
        super().__init__()

        self.api_url = config.get('api_url', 'http://127.0.0.1:11434')
        self.model = config.get('model', 'gemma3:4b')
        # Local models can be slow to generate, hence the generous default.
        self.timeout = config.get('timeout', 300)
        self.max_retries = config.get('max_retries', 3)

        # Shared request pipeline with an Ollama-specific payload builder.
        self.data_builder = OllamaServiceDataBuilder()
        self.request_processor = UnifiedRequestProcessor("Ollama", self.data_builder)

        logger.info(f"[Ollama] 服务初始化完成 - URL: {self.api_url}, 模型: {self.model}")

    async def analyze(self, image_bytes: bytes) -> str:
        """Analyze an image with the configured Ollama model.

        :param image_bytes: raw image bytes
        :return: the model's textual analysis result
        """
        # Where to send the request and how persistently to retry it.
        request_config = RequestConfig(
            url=f"{self.api_url}/api/generate",
            headers={"Content-Type": "application/json"},
            timeout=self.timeout,
            max_retries=self.max_retries
        )

        # How to extract the answer: Ollama puts the text under "response".
        response_config = ResponseConfig(
            content_path="response",
            expected_status=200,
            error_prefix="Ollama API调用失败"
        )

        # The processor handles encoding, retries, and response parsing.
        return await self.request_processor.process_request(
            request_config,
            response_config,
            image_bytes
        )

    def get_service_info(self) -> dict:
        """Return service metadata, extending the base-class info dict."""
        base_info = super().get_service_info()
        base_info.update({
            'provider': 'ollama',
            'model': self.model,
            'api_url': self.api_url,
            'timeout': self.timeout,
            'max_retries': self.max_retries,
            # Ollama may run on the local machine or a remote host.
            'deployment_type': 'local_remote'
        })
        return base_info

    async def check_connection(self) -> bool:
        """Check that the Ollama server is reachable and the model is present.

        Queries ``/api/tags`` (the installed-models listing) with a short
        timeout, independent of the long generation timeout.

        :return: True iff the server responds and lists ``self.model``
        """
        try:
            tags_url = f"{self.api_url}/api/tags"
            # Fix: httpx was used here without ever being imported
            # (NameError at call time); now imported at module level.
            async with httpx.AsyncClient(timeout=10) as client:
                response = await client.get(tags_url)
                if response.status_code != 200:
                    logger.error(f"[Ollama] 服务连接失败，状态码: {response.status_code}")
                    return False
                models = response.json().get('models', [])
                model_names = [model['name'] for model in models]
                if self.model in model_names:
                    logger.info(f"[Ollama] 连接正常，模型 {self.model} 可用")
                    return True
                logger.warning(f"[Ollama] 连接正常，但模型 {self.model} 不可用。可用模型: {model_names}")
                return False
        except Exception as e:
            # Network errors / bad JSON all mean "not connected"; log and report.
            logger.error(f"[Ollama] 连接检查失败: {e}")
            return False
