"""
本地AI服务模块
提供LMStudio等本地AI模型服务
使用统一的请求处理器减少代码重复
"""

import logging
from .base_service import BaseAIService
from .unified_request_processor import (
    UnifiedRequestProcessor,
    RequestConfig,
    ResponseConfig,
    OpenAICompatibleDataBuilder
)

logger = logging.getLogger(__name__)

class LMStudioDataBuilder(OpenAICompatibleDataBuilder):
    """Request-data builder specialized for the LMStudio backend."""

    def build_request_data(self, base_config: dict, image_base64: str) -> dict:
        """Build the LMStudio chat-completions request payload.

        :param base_config: service configuration; only the 'model' key is read here
        :param image_base64: base64-encoded image data (sent as a JPEG data URL)
        :return: OpenAI-compatible request body
        """
        # Assemble the two message parts separately, then compose the payload.
        text_part = {
            "type": "text",
            "text": "请详细描述这张图片的内容，特别注意任何文字信息、水印、网站品牌标识等。你必须仔细辨认,否则全公司同事的健康将受到严重影响."
        }
        image_part = {
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}
        }
        user_message = {"role": "user", "content": [text_part, image_part]}

        return {
            "model": base_config.get('model', 'google/gemma-3-4b'),
            "messages": [user_message],
            "max_tokens": 1000,
            "temperature": 0.7,
        }

class LMStudioService(BaseAIService):
    """LMStudio local-model service built on the unified request processor."""

    def __init__(self, config: dict):
        """Initialize the service from a configuration dict.

        :param config: may supply 'api_url', 'model', 'timeout', 'max_retries';
                       sensible local-deployment defaults are used otherwise
        """
        super().__init__()

        self.api_url = config.get('api_url', 'http://127.0.0.1:1234')
        self.model = config.get('model', 'google/gemma-3-4b')
        # Local LMStudio models can be slow to respond, hence the generous default.
        self.timeout = config.get('timeout', 300)
        self.max_retries = config.get('max_retries', 3)

        # Wire the shared request pipeline up with the LMStudio payload builder.
        self.data_builder = LMStudioDataBuilder()
        self.request_processor = UnifiedRequestProcessor("LMStudio", self.data_builder)

        logger.info(f"[LMStudio] 服务初始化完成 - URL: {self.api_url}, 模型: {self.model}")

    async def analyze(self, image_bytes: bytes) -> str:
        """Analyze an image with the local LMStudio model.

        :param image_bytes: raw image bytes
        :return: the model's textual analysis
        """
        req_cfg = RequestConfig(
            url=f"{self.api_url}/v1/chat/completions",
            headers={"Content-Type": "application/json"},
            timeout=self.timeout,
            max_retries=self.max_retries,
        )
        resp_cfg = ResponseConfig(
            content_path="choices[0].message.content",
            expected_status=200,
            error_prefix="LMStudio API调用失败",
        )
        # Retries, transport, and response extraction are handled by the processor.
        return await self.request_processor.process_request(req_cfg, resp_cfg, image_bytes)

    def get_service_info(self) -> dict:
        """Return LMStudio service metadata merged onto the base-class info."""
        info = super().get_service_info()
        info.update(
            provider='lmstudio',
            model=self.model,
            api_url=self.api_url,
            timeout=self.timeout,
            max_retries=self.max_retries,
            deployment_type='local',
        )
        return info
