"""
多模态模型接口模块

该模块提供了统一的多模态模型调用接口，支持vLLM和OpenAI API两种调用方式。
主要功能包括：
- 统一的多模态模型接口
- 支持图像和文本的联合推理
- 并行调用优化
- 自动错误处理和重试机制
- 图像编码和预处理
"""

from typing import List, Dict, Any, Optional, Union
from PIL import Image
import os
import base64
# import asyncio
# import aiohttp
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from io import BytesIO
import time

# Silence overly chatty debug output from the OpenAI client and its
# underlying HTTP stack.
for _noisy in ("openai", "httpx", "httpcore"):
    logging.getLogger(_noisy).setLevel(logging.WARNING)

logger = logging.getLogger(__name__)


class MultimodalModelInterface:
    """
    Unified multimodal (image + text) model interface.

    Provides a single calling convention over two backends, selected via
    ``config.model.type``:

    - ``"openai_api"``: an OpenAI-compatible chat-completions API.
    - ``"vllm"``: a locally loaded vLLM engine.

    Capabilities:
    - Automatic image loading, downscaling and base64 (JPEG) encoding.
    - Parallel generation of several candidate answers per prompt via the
      backend's ``n`` parameter.
    - Retry with exponential backoff for API calls.
    - Lightweight call/time/token statistics (see :meth:`get_stats`).
    """

    def __init__(self, config):
        """
        Initialize the multimodal model interface.

        Args:
            config: Configuration object exposing ``model.type``, the
                backend-specific config (``model.openai_config`` or
                ``model.vllm_config``), ``model.generation_config`` and,
                optionally, a ``multimodal`` section.

        Raises:
            ValueError: If ``config.model.type`` is neither ``"openai_api"``
                nor ``"vllm"``.
        """
        self.config = config
        self.model_type = config.model.type

        # Aggregate statistics reported by get_stats().
        self.call_count = 0
        self.total_time = 0.0
        self.total_tokens = 0

        # Multimodal options; getattr with a default covers both a missing
        # `multimodal` section (None) and missing individual attributes.
        multimodal_config = getattr(config, 'multimodal', None)
        self.image_format = getattr(multimodal_config, 'image_format', 'base64')
        self.max_image_size = getattr(multimodal_config, 'max_image_size', 2048)
        self.show_image_info = getattr(multimodal_config, 'show_image_info', True)
        self.log_image_usage = getattr(multimodal_config, 'log_image_usage', True)

        # Initialize the selected backend.
        if self.model_type == "openai_api":
            self._init_openai_client()
        elif self.model_type == "vllm":
            self._init_vllm_client()
        else:
            raise ValueError(f"不支持的模型类型: {self.model_type}")

        logger.info(f"多模态模型接口初始化完成 - 类型: {self.model_type}")

    def _init_openai_client(self):
        """Create the OpenAI client from ``config.model.openai_config``."""
        from openai import OpenAI

        openai_config = self.config.model.openai_config

        # Build the API client; timeout applies to each HTTP request.
        self.client = OpenAI(
            api_key=openai_config.get('api_key', ''),
            base_url=openai_config.get('base_url', ''),
            timeout=openai_config.get('request_timeout', 300)
        )

        self.model_name = openai_config.get('model_name', '')
        self.max_retries = openai_config.get('max_retries', 3)
        self.request_timeout = openai_config.get('request_timeout', 300)
        self.max_concurrent_requests = openai_config.get('max_concurrent_requests', 8)

        logger.info(f"OpenAI客户端初始化完成 - 模型: {self.model_name}")
        logger.info(f"并行配置 - 最大并发: {self.max_concurrent_requests}, 超时: {self.request_timeout}s")
        logger.info(f"支持OpenAI API原生并行生成多个候选答案")

    def _init_vllm_client(self):
        """
        Create the local vLLM engine from ``config.model.vllm_config``.

        Raises:
            ImportError: If the ``vllm`` package is not installed.
        """
        try:
            from vllm import LLM, SamplingParams

            vllm_config = self.config.model.vllm_config

            # Load the model into the vLLM engine; this attribute is what
            # _single_vllm_call uses for generation.
            self.llm = LLM(
                model=vllm_config.model_path,
                tensor_parallel_size=vllm_config.tensor_parallel_size,
                gpu_memory_utilization=vllm_config.gpu_memory_utilization,
                max_model_len=vllm_config.max_model_len,
                trust_remote_code=True
            )

            # Default sampling parameters (per-call parameters are rebuilt in
            # _single_vllm_call so that `n` can vary).
            generation_config = self.config.model.generation_config
            self.sampling_params = SamplingParams(
                temperature=generation_config.temperature,
                top_p=generation_config.top_p,
                max_tokens=generation_config.max_tokens
            )

            logger.info(f"vLLM客户端初始化完成 - 模型: {vllm_config.model_path}")

        except ImportError:
            logger.error("vLLM未安装，请安装vLLM以使用vLLM后端")
            raise

    def _process_image(self, image_path: str) -> Optional[str]:
        """
        Load an image file and return it as a base64-encoded JPEG string.

        The image is converted to RGB and, if its longest side exceeds
        ``self.max_image_size``, downscaled proportionally.

        Args:
            image_path (str): Path to the image file.

        Returns:
            Optional[str]: Base64-encoded JPEG data, or None when the path is
            empty, the file is missing, or processing fails.
        """
        if not image_path or not os.path.exists(image_path):
            if image_path:  # Only log an error when a path was given but does not exist.
                logger.error(f"❌ 图像文件不存在: {image_path}")
            return None

        try:
            with Image.open(image_path) as img:
                # Normalize to RGB so that JPEG encoding always succeeds.
                if img.mode != 'RGB':
                    img = img.convert('RGB')

                # Downscale proportionally when the longest side is too large.
                original_size = img.size
                if max(img.size) > self.max_image_size:
                    ratio = self.max_image_size / max(img.size)
                    new_size = tuple(int(dim * ratio) for dim in img.size)
                    img = img.resize(new_size, Image.Resampling.LANCZOS)

                # Encode as base64 JPEG.
                buffer = BytesIO()
                img.save(buffer, format='JPEG', quality=95)
                image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')

                # Log detailed info only for the first processed image to
                # avoid flooding the log.
                if not hasattr(self, '_first_image_processed'):
                    logger.info(f"📸 图像处理配置: 最大尺寸={self.max_image_size}, 格式={self.image_format}")
                    logger.info(f"📸 首次图像处理: {os.path.basename(image_path)} ({original_size} -> {img.size})")
                    self._first_image_processed = True

                return image_data

        except Exception as e:
            logger.error(f"❌ 图像处理失败: {os.path.basename(image_path)} - {e}")
            return None

    def _create_message_content(self, text: str, image_path: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Build an OpenAI-style message content list from text and an optional image.

        Args:
            text (str): Text content.
            image_path (Optional[str]): Optional image path; when processing
                fails the request silently degrades to text-only.

        Returns:
            List[Dict[str, Any]]: Content parts (text first, then the image
            as a ``data:`` URL when available).
        """
        content = [{
            "type": "text",
            "text": text
        }]

        # Attach the image (if any) as a base64 data URL.
        if image_path:
            image_data = self._process_image(image_path)
            if image_data:
                content.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{image_data}"
                    }
                })
                # Only announce image usage in debug mode.
                if self.show_image_info and hasattr(self, '_debug_mode') and self._debug_mode:
                    logger.info(f"✅ 图像已包含在请求中: {os.path.basename(image_path)}")
            else:
                # Image processing failed; fall back to text-only.
                logger.error(f"❌ 图像处理失败，将使用纯文本模式: {os.path.basename(image_path) if image_path else 'None'}")

        return content

    def _single_openai_call(self, text: str, image_path: Optional[str] = None, num_return: int = 1) -> List[str]:
        """
        Call the OpenAI API, generating multiple candidates in parallel.

        Requests are issued in batches of up to 4 candidates (the assumed
        per-call limit of the ``n`` parameter) and retried with exponential
        backoff; a batch that exhausts its retries contributes empty strings.

        Args:
            text (str): Text content.
            image_path (Optional[str]): Optional image path.
            num_return (int): Number of candidate answers to generate.

        Returns:
            List[str]: Exactly ``num_return`` responses (empty strings for
            failed or missing candidates).
        """
        content = self._create_message_content(text, image_path)
        gen_config = self.config.model.generation_config

        # Assumed per-request cap on the `n` parameter; larger requests are
        # split into batches.
        max_n_per_call = 4
        all_results = []

        remaining = num_return
        while remaining > 0:
            current_n = min(remaining, max_n_per_call)

            for attempt in range(self.max_retries):
                try:
                    start_time = time.time()

                    # Single user message in OpenAI chat format.
                    message = {
                        "role": "user",
                        "content": content
                    }

                    # `n` generates multiple candidates in one API call.
                    response = self.client.chat.completions.create(
                        model=self.model_name,
                        messages=[message],
                        temperature=gen_config.get('temperature', 0.8),
                        max_tokens=gen_config.get('max_tokens', 2048),
                        top_p=gen_config.get('top_p', 0.9),
                        n=current_n
                    )

                    elapsed_time = time.time() - start_time

                    # Update aggregate statistics.
                    self.call_count += 1
                    self.total_time += elapsed_time

                    # Collect this batch's candidate texts.
                    batch_results = []
                    if response.choices:
                        for choice in response.choices:
                            if choice.message and choice.message.content:
                                batch_results.append(choice.message.content.strip())
                            else:
                                batch_results.append("")

                        # Prefer the server-reported token usage; otherwise
                        # fall back to a rough whitespace-token estimate.
                        if hasattr(response, 'usage') and response.usage:
                            self.total_tokens += response.usage.total_tokens
                        else:
                            total_tokens = sum(len(result.split()) for result in batch_results)
                            self.total_tokens += total_tokens

                    all_results.extend(batch_results)
                    remaining -= current_n
                    break  # Batch succeeded; stop retrying.

                except Exception as e:
                    logger.error(f"OpenAI API调用失败 (尝试 {attempt + 1}/{self.max_retries}): {e}")
                    if attempt == self.max_retries - 1:
                        # Retries exhausted: fill this batch with empty strings.
                        logger.error(f"批次调用失败，添加空字符串")
                        all_results.extend([""] * current_n)
                        remaining -= current_n
                    else:
                        # Exponential backoff before the next attempt.
                        time.sleep(2 ** attempt)

        # Pad/truncate to exactly num_return results.
        while len(all_results) < num_return:
            all_results.append("")

        return all_results[:num_return]

    def _single_vllm_call(self, text: str, image_path: Optional[str] = None, num_return: int = 1) -> List[str]:
        """
        Call the local vLLM engine, generating multiple candidates in parallel.

        Args:
            text (str): Text content.
            image_path (Optional[str]): Optional image path.
            num_return (int): Number of candidate answers to generate.

        Returns:
            List[str]: Exactly ``num_return`` responses; on failure a list of
            empty strings is returned instead of raising.
        """
        try:
            from vllm import SamplingParams

            # Build the input: multimodal dict when an image is available,
            # plain text otherwise.
            if image_path and os.path.exists(image_path):
                inputs = {
                    "prompt": text,
                    "multi_modal_data": {"image": image_path}
                }
            else:
                inputs = text

            # Per-call sampling parameters (`n` generates multiple candidates
            # in one pass).
            gen_config = self.config.model.generation_config
            sampling_params = SamplingParams(
                temperature=gen_config.get('temperature', 0.8),
                top_p=gen_config.get('top_p', 0.9),
                max_tokens=gen_config.get('max_tokens', 2048),
                n=num_return
            )

            start_time = time.time()

            # BUGFIX: use the engine created in _init_vllm_client (self.llm);
            # the previous attribute name `self.vllm_model` was never set.
            outputs = self.llm.generate(inputs, sampling_params)

            elapsed_time = time.time() - start_time

            # Update aggregate statistics.
            self.call_count += 1
            self.total_time += elapsed_time

            # vLLM returns a list of RequestOutput; each holds one completion
            # per requested candidate.
            results = []
            if outputs and len(outputs) > 0:
                output = outputs[0]
                for completion in output.outputs:
                    results.append(completion.text.strip())

                # Rough whitespace-token estimate.
                total_tokens = sum(len(result.split()) for result in results)
                self.total_tokens += total_tokens

            # Pad/truncate to exactly num_return results.
            while len(results) < num_return:
                results.append("")

            return results[:num_return]

        except Exception as e:
            logger.error(f"vLLM调用失败: {e}")
            return [""] * num_return

    def generate(self, messages: List[Dict[str, str]], image_paths: Optional[List[str]] = None, num_return: int = 1) -> List[str]:
        """
        Generate responses for a batch of (optionally multimodal) messages.

        Args:
            messages (List[Dict[str, str]]): Messages; each dict's ``content``
                field holds the prompt text.
            image_paths (Optional[List[str]]): Image paths aligned with
                ``messages`` by index; shorter lists leave later messages
                text-only.
            num_return (int): Number of candidates per message.

        Returns:
            List[str]: Flat list of ``len(messages) * num_return`` responses,
            in message order; failed messages contribute empty strings.
        """
        if not messages:
            return []

        results = []

        for i, message in enumerate(messages):
            text = message.get('content', '')
            image_path = image_paths[i] if image_paths and i < len(image_paths) else None

            try:
                if self.model_type == "openai_api":
                    message_results = self._single_openai_call(text, image_path, num_return)
                elif self.model_type == "vllm":
                    message_results = self._single_vllm_call(text, image_path, num_return)
                else:
                    logger.error(f"不支持的模型类型: {self.model_type}")
                    message_results = [""] * num_return

                results.extend(message_results)
            except Exception as e:
                logger.error(f"生成失败: {e}")
                # On failure, keep the output shape by padding with empties.
                results.extend([""] * num_return)

        return results

    def get_stats(self) -> Dict[str, Any]:
        """
        Return aggregate call statistics.

        Returns:
            Dict[str, Any]: call_count, total_tokens, total_time,
            avg_time_per_call (0-safe), and model_type.
        """
        return {
            'call_count': self.call_count,
            'total_tokens': self.total_tokens,
            'total_time': self.total_time,
            'avg_time_per_call': self.total_time / max(self.call_count, 1),
            'model_type': self.model_type
        }