import os
import time
import torch
from PIL import Image
from core.deploy import MiniCPMVDeploy
import gc

class ImageInferencer:
    """
    Single- and multi-image inference around a MiniCPM-V deployment,
    with explicit GPU/host memory management between runs.
    """

    # Fallback prompt used whenever the caller does not supply one.
    _DEFAULT_PROMPT = "请详细描述这张图片的内容"

    def __init__(self, deploy=None):
        """
        Args:
            deploy: an existing MiniCPMVDeploy instance; a fresh one is
                created when None is given.
        """
        self.deploy = deploy if deploy is not None else MiniCPMVDeploy()
        self.model = self.deploy.get_model()
        self.tokenizer = self.deploy.get_tokenizer()
        self.config = self.deploy.get_config()

    def load_model_if_needed(self):
        """
        Lazily load the model/tokenizer through the deploy helper.

        Returns:
            True when both model and tokenizer are available,
            False when loading failed.
        """
        if self.model is None or self.tokenizer is None:
            if not self.deploy.load_model():
                print("模型加载失败")
                return False
            self.model = self.deploy.get_model()
            self.tokenizer = self.deploy.get_tokenizer()
        return True

    @staticmethod
    def _free_memory():
        """Release cached CUDA blocks (when CUDA is present) and force a GC pass."""
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

    def _run_chat(self, msgs, inference_config):
        """
        Invoke model.chat() once.

        Returns:
            (result, elapsed_seconds) tuple.
        """
        start = time.time()
        result = self.model.chat(
            msgs=msgs,
            tokenizer=self.tokenizer,
            enable_thinking=inference_config.get("enable_thinking", False),
            # Cap generation length to keep memory usage bounded.
            max_new_tokens=min(512, inference_config["max_new_tokens"]),
            temperature=inference_config["temperature"],
            top_p=inference_config["top_p"],
        )
        return result, time.time() - start

    def _save_result(self, image_path, image, prompt, elapsed, result):
        """Persist one inference result as a UTF-8 text file in the output dir."""
        output_dir = self.config["paths"].get("output_dir", "outputs")
        os.makedirs(output_dir, exist_ok=True)
        stem = os.path.basename(image_path).split('.')[0]
        output_file = os.path.join(output_dir, f"image_result_{stem}.txt")
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(f"图像路径: {image_path}\n")
            f.write(f"图像尺寸: {image.width}x{image.height}\n")
            f.write(f"提示词: {prompt}\n")
            f.write(f"推理时间: {elapsed:.2f}秒\n")
            f.write("\n===== 推理结果 =====\n")
            f.write(result)
        print(f"结果已保存到: {output_file}")

    def infer(self, image_path, prompt=None, save_result=True):
        """
        Run inference on a single image.

        Args:
            image_path: path to the image file.
            prompt: prompt text; a default description prompt is used
                when None.
            save_result: whether to write the result to a text file under
                the configured output directory.

        Returns:
            The model's answer string, or None on any failure (missing
            file, model-load failure, or a raised exception).
        """
        image = None
        try:
            if not self.load_model_if_needed():
                return None
            if not os.path.exists(image_path):
                print(f"图像文件不存在: {image_path}")
                return None

            print(f"加载图像: {image_path}")
            image = Image.open(image_path).convert('RGB')

            if prompt is None:
                prompt = self._DEFAULT_PROMPT
            print(f"使用提示词: {prompt}")

            # Official messages format: the image object and the prompt text
            # interleaved in one user turn.
            msgs = [{"role": "user", "content": [image] + [prompt]}]
            inference_config = self.config["inference"]

            print(f"开始推理...")
            with torch.no_grad():
                if self.deploy.is_amp_enabled():
                    # Automatic mixed precision to cut memory and latency.
                    with torch.autocast(device_type=self.deploy.device.split(":")[0], dtype=self.deploy.dtype):
                        result, elapsed = self._run_chat(msgs, inference_config)
                else:
                    result, elapsed = self._run_chat(msgs, inference_config)

            print(f"图像推理完成，耗时: {elapsed:.2f}秒")

            if save_result and result:
                self._save_result(image_path, image, prompt, elapsed, result)
            return result
        except Exception as e:
            # Boundary handler: report the failure and signal it with None.
            print(f"图像推理失败: {e}")
            return None
        finally:
            # Close the PIL handle explicitly — `del` alone does not release
            # the underlying file resource deterministically.
            if image is not None:
                image.close()
            self._free_memory()

    def batch_infer(self, image_paths, prompts=None, save_result=True, batch_size=2):
        """
        Run inference over a list of images in memory-friendly chunks.

        Args:
            image_paths: list of image file paths; an empty list yields an
                empty result list (no division-by-zero).
            prompts: optional list of prompts; missing entries fall back to
                the default prompt. The caller's list is never mutated.
            save_result: forwarded to infer() for each image.
            batch_size: number of images per chunk; caches are trimmed
                after each chunk.

        Returns:
            A list of {"image_path": ..., "result": ...} dicts, where
            "result" is None for images that failed.
        """
        results = []
        if not self.load_model_if_needed():
            return results
        # Guard against empty input: the average-time computation below
        # would otherwise divide by zero.
        if not image_paths:
            return results

        # Build a prompt list of matching length WITHOUT mutating the
        # caller's list (`prompts += ...` would extend it in place).
        if prompts is None:
            prompts = [self._DEFAULT_PROMPT] * len(image_paths)
        elif len(prompts) < len(image_paths):
            prompts = list(prompts) + [self._DEFAULT_PROMPT] * (len(image_paths) - len(prompts))

        print(f"开始批量推理，共{len(image_paths)}张图像...")
        print(f"使用批处理大小: {batch_size}")
        total_start_time = time.time()

        num_batches = (len(image_paths) + batch_size - 1) // batch_size
        for batch_start in range(0, len(image_paths), batch_size):
            batch_end = min(batch_start + batch_size, len(image_paths))
            batch_image_paths = image_paths[batch_start:batch_end]
            batch_prompts = prompts[batch_start:batch_end]

            print(f"\n处理批次 {batch_start//batch_size + 1}/{num_batches}: {batch_image_paths}")

            for i, (image_path, prompt) in enumerate(zip(batch_image_paths, batch_prompts)):
                global_idx = batch_start + i
                print(f"处理图像 {global_idx+1}/{len(image_paths)}: {image_path}")
                results.append({
                    "image_path": image_path,
                    "result": self.infer(image_path, prompt, save_result),
                })

            # Trim caches between chunks to keep peak memory down.
            print("清理缓存，优化内存使用...")
            self._free_memory()

        total_elapsed = time.time() - total_start_time
        print(f"\n批量推理完成，共处理{len(image_paths)}张图像，总耗时: {total_elapsed:.2f}秒")
        print(f"平均每张图像耗时: {total_elapsed / len(image_paths):.2f}秒")

        return results