import os
import time
import cv2
import torch
from PIL import Image
from core.deploy import MiniCPMVDeploy

class VideoInferencer:
    """Video inference helper built on top of a MiniCPM-V deployment.

    Samples keyframes from a video with OpenCV and feeds them, together
    with a text prompt, to the model's ``chat`` interface.
    """

    def __init__(self, deploy=None):
        """Create an inferencer.

        Args:
            deploy: Optional ``MiniCPMVDeploy`` instance. A fresh one is
                created when omitted; passing an existing instance avoids
                loading the model twice.
        """
        self.deploy = deploy if deploy is not None else MiniCPMVDeploy()
        # Model/tokenizer may still be None here; load_model_if_needed()
        # performs the lazy load on first use.
        self.model = self.deploy.get_model()
        self.tokenizer = self.deploy.get_tokenizer()
        self.config = self.deploy.get_config()

    def load_model_if_needed(self):
        """Lazily load the model and tokenizer.

        Returns:
            True when both model and tokenizer are available, False when
            loading failed.
        """
        if self.model is None or self.tokenizer is None:
            if not self.deploy.load_model():
                print("模型加载失败")
                return False
            self.model = self.deploy.get_model()
            self.tokenizer = self.deploy.get_tokenizer()
        return True

    def sample_keyframes(self, video_path):
        """Sample keyframes from a video.

        Args:
            video_path: Path to the video file.

        Returns:
            Tuple ``(frames, frame_count, fps, duration)`` — sampled PIL
            images, total frame count, frames per second, and duration in
            seconds. Returns ``([], 0, 0, 0)`` on any failure.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(video_path)
            # Fail fast when the container cannot be opened; otherwise the
            # reported frame count is 0 and the loop below silently does
            # nothing while still "succeeding".
            if not cap.isOpened():
                print(f"无法打开视频: {video_path}")
                return [], 0, 0, 0

            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            duration = frame_count / fps if fps > 0 else 0

            print(f"视频信息: {frame_count}帧, {fps:.2f}FPS, 时长约{duration:.2f}秒")

            # Sampling configuration with conservative defaults.
            sample_interval = self.config['video'].get('sample_interval', 5)
            max_frames = self.config['video'].get('max_frames', 8)

            print(f"采样配置: 每{sample_interval}帧采样一次，最多采样{max_frames}帧")

            frames = []
            for i in range(frame_count):
                ret, frame = cap.read()
                if not ret:
                    break
                # Keep every sample_interval-th frame, plus the final frame.
                if i % sample_interval == 0 or i == frame_count - 1:
                    # OpenCV decodes to BGR; PIL expects RGB.
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frames.append(Image.fromarray(frame_rgb))
                    if len(frames) >= max_frames:
                        break

            print(f"采样了{len(frames)}帧进行分析")
            return frames, frame_count, fps, duration
        except Exception as e:
            print(f"采样关键帧时出错: {e}")
            return [], 0, 0, 0
        finally:
            # Release the capture even when an exception fired mid-loop
            # (the original leaked it on the error path).
            if cap is not None:
                cap.release()

    def infer(self, video_path, prompt=None, save_result=True):
        """Run inference on a single video.

        Args:
            video_path: Path to the video file.
            prompt: Prompt text; a default description prompt is used when
                None.
            save_result: Whether to write the result to a text file under
                the configured output directory.

        Returns:
            The model's answer string, or None on any failure.
        """
        try:
            if not self.load_model_if_needed():
                return None

            if not os.path.exists(video_path):
                print(f"视频文件不存在: {video_path}")
                return None

            print(f"处理视频: {video_path}")

            frames, frame_count, fps, duration = self.sample_keyframes(video_path)
            if not frames:
                print("未采样到任何帧，无法进行推理")
                return None

            if prompt is None:
                prompt = "请详细描述这个视频的内容"
            print(f"使用提示词: {prompt}")

            print(f"使用官方示例的messages格式进行视频推理...")
            # FIX: pass ALL sampled frames, not just frames[0] — the
            # official MiniCPM-V video example puts the full frame list in
            # the message content; using a single frame discarded the
            # keyframe sampling performed above.
            msgs = [{"role": "user", "content": frames + [prompt]}]

            inference_config = self.config["inference"]
            print(f"使用推理配置: {inference_config}")

            # Shared kwargs so the AMP and non-AMP branches cannot drift
            # apart (previously the full argument list was duplicated).
            chat_kwargs = {
                "msgs": msgs,
                "tokenizer": self.tokenizer,
                "enable_thinking": inference_config.get("enable_thinking", False),
                "max_new_tokens": inference_config["max_new_tokens"],
                "temperature": inference_config["temperature"],
                "top_p": inference_config["top_p"],
            }

            print(f"开始推理...")
            with torch.no_grad():
                start_time = time.time()
                if self.deploy.is_amp_enabled():
                    print("使用自动混合精度(AMP)")
                    with torch.autocast(device_type=self.deploy.device.split(":")[0], dtype=self.deploy.dtype):
                        result = self.model.chat(**chat_kwargs)
                else:
                    print("使用标准精度")
                    result = self.model.chat(**chat_kwargs)
                end_time = time.time()

            print(f"视频推理完成，耗时: {end_time - start_time:.2f}秒")
            print(f"推理结果: {result}")

            if save_result:
                self._save_result(
                    video_path, prompt, result,
                    len(frames), frame_count, fps, duration,
                    end_time - start_time,
                )

            return result
        except Exception as e:
            print(f"视频推理失败: {e}")
            return None

    def _save_result(self, video_path, prompt, result, sampled, frame_count,
                     fps, duration, elapsed):
        """Write one inference result to a text file in the output directory."""
        output_dir = self.config["paths"]["output_dir"]
        # Create the directory up front so open() below cannot fail on a
        # missing path.
        os.makedirs(output_dir, exist_ok=True)
        # splitext keeps dots inside the name ("a.b.mp4" -> "a.b"), unlike
        # split('.')[0] which truncated at the first dot.
        stem = os.path.splitext(os.path.basename(video_path))[0]
        output_file = os.path.join(output_dir, f"video_result_{stem}.txt")
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(f"视频路径: {video_path}\n")
            f.write(f"视频信息: {frame_count}帧, {fps:.2f}FPS, 时长约{duration:.2f}秒\n")
            f.write(f"采样帧数: {sampled}\n")
            f.write(f"提示词: {prompt}\n")
            f.write(f"推理时间: {elapsed:.2f}秒\n")
            f.write("\n===== 推理结果 =====\n")
            f.write(result)
        print(f"结果已保存到: {output_file}")

    def batch_infer(self, video_paths, prompts=None, save_result=True):
        """Run inference over multiple videos.

        Args:
            video_paths: List of video paths.
            prompts: Optional list of prompts, one per video. The default
                prompt is used for every video when None or when the list
                length does not match ``video_paths``.
            save_result: Forwarded to ``infer``.

        Returns:
            Dict mapping each video path to its inference result (None on
            failure).
        """
        results = {}

        # Normalize prompts so zip() below never drops videos.
        if prompts is None:
            prompts = [None] * len(video_paths)
        elif len(prompts) != len(video_paths):
            print("警告: prompts列表长度与video_paths不一致，将使用默认提示词")
            prompts = [None] * len(video_paths)

        for i, (video_path, prompt) in enumerate(zip(video_paths, prompts)):
            print(f"\n[{i+1}/{len(video_paths)}] 处理视频: {video_path}")
            results[video_path] = self.infer(video_path, prompt, save_result)

        return results