import cv2
import numpy as np
import os
import torch
from PIL import Image
import io
import tempfile
from tqdm import tqdm
from datetime import datetime
from config import data_transforms

class VideoProcessor:
    """Run a binary real/fake image classifier over video frames and aggregate results."""

    def __init__(self, model, device):
        """
        Initialize the video processor.

        Args:
            model: Loaded model. Assumed to output a single sigmoid
                probability (likelihood of "fake") per input image —
                TODO confirm against the model definition.
            device: Device to run inference on (CPU or GPU).
        """
        self.model = model
        self.device = device
        # Use the evaluation-time preprocessing pipeline from the project config.
        self.transform = data_transforms["test"]

    def process_frame(self, frame):
        """
        Run inference on a single video frame.

        Args:
            frame: Video frame in OpenCV format (BGR ndarray).

        Returns:
            dict with keys:
                prediction: 1 if classified as fake, 0 if real.
                probability: raw model output (probability of "fake").
                label: human-readable label string.
                confidence: probability of the predicted class.
        """
        # OpenCV delivers BGR; the transform/model pipeline expects RGB.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Convert to a PIL image so torchvision-style transforms apply.
        pil_image = Image.fromarray(rgb_frame)

        # Preprocess, add a batch dimension, and move to the target device.
        image_tensor = self.transform(pil_image).unsqueeze(0).to(self.device)

        # Inference without gradient tracking.
        with torch.no_grad():
            output = self.model(image_tensor)
            prob = output.float().squeeze().cpu().numpy()

        # Threshold the fake-probability at 0.5.
        prediction = 1 if prob >= 0.5 else 0
        return {
            "prediction": int(prediction),
            "probability": float(prob),
            "label": "伪造" if prediction == 1 else "真实",
            "confidence": float(prob) if prediction == 1 else float(1 - prob)
        }

    def process_video(self, video_file, sample_rate=10, max_frames=300, output_video=True):
        """
        Process a video file, classifying sampled frames.

        Args:
            video_file: Raw video file content (bytes).
            sample_rate: Run inference on every `sample_rate`-th frame.
            max_frames: Maximum number of frames to run inference on.
            output_video: Whether to write an annotated output video.

        Returns:
            Tuple of (results dict, path of the annotated output video or None).
        """
        # Persist the uploaded bytes to disk so OpenCV can open them by path.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_input:
            temp_input.write(video_file)
            temp_input_path = temp_input.name

        cap = cv2.VideoCapture(temp_input_path)
        out = None
        output_path = None

        try:
            # Video properties.
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            if fps <= 0:
                # Some containers report 0 FPS; fall back to a sane default so
                # cv2.VideoWriter produces a playable file.
                fps = 25
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # Set up the annotated output video, if requested.
            if output_video:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                output_path = f"video_result_{timestamp}.mp4"
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            # Accumulated results.
            results = {
                "frames_total": total_frames,
                "frames_processed": 0,
                "real_count": 0,
                "fake_count": 0,
                "frame_results": []
            }

            frame_idx = 0
            processed_count = 0

            with tqdm(total=min(total_frames, max_frames * sample_rate), desc="处理视频") as pbar:
                while cap.isOpened() and processed_count < max_frames:
                    success, frame = cap.read()
                    if not success:
                        break

                    pbar.update(1)

                    # Only run inference on every `sample_rate`-th frame.
                    if frame_idx % sample_rate == 0:
                        result = self.process_frame(frame)
                        processed_count += 1

                        # Update per-class counters.
                        if result["prediction"] == 0:
                            results["real_count"] += 1
                        else:
                            results["fake_count"] += 1

                        results["frame_results"].append({
                            "frame_index": frame_idx,
                            "prediction": result["prediction"],
                            "confidence": result["confidence"]
                        })

                        # Draw the detection result on the frame.
                        # NOTE(review): cv2.putText cannot render non-ASCII glyphs,
                        # so the Chinese label will show as "??" — consider drawing
                        # via PIL if readable overlay text is required.
                        if output_video:
                            label = f"{result['label']} ({result['confidence']:.2f})"
                            color = (0, 255, 0) if result["prediction"] == 0 else (0, 0, 255)
                            cv2.putText(frame, label, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)

                    # Every read frame is written out, annotated or not.
                    if output_video:
                        out.write(frame)

                    frame_idx += 1

            results["frames_processed"] = processed_count

            # Aggregate: majority vote over the sampled frames.
            if processed_count > 0:
                fake_ratio = results["fake_count"] / processed_count
                results["overall_prediction"] = 1 if fake_ratio > 0.5 else 0
                results["fake_ratio"] = fake_ratio
                results["overall_label"] = "伪造" if results["overall_prediction"] == 1 else "真实"
            else:
                results["overall_prediction"] = None
                results["fake_ratio"] = None
                results["overall_label"] = "未知（处理0帧）"
        finally:
            # Release resources and clean up the temp file even if processing
            # raised — the original code leaked all three on any exception.
            cap.release()
            if out is not None:
                out.release()
            try:
                os.unlink(temp_input_path)
            except OSError:
                # Best-effort cleanup; don't mask a processing error.
                pass

        return results, output_path if output_video else None