#!/usr/bin/env python3
"""
Transformer视频生成器
从视频数据生成新的视频内容
支持多种视频格式和帧处理
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from PIL import Image
from models.model import TransformerModel

try:
    import cv2
    CV2_AVAILABLE = True
except ImportError:
    CV2_AVAILABLE = False

class VideoGenerator:
    """Transformer-based video generator operating in patch space.

    Frames are cut into fixed-size square patches, linearly embedded,
    tagged with a sinusoidal spatio-temporal positional encoding, and fed
    to a Transformer that regresses the pixel values of future patches.
    """

    def __init__(self, frame_size=(64, 64), patch_size=8, num_frames=16,
                 embedding_dim=512, nhead=8, num_layers=6, num_channels=3, max_patches=1024):
        """
        Video generator - produces new video from raw video data.

        Args:
            frame_size: frame size as (height, width); both sides must be
                divisible by ``patch_size``.
            patch_size: side length of each square image patch.
            num_frames: reference clip length in frames.
            embedding_dim: Transformer embedding dimension.
            nhead: number of attention heads.
            num_layers: number of Transformer layers.
            num_channels: number of image channels.
            max_patches: maximum patch count (kept for API compatibility;
                not enforced anywhere in this class).
        """
        self.frame_size = frame_size
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.embedding_dim = embedding_dim
        self.nhead = nhead
        self.num_layers = num_layers
        self.num_channels = num_channels
        self.max_patches = max_patches

        # Patch-grid geometry per frame.
        self.patches_per_row = frame_size[1] // patch_size
        self.patches_per_col = frame_size[0] // patch_size
        self.patches_per_frame = self.patches_per_row * self.patches_per_col
        self.total_patches = self.patches_per_frame * num_frames

        # Linear patch embedding: flattened patch pixels -> embedding_dim.
        self.patch_dim = num_channels * patch_size * patch_size
        self.patch_embedding = nn.Linear(self.patch_dim, embedding_dim)

        # Fixed sinusoidal spatio-temporal positional encoding.
        self.positional_encoding = self._create_spatiotemporal_encoding(
            num_frames, self.patches_per_frame, embedding_dim
        )

        # Transformer backbone; regresses patches directly in pixel space.
        self.model = TransformerModel(
            input_dim=embedding_dim,
            output_dim=self.patch_dim,  # output has the same dimensionality as a patch
            nhead=nhead,
            num_layers=num_layers
        )

    def _create_spatiotemporal_encoding(self, num_frames, patches_per_frame, d_model):
        """Build a fixed sinusoidal spatio-temporal positional encoding.

        The first half of each vector encodes the frame (time) index and the
        second half the patch (space) index within the frame.

        Returns:
            Tensor of shape (num_frames * patches_per_frame, d_model).
        """
        half = d_model // 2
        # Same frequency schedule for the temporal and spatial halves.
        div_term = torch.exp(torch.arange(0, half, 2) * -(math.log(10000.0) / d_model))

        # Temporal (frame-level) encoding: one row per frame.
        frame_positions = torch.arange(0, num_frames).unsqueeze(1)
        frame_encoding = torch.zeros(num_frames, half)
        frame_encoding[:, 0::2] = torch.sin(frame_positions * div_term)
        frame_encoding[:, 1::2] = torch.cos(frame_positions * div_term)

        # Spatial encoding: one row per patch position within a frame.
        spatial_positions = torch.arange(0, patches_per_frame).unsqueeze(1)
        spatial_encoding = torch.zeros(patches_per_frame, half)
        spatial_encoding[:, 0::2] = torch.sin(spatial_positions * div_term)
        spatial_encoding[:, 1::2] = torch.cos(spatial_positions * div_term)

        # Combine: row (t * patches_per_frame + p) = [frame t | patch p].
        # Vectorized equivalent of the per-index double loop.
        pos_encoding = torch.zeros(num_frames * patches_per_frame, d_model)
        pos_encoding[:, :half] = frame_encoding.repeat_interleave(patches_per_frame, dim=0)
        pos_encoding[:, half:] = spatial_encoding.repeat(num_frames, 1)

        return pos_encoding

    def _video_to_patches(self, video_tensor):
        """Flatten a video into a sequence of spatio-temporal patches.

        Args:
            video_tensor: (batch_size, frames, channels, height, width).

        Returns:
            Float tensor of shape (batch_size, frames * patches_per_frame,
            patch_dim).
        """
        batch_size, num_frames, channels, height, width = video_tensor.shape

        # Validate frame dimensions against the patch grid.
        assert height % self.patch_size == 0 and width % self.patch_size == 0, \
            f"Frame size {height}x{width} must be divisible by patch size {self.patch_size}"

        # Fold frames into the batch dimension for patch extraction.
        video_reshaped = video_tensor.view(-1, channels, height, width)

        # Extract non-overlapping patches.
        # shape: (batch*frames, channels, patches_per_col, patches_per_row, patch, patch)
        patches = video_reshaped.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size)

        # Row-major patch order; channel-first pixels within each patch.
        patches = patches.permute(0, 2, 3, 1, 4, 5).contiguous()

        # FIX: derive the sequence length from the ACTUAL frame count instead
        # of the hard-coded self.total_patches, so clips shorter or longer
        # than num_frames work too.
        patches = patches.view(batch_size, num_frames * self.patches_per_frame, -1)

        # The Transformer expects float input.
        if patches.dtype != torch.float32:
            patches = patches.float()

        return patches

    def _patches_to_video(self, patches):
        """Inverse of ``_video_to_patches``: reassemble patches into frames.

        Args:
            patches: (batch_size, total_patches, patch_dim); total_patches
                must be a multiple of patches_per_frame.

        Returns:
            Video tensor (batch_size, frames, channels, height, width).
        """
        batch_size, total_patches, patch_dim = patches.shape

        # Frame count is derived from the sequence length, so generated
        # sequences longer than num_frames are handled as well.
        actual_num_frames = total_patches // self.patches_per_frame

        # (batch*frames, patches_per_col, patches_per_row, C, patch, patch)
        patches = patches.view(batch_size * actual_num_frames, self.patches_per_col, self.patches_per_row,
                               self.num_channels, self.patch_size, self.patch_size)

        # Undo the patchify permutation so pixels line up in frame order.
        patches = patches.permute(0, 3, 1, 4, 2, 5).contiguous()

        # Merge the patch grid back into full frames.
        frames = patches.view(batch_size * actual_num_frames, self.num_channels,
                              self.patches_per_col * self.patch_size,
                              self.patches_per_row * self.patch_size)

        # Restore video layout: (batch, frames, channels, height, width).
        video = frames.view(batch_size, actual_num_frames, self.num_channels,
                            self.frame_size[0], self.frame_size[1])

        return video

    def preprocess_video(self, video_input):
        """Load and normalize a video to a float tensor in [-1, 1].

        Args:
            video_input: a video file path (requires OpenCV), a numpy array
                of shape (frames, H, W, C) or (batch, frames, C, H, W), or a
                tensor with the same accepted shapes.

        Returns:
            Tensor of shape (batch_size, frames, channels, height, width).

        Raises:
            ImportError: file path given but OpenCV is unavailable.
            ValueError: unreadable file or unsupported input shape/type.
        """
        if isinstance(video_input, str):
            # Decode up to num_frames frames from a video file.
            if not CV2_AVAILABLE:
                raise ImportError("OpenCV is required for video file processing. Install with: pip install opencv-python")

            cap = cv2.VideoCapture(video_input)
            frames = []

            while len(frames) < self.num_frames:
                ret, frame = cap.read()
                if not ret:
                    break

                # BGR -> RGB, then resize to the model's frame size.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame_resized = cv2.resize(frame_rgb, (self.frame_size[1], self.frame_size[0]))
                frames.append(frame_resized)

            cap.release()

            # FIX: fail fast with a clear message instead of crashing later
            # with an opaque shape error on an empty array.
            if not frames:
                raise ValueError(f"No frames could be read from video file: {video_input}")

            # uint8 [0, 255] -> float [0, 1] -> [-1, 1].
            video_array = np.array(frames) / 255.0
            video_array = video_array * 2 - 1
            video_tensor = torch.tensor(video_array, dtype=torch.float32).permute(0, 3, 1, 2).unsqueeze(0)
            # (batch_size=1, num_frames, channels, height, width)

        elif isinstance(video_input, np.ndarray):
            video_max = video_input.max()
            video_min = video_input.min()
            # Rescale to [-1, 1] only when the data exceeds that range.
            if video_max > 1.0 or video_min < -1.0:
                video_input = (video_input - video_min) / (video_max - video_min) * 2 - 1
            video_tensor = torch.tensor(video_input, dtype=torch.float32)
            if video_input.ndim == 4:  # (frames, height, width, channels)
                video_tensor = video_tensor.permute(0, 3, 1, 2).unsqueeze(0)
            elif video_input.ndim == 5:  # already (batch_size, frames, channels, height, width)
                pass
            else:
                raise ValueError("Unsupported video array shape")

        elif isinstance(video_input, torch.Tensor):
            video_tensor = video_input.clone().detach().float()
            video_min = video_tensor.min()
            video_max = video_tensor.max()
            # Rescale to [-1, 1] only when the data exceeds that range.
            if video_max > 1.0 or video_min < -1.0:
                video_tensor = (video_tensor - video_min) / (video_max - video_min) * 2 - 1
            if video_input.ndim == 4:  # (frames, height, width, channels)
                video_tensor = video_tensor.permute(0, 3, 1, 2).unsqueeze(0)
            elif video_input.ndim == 5:  # already (batch_size, frames, channels, height, width)
                pass
            else:
                raise ValueError("Unsupported video tensor shape")

        else:
            raise ValueError("Unsupported video input type")

        return video_tensor

    def generate(self, input_video, num_steps=20, temperature=1.0):
        """
        Autoregressively generate new video frames from an input video.

        Args:
            input_video: input video (file path, numpy array or tensor).
            num_steps: number of frames to generate.
            temperature: randomness control; values != 1.0 inject Gaussian
                noise into the predicted patches.  The head is a regression
                head, so temperature is realized as noise rather than logit
                scaling (the original code applied both, double-counting
                the parameter).

        Returns:
            generated_video: tensor of shape
            (batch, input_frames + num_steps, channels, height, width)
            with values in [0, 1].
        """
        # Preprocess to (batch_size, frames, channels, height, width) in [-1, 1].
        video_tensor = self.preprocess_video(input_video)

        # (batch_size, total_patches, patch_dim)
        patches = self._video_to_patches(video_tensor)

        with torch.no_grad():
            # Embed inside no_grad so no autograd graph is built while sampling.
            embeddings = self.patch_embedding(patches)
            embeddings = embeddings + self.positional_encoding.unsqueeze(0)

            # Autoregressive rollout.
            current_patches = patches.clone()

            for _ in range(num_steps):
                output = self.model(embeddings)

                # Regression task: take the prediction for the last frame only.
                next_frame_patches = output[:, -self.patches_per_frame:]

                # Temperature for a regression head: inject scaled noise once
                # (no additional logit division, which would apply temperature
                # twice with opposing effects).
                if temperature != 1.0:
                    noise_scale = 0.1 * temperature
                    noise = torch.randn_like(next_frame_patches) * noise_scale
                    next_frame_patches = next_frame_patches + noise

                # Clamp predictions to the [-1, 1] pixel range.
                next_frame_patches = torch.tanh(next_frame_patches)

                # Append the new frame to the sequence.
                current_patches = torch.cat([current_patches, next_frame_patches], dim=1)

                # Re-embed a sliding window of the latest total_patches patches
                # so the positional encoding always matches the input length.
                embeddings = self.patch_embedding(current_patches[:, -self.total_patches:])
                embeddings = embeddings + self.positional_encoding.unsqueeze(0)

        # Back to video space.
        generated_video = self._patches_to_video(current_patches)

        # De-normalize from [-1, 1] to [0, 1].
        generated_video = (generated_video + 1) / 2

        return generated_video

    def generate_continuation(self, input_video, continuation_frames=10):
        """Extend the input video by ``continuation_frames`` predicted frames."""
        # Preprocess the input video.
        video_tensor = self.preprocess_video(input_video)
        batch_size, input_frames, channels, height, width = video_tensor.shape

        # (batch_size, total_patches, patch_dim)
        patches = self._video_to_patches(video_tensor)

        with torch.no_grad():
            current_patches = patches.clone()

            for frame_idx in range(continuation_frames):
                # (batch_size, current_patch_count, embed_dim)
                embeddings = self.patch_embedding(current_patches)

                # Positional encoding covering all known frames plus one
                # placeholder frame to be predicted.
                total_frames = input_frames + frame_idx + 1
                extended_encoding = self._create_spatiotemporal_encoding(
                    total_frames, self.patches_per_frame, embeddings.size(-1)
                )
                # Zero embeddings act as the placeholder slot for the next frame.
                placeholder = torch.zeros(embeddings.size(0), self.patches_per_frame, embeddings.size(-1))
                extended_embeddings = torch.cat([embeddings, placeholder], dim=1)
                embeddings = extended_embeddings + extended_encoding.unsqueeze(0)

                output = self.model(embeddings)

                # Regression: directly predict the next frame's patch values
                # (no discrete sampling step); keep only the last frame.
                next_frame_patches = output[:, -self.patches_per_frame:]

                # Clamp predictions to the [-1, 1] pixel range.
                next_frame_patches = torch.tanh(next_frame_patches)

                # Append the new frame to the sequence.
                current_patches = torch.cat([current_patches, next_frame_patches], dim=1)

        # Back to video space and de-normalize to [0, 1].
        generated_video = self._patches_to_video(current_patches)
        generated_video = (generated_video + 1) / 2

        return generated_video

    def generate_from_motion(self, first_frame, motion_vectors, num_frames=16):
        """Generate a video from a first frame and motion vectors.

        NOTE(review): simplified placeholder — ``motion_vectors`` is
        currently unused; the remaining frames are seeded with Gaussian
        noise instead of motion-guided content.  TODO: use the motion
        vectors to guide generation.
        """
        # Preprocess the first frame.
        first_frame_tensor = self.preprocess_video(first_frame)

        # Random noise as the seed for the remaining frames.
        noise_frames = torch.randn(1, num_frames - 1, self.num_channels,
                                   self.frame_size[0], self.frame_size[1])

        # Concatenate the first frame with the noise frames.
        input_video = torch.cat([first_frame_tensor, noise_frames], dim=1)

        return self.generate(input_video)
