"""
视频处理工具函数
提供视频加载、保存、预处理等功能
"""

import logging
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import cv2
import ffmpeg
import imageio
import numpy as np
import torch
from PIL import Image

logger = logging.getLogger(__name__)

class VideoProcessor:
    """Video processing helper.

    Wraps imageio/OpenCV/ffmpeg utilities for loading, saving,
    transforming, filtering, and probing video files.
    """

    def __init__(self, fps: int = 24, resolution: Tuple[int, int] = (256, 256)):
        """Initialize the processor.

        Args:
            fps: Default frame rate used when saving or creating videos.
            resolution: Target (width, height) frames are resized to on load.
        """
        # Defaults applied by load_video / save_video when not overridden.
        self.fps = fps
        self.resolution = resolution
        
    def load_video(self, video_path: str, max_frames: Optional[int] = None) -> torch.Tensor:
        """Load a video file into a float tensor.

        Args:
            video_path: Path to the input video file.
            max_frames: Optional cap on the number of frames read;
                ``None`` reads the whole video.

        Returns:
            Tensor of shape (num_frames, height, width, channels) with
            float values in [0, 255], each frame resized to ``self.resolution``.

        Raises:
            ValueError: If no frames could be read.
        """
        logger.info(f"加载视频: {video_path}")

        try:
            video_reader = imageio.get_reader(video_path)
            try:
                frames = []
                for i, frame in enumerate(video_reader):
                    # `is not None` so max_frames=0 means "zero frames",
                    # not "unlimited" as the old truthiness test implied.
                    if max_frames is not None and i >= max_frames:
                        break

                    # imageio already yields frames in RGB order; the previous
                    # cv2.COLOR_BGR2RGB call swapped the R and B channels and
                    # corrupted colors, so no conversion is needed here.
                    frame_resized = cv2.resize(frame, self.resolution)
                    frames.append(torch.from_numpy(frame_resized).float())
            finally:
                # Close the reader even when resize/iteration raises.
                video_reader.close()

            if not frames:
                raise ValueError("无法读取视频帧")

            video_tensor = torch.stack(frames)

            logger.info(f"视频加载完成: {video_tensor.shape}")
            return video_tensor

        except Exception as e:
            logger.error(f"加载视频失败: {e}")
            raise
    
    def save_video(self, video: torch.Tensor, output_path: str,
                  fps: Optional[int] = None) -> None:
        """Save a frame tensor as a video file.

        Args:
            video: Frame tensor; assumed (num_frames, H, W, C) — one frame is
                written per first-axis slice. Non-uint8 values are clamped to
                [0, 255] and cast.
            output_path: Destination file; parent directories are created.
            fps: Frame rate; falls back to ``self.fps`` when None.
        """
        logger.info(f"保存视频: {output_path}")

        try:
            # Ensure the output directory exists.
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)

            save_fps = fps or self.fps

            # Normalize to uint8 pixel values.
            if video.dtype != torch.uint8:
                video = torch.clamp(video, 0, 255).byte()

            # detach()/cpu() so tensors that require grad or live on an
            # accelerator can be converted; plain .numpy() would raise.
            video_np = video.detach().cpu().numpy()

            writer = imageio.get_writer(output_path, fps=save_fps)
            try:
                for frame in video_np:
                    # imageio expects RGB input; the previous RGB->BGR swap
                    # inverted the color channels in the written file.
                    writer.append_data(frame)
            finally:
                # Close the writer even if a frame write fails.
                writer.close()

            logger.info(f"视频保存完成: {output_path}")

        except Exception as e:
            logger.error(f"保存视频失败: {e}")
            raise
    
    def extract_frames(self, video_path: str, output_dir: str,
                      frame_interval: int = 1) -> List[str]:
        """Extract frames from a video into individual JPEG files.

        Args:
            video_path: Source video file.
            output_dir: Directory for the extracted frames (created if needed).
            frame_interval: Keep every Nth frame; must be >= 1.

        Returns:
            List of saved frame paths, named ``frame_{index:06d}.jpg``.

        Raises:
            ValueError: If ``frame_interval`` is less than 1.
        """
        logger.info(f"提取视频帧: {video_path}")

        if frame_interval < 1:
            # Guard against ZeroDivisionError in the modulo test below.
            raise ValueError(f"frame_interval must be >= 1, got {frame_interval}")

        try:
            output_path = Path(output_dir)
            output_path.mkdir(parents=True, exist_ok=True)

            video_reader = imageio.get_reader(video_path)
            try:
                frame_paths = []
                for i, frame in enumerate(video_reader):
                    if i % frame_interval == 0:
                        frame_path = output_path / f"frame_{i:06d}.jpg"
                        Image.fromarray(frame).save(frame_path)
                        frame_paths.append(str(frame_path))
            finally:
                # Close the reader even when a frame save fails.
                video_reader.close()

            logger.info(f"提取了 {len(frame_paths)} 帧")
            return frame_paths

        except Exception as e:
            logger.error(f"提取帧失败: {e}")
            raise
    
    def create_video_from_frames(self, frame_paths: List[str], output_path: str,
                               fps: Optional[int] = None) -> None:
        """Build a video from an ordered list of frame image files.

        Args:
            frame_paths: Paths of frame images, in playback order.
            output_path: Destination video file.
            fps: Frame rate; falls back to ``self.fps`` when None.

        Raises:
            ValueError: If ``frame_paths`` is empty.
        """
        logger.info(f"从帧创建视频: {output_path}")

        try:
            if not frame_paths:
                # An empty list would silently produce a broken/empty file.
                raise ValueError("frame_paths 为空")

            save_fps = fps or self.fps

            writer = imageio.get_writer(output_path, fps=save_fps)
            try:
                for frame_path in frame_paths:
                    writer.append_data(imageio.imread(frame_path))
            finally:
                # The original leaked the writer when imread/append raised.
                writer.close()

            logger.info(f"视频创建完成: {output_path}")

        except Exception as e:
            logger.error(f"创建视频失败: {e}")
            raise
    
    def resize_video(self, video: torch.Tensor, new_resolution: Tuple[int, int]) -> torch.Tensor:
        """Resize every frame of a video tensor to a new (width, height).

        Frames are converted to uint8 for OpenCV, resized, and re-stacked.
        """
        logger.info(f"调整视频分辨率: {new_resolution}")

        def _resize_one(frame: torch.Tensor) -> torch.Tensor:
            pixels = frame.numpy().astype(np.uint8)
            return torch.from_numpy(cv2.resize(pixels, new_resolution))

        return torch.stack([_resize_one(frame) for frame in video])
    
    def change_fps(self, video: torch.Tensor, new_fps: int, original_fps: int) -> torch.Tensor:
        """Resample a video tensor to a new frame rate.

        Output frames are produced by linear interpolation between the two
        temporally adjacent input frames; endpoints clamp to the first/last
        frame.

        Args:
            video: Frame tensor with frames along the first axis.
            new_fps: Desired frame rate.
            original_fps: Frame rate the input was recorded at.

        Returns:
            Tensor with ``max(1, int(duration * new_fps))`` frames; the input
            is returned unchanged when the rates already match.
        """
        logger.info(f"改变帧率: {original_fps} -> {new_fps}")

        if new_fps == original_fps:
            return video

        num_frames = video.shape[0]
        duration = num_frames / original_fps
        # Keep at least one output frame: int() truncation could yield 0 for
        # very short clips and torch.stack([]) would then raise.
        new_num_frames = max(1, int(duration * new_fps))

        # Old and new sampling instants over the same time span.
        old_times = torch.linspace(0, duration, num_frames)
        new_times = torch.linspace(0, duration, new_num_frames)

        new_frames = []
        for t in new_times:
            # Index of the first original frame at or after time t.
            idx = torch.searchsorted(old_times, t)
            if idx == 0:
                frame = video[0]
            elif idx >= num_frames:
                frame = video[-1]
            else:
                # Linear blend between the surrounding frames.
                t1, t2 = old_times[idx - 1], old_times[idx]
                alpha = (t - t1) / (t2 - t1)
                frame = (1 - alpha) * video[idx - 1] + alpha * video[idx]

            new_frames.append(frame)

        return torch.stack(new_frames)
    
    def add_audio(self, video_path: str, audio_path: str, output_path: str) -> None:
        """Mux an audio track into a video via ffmpeg.

        The video stream is stream-copied; audio is encoded as AAC and the
        output is truncated to the shorter of the two inputs (``shortest``).
        """
        logger.info(f"为视频添加音频: {audio_path}")

        try:
            streams = (ffmpeg.input(video_path), ffmpeg.input(audio_path))
            job = ffmpeg.output(
                *streams,
                output_path,
                vcodec='copy',
                acodec='aac',
                shortest=None,
            )
            job.overwrite_output().run()

            logger.info(f"音频添加完成: {output_path}")

        except Exception as e:
            logger.error(f"添加音频失败: {e}")
            raise
    
    def extract_audio(self, video_path: str, output_path: str) -> None:
        """Extract the audio track from a video into an MP3 file at 192 kbps."""
        logger.info(f"提取音频: {video_path}")

        try:
            source = ffmpeg.input(video_path)
            job = source.output(output_path, acodec='mp3', ab='192k')
            job.overwrite_output().run()

            logger.info(f"音频提取完成: {output_path}")

        except Exception as e:
            logger.error(f"提取音频失败: {e}")
            raise
    
    def get_video_info(self, video_path: str) -> Dict[str, Any]:
        """Probe a video file and return basic metadata for its video stream.

        Args:
            video_path: Path to the video file.

        Returns:
            Dict with ``width``, ``height``, ``fps``, ``duration``,
            ``bitrate``, and ``codec`` keys.

        Raises:
            ValueError: If the file has no video stream.
        """
        logger.info(f"获取视频信息: {video_path}")

        try:
            probe = ffmpeg.probe(video_path)

            # Pick the actual video stream; streams[0] may be audio/subtitles.
            video_info = next(
                (s for s in probe['streams'] if s.get('codec_type') == 'video'),
                None,
            )
            if video_info is None:
                raise ValueError(f"没有找到视频流: {video_path}")

            # r_frame_rate is a "num/den" string; parse it instead of eval(),
            # which would execute arbitrary text taken from the probed file.
            num, _, den = video_info['r_frame_rate'].partition('/')
            fps = float(num) / float(den) if den else float(num)

            info = {
                'width': int(video_info['width']),
                'height': int(video_info['height']),
                'fps': fps,
                'duration': float(video_info['duration']),
                'bitrate': int(video_info['bit_rate']),
                'codec': video_info['codec_name']
            }

            logger.info(f"视频信息: {info}")
            return info

        except Exception as e:
            logger.error(f"获取视频信息失败: {e}")
            raise
    
    def apply_filter(self, video: torch.Tensor, filter_type: str, **kwargs) -> torch.Tensor:
        """Apply a named filter to every frame of a video tensor.

        Supported types: "blur", "sharpen", "grayscale", "sepia". Extra
        keyword arguments are forwarded to the blur/sharpen filters only.
        Unknown types log a warning and return the input unchanged.
        """
        logger.info(f"应用滤镜: {filter_type}")

        # Lazy dispatch table so only the selected filter runs.
        handlers = {
            "blur": lambda: self._apply_blur_filter(video, **kwargs),
            "sharpen": lambda: self._apply_sharpen_filter(video, **kwargs),
            "grayscale": lambda: self._apply_grayscale_filter(video),
            "sepia": lambda: self._apply_sepia_filter(video),
        }

        handler = handlers.get(filter_type)
        if handler is None:
            logger.warning(f"未知的滤镜类型: {filter_type}")
            return video
        return handler()
    
    def _apply_blur_filter(self, video: torch.Tensor, kernel_size: int = 5) -> torch.Tensor:
        """Box-blur every frame with a normalized kernel_size x kernel_size kernel."""
        kernel = np.ones((kernel_size, kernel_size), np.float32) / (kernel_size * kernel_size)

        def _blur_one(frame: torch.Tensor) -> torch.Tensor:
            pixels = frame.numpy().astype(np.uint8)
            return torch.from_numpy(cv2.filter2D(pixels, -1, kernel))

        return torch.stack([_blur_one(frame) for frame in video])
    
    def _apply_sharpen_filter(self, video: torch.Tensor) -> torch.Tensor:
        """Sharpen every frame with a 3x3 high-boost convolution kernel."""
        kernel = np.array([[-1, -1, -1],
                          [-1,  9, -1],
                          [-1, -1, -1]])

        def _sharpen_one(frame: torch.Tensor) -> torch.Tensor:
            pixels = frame.numpy().astype(np.uint8)
            return torch.from_numpy(cv2.filter2D(pixels, -1, kernel))

        return torch.stack([_sharpen_one(frame) for frame in video])
    
    def _apply_grayscale_filter(self, video: torch.Tensor) -> torch.Tensor:
        """Convert every frame to grayscale, kept as three identical channels."""
        def _to_gray(frame: torch.Tensor) -> torch.Tensor:
            pixels = frame.numpy().astype(np.uint8)
            single_channel = cv2.cvtColor(pixels, cv2.COLOR_RGB2GRAY)
            # Re-expand to 3 channels so the frame shape stays (H, W, 3).
            return torch.from_numpy(cv2.cvtColor(single_channel, cv2.COLOR_GRAY2RGB))

        return torch.stack([_to_gray(frame) for frame in video])
    
    def _apply_sepia_filter(self, video: torch.Tensor) -> torch.Tensor:
        """Apply a sepia tone to every frame via a per-pixel color transform."""
        # Classic sepia weights; rows produce the output R, G, B channels.
        sepia_matrix = np.array([
            [0.393, 0.769, 0.189],
            [0.349, 0.686, 0.168],
            [0.272, 0.534, 0.131]
        ])

        def _tone_one(frame: torch.Tensor) -> torch.Tensor:
            normalized = frame.numpy().astype(np.float32) / 255.0
            toned = cv2.transform(normalized, sepia_matrix)
            toned = np.clip(toned, 0, 1) * 255
            return torch.from_numpy(toned.astype(np.uint8))

        return torch.stack([_tone_one(frame) for frame in video])
    
    def create_thumbnail(self, video_path: str, output_path: str, 
                        time_position: float = 0.5) -> None:
        """Write a single-frame thumbnail image taken from the video.

        Args:
            video_path: Source video file.
            output_path: Destination image file.
            time_position: Seek offset handed to ffmpeg's ``ss`` option
                (seconds from the start of the video).
        """
        logger.info(f"创建缩略图: {video_path}")

        try:
            source = ffmpeg.input(video_path, ss=time_position)
            job = source.output(output_path, vframes=1, format='image2')
            job.overwrite_output().run()

            logger.info(f"缩略图创建完成: {output_path}")

        except Exception as e:
            logger.error(f"创建缩略图失败: {e}")
            raise
    
    def concatenate_videos(self, video_paths: List[str], output_path: str) -> None:
        """Concatenate multiple videos using ffmpeg's concat demuxer.

        Streams are copied without re-encoding, so the inputs must share
        compatible codecs/parameters (a concat-demuxer requirement).

        Args:
            video_paths: Ordered list of input video paths.
            output_path: Destination video file.
        """
        logger.info(f"拼接视频: {len(video_paths)} 个文件")

        list_file = None
        try:
            # Unique temp file instead of a fixed name in the CWD, so
            # concurrent calls cannot clobber each other's lists.
            with tempfile.NamedTemporaryFile(
                mode='w', suffix='.txt', delete=False
            ) as f:
                list_file = Path(f.name)
                for video_path in video_paths:
                    # NOTE(review): paths containing a single quote would need
                    # escaping for the concat demuxer — same as the original.
                    f.write(f"file '{video_path}'\n")

            ffmpeg.input(str(list_file), format='concat', safe=0).output(
                output_path,
                c='copy'
            ).overwrite_output().run()

            logger.info(f"视频拼接完成: {output_path}")

        except Exception as e:
            logger.error(f"拼接视频失败: {e}")
            raise
        finally:
            # The original left the list file behind on failure.
            if list_file is not None and list_file.exists():
                list_file.unlink()

def video_to_tensor(video_path: str, max_frames: Optional[int] = None) -> torch.Tensor:
    """Load a video file as a tensor using a default-configured processor."""
    return VideoProcessor().load_video(video_path, max_frames)

def tensor_to_video(video: torch.Tensor, output_path: str, fps: int = 24) -> None:
    """Write a frame tensor to a video file using a default-configured processor."""
    VideoProcessor().save_video(video, output_path, fps)

def get_video_duration(video_path: str) -> float:
    """Return the video's duration in seconds, as reported by ffprobe."""
    return VideoProcessor().get_video_info(video_path)['duration']

def resize_video_file(input_path: str, output_path: str, 
                     new_resolution: Tuple[int, int]) -> None:
    """Load a video file, resize every frame, and save the result."""
    processor = VideoProcessor()
    frames = processor.load_video(input_path)
    processor.save_video(processor.resize_video(frames, new_resolution), output_path)