# video_fusion_refactored.py

import cv2
import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.VideoClip import ImageClip
import os
import sys
from datetime import datetime
import subprocess
import shutil

class VideoFusionProcessor:
    """Composite a "digital human" video onto a static background image.

    Inputs (all expected next to this script):
      - ``bg.png``     : background image (any channel layout; normalized to BGR)
      - ``result.mp4`` : digital-human footage (moviepy yields RGB frames)
      - ``mask.mp4``   : matching matte video (white = keep, black = transparent)

    Output: ``output_<timestamp>.mp4`` with the digital human alpha-blended
    into the bottom-right quarter-width corner of the background, with the
    original audio track muxed back in via ffmpeg.
    """

    def __init__(self):
        """Resolve the working directory, stamp this run, and do platform setup."""
        self.current_dir = os.path.dirname(os.path.abspath(__file__))
        # Timestamp keeps output/temp filenames unique per run.
        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.setup_encoding()
        self.setup_font()

    def setup_encoding(self):
        """Enable OpenCV's JPEG-2000 (Jasper) codec on Windows."""
        if sys.platform == 'win32':
            os.environ['OPENCV_IO_ENABLE_JASPER'] = '1'

    def setup_font(self):
        """Configure matplotlib to prefer CJK-capable fonts.

        NOTE(review): nothing in this file plots, so this only affects later
        matplotlib use by the caller. Guarded so a missing matplotlib does not
        prevent video processing.
        """
        try:
            import matplotlib.pyplot as plt
            plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
        except ImportError:
            print("警告: matplotlib未安装，跳过字体设置")

    def get_file_paths(self):
        """Return a dict of every input, output, and temp path for this run."""
        return {
            'bg': os.path.join(self.current_dir, "bg.png"),
            'result_video': os.path.join(self.current_dir, "result.mp4"),
            'mask_video': os.path.join(self.current_dir, "mask.mp4"),
            'output': os.path.join(self.current_dir, f"output_{self.timestamp}.mp4"),
            'temp_video': os.path.join(self.current_dir, f"temp_{self.timestamp}.mp4"),
            'audio_temp': os.path.join(self.current_dir, f"audio_temp_{self.timestamp}.mp3")
        }

    def load_bg_image(self, bg_path):
        """Load the background image as a 3-channel BGR uint8 array.

        Tries OpenCV first, then falls back to PIL. Always normalizes to
        3-channel BGR: downstream alpha blending and ``cv2.VideoWriter``
        both require exactly 3 channels, so BGRA (PNG alpha) and grayscale
        inputs are converted here.

        Raises:
            FileNotFoundError: if neither reader can decode the file.
        """
        bg_img = cv2.imread(bg_path, cv2.IMREAD_UNCHANGED)

        if bg_img is not None:
            # IMREAD_UNCHANGED may yield BGRA or a 2-D grayscale array;
            # normalize so process_frame() and VideoWriter get plain BGR.
            if bg_img.ndim == 2:
                bg_img = cv2.cvtColor(bg_img, cv2.COLOR_GRAY2BGR)
            elif bg_img.shape[2] == 4:
                bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGRA2BGR)
            return bg_img

        # OpenCV failed (e.g. non-ASCII path on Windows); try PIL.
        print("OpenCV无法读取背景图片，尝试使用PIL...")
        try:
            from PIL import Image
            bg_pil = Image.open(bg_path)
            bg_img = np.array(bg_pil)

            if bg_img.ndim == 2:
                bg_img = cv2.cvtColor(bg_img, cv2.COLOR_GRAY2BGR)
            elif bg_img.shape[2] == 4:
                # PIL arrays are RGB(A); drop alpha and swap to BGR.
                bg_img = cv2.cvtColor(bg_img, cv2.COLOR_RGBA2BGR)
            else:
                bg_img = cv2.cvtColor(bg_img, cv2.COLOR_RGB2BGR)

            print(f"成功通过PIL读取背景图片: {bg_path}")
            return bg_img
        except Exception as e:
            raise FileNotFoundError(f"无法读取背景图片: {bg_path}. 错误: {e}")

    def extract_audio(self, video_clip, audio_temp_path):
        """Write the clip's audio track to *audio_temp_path*.

        Returns:
            bool: True if an audio track existed and was written, else False.
        """
        if getattr(video_clip, 'audio', None) is not None:
            video_clip.audio.write_audiofile(audio_temp_path)
            print(f"成功提取音频到: {audio_temp_path}")
            return True
        print("警告: 视频中没有音频轨道")
        return False

    def calculate_digital_human_params(self, bg_img, width, height):
        """Compute the on-background size and position of the digital human.

        The overlay is a quarter of the background's width, keeps the source
        video's aspect ratio, and sits flush in the bottom-right corner.

        Args:
            bg_img: background array, shape (H, W, ...).
            width: source video width in pixels.
            height: source video height in pixels.

        Returns:
            dict with overlay 'width'/'height', 'pos_x'/'pos_y', and the
            background's 'bg_width'/'bg_height'.
        """
        bg_height, bg_width = bg_img.shape[:2]
        digital_human_width = int(bg_width / 4)
        # Scale height by the source aspect ratio so the overlay is not distorted.
        digital_human_height = int((height / width) * digital_human_width)

        # Anchor to the bottom-right corner.
        pos_x = bg_width - digital_human_width
        pos_y = bg_height - digital_human_height

        print(f"数字人大小: {digital_human_width}x{digital_human_height}")
        print(f"数字人位置: ({pos_x}, {pos_y})")

        return {
            'width': digital_human_width,
            'height': digital_human_height,
            'pos_x': pos_x,
            'pos_y': pos_y,
            'bg_width': bg_width,
            'bg_height': bg_height
        }

    def process_frame(self, result_frame, mask_frame, bg_img, dh_params):
        """Alpha-blend one digital-human frame onto a copy of the background.

        Args:
            result_frame: RGB frame from moviepy (the digital human).
            mask_frame: matte frame, grayscale or color; white keeps pixels.
            bg_img: 3-channel BGR background at full output size.
            dh_params: dict from :meth:`calculate_digital_human_params`.

        Returns:
            BGR uint8 frame of shape (bg_height, bg_width, 3), ready for
            ``cv2.VideoWriter.write``.
        """
        dh_width, dh_height = dh_params['width'], dh_params['height']
        pos_x, pos_y = dh_params['pos_x'], dh_params['pos_y']

        # Collapse a color matte to one channel. moviepy decodes frames as
        # RGB, so RGB2GRAY (not BGR2GRAY) uses the correct channel weights.
        if mask_frame.ndim == 3:
            mask_frame = cv2.cvtColor(mask_frame, cv2.COLOR_RGB2GRAY)

        # Resize the matte straight to overlay size, scale to [0, 1], and
        # replicate to 3 channels so the blend is per-channel.
        alpha = cv2.resize(mask_frame, (dh_width, dh_height)) / 255.0
        alpha = np.stack([alpha, alpha, alpha], axis=2)

        # moviepy yields RGB; OpenCV blending/writing expects BGR.
        digital_human = cv2.cvtColor(result_frame, cv2.COLOR_RGB2BGR)
        digital_human = cv2.resize(digital_human, (dh_width, dh_height))

        # Blend into the bottom-right ROI of a fresh copy of the background.
        composed = bg_img.copy()
        roi = composed[pos_y:pos_y + dh_height, pos_x:pos_x + dh_width]
        blended = (1 - alpha) * roi + alpha * digital_human
        composed[pos_y:pos_y + dh_height, pos_x:pos_x + dh_width] = blended

        return composed.astype(np.uint8)

    def merge_video_audio(self, temp_video_path, audio_temp_path, output_filename):
        """Mux the silent composited video with the extracted audio.

        Uses ffmpeg when an audio file is available. If ffmpeg is missing or
        fails, degrades gracefully by delivering the silent video instead of
        aborting the whole run.
        """
        if audio_temp_path and os.path.exists(audio_temp_path):
            print("使用ffmpeg合并视频和音频...")
            ffmpeg_cmd = [
                'ffmpeg',
                '-i', temp_video_path,
                '-i', audio_temp_path,
                '-c:v', 'libx264',
                '-c:a', 'aac',
                '-strict', 'experimental',
                '-y',  # overwrite existing output
                output_filename
            ]
            try:
                subprocess.run(ffmpeg_cmd, check=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                print("成功使用ffmpeg合并视频和音频")
                return
            except (subprocess.CalledProcessError, FileNotFoundError) as e:
                # Fall through to the silent-copy path below.
                print(f"警告: ffmpeg合并失败，将输出无音频视频。错误: {e}")

        # No audio (or muxing failed): deliver the silent video as-is.
        shutil.copy2(temp_video_path, output_filename)
        print("没有音频，直接复制视频文件")

    def cleanup_temp_files(self, temp_files):
        """Best-effort removal of temporary files.

        Retries each deletion up to 3 times with a short pause to ride out
        transient file locks on Windows; logs (never raises) on failure.
        """
        import time  # local import: only needed for the retry back-off

        for temp_file in temp_files:
            if not os.path.exists(temp_file):
                continue
            for attempt in range(3):
                try:
                    os.remove(temp_file)
                    break
                except PermissionError:
                    if attempt == 2:
                        print(f"警告: 无法删除临时文件: {temp_file}")
                    else:
                        time.sleep(0.5)

    def process_video_fusion(self):
        """Run the full pipeline: load assets, composite frames, mux audio.

        Errors are reported rather than raised; temp files are cleaned up and
        all video resources (writer, clips) are released on every exit path.
        """
        print("开始处理视频融合任务...")

        paths = self.get_file_paths()
        result_clip = mask_clip = out = None
        has_audio = False

        try:
            # Load and normalize the background (guaranteed 3-channel BGR).
            bg_img = self.load_bg_image(paths['bg'])

            # moviepy readers give us frame access plus the audio track.
            result_clip = VideoFileClip(paths['result_video'])
            mask_clip = VideoFileClip(paths['mask_video'])

            has_audio = self.extract_audio(result_clip, paths['audio_temp'])

            fps = result_clip.fps
            duration = result_clip.duration
            width = result_clip.w
            height = result_clip.h

            print(f"视频参数: 分辨率={width}x{height}, FPS={fps}, 时长={duration:.2f}秒")

            dh_params = self.calculate_digital_human_params(bg_img, width, height)

            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(paths['temp_video'], fourcc, fps,
                                  (dh_params['bg_width'], dh_params['bg_height']))

            frame_count = int(fps * duration)
            for i in range(frame_count):
                try:
                    result_frame = result_clip.get_frame(i / fps)
                    mask_frame = mask_clip.get_frame(i / fps)

                    processed_frame = self.process_frame(result_frame, mask_frame,
                                                         bg_img, dh_params)
                    out.write(processed_frame)
                except Exception as e:
                    print(f"处理第{i}帧时出错: {e}")
                    # Keep the timeline intact: substitute the bare background
                    # (already 3-channel BGR, so the writer accepts it).
                    out.write(bg_img)

                if (i + 1) % 10 == 0 or (i + 1) == frame_count:
                    progress = (i + 1) / frame_count * 100
                    print(f"处理进度: {i+1}/{frame_count} ({progress:.1f}%)")

            # Release the writer/readers BEFORE ffmpeg touches the temp file.
            out.release()
            out = None
            result_clip.close()
            result_clip = None
            mask_clip.close()
            mask_clip = None

            print("视频帧处理完成，正在合成最终视频...")

            self.merge_video_audio(paths['temp_video'],
                                   paths['audio_temp'] if has_audio else None,
                                   paths['output'])

            print(f"视频融合完成！输出文件: {paths['output']}")

            if os.path.exists(paths['output']):
                file_size = os.path.getsize(paths['output']) / (1024 * 1024)  # bytes -> MB
                print(f"输出文件大小: {file_size:.2f} MB")
            else:
                print("警告: 输出文件可能未成功创建")

        except Exception as e:
            print(f"处理过程中发生错误: {e}")
        finally:
            # Release whatever is still open (error paths included).
            if out is not None:
                out.release()
            for clip in (result_clip, mask_clip):
                if clip is not None:
                    try:
                        clip.close()
                    except Exception:
                        pass
            # Remove both temp artifacts, not just the video.
            self.cleanup_temp_files([paths['temp_video'], paths['audio_temp']])

if __name__ == "__main__":
    processor = VideoFusionProcessor()
    processor.process_video_fusion()