from moviepy.editor import VideoFileClip, CompositeVideoClip, TextClip, ColorClip, concatenate_videoclips
import numpy as np
import os
from pathlib import Path
import tempfile
import uuid
import requests
from huggingface_hub import InferenceClient

class VideoEditing:
    """Video generation and editing helpers built on moviepy and the
    Hugging Face inference API.

    All output files are written to ``<project_root>/generated_videos``.
    """

    def __init__(self):
        # Output directory is resolved relative to this file's parent package.
        project_root = Path(__file__).parent.parent
        self.results_dir = project_root / "generated_videos"
        self.results_dir.mkdir(exist_ok=True)
        # NOTE(review): Windows-only font path; TextClip will fail to load it
        # on other platforms — consider making this configurable.
        self.font_path = "C:/Windows/Fonts/simhei.ttf"

        # Hugging Face client; stays None when no token is configured.
        self.hf_client = None
        self._initialize_hf_client()

        print(f"📁 视频文件将保存在: {self.results_dir.absolute()}")

    def _initialize_hf_client(self):
        """Initialize the Hugging Face client from HUGGINGFACEHUB_API_TOKEN.

        Leaves ``self.hf_client`` as ``None`` when the token is missing or
        initialization fails, so callers can degrade gracefully.
        """
        try:
            # `os` is imported at module level; no local re-import needed.
            api_key = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
            if api_key:
                self.hf_client = InferenceClient(
                    provider="auto",
                    api_key=api_key,
                )
                print("✅ Hugging Face客户端初始化成功")
            else:
                print("⚠️ 未找到HUGGINGFACEHUB_API_TOKEN，视频生成功能将不可用")
        except Exception as e:
            print(f"❌ Hugging Face客户端初始化失败: {e}")

    def generate_text_to_video(self, text_content, style="animated", duration=10, output_name=None):
        """Generate an AI video from ``text_content`` via the Hugging Face API.

        Args:
            text_content: Prompt describing the desired video.
            style: Accepted for interface compatibility; currently unused.
            duration: Accepted for interface compatibility; currently unused.
            output_name: Optional output file name; a random one is generated
                if omitted.

        Returns:
            dict: ``{"success": True, ...metadata...}`` on success, or
            ``{"success": False, "error": ...}`` on failure.
        """
        if not self.hf_client:
            return {
                "success": False,
                "error": "Hugging Face API未配置，请设置HUGGINGFACEHUB_API_TOKEN环境变量"
            }

        if not output_name:
            output_name = f"ai_video_{uuid.uuid4().hex[:8]}.mp4"

        output_path = self.results_dir / output_name

        try:
            print(f"🎬 正在生成AI视频: {text_content[:50]}...")

            # Call the hosted text-to-video model; returns raw video bytes.
            video_data = self.hf_client.text_to_video(
                text_content,
                model="Wan-AI/Wan2.2-T2V-A14B",
            )

            with open(output_path, "wb") as f:
                f.write(video_data)

            file_size = os.path.getsize(output_path)

            result = {
                "success": True,
                "message": "AI视频生成成功",
                "filepath": str(output_path.absolute()),
                "filename": output_name,
                "file_size": file_size,
                "prompt": text_content,
                "model": "Wan-AI/Wan2.2-T2V-A14B"
            }

            print(f"✅ AI视频已生成: {output_path}")
            return result

        except Exception as e:
            # Map common API failures onto friendlier error messages.
            error_msg = str(e)
            if "api_key" in error_msg.lower():
                return {
                    "success": False,
                    "error": "Hugging Face API密钥无效，请检查HUGGINGFACEHUB_API_TOKEN"
                }
            elif "rate limit" in error_msg.lower():
                return {
                    "success": False,
                    "error": "API调用频率限制，请稍后重试"
                }
            else:
                return {
                    "success": False,
                    "error": f"视频生成失败: {error_msg}"
                }

    def split_video(self, video_path, split_type="time", duration=10, output_prefix=None):
        """Split a video into segments.

        Only ``split_type == "time"`` is implemented: the source is cut into
        consecutive segments of at most ``duration`` seconds.  Any other
        ``split_type`` returns an empty list (original behavior preserved).

        Returns:
            list[str]: paths of the written segment files.
        """
        if not output_prefix:
            output_prefix = f"split_{uuid.uuid4().hex[:8]}"

        video = VideoFileClip(str(video_path))
        output_paths = []

        try:
            if split_type == "time":
                total_duration = video.duration
                # Build (start, end) pairs covering the whole timeline.
                segments = []
                start_time = 0
                while start_time < total_duration:
                    end_time = min(start_time + duration, total_duration)
                    segments.append((start_time, end_time))
                    start_time = end_time

                for i, (start, end) in enumerate(segments):
                    subclip = video.subclip(start, end)
                    output_path = self.results_dir / f"{output_prefix}_part{i+1}.mp4"
                    try:
                        subclip.write_videofile(str(output_path), codec="libx264", audio_codec="aac")
                        output_paths.append(str(output_path))
                    finally:
                        subclip.close()
        finally:
            # Always release the source reader, even if encoding fails.
            video.close()
        return output_paths

    def add_text_to_video(self, video_path, text_content, font_size=24, text_color="#ffffff",
                         position="center", start_time=0, duration=None, output_name=None):
        """Overlay ``text_content`` on a video.

        Args:
            video_path: Source video file.
            text_content: Text to render.
            font_size: Point size of the text.
            text_color: CSS-style color string.
            position: Either a named position (see ``position_map``) or any
                value moviepy's ``set_position`` accepts.
            start_time: Second at which the text appears.
            duration: Seconds the text stays visible; defaults to the full
                video length.
            output_name: Optional output file name.

        Returns:
            str: path of the written video file.
        """
        if not output_name:
            output_name = f"text_added_{uuid.uuid4().hex[:8]}.mp4"

        output_path = self.results_dir / output_name
        video = VideoFileClip(str(video_path))

        if duration is None:
            duration = video.duration

        # Named positions map to (x, y) as FRACTIONS of the frame size.
        position_map = {
            "top-left": (0.1, 0.1),
            "top-center": (0.5, 0.1),
            "top-right": (0.9, 0.1),
            "center": (0.5, 0.5),
            "bottom-left": (0.1, 0.9),
            "bottom-center": (0.5, 0.9),
            "bottom-right": (0.9, 0.9)
        }

        pos = position_map.get(position, position)
        # BUGFIX: fractional tuples must be flagged as relative, otherwise
        # moviepy treats e.g. (0.5, 0.5) as pixel coordinates near the
        # top-left corner instead of the frame center.
        text_clip = TextClip(
            text_content,
            fontsize=font_size,
            color=text_color,
            font=self.font_path
        ).set_position(pos, relative=isinstance(pos, tuple))

        text_clip = text_clip.set_start(start_time).set_duration(duration)

        final_video = CompositeVideoClip([video, text_clip])
        try:
            final_video.write_videofile(
                str(output_path),
                codec="libx264",
                audio_codec="aac"
            )
        finally:
            video.close()
            final_video.close()
        return str(output_path)

    def apply_crossfade_transition(self, video_paths, fade_type="cross", duration=1, output_name=None):
        """Concatenate clips with a ``duration``-second cross-fade.

        ``fade_type`` "fadeIn"/"fadeOut" additionally fade the whole result
        in or out; "cross" and "dissolve" are plain cross-fades.

        Returns:
            str: path of the written video file.
        """
        if not output_name:
            output_name = f"crossfade_{uuid.uuid4().hex[:8]}.mp4"

        output_path = self.results_dir / output_name
        clips = [VideoFileClip(str(path)) for path in video_paths]

        try:
            # BUGFIX: concatenate_videoclips has no numeric `transition`
            # parameter.  The documented moviepy cross-fade idiom overlaps
            # consecutive clips with a negative padding and fades each
            # incoming clip in.
            faded = [clips[0]] + [clip.crossfadein(duration) for clip in clips[1:]]
            final_clip = concatenate_videoclips(faded, method="compose", padding=-duration)

            if fade_type == "fadeIn":
                final_clip = final_clip.fadein(duration)
            elif fade_type == "fadeOut":
                final_clip = final_clip.fadeout(duration)
            # "cross" and "dissolve" need no extra treatment.

            final_clip.write_videofile(
                str(output_path),
                codec="libx264",
                audio_codec="aac"
            )
            final_clip.close()
        finally:
            for clip in clips:
                clip.close()

        return str(output_path)

    def apply_slide_transition(self, video_paths, direction="right", slide_type="push", duration=1, output_name=None):
        """Concatenate clips with a slide transition in any of four directions.

        Raises:
            ValueError: if ``direction`` is not one of
                "right", "left", "up", "down".

        Returns:
            str: path of the written video file.
        """
        if not output_name:
            output_name = f"slide_{direction}_{uuid.uuid4().hex[:8]}.mp4"

        output_path = self.results_dir / output_name
        clips = [VideoFileClip(str(path)) for path in video_paths]

        def create_slide_position(direction, screen_width, screen_height):
            """Return a time -> (x, y) function sliding the clip into frame."""
            def remaining(t):
                # BUGFIX: clamp progress so the clip stops at its final
                # position instead of sliding past it once t > duration.
                return max(0.0, 1.0 - t / duration)

            if direction == "right":
                return lambda t: (screen_width * remaining(t), 0)
            if direction == "left":
                return lambda t: (-screen_width * remaining(t), 0)
            if direction == "up":
                return lambda t: (0, -screen_height * remaining(t))
            if direction == "down":
                return lambda t: (0, screen_height * remaining(t))
            # BUGFIX: the original silently returned None here, which crashed
            # later with a TypeError; fail fast with a clear message instead.
            raise ValueError(f"不支持的滑动方向: {direction}")

        final_clips = []
        screen_width, screen_height = clips[0].size

        # Build one composite per adjacent pair of clips.
        # NOTE(review): middle clips appear in two composites, so they play
        # twice in the concatenated result — preserved from the original;
        # confirm whether that is intended.
        for i in range(len(clips) - 1):
            clip1 = clips[i]
            clip2 = clips[i + 1]

            # Extend the outgoing clip so the incoming one can overlap it.
            clip1_extended = clip1.set_duration(clip1.duration + duration)

            slide_func = create_slide_position(direction, screen_width, screen_height)

            # Incoming clip starts before clip1 ends and slides into place.
            clip2_slide = clip2.set_start(clip1.duration - duration)
            clip2_slide = clip2_slide.set_position(slide_func)
            clip2_slide = clip2_slide.set_duration(clip2.duration + duration)

            transition_clip = CompositeVideoClip([clip1_extended, clip2_slide])
            final_clips.append(transition_clip)

        # Single input (no transitions possible): just concatenate.
        if len(final_clips) == 0:
            final_clip = concatenate_videoclips(clips)
        else:
            final_clip = concatenate_videoclips(final_clips)

        try:
            final_clip.write_videofile(
                str(output_path),
                codec="libx264",
                audio_codec="aac"
            )
            final_clip.close()
        finally:
            for clip in clips:
                clip.close()

        return str(output_path)

# Module-level singleton; constructing it creates the output directory and
# attempts Hugging Face client initialization as an import-time side effect.
video_editor = VideoEditing()

# 对外接口函数
def text_to_video(text_content, style="animated", duration=10):
    """Module-level facade: delegate AI video generation to the singleton."""
    return video_editor.generate_text_to_video(
        text_content, style=style, duration=duration
    )

def split_video(video_path, split_type="time", duration=10):
    """Module-level facade: delegate video splitting to the singleton."""
    return video_editor.split_video(
        video_path, split_type=split_type, duration=duration
    )

def add_text_to_video(video_path, text_content, font_size=24, text_color="#ffffff",
                     position="center", start_time=0, duration=None):
    """Module-level facade: delegate text overlay to the singleton."""
    return video_editor.add_text_to_video(
        video_path,
        text_content,
        font_size=font_size,
        text_color=text_color,
        position=position,
        start_time=start_time,
        duration=duration,
    )

def apply_transition(video_paths, transition_type="crossfade", **kwargs):
    """Dispatch to the matching transition implementation on the singleton.

    Raises:
        ValueError: if ``transition_type`` is neither "crossfade" nor "slide".
    """
    shared_duration = kwargs.get("duration", 1)

    if transition_type == "crossfade":
        return video_editor.apply_crossfade_transition(
            video_paths,
            fade_type=kwargs.get("fade_type", "cross"),
            duration=shared_duration,
        )

    if transition_type == "slide":
        return video_editor.apply_slide_transition(
            video_paths,
            direction=kwargs.get("direction", "right"),
            slide_type=kwargs.get("slide_type", "push"),
            duration=shared_duration,
        )

    raise ValueError(f"不支持的转场类型: {transition_type}")

if __name__ == "__main__":
    # Smoke test: generate a short AI video from a Chinese prompt.
    demo_prompt = "这是一个测试视频。展示了美丽的风景。"
    outcome = text_to_video(demo_prompt)
    print(f"生成完成: {outcome}")