import os
import uuid
from typing import Optional
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, ImageClip, ColorClip, VideoClip
import tempfile
import requests
import subprocess
import json
from services.llm_service import LLMService
import logging
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from .advanced_video_generator import AdvancedVideoGenerator
import time

# Logging setup.
# NOTE(review): calling basicConfig at import time configures the root logger
# as a module side effect — fine for an application entry point, but confirm
# this is intended if this module is imported as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class VideoGeneratorService:
    """Generate videos from text prompts or multi-section scripts.

    Strategy: try the (lazily loaded) AdvancedVideoGenerator first and fall
    back to a basic solid-color clip on failure.  All text rendering is done
    with PIL so that no ImageMagick installation is required.
    """

    def __init__(self, llm_service: LLMService):
        self.llm_service = llm_service
        # Created on first use; see _get_advanced_generator().
        self.advanced_generator = None

    def _get_advanced_generator(self):
        """Lazily instantiate and cache the advanced video generator."""
        if self.advanced_generator is None:
            from .advanced_video_generator import AdvancedVideoGenerator
            self.advanced_generator = AdvancedVideoGenerator(self.llm_service)
        return self.advanced_generator

    def generate_from_text(self, prompt: str, output_path: str, project_id: Optional[str] = None) -> bool:
        """Generate a video for *prompt*, writing the result to *output_path*.

        Tries the advanced generator first; on any failure falls back to
        _fallback_basic_video().  Returns True on success, False otherwise.
        """
        start_time = time.time()
        logger.info(f"🚀 开始视频生成流程: {prompt}")

        try:
            # Try the advanced video generator first.
            advanced_gen = self._get_advanced_generator()
            success = advanced_gen.generate_advanced_video(prompt, output_path, project_id)

            if success:
                elapsed_time = time.time() - start_time
                logger.info(f"✅ 高级视频生成完成! 耗时: {elapsed_time:.2f}秒, 路径: {output_path}")
                return True

            logger.error(f"❌ 高级视频生成失败，尝试回退方案")
            raise Exception("高级视频生成失败")

        except Exception as e:
            logger.error(f"❌ 高级视频生成过程中出错: {str(e)}")
            # Advanced generation failed — fall back to the basic generator.
            logger.info("🔄 回退到基础视频生成...")
            return self._fallback_basic_video(prompt, output_path)

    def _fallback_basic_video(self, prompt: str, output_path: str) -> bool:
        """Last-resort fallback: write a 5-second solid-color clip.

        Returns True on success, False if even this fails.
        """
        try:
            from moviepy.editor import ColorClip

            # A fixed 5-second, 640x480 blue-ish clip.
            duration = 5
            video = ColorClip(size=(640, 480), color=(100, 100, 200), duration=duration)

            try:
                video.write_videofile(
                    output_path,
                    fps=24,
                    codec='libx264',
                    audio=False,
                    verbose=False,
                    logger=None
                )
            finally:
                # Release the clip even if writing fails.
                video.close()

            logger.info(f"✅ 基础视频生成完成: {output_path}")
            return True

        except Exception as e:
            logger.error(f"❌ 基础视频生成也失败: {str(e)}")
            return False

    def _load_font(self, size: int):
        """Load a TrueType font, falling back to PIL's built-in default.

        ImageFont.truetype raises OSError when the font file is not found,
        so only that exception is swallowed (the originals used bare except).
        """
        for font_path in ("arial.ttf", "C:/Windows/Fonts/arial.ttf"):
            try:
                return ImageFont.truetype(font_path, size)
            except OSError:
                continue
        logger.warning("使用默认字体")
        return ImageFont.load_default()

    def _wrap_text(self, draw, text: str, font, max_width: int) -> list:
        """Greedily wrap *text* so each line fits within *max_width* pixels.

        Splits on whitespace first; a single "word" wider than *max_width*
        (e.g. CJK text, which contains no spaces) is broken per character so
        it still wraps instead of overflowing the frame.
        """
        def line_width(s: str) -> int:
            bbox = draw.textbbox((0, 0), s, font=font)
            return bbox[2] - bbox[0]

        lines = []
        current_line = []

        for word in text.split():
            if line_width(' '.join(current_line + [word])) < max_width:
                current_line.append(word)
                continue
            if current_line:
                lines.append(' '.join(current_line))
                current_line = []
            if line_width(word) >= max_width:
                # The word alone is too wide: break it character by character.
                chunk = ''
                for ch in word:
                    if chunk and line_width(chunk + ch) >= max_width:
                        lines.append(chunk)
                        chunk = ch
                    else:
                        chunk += ch
                if chunk:
                    current_line = [chunk]
            else:
                current_line = [word]

        if current_line:
            lines.append(' '.join(current_line))
        return lines

    def _create_video_with_pil_text(self, text: str, output_path: str, duration: int = 5):
        """Render *text* onto a solid background and write a video.

        Uses PIL for text rendering to avoid the ImageMagick dependency of
        moviepy's TextClip.  Raises on failure.
        """
        logger.info(f"🎨 使用PIL创建视频，文本长度: {len(text)}")

        try:
            # Video dimensions.
            width, height = 640, 480

            logger.info("🖼️ 创建背景...")
            background = ColorClip(size=(width, height), color=(25, 25, 112), duration=duration)

            logger.info("📝 使用PIL创建文字图像...")

            # Draw the text onto a PIL image matching the background color.
            pil_image = Image.new('RGB', (width, height), color=(25, 25, 112))
            draw = ImageDraw.Draw(pil_image)

            font = self._load_font(24)

            # Wrap with a 40px total horizontal margin.
            lines = self._wrap_text(draw, text, font, width - 40)

            # Cap the number of lines so the text fits on screen.
            if len(lines) > 10:
                lines = lines[:10]
                lines[-1] = lines[-1] + "..."

            # Center the block vertically; 30px is an approximate line height.
            y_position = (height - len(lines) * 30) // 2

            for i, line in enumerate(lines):
                bbox = draw.textbbox((0, 0), line, font=font)
                text_width = bbox[2] - bbox[0]
                x_position = (width - text_width) // 2
                draw.text((x_position, y_position + i * 30), line, font=font, fill=(255, 255, 255))

            # PIL image -> numpy array -> moviepy clip.
            image_array = np.array(pil_image)
            text_image_clip = ImageClip(image_array, duration=duration)

            logger.info("🔧 合成视频...")
            video = CompositeVideoClip([background, text_image_clip])

            logger.info(f"💾 保存视频到: {output_path}")
            try:
                video.write_videofile(
                    output_path,
                    fps=24,
                    codec='libx264',
                    audio=False,
                    verbose=False,
                    logger=None
                )
            finally:
                # Release clips regardless of whether writing succeeded.
                background.close()
                text_image_clip.close()
                video.close()

            logger.info("✅ 视频创建完成")

        except Exception as e:
            logger.error(f"❌ 使用PIL创建视频失败: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            raise e

    def _create_simple_color_video(self, text: str, output_path: str, duration: int = 5):
        """Write a solid-color video whose color is derived from *text*.

        Contains no text rendering at all.  Raises on failure.
        """
        logger.info("🎨 创建简单颜色视频...")

        try:
            # Palette of candidate colors.
            color_map = {
                'red': (255, 0, 0),
                'blue': (0, 0, 255),
                'green': (0, 255, 0),
                'yellow': (255, 255, 0),
                'purple': (128, 0, 128)
            }

            # Derive the color deterministically from the text.  Built-in
            # hash() is randomized per process (PYTHONHASHSEED), so a stable
            # byte sum is used instead to pick the same color across runs.
            color_key = sum(text.encode('utf-8')) % len(color_map)
            color = list(color_map.values())[color_key]

            video = ColorClip(size=(640, 480), color=color, duration=duration)

            try:
                video.write_videofile(
                    output_path,
                    fps=24,
                    codec='libx264',
                    audio=False,
                    verbose=False,
                    logger=None
                )
            finally:
                video.close()

            logger.info("✅ 简单颜色视频创建完成")

        except Exception as e:
            logger.error(f"❌ 创建简单颜色视频失败: {str(e)}")
            raise e

    def combine_videos(self, video_paths: list, output_path: str):
        """Concatenate the existing files in *video_paths* into one video.

        Raises ValueError when none of the paths exist; logs and re-raises
        any other error.
        """
        clips = []
        try:
            for path in video_paths:
                # Skip paths that do not exist on disk.
                if os.path.exists(path):
                    clips.append(VideoFileClip(path))

            if not clips:
                raise ValueError("No valid video files provided")

            final_clip = concatenate_videoclips(clips, method="compose")
            try:
                final_clip.write_videofile(output_path, codec='libx264', audio_codec='aac')
            finally:
                final_clip.close()

        except Exception as e:
            # Module logger for consistency with the other methods.
            logger.error(f"Error combining videos: {str(e)}")
            raise e
        finally:
            # Always release source clips, even on failure.
            for clip in clips:
                clip.close()

    def add_text_overlay(self, video_path: str, text: str, output_path: str):
        """Copy *video_path* to *output_path*; text overlay is disabled.

        A real overlay would need ImageMagick (moviepy TextClip) or a
        PIL-based compositing pass; until then the video is re-encoded
        unchanged and *text* is ignored.
        """
        try:
            video = VideoFileClip(video_path)
            try:
                logger.warning("文字叠加功能需要ImageMagick，暂时禁用文字")
                # Re-encode the video without modification.
                video.write_videofile(output_path, codec='libx264', audio_codec='aac')
            finally:
                video.close()

        except Exception as e:
            logger.error(f"Error adding text overlay: {str(e)}")
            raise e

    def generate_from_script(self, script: dict, output_path: str):
        """Render a script dict (title/introduction/sections/conclusion)
        into a single text video.  Raises on failure.
        """
        try:
            # Flatten the script into one text blob.
            combined_text = script.get("title", "") + "\n\n"

            intro = script.get("introduction", {})
            combined_text += intro.get("text", "") + "\n\n"

            for section in script.get("sections", []):
                combined_text += section.get("text", "") + "\n\n"

            conclusion = script.get("conclusion", {})
            combined_text += conclusion.get("text", "")

            # Cap the text so it fits the simple PIL renderer.
            self._create_video_with_pil_text(combined_text[:500], output_path)

        except Exception as e:
            logger.error(f"Error generating video from script: {str(e)}")
            raise e

    def add_advanced_effects(self, video_path: str, output_path: str, effects: list):
        """Apply the named visual *effects* to a video, in order.

        NOTE(review): "zoom_pan" and "color_grading" dispatch to
        _add_zoom_pan_effect/_add_color_grading, which are not defined in
        this class — confirm they exist elsewhere, otherwise those effect
        names raise AttributeError.
        """
        video = VideoFileClip(video_path)
        try:
            for effect in effects:
                if effect == "zoom_pan":
                    video = self._add_zoom_pan_effect(video)
                elif effect == "ken_burns":
                    video = self._add_ken_burns_effect(video)
                elif effect == "color_grading":
                    video = self._add_color_grading(video)
                elif effect == "particles":
                    video = self._add_particle_effects(video)

            video.write_videofile(output_path, codec='libx264')
        finally:
            video.close()

    def _add_ken_burns_effect(self, clip):
        """Return *clip* with a Ken Burns effect (slow zoom and pan).

        Zooms from 1.0x to 1.1x over the clip's duration while panning from
        the top-left toward the bottom-right.
        """
        def effect(get_frame, t):
            frame = get_frame(t)
            h, w = frame.shape[:2]

            progress = t / clip.duration
            # Slowly zoom in, up to 10%.
            zoom = 1.0 + 0.1 * progress

            # Pan offset; for integer w/h this exactly matches the extra
            # pixels gained by the zoom, so the crop stays inside the image.
            dx = int(0.1 * w * progress)
            dy = int(0.1 * h * progress)

            # Resize with PIL, then crop back to the original size.
            pil_img = Image.fromarray(frame)
            resized = pil_img.resize((int(w * zoom), int(h * zoom)), Image.LANCZOS)
            cropped = resized.crop((dx, dy, dx + w, dy + h))

            return np.array(cropped)

        return clip.fl(effect, apply_to=['mask'])

    def _add_particle_effects(self, clip):
        """Overlay 20 randomly drifting particles on *clip*."""
        particle_clips = [self._create_particle(clip.duration) for _ in range(20)]
        return CompositeVideoClip([clip] + particle_clips)

    def _create_particle(self, duration: float):
        """Create one particle clip: a colored dot drifting linearly between
        two random points on a 1920x1080 transparent (RGBA) canvas.
        """
        # Random start/end positions, size and color for this particle.
        start_x = np.random.randint(0, 1920)
        start_y = np.random.randint(0, 1080)
        end_x = np.random.randint(0, 1920)
        end_y = np.random.randint(0, 1080)

        size = int(np.random.randint(2, 10))
        # randint's upper bound is exclusive: use 256 so 255 is reachable;
        # cast to plain ints so the tuple holds Python integers.
        color = tuple(int(c) for c in np.random.randint(0, 256, 3))

        # Circular stamp mask of shape (2*size+1, 2*size+1); built once
        # outside make_frame since it is identical for every frame.
        y_idx, x_idx = np.ogrid[-size:size + 1, -size:size + 1]
        circle = x_idx ** 2 + y_idx ** 2 <= size ** 2

        def make_frame(t):
            progress = t / duration
            x_center = int(start_x + (end_x - start_x) * progress)
            y_center = int(start_y + (end_y - start_y) * progress)

            # RGBA frame, fully transparent except the particle.
            frame = np.zeros((1080, 1920, 4), dtype=np.uint8)

            # Clamp the stamp region to the frame bounds.
            y_start = max(0, y_center - size)
            y_end = min(1080, y_center + size + 1)
            x_start = max(0, x_center - size)
            x_end = min(1920, x_center + size + 1)

            if y_start < y_end and x_start < x_end:
                # Offset into the mask so it stays aligned with the clamped
                # frame region when the particle is clipped at an edge
                # (slicing the mask from [0:...] would shift the circle).
                my0 = y_start - (y_center - size)
                mx0 = x_start - (x_center - size)
                sub_mask = circle[my0:my0 + (y_end - y_start),
                                  mx0:mx0 + (x_end - x_start)]

                region = frame[y_start:y_end, x_start:x_end]
                region[:, :, :3][sub_mask] = color
                region[:, :, 3][sub_mask] = 255  # opaque alpha on the dot

            return frame

        particle = VideoClip(make_frame, duration=duration, ismask=False)
        return particle.set_opacity(0.7)
