
import os
import asyncio
import subprocess
import tempfile
from pathlib import Path
from typing import Dict

import cv2
import numpy as np
from PIL import Image
import torch
from TTS.api import TTS

from .task_manager import TaskManager

class VideoGenerator:
    """Generate a narrated portrait (1080x1920) short video from an uploaded
    photo and a text script.

    Pipeline: TTS audio -> face-aware photo crop -> FFmpeg still-image video.
    Progress/status is reported through the shared ``TaskManager``.
    """

    def __init__(self):
        self.task_manager = TaskManager()  # progress/status reporting
        self.tts = None                    # None => silent-audio fallback
        self.init_tts()

    def init_tts(self):
        """Initialize the TTS engine.

        Tries the Chinese model first, then falls back to the English model.
        If both fail, ``self.tts`` stays ``None`` and :meth:`generate_audio`
        writes placeholder silence instead of the failure escaping
        ``__init__`` and killing object construction.
        """
        try:
            # Prefer the Chinese single-speaker model.
            self.tts = TTS("tts_models/zh-CN/baker/tacotron2-DDC")
            print("TTS引擎初始化成功")
        except Exception as e:
            print(f"TTS引擎初始化失败: {e}")
            try:
                # Fallback: English single-speaker model.
                self.tts = TTS("tts_models/en/ljspeech/tacotron2-DDC")
            except Exception as fallback_error:
                # The original let this second failure propagate out of
                # __init__, even though the rest of the class already
                # handles self.tts being None.
                print(f"TTS引擎初始化失败: {fallback_error}")
                self.tts = None

    async def generate_video(self, task_data: Dict):
        """Run the full generation pipeline for one task.

        ``task_data`` keys used: ``task_id``, ``script``, ``voice``,
        ``speed``, ``photo``, ``resolution``, ``duration``. On any failure
        the task is marked ``failed`` (progress 0) rather than propagating
        the exception to the caller.
        """
        task_id = task_data["task_id"]

        try:
            # 1. Speech synthesis.
            self.task_manager.update_progress(task_id, 20, "generating_audio")
            audio_path = await self.generate_audio(
                task_data["script"],
                task_data["voice"],
                task_data["speed"],
                task_id
            )

            # 2. Face-aware photo preparation.
            self.task_manager.update_progress(task_id, 40, "processing_photo")
            processed_photo = await self.process_photo(task_data["photo"])

            # 3. Mux still image + audio into an MP4.
            self.task_manager.update_progress(task_id, 60, "generating_video")
            video_path = await self.create_video(
                processed_photo,
                audio_path,
                task_data["resolution"],
                task_data["duration"],
                task_id
            )

            # Done: publish the URL clients can fetch the result from.
            video_url = f"/api/videos/{os.path.basename(video_path)}"
            self.task_manager.update_progress(task_id, 100, "completed", video_url)

        except Exception as e:
            print(f"视频生成失败: {e}")
            self.task_manager.update_progress(task_id, 0, "failed", error=str(e))

    async def generate_audio(self, text: str, voice: str, speed: float, task_id: str) -> str:
        """Synthesize *text* to ``temp/{task_id}_audio.wav`` and return the path.

        If no TTS engine is available (``self.tts is None``), a one-second
        silent WAV is written so downstream steps still have an audio track.
        """
        os.makedirs("temp", exist_ok=True)  # original assumed the dir existed
        audio_path = f"temp/{task_id}_audio.wav"

        if self.tts:
            # NOTE(review): the original mapped `voice` to a speaker label
            # ("male"/"female") but never passed it to the engine, so `voice`
            # had no effect; the single-speaker models loaded in init_tts()
            # accept no speaker argument anyway. TODO: wire `voice` up once a
            # multi-speaker model is configured.
            self.tts.tts_to_file(
                text=text,
                file_path=audio_path,
                speed=speed
            )
        else:
            # Placeholder: 1 second of 16-bit mono silence at 22.05 kHz.
            import wave
            with wave.open(audio_path, 'w') as wav_file:
                wav_file.setnchannels(1)      # mono
                wav_file.setsampwidth(2)      # 16-bit samples
                wav_file.setframerate(22050)
                # 22050 frames * 2 bytes/frame = 44100 zero bytes = 1 second
                wav_file.writeframes(b'\x00' * 44100)

        return audio_path

    async def process_photo(self, photo_filename: str) -> str:
        """Crop/scale the uploaded photo to a 1080x1920 portrait frame.

        If a face is detected, crops around the largest face (with margin)
        first; otherwise the whole photo is used. Both paths preserve aspect
        ratio via scale-and-center-crop — the original distorted the image
        in the no-face path with a direct resize.

        Returns the processed image path; raises if the photo is unreadable.
        """
        photo_path = f"uploads/photos/{photo_filename}"
        processed_path = f"temp/processed_{photo_filename}"
        os.makedirs("temp", exist_ok=True)

        image = cv2.imread(photo_path)
        if image is None:
            raise Exception("无法读取照片文件")

        # Detect faces on the grayscale image with the stock Haar cascade.
        face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)

        if len(faces) > 0:
            # Keep the largest detected face, padded by a fixed margin and
            # clamped to the image bounds.
            x, y, w, h = max(faces, key=lambda f: f[2] * f[3])
            margin = 50
            x1 = max(0, x - margin)
            y1 = max(0, y - margin)
            x2 = min(image.shape[1], x + w + margin)
            y2 = min(image.shape[0], y + h + margin)
            source = image[y1:y2, x1:x2]
        else:
            source = image

        cv2.imwrite(processed_path, self._fit_to_portrait(source))
        return processed_path

    @staticmethod
    def _fit_to_portrait(image, target_width: int = 1080, target_height: int = 1920):
        """Scale *image* so it fully covers ``target_width`` x
        ``target_height``, then center-crop to exactly that size
        (aspect ratio preserved)."""
        scale = max(target_width / image.shape[1], target_height / image.shape[0])
        # max() guards the case where int() flooring a float product yields
        # one pixel less than the target on the dominating dimension.
        new_width = max(target_width, int(image.shape[1] * scale))
        new_height = max(target_height, int(image.shape[0] * scale))
        resized = cv2.resize(image, (new_width, new_height))

        # scale = max(...) guarantees new_* >= target_*, so offsets are >= 0.
        start_x = (new_width - target_width) // 2
        start_y = (new_height - target_height) // 2
        return resized[start_y:start_y + target_height,
                       start_x:start_x + target_width]

    async def create_video(self, photo_path: str, audio_path: str, resolution: str, duration: int, task_id: str) -> str:
        """Mux the processed photo and the audio track into an MP4 via FFmpeg.

        The clip length is the shorter of the real audio length and
        *duration*. ``resolution`` is accepted for interface compatibility;
        the frame size is whatever ``photo_path`` was rendered at.
        Raises if FFmpeg exits non-zero.
        """
        video_filename = f"{task_id}_video.mp4"
        video_path = f"uploads/videos/{video_filename}"
        os.makedirs("uploads/videos", exist_ok=True)

        # Probe the WAV header for the real audio length; fall back to the
        # requested duration if the file is unreadable or not a valid WAV.
        import wave
        try:
            with wave.open(audio_path, 'r') as wav_file:
                audio_duration = wav_file.getnframes() / wav_file.getframerate()
        except (wave.Error, EOFError, OSError, ZeroDivisionError):
            # Was a bare `except:` — narrowed to the failures this probe
            # can actually produce, preserving the best-effort fallback.
            audio_duration = duration

        # Loop the still image for the clip length; -shortest and -t cap it.
        cmd = [
            'ffmpeg',
            '-loop', '1',
            '-i', photo_path,
            '-i', audio_path,
            '-c:v', 'libx264',
            '-c:a', 'aac',
            '-pix_fmt', 'yuv420p',   # broadest player compatibility
            '-shortest',
            '-t', str(min(audio_duration, duration)),
            '-y',                    # overwrite any existing output
            video_path
        ]

        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )

        stdout, stderr = await process.communicate()

        if process.returncode != 0:
            raise Exception(f"FFmpeg错误: {stderr.decode()}")

        return video_path
