"""
视频配音合成功能
提供完整的视频配音解决方案
"""

import os
import tempfile
from typing import Dict, Any, Optional, List, Tuple, Union
from pathlib import Path
import numpy as np
from dataclasses import dataclass
from enum import Enum

from .processor import VideoProcessor
from .synchronizer import AudioVideoSynchronizer, SyncMethod, SyncResult
from ..tts.service import TTSService, TTSRequest, VoiceProfile, VoiceSettings
from ..audio.enhancer import AudioEnhancer, EnhancementSettings


class DubbingMode(Enum):
    """How the dubbing track is combined with the video's original audio."""
    REPLACE = "replace"           # replace the original audio track entirely
    OVERLAY = "overlay"           # overlay the dubbing on top of the (quieter) original audio
    MIXED = "mixed"              # mixed mode: merge dubbing at the requested volume
    NARRATION = "narration"       # narration mode: keep the original audio at a very low level


@dataclass
class DubbingSegment:
    """One dubbing segment: *text* spoken over [start_time, end_time] of the video."""
    text: str                      # text to synthesize via TTS
    start_time: float              # segment start within the video (seconds)
    end_time: float                # segment end within the video (seconds)
    voice_profile: Optional[VoiceProfile] = None  # per-segment voice override
    volume: float = 1.0            # gain applied to the synthesized audio
    sync_offset: float = 0.0       # extra timing offset in seconds (may be negative)
    # Fixed annotation: the default is None, so the type must be Optional.
    metadata: Optional[Dict[str, Any]] = None


@dataclass
class DubbingRequest:
    """Parameters describing a complete dubbing job."""
    video_path: str                                   # input video file
    segments: List[DubbingSegment]                    # segments to synthesize and place
    output_path: str                                  # where the dubbed video is written
    mode: DubbingMode = DubbingMode.REPLACE           # how dubbing combines with original audio
    voice_settings: Optional[VoiceSettings] = None    # global TTS voice settings
    enhancement_settings: Optional[EnhancementSettings] = None  # optional post-TTS audio enhancement
    sync_method: SyncMethod = SyncMethod.AUDIO_PEAKS  # A/V synchronization strategy
    original_audio_volume: float = 0.3                # original-track gain (used in OVERLAY mode)
    dubbing_audio_volume: float = 1.0                 # dubbing-track gain


@dataclass
class DubbingResult:
    """Outcome of a dubbing job."""
    success: bool                                # True when the pipeline completed
    output_path: Optional[str] = None            # path of the dubbed video (on success)
    duration: Optional[float] = None             # video duration in seconds
    segments_processed: int = 0                  # number of segments that passed validation
    sync_result: Optional[SyncResult] = None     # A/V synchronization outcome
    metadata: Optional[Dict[str, Any]] = None    # extra info: video_info, dubbing_mode, segments_count
    error_message: Optional[str] = None          # set when success is False


class VideoDubber:
    """High-level video dubbing engine.

    Pipeline: validate the input video -> synthesize speech for each
    segment via TTS -> optionally enhance the audio -> assemble a
    video-length dubbing track -> synchronize it with the video ->
    mux the final output according to the requested ``DubbingMode``.
    """

    def __init__(self):
        self.video_processor = VideoProcessor()
        self.tts_service = TTSService()
        self.synchronizer = AudioVideoSynchronizer()
        self.audio_enhancer = AudioEnhancer()

        self._initialized = False

    async def initialize(self) -> bool:
        """Initialize the dubber. Returns True on success."""
        try:
            # The TTS backend is the only component needing async setup.
            if not await self.tts_service.initialize():
                return False

            self._initialized = True
            return True

        except Exception as e:
            print(f"Failed to initialize video dubber: {e}")
            return False

    async def create_dubbing(self, request: DubbingRequest) -> DubbingResult:
        """Run the full dubbing pipeline described by *request*.

        Never raises: all failures are reported through the returned
        ``DubbingResult``'s ``success``/``error_message`` fields.
        """
        try:
            if not self._initialized:
                return DubbingResult(
                    success=False,
                    error_message="Video dubber not initialized"
                )

            # Validate the input before any expensive work.
            if not os.path.exists(request.video_path):
                return DubbingResult(
                    success=False,
                    error_message=f"Video file not found: {request.video_path}"
                )

            if not self.video_processor.is_supported_format(request.video_path):
                return DubbingResult(
                    success=False,
                    error_message=f"Unsupported video format: {request.video_path}"
                )

            video_info = self.video_processor.get_video_info(request.video_path)

            # Drop invalid segments; remaining ones are sorted by start time.
            processed_segments = await self._process_segments(request.segments, video_info.duration)

            if not processed_segments:
                return DubbingResult(
                    success=False,
                    error_message="No valid segments to process"
                )

            # Synthesize and assemble the dubbing track into a temp WAV.
            dubbing_audio_path = await self._generate_dubbing_audio(
                processed_segments, request, video_info.duration
            )

            if not dubbing_audio_path:
                return DubbingResult(
                    success=False,
                    error_message="Failed to generate dubbing audio"
                )

            try:
                sync_result = await self._synchronize_audio_video(
                    request.video_path, dubbing_audio_path, request.sync_method
                )

                output_path = await self._compose_final_video(
                    request, dubbing_audio_path, video_info
                )
            finally:
                # Always remove the temporary dubbing track, even if
                # synchronization or composition raised.
                if os.path.exists(dubbing_audio_path):
                    os.unlink(dubbing_audio_path)

            # Bug fix: previously reported success even when composition
            # failed (output_path is None on failure).
            if not output_path:
                return DubbingResult(
                    success=False,
                    error_message="Failed to compose final video"
                )

            return DubbingResult(
                success=True,
                output_path=output_path,
                duration=video_info.duration,
                segments_processed=len(processed_segments),
                sync_result=sync_result,
                metadata={
                    'video_info': video_info,
                    'dubbing_mode': request.mode.value,
                    'segments_count': len(processed_segments)
                }
            )

        except Exception as e:
            return DubbingResult(
                success=False,
                error_message=f"Dubbing failed: {str(e)}"
            )

    async def _process_segments(
        self,
        segments: List[DubbingSegment],
        video_duration: float
    ) -> List[DubbingSegment]:
        """Filter out invalid segments and return them sorted by start time.

        A segment is kept only if it lies fully within the video, has a
        positive duration, and carries non-blank text.
        """
        processed = [
            segment for segment in segments
            if 0 <= segment.start_time < segment.end_time <= video_duration
            and segment.text and segment.text.strip()
        ]

        processed.sort(key=lambda s: s.start_time)
        return processed

    async def _generate_dubbing_audio(
        self,
        segments: List[DubbingSegment],
        request: DubbingRequest,
        video_duration: float
    ) -> Optional[str]:
        """Synthesize each segment and assemble the full dubbing track.

        Returns the path of a temporary WAV file, or None when nothing
        could be synthesized.
        """
        try:
            import io  # bug fix: io.BytesIO was used below without importing io

            audio_segments = []

            for segment in segments:
                # Text-to-speech synthesis for this segment.
                tts_request = TTSRequest(
                    text=segment.text,
                    voice_profile=segment.voice_profile,
                    voice_settings=request.voice_settings
                )

                tts_response = await self.tts_service.synthesize(tts_request)

                if not tts_response.success or not tts_response.audio_data:
                    continue  # skip segments the TTS engine could not render

                # Optional audio enhancement pass.
                if request.enhancement_settings:
                    from ..audio.file_handler import AudioFileHandler
                    handler = AudioFileHandler()

                    # Decode the synthesized bytes into a sample array.
                    audio_data, sr = handler.read_audio_file(
                        io.BytesIO(tts_response.audio_data)
                    )

                    enhanced_result = self.audio_enhancer.enhance_audio(
                        audio_data, sr, request.enhancement_settings
                    )

                    if enhanced_result.success and enhanced_result.enhanced_data is not None:
                        # Re-encode the enhanced samples back to WAV bytes.
                        import soundfile as sf
                        with io.BytesIO() as buffer:
                            sf.write(buffer, enhanced_result.enhanced_data, sr, format='wav')
                            audio_data_bytes = buffer.getvalue()
                        tts_response.audio_data = audio_data_bytes

                audio_segments.append({
                    'data': tts_response.audio_data,
                    'start_time': segment.start_time,
                    'end_time': segment.end_time,
                    'volume': segment.volume,
                    'sync_offset': segment.sync_offset
                })

            if not audio_segments:
                return None

            # Mix all synthesized pieces into one video-length track.
            return await self._create_dubbing_track(audio_segments, video_duration)

        except Exception as e:
            print(f"Failed to generate dubbing audio: {e}")
            return None

    async def _create_dubbing_track(
        self,
        audio_segments: List[Dict[str, Any]],
        video_duration: float
    ) -> Optional[str]:
        """Mix the synthesized segments into one video-length mono WAV.

        Returns the path of a temporary WAV file, or None on failure.
        """
        try:
            import io
            import soundfile as sf

            sample_rate = 22050  # target sample rate of the dubbing track
            total_samples = int(video_duration * sample_rate)
            full_audio = np.zeros(total_samples)

            for segment in audio_segments:
                # soundfile decodes directly from an in-memory buffer; the
                # previous NamedTemporaryFile round-trip also failed on
                # Windows, where an open temp file cannot be reopened by name.
                audio_data, sr = sf.read(io.BytesIO(segment['data']))

                # Down-mix multi-channel synthesis output to mono so it can
                # be added into the mono master track.
                if audio_data.ndim > 1:
                    audio_data = audio_data.mean(axis=1)

                # Resample to the target rate if needed.
                if sr != sample_rate:
                    import librosa
                    audio_data = librosa.resample(audio_data, orig_sr=sr, target_sr=sample_rate)

                audio_data = audio_data * segment['volume']

                # Placement = start time plus per-segment sync offset.
                start_sample = int(segment['start_time'] * sample_rate)
                insert_position = start_sample + int(segment['sync_offset'] * sample_rate)

                # Bug fix: a negative offset used to produce a negative slice
                # index (wrapping to the end of the array). Trim the head instead.
                if insert_position < 0:
                    audio_data = audio_data[-insert_position:]
                    insert_position = 0

                if insert_position >= total_samples or len(audio_data) == 0:
                    continue  # segment falls entirely outside the video

                # Bug fix: segments overrunning the end of the video were
                # silently dropped; trim them to fit instead.
                end_position = min(insert_position + len(audio_data), total_samples)
                full_audio[insert_position:end_position] += audio_data[:end_position - insert_position]

            # Prevent clipping where segments overlap.
            full_audio = np.clip(full_audio, -1.0, 1.0)

            # tempfile.mktemp() is deprecated/insecure; create the file atomically.
            with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
                temp_output_path = tmp.name
            sf.write(temp_output_path, full_audio, sample_rate)

            return temp_output_path

        except Exception as e:
            print(f"Failed to create dubbing track: {e}")
            return None

    async def _synchronize_audio_video(
        self,
        video_path: str,
        audio_path: str,
        sync_method: SyncMethod
    ) -> SyncResult:
        """Estimate and apply the A/V offset for the dubbing track.

        On success with a non-zero offset the audio file at *audio_path*
        is rewritten in place with the offset applied.
        """
        try:
            sync_result = self.synchronizer.synchronize(
                video_path, audio_path, sync_method
            )

            if sync_result.success and sync_result.sync_offset != 0:
                from ..audio.file_handler import AudioFileHandler
                handler = AudioFileHandler()

                audio_data, sr = handler.read_audio_file(audio_path)

                # Shift the samples by the detected offset.
                synchronized_audio = self.synchronizer.apply_sync_offset(
                    audio_data, sr, sync_result.sync_offset
                )

                # Overwrite the track with the synchronized version.
                handler.write_audio_file(synchronized_audio, sr, audio_path)

            return sync_result

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Synchronization failed: {str(e)}"
            )

    async def _compose_final_video(
        self,
        request: DubbingRequest,
        dubbing_audio_path: str,
        video_info
    ) -> Optional[str]:
        """Mux the dubbing track into the video per the requested mode.

        Returns the output path, or None on failure.
        """
        try:
            output_path = request.output_path

            # Bug fix: os.makedirs('') raises when output_path has no
            # directory component; only create a directory when one exists.
            output_dir = os.path.dirname(output_path)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            if request.mode == DubbingMode.REPLACE:
                # Replace the original audio track entirely.
                result_path = self.video_processor.replace_audio(
                    request.video_path,
                    dubbing_audio_path,
                    output_path
                )

            elif request.mode == DubbingMode.OVERLAY:
                if video_info.has_audio:
                    # Mix the dubbing over a quieter original track.
                    original_audio_path = self.video_processor.extract_audio(request.video_path)
                    mixed_audio_path = None
                    try:
                        mixed_audio_path = await self._mix_audio_tracks(
                            original_audio_path,
                            dubbing_audio_path,
                            request.original_audio_volume,
                            request.dubbing_audio_volume
                        )

                        result_path = self.video_processor.replace_audio(
                            request.video_path,
                            mixed_audio_path,
                            output_path
                        )
                    finally:
                        # Remove intermediates even if muxing fails.
                        if os.path.exists(original_audio_path):
                            os.unlink(original_audio_path)
                        if mixed_audio_path and os.path.exists(mixed_audio_path):
                            os.unlink(mixed_audio_path)
                else:
                    # No original audio to overlay on: plain replacement.
                    result_path = self.video_processor.replace_audio(
                        request.video_path,
                        dubbing_audio_path,
                        output_path
                    )

            elif request.mode == DubbingMode.MIXED:
                result_path = self.video_processor.merge_audio_video(
                    request.video_path,
                    dubbing_audio_path,
                    output_path,
                    audio_volume=request.dubbing_audio_volume
                )

            else:  # DubbingMode.NARRATION
                # Narration: keep the original audio barely audible.
                result_path = self.video_processor.merge_audio_video(
                    request.video_path,
                    dubbing_audio_path,
                    output_path,
                    audio_volume=0.2,
                    start_time=0.0
                )

            return result_path

        except Exception as e:
            print(f"Failed to compose final video: {e}")
            return None

    async def _mix_audio_tracks(
        self,
        original_path: str,
        dubbing_path: str,
        original_volume: float,
        dubbing_volume: float
    ) -> str:
        """Mix two audio files into a temporary WAV and return its path.

        Re-raises on failure so the caller's error handling surfaces it.
        """
        try:
            import soundfile as sf

            original_data, original_sr = sf.read(original_path)
            dubbing_data, dubbing_sr = sf.read(dubbing_path)

            # Resample the dubbing track to the original's sample rate.
            if original_sr != dubbing_sr:
                import librosa
                dubbing_data = librosa.resample(
                    dubbing_data, orig_sr=dubbing_sr, target_sr=original_sr
                )

            original_data = original_data * original_volume
            dubbing_data = dubbing_data * dubbing_volume

            # Match channel layouts before mixing.
            if original_data.ndim == 1:
                if dubbing_data.ndim > 1:
                    dubbing_data = dubbing_data.mean(axis=1)  # down-mix to mono
            elif dubbing_data.ndim == 1:
                # Duplicate the mono dubbing track into both stereo channels.
                dubbing_data = np.column_stack([dubbing_data, dubbing_data])

            # Bug fix: the mono branch previously added arrays of different
            # lengths (a NumPy broadcast error); always truncate to the shorter.
            min_length = min(len(original_data), len(dubbing_data))
            mixed_data = original_data[:min_length] + dubbing_data[:min_length]

            # Limit amplitude to avoid clipping.
            mixed_data = np.clip(mixed_data, -1.0, 1.0)

            # tempfile.mktemp() is deprecated/insecure; create the file atomically.
            with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
                mixed_path = tmp.name
            sf.write(mixed_path, mixed_data, original_sr)

            return mixed_path

        except Exception as e:
            print(f"Failed to mix audio tracks: {e}")
            raise

    async def create_auto_segments(
        self,
        video_path: str,
        text: str,
        max_segment_duration: float = 10.0
    ) -> List[DubbingSegment]:
        """Split *text* into time-aligned dubbing segments for *video_path*.

        Budgets each segment at most *max_segment_duration* seconds using
        a rough average speaking rate of 2.5 words per second.
        """
        try:
            video_info = self.video_processor.get_video_info(video_path)
            video_duration = video_info.duration

            words = text.split()
            segments = []

            current_text = ""
            current_start = 0.0
            words_per_second = 2.5  # assumed average speaking rate

            for word in words:
                current_text += " " + word if current_text else word

                # Estimated speaking time of the accumulated text.
                estimated_duration = len(current_text.split()) / words_per_second

                if estimated_duration >= max_segment_duration:
                    segments.append(DubbingSegment(
                        text=current_text.strip(),
                        start_time=current_start,
                        end_time=min(current_start + estimated_duration, video_duration)
                    ))

                    current_text = ""
                    current_start += estimated_duration

            # Trailing words that never reached the duration budget.
            # Guard against a start time past the end of the video, which
            # would produce an invalid (start >= end) segment.
            if current_text.strip() and current_start < video_duration:
                segments.append(DubbingSegment(
                    text=current_text.strip(),
                    start_time=current_start,
                    end_time=video_duration
                ))

            return segments

        except Exception as e:
            print(f"Failed to create auto segments: {e}")
            return []

    async def cleanup(self):
        """Release resources held by the TTS backend."""
        if self._initialized:
            await self.tts_service.cleanup()
            self._initialized = False
