"""
音视频同步算法
提供精确的音视频同步功能
"""

import numpy as np
import librosa
from typing import Dict, Any, Optional, List, Tuple
from dataclasses import dataclass
from enum import Enum
import cv2
from scipy import signal
from scipy.ndimage import uniform_filter1d
import time


class SyncMethod(Enum):
    """Supported synchronization strategies (dispatch keys for the synchronizer)."""
    AUDIO_PEAKS = "audio_peaks"  # onset detection on the audio track
    VIDEO_MOTION = "video_motion"  # frame-difference motion peaks in the video
    CROSS_CORRELATION = "cross_correlation"  # feature cross-correlation
    LIP_SYNC = "lip_sync"  # speech-activity based (simplified lip sync)
    MANUAL = "manual"  # caller-supplied reference points


@dataclass
class SyncPoint:
    """A single synchronization anchor on the timeline."""
    time: float  # position in seconds
    confidence: float  # reliability estimate, expected in [0.0, 1.0]
    metadata: Optional[Dict[str, Any]] = None  # extra per-point info (e.g. 'type', 'index')


@dataclass
class SyncResult:
    """Outcome of a synchronization attempt."""
    success: bool  # False when the strategy raised or could not run
    sync_points: List[SyncPoint]  # candidate anchors (may be empty)
    sync_offset: float = 0.0  # global audio offset in seconds
    confidence: float = 0.0  # overall confidence of the result
    method: str = ""  # SyncMethod.value that produced this result
    error_message: Optional[str] = None  # set when success is False
    metadata: Optional[Dict[str, Any]] = None  # strategy-specific extras


class AudioVideoSynchronizer:
    """Audio/video synchronizer.

    Dispatches to one of several detection strategies (see ``SyncMethod``)
    and reports candidate sync points plus a global offset as a
    ``SyncResult``.  All strategy methods share the same signature so they
    can be looked up from a single dispatch table.
    """

    def __init__(self):
        # Dispatch table mapping each SyncMethod to its bound implementation.
        self.sync_methods = {
            SyncMethod.AUDIO_PEAKS: self._sync_by_audio_peaks,
            SyncMethod.VIDEO_MOTION: self._sync_by_video_motion,
            SyncMethod.CROSS_CORRELATION: self._sync_by_cross_correlation,
            SyncMethod.LIP_SYNC: self._sync_by_lip_sync,
            SyncMethod.MANUAL: self._sync_manual
        }

    def synchronize(
        self,
        video_path: str,
        audio_path: str,
        method: SyncMethod = SyncMethod.AUDIO_PEAKS,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Synchronize audio and video using the selected strategy.

        Args:
            video_path: Path to the video file.
            audio_path: Path to the audio file.
            method: Detection strategy to use.
            reference_points: Caller-supplied anchors; required by
                ``SyncMethod.MANUAL`` and ignored by the other strategies.

        Returns:
            A ``SyncResult``.  No exception escapes: on any failure
            ``success`` is False and ``error_message`` is set.
        """
        try:
            if method not in self.sync_methods:
                raise ValueError(f"Unsupported sync method: {method}")

            sync_func = self.sync_methods[method]
            result = sync_func(video_path, audio_path, reference_points)

            # Record which strategy produced the result.
            result.method = method.value
            return result

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Synchronization failed: {str(e)}"
            )

    def _sync_by_audio_peaks(
        self,
        video_path: str,
        audio_path: str,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Detect sync points from audio onsets (peaks).

        The video is assumed to start at t=0, so the reported offset is 0;
        the onset times themselves are the candidate anchors.
        """
        try:
            # Load audio at its native sample rate (sr=None keeps the original).
            audio_data, audio_sr = librosa.load(audio_path, sr=None)

            # Onset times in seconds.
            onset_frames = librosa.onset.onset_detect(
                y=audio_data,
                sr=audio_sr,
                units='time'
            )

            # Build sync points; later onsets get linearly lower confidence
            # (decaying from 1.0 toward 0.7).
            sync_points = []
            for i, onset_time in enumerate(onset_frames):
                confidence = 1.0 - (i / len(onset_frames)) * 0.3
                sync_points.append(SyncPoint(
                    time=onset_time,
                    confidence=confidence,
                    metadata={'type': 'audio_peak', 'index': i}
                ))

            # Video is assumed to start at zero -> no global offset.
            sync_offset = 0.0

            return SyncResult(
                success=True,
                sync_points=sync_points,
                sync_offset=sync_offset,
                confidence=0.8,
                metadata={
                    'audio_peaks_count': len(onset_frames),
                    'audio_duration': len(audio_data) / audio_sr
                }
            )

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Audio peak sync failed: {str(e)}"
            )

    def _sync_by_video_motion(
        self,
        video_path: str,
        audio_path: str,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Detect sync points from frame-difference motion peaks in the video.

        Frames whose smoothed mean absolute difference exceeds
        mean + 2*std of all motion scores become sync points.
        """
        try:
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise RuntimeError("Failed to open video file")

            fps = cap.get(cv2.CAP_PROP_FPS)

            # Mean absolute inter-frame difference per consecutive frame pair.
            motion_scores = []
            prev_frame = None
            frame_index = 0

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # Grayscale is sufficient for frame differencing.
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                    if prev_frame is not None:
                        diff = cv2.absdiff(prev_frame, gray)
                        motion_scores.append(np.mean(diff))
                    # Current frame becomes the reference for the next one.
                    prev_frame = gray

                    frame_index += 1

                    # Cap the number of processed frames for performance.
                    if frame_index > 1000:
                        break
            finally:
                # Release the capture even if reading/decoding raised mid-loop
                # (the original leaked the handle on that path).
                cap.release()

            if motion_scores:
                # Smooth with a moving average (window of 5 frames).
                motion_scores = uniform_filter1d(motion_scores, size=5)
                # BUG FIX: after smoothing, motion_scores is an ndarray, and
                # the original used it in a boolean context
                # (`... if motion_scores else 0`), which raises ValueError
                # ("truth value of an array ... is ambiguous") for any video
                # with more than two frames.  Compute the threshold inside
                # the known-nonempty branch instead.
                threshold = float(np.mean(motion_scores) + 2 * np.std(motion_scores))
            else:
                threshold = 0

            # Frames above the threshold become sync points.
            sync_points = []
            for i, score in enumerate(motion_scores):
                if score > threshold:
                    time_seconds = i / fps
                    confidence = min(score / (threshold * 2), 1.0)
                    sync_points.append(SyncPoint(
                        time=time_seconds,
                        confidence=confidence,
                        metadata={'type': 'video_motion', 'score': score}
                    ))

            return SyncResult(
                success=True,
                sync_points=sync_points,
                sync_offset=0.0,
                confidence=0.7,
                metadata={
                    'motion_frames': len(motion_scores),
                    'fps': fps
                }
            )

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Video motion sync failed: {str(e)}"
            )

    def _sync_by_cross_correlation(
        self,
        video_path: str,
        audio_path: str,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Detect sync points via cross-correlation of audio features.

        NOTE(review): this is a simplified placeholder — it autocorrelates
        the audio MFCCs with themselves; a real implementation should
        correlate audio features against features extracted from the video
        track.
        """
        try:
            audio_data, audio_sr = librosa.load(audio_path, sr=None)

            # MFCCs (13 coefficients) and RMS energy of the audio.
            audio_features = librosa.feature.mfcc(y=audio_data, sr=audio_sr, n_mfcc=13)
            audio_energy = librosa.feature.rms(y=audio_data)

            # Self cross-correlation of the MFCC matrix (placeholder for
            # audio-vs-video feature correlation).  This is O(n^2) in the
            # number of frames and can be slow for long audio.
            correlation = signal.correlate2d(
                audio_features, audio_features, mode='same'
            )

            # NOTE(review): the height threshold uses the global max of the
            # 2-D correlation while peaks are searched in its column means,
            # so it may find no peaks at all — confirm intent.
            peak_indices = signal.find_peaks(
                np.mean(correlation, axis=0),
                height=np.max(correlation) * 0.7
            )[0]

            # Convert frame indices to seconds: idx * duration / n_frames.
            sync_points = []
            for idx in peak_indices:
                time_seconds = idx / (len(audio_features[0]) / (len(audio_data) / audio_sr))
                confidence = correlation.mean(axis=0)[idx] / np.max(correlation)

                sync_points.append(SyncPoint(
                    time=time_seconds,
                    confidence=float(confidence),
                    metadata={'type': 'cross_correlation', 'index': idx}
                ))

            # Use the first detected peak as the global offset.
            if len(sync_points) > 0:
                sync_offset = sync_points[0].time
            else:
                sync_offset = 0.0

            return SyncResult(
                success=True,
                sync_points=sync_points[:10],  # cap the number of sync points
                sync_offset=sync_offset,
                confidence=0.75,
                metadata={
                    'correlation_peaks': len(peak_indices),
                    'method': 'cross_correlation'
                }
            )

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Cross correlation sync failed: {str(e)}"
            )

    def _sync_by_lip_sync(
        self,
        video_path: str,
        audio_path: str,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Detect sync points from speech activity (simplified lip sync).

        A full implementation would combine lip detection with phoneme
        analysis; this simplified version anchors on the start and end of
        each voiced segment in the audio.
        """
        try:
            audio_data, audio_sr = librosa.load(audio_path, sr=None)

            # Split into non-silent intervals (sample-index pairs).
            voice_activity = librosa.effects.split(audio_data, top_db=20)

            # Two anchors per voiced segment: a higher-confidence point at
            # the segment start and a lower-confidence one at its end.
            sync_points = []
            for i, (start, end) in enumerate(voice_activity):
                start_time = start / audio_sr
                end_time = end / audio_sr

                sync_points.append(SyncPoint(
                    time=start_time,
                    confidence=0.8,
                    metadata={'type': 'speech_start', 'segment': i}
                ))

                sync_points.append(SyncPoint(
                    time=end_time,
                    confidence=0.6,
                    metadata={'type': 'speech_end', 'segment': i}
                ))

            return SyncResult(
                success=True,
                sync_points=sync_points,
                sync_offset=0.0,
                confidence=0.65,
                metadata={
                    'speech_segments': len(voice_activity),
                    'method': 'lip_sync'
                }
            )

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Lip sync failed: {str(e)}"
            )

    def _sync_manual(
        self,
        video_path: str,
        audio_path: str,
        reference_points: Optional[List[Dict[str, float]]] = None
    ) -> SyncResult:
        """Build sync points directly from caller-supplied reference points.

        Each reference point is a dict with optional keys 'time',
        'confidence' and 'metadata'; missing keys fall back to 0.0 / 1.0 /
        {} respectively.
        """
        try:
            if not reference_points:
                return SyncResult(
                    success=False,
                    sync_points=[],
                    error_message="Manual sync requires reference points"
                )

            sync_points = []
            for point in reference_points:
                sync_point = SyncPoint(
                    time=point.get('time', 0.0),
                    confidence=point.get('confidence', 1.0),
                    metadata=point.get('metadata', {})
                )
                sync_points.append(sync_point)

            # The first reference point defines the global offset.
            if sync_points:
                sync_offset = sync_points[0].time
            else:
                sync_offset = 0.0

            return SyncResult(
                success=True,
                sync_points=sync_points,
                sync_offset=sync_offset,
                confidence=0.9,  # manually provided anchors are trusted more
                metadata={'method': 'manual', 'points_count': len(sync_points)}
            )

        except Exception as e:
            return SyncResult(
                success=False,
                sync_points=[],
                error_message=f"Manual sync failed: {str(e)}"
            )

    def apply_sync_offset(
        self,
        audio_data: np.ndarray,
        audio_sr: int,
        offset_seconds: float,
        padding_mode: str = 'zeros'
    ) -> np.ndarray:
        """Shift audio by ``offset_seconds``.

        A positive offset prepends silence; a negative offset trims samples
        from the front.  ``padding_mode`` is currently unused (only zero
        padding is implemented) and is kept for interface compatibility.

        Raises:
            RuntimeError: if the offset cannot be applied.
        """
        try:
            offset_samples = int(offset_seconds * audio_sr)

            if offset_samples == 0:
                return audio_data

            if offset_samples > 0:
                # Positive offset: prepend silence.  Match the input dtype so
                # float32 audio is not silently upcast to float64.
                padding = np.zeros(offset_samples, dtype=audio_data.dtype)
                return np.concatenate([padding, audio_data])
            else:
                # Negative offset: drop samples from the front.
                trim_samples = abs(offset_samples)
                if trim_samples >= len(audio_data):
                    # Everything trimmed: return an empty array of the same dtype.
                    return audio_data[:0]
                return audio_data[trim_samples:]

        except Exception as e:
            raise RuntimeError(f"Failed to apply sync offset: {str(e)}")

    def refine_sync(
        self,
        sync_result: SyncResult,
        max_iterations: int = 3
    ) -> SyncResult:
        """Iteratively refine a sync result in place.

        Each iteration removes outlier points and recomputes per-point
        confidence.  The input ``sync_result`` is mutated (its points list
        and metadata are updated) and also returned for convenience.
        """
        try:
            # Nothing to refine on failure or with fewer than two points.
            if not sync_result.success or len(sync_result.sync_points) < 2:
                return sync_result

            # Shallow copy: the SyncPoint objects themselves are shared and
            # their confidence is updated in place below.
            refined_points = sync_result.sync_points.copy()

            for iteration in range(max_iterations):
                filtered_points = self._filter_outliers(refined_points)

                for point in filtered_points:
                    point.confidence = self._recalculate_confidence(point, filtered_points)

                refined_points = filtered_points

            sync_result.sync_points = refined_points
            sync_result.metadata = sync_result.metadata or {}
            sync_result.metadata['refined'] = True

            return sync_result

        except Exception as e:
            sync_result.error_message = f"Refinement failed: {str(e)}"
            return sync_result

    def _filter_outliers(self, sync_points: List[SyncPoint]) -> List[SyncPoint]:
        """Drop points whose time lies more than 2 standard deviations
        from the mean time.  Fewer than three points are returned as-is."""
        if len(sync_points) < 3:
            return sync_points

        times = [point.time for point in sync_points]
        mean_time = np.mean(times)
        std_time = np.std(times)

        threshold = 2.0  # keep points within 2 standard deviations

        return [
            point for point in sync_points
            if abs(point.time - mean_time) <= threshold * std_time
        ]

    def _recalculate_confidence(
        self,
        target_point: SyncPoint,
        all_points: List[SyncPoint]
    ) -> float:
        """Blend a point's confidence with a proximity factor.

        The closer a point is (on average) to the other points, the higher
        its adjusted confidence; result is clamped to [0.0, 1.0].
        """
        if len(all_points) <= 1:
            return target_point.confidence

        # BUG FIX: the original used `point != target_point`, which is
        # dataclass *value* equality and wrongly excluded distinct points
        # holding equal values; identity comparison is intended here.
        times = [point.time for point in all_points if point is not target_point]

        if not times:
            return target_point.confidence

        # Average distance to the other points.
        distances = [abs(target_point.time - t) for t in times]
        avg_distance = np.mean(distances)

        # Exponential falloff: nearer points yield a higher factor.
        distance_factor = np.exp(-avg_distance / 10.0)

        # Weighted blend of original confidence and proximity factor.
        new_confidence = target_point.confidence * 0.7 + distance_factor * 0.3

        return min(max(new_confidence, 0.0), 1.0)

    def validate_sync(
        self,
        sync_result: SyncResult,
        tolerance: float = 0.1
    ) -> Dict[str, Any]:
        """Validate a sync result.

        Returns a dict with keys 'is_valid' (bool), 'issues' (list of
        human-readable strings) and 'score' (float in [0, 1], weighting
        confidence 60% and sync-point count 40%).  ``tolerance`` is
        currently unused and kept for interface compatibility.
        """
        validation = {
            'is_valid': True,
            'issues': [],
            'score': 0.0
        }

        if not sync_result.success:
            validation['is_valid'] = False
            validation['issues'].append("Synchronization failed")
            return validation

        # Sync point count checks.
        if len(sync_result.sync_points) == 0:
            validation['is_valid'] = False
            validation['issues'].append("No sync points found")
        elif len(sync_result.sync_points) < 2:
            validation['issues'].append("Too few sync points")

        # Overall confidence check.
        if sync_result.confidence < 0.5:
            validation['issues'].append("Low confidence")

        # Offsets beyond 10 seconds are flagged as suspicious.
        if abs(sync_result.sync_offset) > 10.0:
            validation['issues'].append("Large sync offset")

        # Composite score: confidence (60%) + point count saturating at 10 (40%).
        score = sync_result.confidence * 0.6
        score += min(len(sync_result.sync_points) / 10.0, 1.0) * 0.4

        validation['score'] = score

        return validation
