import ffmpeg
import os
import uuid
import tempfile
import re
import logging

class AudioTools:
    """Audio processing helpers built on FFmpeg (via the ffmpeg-python bindings).

    Every method shells out to the ``ffmpeg`` binary. Failures are wrapped and
    re-raised with the original (Chinese) user-facing messages, which callers
    may match on; FFmpeg stderr is logged before re-raising because
    ``quiet=True`` would otherwise discard it.
    """

    def __init__(self):
        """Initialize the tool with the formats convert_audio_format() accepts."""
        self.supported_audio_formats = ['mp3', 'wav', 'aac', 'ogg', 'flac', 'm4a']

    def _parse_timestamp(self, timestamp_str):
        """Parse a strict "HH:MM:SS.mmm" timestamp string into seconds.

        Args:
            timestamp_str (str): timestamp, strictly "HH:MM:SS.mmm"
                (two-digit fields, three-digit milliseconds).

        Returns:
            float: total seconds represented by the timestamp.

        Raises:
            ValueError: if the string does not match the format or a field
                is out of range (hours > 23, minutes/seconds > 59).
        """
        try:
            match = re.match(r'^(\d{2}):(\d{2}):(\d{2})\.(\d{3})$', timestamp_str)
            if not match:
                raise ValueError(f"无效的时间戳格式: {timestamp_str}，必须为 HH:MM:SS.mmm 格式")

            hours, minutes, seconds, milliseconds = (int(g) for g in match.groups())

            # Range validation; the milliseconds check is defensive — the
            # regex already bounds it to three digits (0-999).
            if hours > 23:
                raise ValueError(f"小时数超出范围: {hours}")
            if minutes > 59:
                raise ValueError(f"分钟数超出范围: {minutes}")
            if seconds > 59:
                raise ValueError(f"秒数超出范围: {seconds}")
            if milliseconds > 999:
                raise ValueError(f"毫秒数超出范围: {milliseconds}")

            return hours * 3600 + minutes * 60 + seconds + milliseconds / 1000
        except Exception as e:
            # Preserve original contract: every failure surfaces as a single
            # ValueError embedding the offending input.
            raise ValueError(f"解析时间戳失败: {timestamp_str}, 错误: {str(e)}")

    def _create_silence_audio(self, duration, sample_rate=44100, output_path=None):
        """Create a stereo silent WAV clip of the given length.

        Args:
            duration (float): silence length in seconds.
            sample_rate (int): sample rate of the generated clip.
            output_path (str): destination path; a temp file is used when None.

        Returns:
            str: path of the generated silence file.

        Raises:
            Exception: if FFmpeg fails to generate the clip.
        """
        try:
            if output_path is None:
                output_path = os.path.join(tempfile.gettempdir(), f"silence_{uuid.uuid4().hex[:8]}.wav")

            # lavfi 'anullsrc' generates digital silence; -t bounds its length.
            stream = ffmpeg.input('anullsrc', f='lavfi', t=duration)
            stream = ffmpeg.output(stream, output_path, ar=sample_rate, ac=2)
            ffmpeg.run(stream, overwrite_output=True, quiet=True)

            return output_path
        except Exception as e:
            raise Exception(f"创建静音音频失败: {str(e)}")

    def extract_audio(self, video_path, output_dir):
        """Extract the audio track of a video into a 192 kbps MP3 file.

        Args:
            video_path (str): path of the source video file.
            output_dir (str): output directory (created if missing).

        Returns:
            str: path of the extracted MP3 file.

        Raises:
            Exception: if the video has no audio stream or FFmpeg fails.
        """
        try:
            os.makedirs(output_dir, exist_ok=True)

            # Probe first so a video without audio fails with a clear message
            # instead of a cryptic FFmpeg error.
            video_info = ffmpeg.probe(video_path)
            has_audio = any(stream['codec_type'] == 'audio' for stream in video_info['streams'])
            if not has_audio:
                raise Exception("该视频文件不包含音频数据，无法提取音频")

            output_filename = f"extracted_audio_{uuid.uuid4().hex[:8]}.mp3"
            output_path = os.path.join(output_dir, output_filename)

            stream = ffmpeg.input(video_path)
            stream = ffmpeg.output(stream, output_path, acodec='mp3', ab='192k')
            try:
                ffmpeg.run(stream, overwrite_output=True, quiet=True)
            except ffmpeg.Error as e:
                # Surface stderr explicitly (quiet=True suppresses it),
                # consistent with merge_audio_with_timestamps.
                stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                logging.error(f"提取音频FFmpeg错误: {stderr_output}")
                raise Exception(stderr_output)

            if not os.path.exists(output_path):
                raise Exception(f"FFmpeg处理完成但输出文件不存在: {output_path}")

            return output_path
        except Exception as e:
            raise Exception(f"提取音频失败: {str(e)}")

    def merge_audio(self, audio_paths, output_dir):
        """Concatenate several audio files back-to-back into one MP3.

        Args:
            audio_paths (list): list of audio file paths, in playback order.
            output_dir (str): output directory (created if missing).

        Returns:
            str: path of the merged MP3 file.

        Raises:
            Exception: if the list is empty or FFmpeg fails.
        """
        try:
            os.makedirs(output_dir, exist_ok=True)

            # An empty list would make ffmpeg.concat fail cryptically.
            if not audio_paths:
                raise ValueError("音频文件列表为空")

            output_filename = f"merged_audio_{uuid.uuid4().hex[:8]}.mp3"
            output_path = os.path.join(output_dir, output_filename)

            inputs = [ffmpeg.input(audio_path) for audio_path in audio_paths]

            # concat filter with 0 video / 1 audio stream per segment.
            merged = ffmpeg.concat(*inputs, v=0, a=1)
            stream = ffmpeg.output(merged, output_path, acodec='mp3', ab='192k')
            try:
                ffmpeg.run(stream, overwrite_output=True, quiet=True)
            except ffmpeg.Error as e:
                stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                logging.error(f"合并音频FFmpeg错误: {stderr_output}")
                raise Exception(stderr_output)

            if not os.path.exists(output_path):
                raise Exception(f"FFmpeg处理完成但输出文件不存在: {output_path}")

            return output_path
        except Exception as e:
            raise Exception(f"合并音频失败: {str(e)}")

    def _resolve_gain_db(self, index, gain_db, gains_db):
        """Return the effective gain (dB) for segment *index*, or None.

        Per-segment gains (``gains_db``) take precedence over the global
        ``gain_db``; ``None`` means "no volume filter".
        """
        if gains_db is not None:
            try:
                return float(gains_db[index])
            except Exception:
                raise ValueError("gains_db 中存在无法转换为浮点数的值")
        if gain_db is not None:
            return float(gain_db)
        return None

    def _fit_audio_to_slot(self, audio_file, slot_duration, temp_files):
        """Crop or silence-pad *audio_file* to exactly *slot_duration* seconds.

        Any intermediate file created is appended to *temp_files* so the
        caller can clean it up; when the clip already fits, the original
        path is returned untouched.
        """
        probe = ffmpeg.probe(audio_file)
        audio_duration = float(probe['format']['duration'])

        if audio_duration > slot_duration:
            # Too long: crop to the slot length.
            temp_cropped = os.path.join(tempfile.gettempdir(), f"cropped_{uuid.uuid4().hex[:8]}.wav")
            temp_files.append(temp_cropped)

            stream = ffmpeg.input(audio_file)
            stream = ffmpeg.output(stream, temp_cropped, t=slot_duration)
            try:
                ffmpeg.run(stream, overwrite_output=True, quiet=True)
            except ffmpeg.Error as e:
                stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                logging.error(f"裁剪音频FFmpeg错误: {stderr_output}")
                raise Exception(f"裁剪音频失败: {stderr_output}")
            return temp_cropped

        if audio_duration < slot_duration:
            # Too short: append generated silence to fill the slot.
            temp_silence = os.path.join(tempfile.gettempdir(), f"silence_{uuid.uuid4().hex[:8]}.wav")
            temp_files.append(temp_silence)
            self._create_silence_audio(slot_duration - audio_duration, output_path=temp_silence)

            temp_combined = os.path.join(tempfile.gettempdir(), f"combined_{uuid.uuid4().hex[:8]}.wav")
            temp_files.append(temp_combined)

            stream = ffmpeg.concat(ffmpeg.input(audio_file), ffmpeg.input(temp_silence), v=0, a=1)
            stream = ffmpeg.output(stream, temp_combined)
            try:
                ffmpeg.run(stream, overwrite_output=True, quiet=True)
            except ffmpeg.Error as e:
                stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                logging.error(f"合并音频FFmpeg错误: {stderr_output}")
                raise Exception(f"合并音频失败: {stderr_output}")
            return temp_combined

        # Exact fit: use the original file as-is.
        return audio_file

    def _build_overlay_filter(self, audio_files, parsed_timestamps, gain_db, gains_db):
        """Build the filter_complex string that places each clip on the timeline.

        Input 0 is the silent base track; input i+1 is clip i. Each clip is
        optionally volume-adjusted, delayed to its slot start with ``adelay``
        (``:1`` applies the delay to all channels), then mixed onto the
        running mix with ``amix``. The final amix is left unlabeled so it
        becomes the output stream.
        """
        segments = []
        current = "[0:a]"
        last = len(audio_files) - 1
        for i, timestamp in enumerate(parsed_timestamps):
            delay_ms = int(timestamp['start'] * 1000)
            gain = self._resolve_gain_db(i, gain_db, gains_db)

            if gain is not None:
                # Apply volume before the delay so silence padding is unaffected.
                segments.append(f"[{i + 1}:a]volume={gain}dB[vol{i}]")
                segments.append(f"[vol{i}]adelay={delay_ms}:1[delayed{i}]")
            else:
                segments.append(f"[{i + 1}:a]adelay={delay_ms}:1[delayed{i}]")

            out_label = f"[tmp{i}]" if i < last else ""
            segments.append(f"{current}[delayed{i}]amix=inputs=2:duration=first:normalize=0{out_label}")
            current = f"[tmp{i}]"
        return ";".join(segments)

    def merge_audio_with_timestamps(self, audio_files, timestamps, output_dir, gain_db=None, gains_db=None):
        """Place audio clips on a timeline per timestamps, padding gaps with silence.

        Clips longer than their slot are cropped; shorter ones are padded
        with silence. Everything is mixed over a silent base track whose
        length is the latest end time.

        Args:
            audio_files (list): audio file paths, aligned with *timestamps*.
            timestamps (list): dicts {"start_time": "HH:MM:SS.mmm", "end_time": "HH:MM:SS.mmm"}.
            output_dir (str): output directory (created if missing).
            gain_db (float|None): global gain in dB applied to every clip (optional).
            gains_db (list|None): per-clip gains in dB, aligned with
                *audio_files*; takes precedence over gain_db (optional).

        Returns:
            str: path of the merged MP3 file.

        Raises:
            Exception: on count/gain mismatches, missing files, or FFmpeg errors.
        """
        try:
            os.makedirs(output_dir, exist_ok=True)

            output_filename = f"merged_timestamp_audio_{uuid.uuid4().hex[:8]}.mp3"
            output_path = os.path.join(output_dir, output_filename)

            parsed_timestamps = []
            for ts in timestamps:
                start_seconds = self._parse_timestamp(ts['start_time'])
                end_seconds = self._parse_timestamp(ts['end_time'])
                parsed_timestamps.append({
                    'start': start_seconds,
                    'end': end_seconds,
                    'duration': end_seconds - start_seconds
                })

            if len(audio_files) != len(parsed_timestamps):
                raise ValueError(f"音频文件数量({len(audio_files)})与时间戳数量({len(parsed_timestamps)})不匹配")

            if gains_db is not None:
                if not isinstance(gains_db, list):
                    raise ValueError("gains_db 必须是数组")
                if len(gains_db) != len(audio_files):
                    raise ValueError(f"gains_db 长度({len(gains_db)})需与音频文件数量({len(audio_files)})一致")

            # BUG FIX: sort the clips (and per-clip gains) together with
            # their timestamps. The previous code sorted only the timestamp
            # list, silently assigning clips to the wrong slots whenever the
            # input was not already ordered by start time.
            order = sorted(range(len(parsed_timestamps)), key=lambda i: parsed_timestamps[i]['start'])
            parsed_timestamps = [parsed_timestamps[i] for i in order]
            audio_files = [audio_files[i] for i in order]
            if gains_db is not None:
                gains_db = [gains_db[i] for i in order]

            temp_files = []
            try:
                # Base silent track spans up to the last slot's end time.
                total_duration = parsed_timestamps[-1]['end']

                temp_silence_base = os.path.join(tempfile.gettempdir(), f"silence_base_{uuid.uuid4().hex[:8]}.wav")
                temp_files.append(temp_silence_base)
                self._create_silence_audio(total_duration, output_path=temp_silence_base)

                # Crop/pad each clip to exactly its slot duration.
                processed_audio_files = []
                for audio_file, timestamp in zip(audio_files, parsed_timestamps):
                    if not os.path.exists(audio_file):
                        raise FileNotFoundError(f"音频文件不存在: {audio_file}")
                    processed_audio_files.append(
                        self._fit_audio_to_slot(audio_file, timestamp['duration'], temp_files))

                logging.info(f"处理音频文件数量: {len(processed_audio_files)}")
                logging.info(f"时间戳数量: {len(parsed_timestamps)}")
                for i, (audio_file, timestamp) in enumerate(zip(processed_audio_files, parsed_timestamps)):
                    logging.info(f"音频文件 {i}: {audio_file}, 开始时间: {timestamp['start']}秒")

                final_filter = self._build_overlay_filter(
                    processed_audio_files, parsed_timestamps, gain_db, gains_db)
                logging.info(f"构建的滤镜: {final_filter}")

                all_inputs = [ffmpeg.input(temp_silence_base)] + \
                             [ffmpeg.input(audio_file) for audio_file in processed_audio_files]
                stream = ffmpeg.output(*all_inputs, output_path,
                                       filter_complex=final_filter,
                                       acodec='mp3', ab='192k')

                try:
                    ffmpeg.run(stream, overwrite_output=True, quiet=True)
                except ffmpeg.Error as e:
                    stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                    logging.error(f"最终合并FFmpeg错误: {stderr_output}")
                    raise Exception(f"最终合并失败: {stderr_output}")

                if not os.path.exists(output_path):
                    raise Exception(f"FFmpeg处理完成但输出文件不存在: {output_path}")

                return output_path

            finally:
                # Best-effort cleanup of intermediates; never mask the real error.
                for temp_file in temp_files:
                    try:
                        if os.path.exists(temp_file):
                            os.remove(temp_file)
                    except Exception as e:
                        logging.warning(f"清理临时文件失败: {temp_file}, 错误: {e}")

        except Exception as e:
            raise Exception(f"根据时间戳合并音频失败: {str(e)}")

    def convert_audio_format(self, audio_path, target_format, output_dir):
        """Convert an audio file to one of the supported formats.

        Args:
            audio_path (str): path of the source audio file.
            target_format (str): one of self.supported_audio_formats.
            output_dir (str): output directory (created if missing).

        Returns:
            str: path of the converted file.

        Raises:
            Exception: if the format is unsupported or FFmpeg fails.
        """
        # Encoder settings per target format (lossy formats get 192 kbps).
        format_args = {
            'mp3': {'acodec': 'mp3', 'ab': '192k'},
            'wav': {'acodec': 'pcm_s16le'},
            'aac': {'acodec': 'aac', 'ab': '192k'},
            'ogg': {'acodec': 'libvorbis', 'ab': '192k'},
            'flac': {'acodec': 'flac'},
            'm4a': {'acodec': 'aac', 'ab': '192k'},
        }
        try:
            os.makedirs(output_dir, exist_ok=True)

            # Validate before building any output path.
            if target_format not in self.supported_audio_formats:
                raise ValueError(f"不支持的音频格式: {target_format}")

            output_filename = f"converted_audio_{uuid.uuid4().hex[:8]}.{target_format}"
            output_path = os.path.join(output_dir, output_filename)

            stream = ffmpeg.input(audio_path)
            stream = ffmpeg.output(stream, output_path, **format_args.get(target_format, {}))

            try:
                ffmpeg.run(stream, overwrite_output=True, quiet=True)
            except ffmpeg.Error as e:
                stderr_output = e.stderr.decode('utf-8') if e.stderr else "无错误输出"
                logging.error(f"转换音频格式FFmpeg错误: {stderr_output}")
                raise Exception(stderr_output)

            if not os.path.exists(output_path):
                raise Exception(f"FFmpeg处理完成但输出文件不存在: {output_path}")

            return output_path
        except Exception as e:
            raise Exception(f"转换音频格式失败: {str(e)}")

    def get_audio_info(self, audio_path):
        """Return container and audio-stream metadata for a file.

        Args:
            audio_path (str): path of the audio file.

        Returns:
            dict: keys 'format', 'duration', 'size', 'bitrate', plus
                'audio_codec'/'audio_channels'/'audio_sample_rate' when an
                audio stream is present.

        Raises:
            Exception: if probing fails.
        """
        try:
            probe = ffmpeg.probe(audio_path)

            # First audio stream, if any.
            audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)

            fmt = probe['format']
            info = {
                'format': fmt['format_name'],
                'duration': float(fmt['duration']),
                'size': int(fmt['size']),
                # Some containers omit bit_rate; report 0 instead of raising.
                'bitrate': int(fmt.get('bit_rate', 0))
            }

            if audio_stream:
                info.update({
                    'audio_codec': audio_stream['codec_name'],
                    'audio_channels': int(audio_stream['channels']),
                    'audio_sample_rate': int(audio_stream['sample_rate'])
                })

            return info
        except Exception as e:
            raise Exception(f"获取音频信息失败: {str(e)}")