import os
import subprocess
import re
import math
from tqdm import tqdm
import json
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile

def run_command_safely(cmd, timeout=30):
    """Run a shell command without risking an indefinite hang.

    :param cmd: shell command string to execute
    :param timeout: maximum wait in seconds before the process is killed
    :return: tuple (return code, stdout, stderr); the return code is -1 on
             timeout and -2 on any other failure
    """
    try:
        process = subprocess.Popen(
            cmd,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        out, err = process.communicate(timeout=timeout)
        return process.returncode, out, err
    except subprocess.TimeoutExpired:
        # Kill the stuck child and drain its pipes before reporting.
        process.kill()
        out, err = process.communicate()
        return -1, out, err
    except Exception as exc:
        return -2, "", str(exc)

def preprocess_audio(input_path, output_wav):
    """Convert audio to a mono 16 kHz PCM WAV with a speech band-pass filter.

    :param input_path: source audio file
    :param output_wav: destination WAV file path
    :raises RuntimeError: if the ffmpeg invocation fails
    """
    # 300-3400 Hz covers the classic telephony speech band.
    filter_chain = "highpass=f=300,lowpass=f=3400,aresample=async=1"
    cmd = " ".join([
        "ffmpeg -v error",
        f'-i "{input_path}"',
        "-ac 1 -ar 16000 -c:a pcm_s16le",   # mono, 16 kHz, 16-bit PCM
        f'-af "{filter_chain}"',
        "-threads 4 -y",
        f'"{output_wav}"',
    ])
    print(f"\n执行命令: {cmd}")
    returncode, stdout, stderr = run_command_safely(cmd, timeout=60)

    if returncode != 0:
        print(f"命令执行失败 (返回码: {returncode})")
        print(f"错误输出: {stderr}")
        raise RuntimeError(f"预处理音频失败: {stderr}")

def extract_voice_features(wav_path):
    """Compute a per-frame voice-activity track for a WAV file.

    The signal is cut into 20 ms frames; a frame is classified as speech
    when its RMS energy and zero-crossing rate both fall in ranges typical
    of voiced audio.

    :param wav_path: path to the (preferably mono, 16 kHz) WAV file
    :return: tuple (voice_activity, sample_rate, frame_size) where
             voice_activity is a numpy array of 1 (speech) / 0 (silence)
             flags, sample_rate is in Hz and frame_size is seconds per frame
    """
    # Read raw samples and the file's sample rate.
    sample_rate, data = wavfile.read(wav_path)

    # Normalize to [-1, 1]. Integer PCM must be scaled by the dtype maximum;
    # float WAVs are already normalized (and np.iinfo would reject them).
    if np.issubdtype(data.dtype, np.integer):
        data = data.astype(np.float32) / np.iinfo(data.dtype).max
    else:
        data = data.astype(np.float32)

    # Frame the signal (the last frame may be shorter than frame_length).
    frame_size = 0.02  # 20 ms frames
    frame_length = int(sample_rate * frame_size)
    frames = [data[i:i+frame_length] for i in range(0, len(data), frame_length)]

    # Classify each frame.
    voice_activity = []
    for frame in frames:
        if len(frame) == 0:
            continue

        # Energy (root mean square).
        rms = np.sqrt(np.mean(frame**2))

        # Zero-crossing rate (sign changes per sample, scaled by 2).
        zcr = np.mean(np.abs(np.diff(np.sign(frame))))

        # Heuristic VAD: speech has moderate energy AND moderate ZCR,
        # unlike silence (low energy) or broadband noise (high ZCR).
        is_speech = (rms > 0.02) and (0.1 < zcr < 0.5)
        voice_activity.append(1 if is_speech else 0)

    return np.array(voice_activity), sample_rate, frame_size

def find_speech_pauses(voice_activity, frame_size, min_pause_duration=1.0):
    """Locate the midpoints of silence runs long enough to count as pauses.

    :param voice_activity: sequence of 1 (speech) / 0 (silence) frame flags
    :param frame_size: duration of a single frame in seconds
    :param min_pause_duration: shortest silence (seconds) counted as a pause
    :return: list of pause midpoint times in seconds
    """
    required_frames = int(min_pause_duration / frame_size)
    pauses = []
    run_start = None
    run_length = 0

    def flush():
        # Record the midpoint of the just-ended silence run if long enough.
        if run_start is not None and run_length >= required_frames:
            pauses.append((run_start + run_length / 2) * frame_size)

    for index, flag in enumerate(voice_activity):
        if flag == 0:
            # Extend (or open) the current silence run.
            if run_start is None:
                run_start = index
            run_length += 1
        else:
            flush()
            run_start = None
            run_length = 0

    flush()  # the recording may end in silence
    return pauses

def visualize_voice_activity(voice_activity, output_path, frame_size):
    """Plot the voice-activity track over time and save it as an image.

    :param voice_activity: sequence of 1 (speech) / 0 (silence) frame flags
    :param output_path: path of the image file to write
    :param frame_size: duration of a single frame in seconds
    """
    times = np.arange(len(voice_activity)) * frame_size

    # Object-oriented matplotlib API; output matches the pyplot equivalent.
    fig, ax = plt.subplots(figsize=(15, 5))
    ax.plot(times, voice_activity, 'b-', linewidth=0.5)
    ax.set_title('Voice Activity Detection')
    ax.set_xlabel('Time (seconds)')
    ax.set_ylabel('Voice Activity (1=speech, 0=silence)')
    ax.set_ylim(-0.1, 1.1)
    ax.grid(True)
    fig.savefig(output_path)
    plt.close(fig)

def find_best_cut_position(pause_positions, target_time, search_window=120):
    """Pick the pause closest to *target_time* inside a search window.

    :param pause_positions: list of pause times in seconds
    :param target_time: desired split time in seconds
    :param search_window: total window width around the target (seconds)
    :return: nearest in-window pause, or *target_time* when none qualifies
    """
    half_window = search_window / 2
    lower = max(0, target_time - half_window)
    upper = target_time + half_window

    # Keep only pauses that fall inside [lower, upper].
    in_window = [t for t in pause_positions if lower <= t <= upper]

    if not in_window:
        # No usable pause nearby: fall back to a hard cut at the target.
        return target_time

    return min(in_window, key=lambda t: abs(t - target_time))

def split_audio_at_time(input_path, output_path, start_time, end_time):
    """Cut the range [start_time, end_time) out of an audio file (no re-encode).

    :param input_path: source audio file
    :param output_path: destination file for the extracted segment
    :param start_time: absolute segment start in the source (seconds)
    :param end_time: absolute segment end in the source (seconds)
    :raises RuntimeError: if the ffmpeg invocation fails
    """
    # BUG FIX: with input seeking (-ss before -i) ffmpeg resets timestamps
    # to 0, so an absolute "-to end_time" would make every segment
    # end_time seconds long (overlapping output). Pass the segment
    # *duration* via -t instead.
    duration = end_time - start_time
    cmd = (
        f'ffmpeg -v error -ss {start_time} -i "{input_path}" '
        f'-t {duration} -c:a copy -y "{output_path}"'
    )
    print(f"\n执行命令: {cmd}")
    returncode, stdout, stderr = run_command_safely(cmd, timeout=60)

    if returncode != 0:
        print(f"命令执行失败 (返回码: {returncode})")
        print(f"错误输出: {stderr}")
        raise RuntimeError(f"分割音频失败: {stderr}")

def split_audio_with_speech_detection(input_dir, output_dir, 
                                     target_duration=2700, 
                                     max_duration=3300, 
                                     min_duration=2100,
                                     visualize=False):
    """Split every .m4a file in *input_dir* at detected speech pauses.

    Each file is preprocessed to WAV, analyzed for voice activity, and cut
    into segments of roughly *target_duration* seconds, preferring cut
    points that fall inside detected pauses.

    :param input_dir: directory containing the source .m4a files
    :param output_dir: directory receiving the split segments
    :param target_duration: preferred segment length in seconds (default 2700 = 45 min)
    :param max_duration: hard upper bound on segment length (seconds)
    :param min_duration: hard lower bound on segment length (seconds)
    :param visualize: when True, save a voice-activity plot per file
    """
    os.makedirs(output_dir, exist_ok=True)

    # Debug artifacts (plots) go into a subdirectory of the output.
    debug_dir = os.path.join(output_dir, "debug")
    if visualize:
        os.makedirs(debug_dir, exist_ok=True)

    audio_files = [f for f in os.listdir(input_dir) if f.lower().endswith('.m4a')]

    print(f"找到 {len(audio_files)} 个音频文件，开始处理...")
    print(f"分割策略: 目标时长={target_duration/60:.1f}分钟, "
          f"搜索范围={min_duration/60:.1f}-{max_duration/60:.1f}分钟")

    for filename in tqdm(audio_files, desc="处理文件"):
        input_path = os.path.join(input_dir, filename)
        base_name = os.path.splitext(filename)[0]

        # Probe the container for the total duration.
        cmd_duration = f'ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "{input_path}"'
        returncode, stdout, stderr = run_command_safely(cmd_duration, timeout=30)

        if returncode != 0:
            # BUG FIX: previously referenced an undefined name `result`
            # (NameError) and never showed which file failed.
            print(f"错误: 无法获取文件 {filename} 的时长。跳过此文件。")
            print(f"错误信息: {stderr}")
            continue

        try:
            total_duration = float(stdout.strip())
        except ValueError:
            # BUG FIX: same undefined `result` reference as above.
            print(f"错误: 无效的时长数据 '{stdout.strip()}' 来自文件 {filename}。跳过此文件。")
            continue

        # Reserve a temporary WAV path for the preprocessed audio.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_wav:
            wav_path = temp_wav.name

        try:
            print(f"预处理音频: {filename}...")
            preprocess_audio(input_path, wav_path)

            # Detect voice activity and candidate pause positions.
            print(f"分析语音活动: {filename}...")
            voice_activity, sample_rate, frame_size = extract_voice_features(wav_path)
            pause_positions = find_speech_pauses(voice_activity, frame_size)

            if visualize:
                vis_path = os.path.join(debug_dir, f"{base_name}_voice_activity.png")
                visualize_voice_activity(voice_activity, vis_path, frame_size)
                print(f"语音活动图已保存至: {vis_path}")
        finally:
            # Remove the temp WAV even if preprocessing or analysis raised.
            if os.path.exists(wav_path):
                os.remove(wav_path)

        print(f"找到 {len(pause_positions)} 个语音停顿点")

        # Walk through the file, emitting one segment per iteration.
        segment_num = 1
        current_time = 0.0

        with tqdm(total=total_duration, desc=f"分割 {filename}", unit='sec') as pbar:
            while current_time < total_duration:
                target_end = current_time + target_duration

                if total_duration - current_time < min_duration:
                    # The remainder is too short to split further.
                    end_time = total_duration
                else:
                    # Prefer a detected pause near the target split point.
                    end_time = find_best_cut_position(pause_positions, target_end)

                    # Clamp the chosen cut into [min, max] segment length.
                    if end_time < current_time + min_duration:
                        end_time = min(current_time + min_duration, total_duration)
                    elif end_time > current_time + max_duration:
                        end_time = min(current_time + max_duration, total_duration)

                output_filename = f"{base_name}_part{segment_num}.m4a"
                output_path = os.path.join(output_dir, output_filename)

                split_audio_at_time(input_path, output_path, current_time, end_time)

                segment_duration = end_time - current_time
                pbar.update(segment_duration)

                print(f"创建分段 {segment_num}: {current_time/60:.1f}分 - {end_time/60:.1f}分 "
                      f"(时长: {segment_duration/60:.1f}分钟)")

                current_time = end_time
                segment_num += 1

if __name__ == "__main__":
    # 配置路径 (按需修改)
    INPUT_DIR = "D:\MyDocs\split_in"   # 输入目录(存放原始.m4a文件)
    OUTPUT_DIR = "D:\MyDocs\split_out"  # 输出目录(存放分割后的文件)
    
    # 分割参数 (单位:秒)
    TARGET_DURATION = 45 * 60      # 目标时长: 45分钟
    MAX_DURATION = 55 * 60         # 最大允许时长: 55分钟
    MIN_DURATION = 35 * 60         # 最小允许时长: 35分钟
    
    # 是否生成可视化调试图
    VISUALIZE = True
    
    split_audio_with_speech_detection(
        INPUT_DIR, 
        OUTPUT_DIR,
        target_duration=TARGET_DURATION,
        max_duration=MAX_DURATION,
        min_duration=MIN_DURATION,
        visualize=VISUALIZE
    )
    print("\n所有文件处理完成！")
