#!/usr/bin/env python3
import os
import sys
import argparse
import shutil
import numpy as np
import librosa
import soundfile as sf
import webrtcvad
import collections
import contextlib
import wave
import tempfile
from pathlib import Path
from loguru import logger


class SimpleAudioProcessor:
    """Simplified audio processor focused on segmentation (no vocal separation).

    Splits an input recording into sentence-like segments using WebRTC VAD on a
    16 kHz mono working copy, then re-exports each segment from the original
    file at 48 kHz stereo, 24-bit WAV.
    """

    def __init__(self, output_dir="processed_audio"):
        """Create the output directory layout: <output_dir>/raw and <output_dir>/segments."""
        self.output_dir = output_dir
        # Raw copies of inputs and the produced segments live in subdirectories.
        self.raw_dir = os.path.join(output_dir, "raw")
        self.segments_dir = os.path.join(output_dir, "segments")
        for dir_path in (output_dir, self.raw_dir, self.segments_dir):
            # exist_ok avoids the check-then-create race of the exists()/makedirs() pair.
            os.makedirs(dir_path, exist_ok=True)

    def process_file(self, input_file, output_prefix=None, min_segment_length=1.0, max_segment_length=15.0):
        """Split an audio file into speech segments and export them as WAV files.

        Args:
            input_file: Path to the source audio file.
            output_prefix: Basename prefix for segment files; defaults to the
                input filename without its extension.
            min_segment_length: Segments shorter than this (seconds) are dropped.
            max_segment_length: Longer segments are split into equal parts.

        Returns:
            List of written segment file paths, or None on failure.
        """
        if not os.path.exists(input_file):
            logger.error(f"错误: 文件 {input_file} 不存在")
            return None

        logger.info(f"处理文件: {input_file}")

        # Derive the output prefix. splitext only strips the final extension,
        # so dotted names like "a.b.wav" yield "a.b" instead of just "a"
        # (the old split('.')[0] truncated such names).
        if output_prefix:
            prefix = output_prefix
        else:
            prefix = os.path.splitext(os.path.basename(input_file))[0]

        # Keep an untouched copy of the input in the raw/ directory.
        raw_copy = os.path.join(self.raw_dir, os.path.basename(input_file))
        if input_file != raw_copy:
            shutil.copy2(input_file, raw_copy)
            logger.info(f"已复制文件到 {raw_copy}")

        logger.info("加载音频文件...")
        try:
            # 16 kHz mono: one of the sample rates webrtcvad supports.
            y, sr = librosa.load(input_file, sr=16000, mono=True)
        except Exception as e:
            logger.error(f"加载音频文件失败: {e}")
            return None

        logger.info("初始化语音活动检测...")
        vad = webrtcvad.Vad(3)  # aggressiveness 3 = most aggressive (scale 0-3)

        # webrtcvad operates on 16-bit PCM; write a temporary WAV in that format.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            temp_path = temp_file.name
            sf.write(temp_path, y, sr, subtype='PCM_16')

        try:
            logger.info("检测语音分段...")
            segments = self._get_voice_segments(temp_path, vad)

            # Enforce the min/max duration constraints.
            refined_segments = self._refine_segments(
                segments, min_length=min_segment_length, max_length=max_segment_length)

            segment_files = []
            logger.info(f"生成{len(refined_segments)}个语音分段...")

            for i, (start, end) in enumerate(refined_segments, 1):
                # Re-read each segment from the original file at high quality.
                high_quality_y, high_sr = librosa.load(
                    input_file, sr=48000, mono=False, offset=start, duration=end - start)

                # Duplicate a mono channel so the output is always stereo.
                if high_quality_y.ndim == 1:
                    high_quality_y = np.array([high_quality_y, high_quality_y])

                output_file = os.path.join(self.segments_dir, f"{prefix}_segment_{i:03d}.wav")

                # soundfile expects (frames, channels), hence the transpose.
                sf.write(output_file, high_quality_y.T, high_sr, subtype='PCM_24')
                segment_files.append(output_file)
                logger.info(f"保存分段到 {output_file}")

            logger.info(f"音频已分段为{len(segment_files)}个片段")
            return segment_files

        except Exception as e:
            logger.error(f"处理音频时出错: {e}")
            return None
        finally:
            # Always remove the temporary PCM-16 working file.
            os.unlink(temp_path)

    def _get_voice_segments(self, audio_file, vad, frame_duration_ms=30, padding_ms=300):
        """Detect speech regions with WebRTC VAD.

        Args:
            audio_file: Path to a 16-bit PCM WAV file.
            vad: A configured webrtcvad.Vad instance.
            frame_duration_ms: VAD frame length; webrtcvad accepts only 10, 20 or 30 ms.
            padding_ms: Padding added before/after each detected region.

        Returns:
            List of (start, end) tuples in seconds.
        """
        with contextlib.closing(wave.open(audio_file, 'rb')) as wf:
            num_channels = wf.getnchannels()
            sample_width = wf.getsampwidth()
            sample_rate = wf.getframerate()
            pcm_data = wf.readframes(wf.getnframes())

        # Frame length in SAMPLES; _frame_generator converts to bytes.
        frame_size = int(sample_rate * (frame_duration_ms / 1000.0))

        # Slice the PCM byte stream into VAD-sized frames.
        frames = list(self._frame_generator(frame_size, pcm_data, sample_width))

        # Classify each frame as speech / non-speech.
        is_speech = [vad.is_speech(frame.bytes, sample_rate) for frame in frames]

        # Total audio duration in seconds, used to clamp segment ends.
        total_duration = len(pcm_data) / (sample_rate * sample_width * num_channels)

        frame_s = frame_duration_ms / 1000.0
        pad_s = padding_ms / 1000.0

        # Merge consecutive speech frames into (start, end) segments,
        # extending each side by the padding (clamped to the audio bounds).
        segments = []
        in_voice = False
        segment_start = 0.0

        for i, speech in enumerate(is_speech):
            if speech and not in_voice:
                # Speech begins.
                in_voice = True
                segment_start = max(0.0, i * frame_s - pad_s)
            elif not speech and in_voice:
                # Speech ends.
                in_voice = False
                segment_end = min((i + 1) * frame_s + pad_s, total_duration)
                segments.append((segment_start, segment_end))

        # Audio ended while still inside a speech region.
        if in_voice:
            segments.append((segment_start, total_duration))

        return segments

    def _frame_generator(self, frame_size, audio, sample_width):
        """Yield successive frames of `frame_size` SAMPLES from raw PCM bytes.

        Bug fix: each frame must span frame_size * sample_width BYTES. The old
        code sliced frame_size bytes, which for 16-bit PCM produced 15 ms
        frames that webrtcvad rejects (it accepts only 10/20/30 ms frames).
        """
        Frame = collections.namedtuple('Frame', 'bytes')
        frame_bytes = frame_size * sample_width
        total = len(audio)
        offset = 0
        while offset + frame_bytes <= total:
            yield Frame(bytes=audio[offset:offset + frame_bytes])
            offset += frame_bytes

    def _refine_segments(self, segments, min_length=1.0, max_length=15.0):
        """Apply duration constraints to raw VAD segments.

        Drops segments shorter than min_length and splits segments longer than
        max_length into equal-duration parts.

        Returns:
            List of (start, end) tuples in seconds.
        """
        refined = []
        for start, end in segments:
            duration = end - start

            if duration < min_length:
                # Too short to be a useful segment.
                continue

            if duration <= max_length:
                # Within limits: accept as-is.
                refined.append((start, end))
            else:
                # Split evenly into the fewest parts that each fit max_length.
                n_parts = int(np.ceil(duration / max_length))
                part_duration = duration / n_parts

                for i in range(n_parts):
                    part_start = start + i * part_duration
                    # Clamp against float rounding past the true end.
                    part_end = min(start + (i + 1) * part_duration, end)
                    refined.append((part_start, part_end))

        return refined


def main():
    """CLI entry point: segment a local audio file with SimpleAudioProcessor."""
    parser = argparse.ArgumentParser(description="处理本地音频文件")
    parser.add_argument("--input", required=True, help="输入音频文件路径")
    parser.add_argument("--output_dir", default="processed_audio", help="输出目录")
    parser.add_argument("--min_length", type=float, default=1.0, help="最小分段长度（秒）")
    parser.add_argument("--max_length", type=float, default=15.0, help="最大分段长度（秒）")
    args = parser.parse_args()

    # Run the full split pipeline; a falsy result means processing failed.
    segments = SimpleAudioProcessor(args.output_dir).process_file(
        args.input,
        min_segment_length=args.min_length,
        max_segment_length=args.max_length,
    )

    if segments:
        logger.info(f"处理完成，已生成{len(segments)}个分段")
    else:
        logger.info("处理失败")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()