#!/usr/bin/env python3
import os
import sys
import argparse
import shutil
import numpy as np
import librosa
import soundfile as sf
import subprocess
import logging
import tempfile
from pathlib import Path

# Configure root logging: timestamp | level | logger:function:line - message.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)-8s | %(name)s:%(funcName)s:%(lineno)d - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger, per stdlib logging convention.
logger = logging.getLogger(__name__)

class DemucsProcessor:
    """Vocal extraction (via the Demucs CLI) and energy-based sentence segmentation.

    Directory layout created under ``output_dir``:
        raw/      - copies of the original input files
        vocals/   - extracted vocal stems (48 kHz, 24-bit, stereo WAV)
        segments/ - per-sentence WAV clips cut from a vocal stem
    """

    def __init__(self, output_dir="demucs_processed"):
        """Create the output directory tree (idempotent).

        Args:
            output_dir: Root directory for all generated files.
        """
        self.output_dir = output_dir
        self.raw_dir = os.path.join(output_dir, "raw")
        self.vocals_dir = os.path.join(output_dir, "vocals")
        self.segments_dir = os.path.join(output_dir, "segments")
        # exist_ok avoids the racy exists()-then-makedirs pattern.
        for dir_path in (output_dir, self.raw_dir, self.vocals_dir, self.segments_dir):
            os.makedirs(dir_path, exist_ok=True)

    def install_demucs(self):
        """Ensure the ``demucs`` package is importable, pip-installing it if missing.

        Returns:
            True on success (already installed or installed now), False on failure.
        """
        try:
            logger.info("检查Demucs是否已安装...")
            import importlib.util
            if importlib.util.find_spec("demucs") is None:
                logger.info("安装Demucs...")
                # sys.executable guarantees we install into the running interpreter.
                subprocess.run([sys.executable, "-m", "pip", "install", "demucs"], check=True)
                logger.info("Demucs安装成功")
            else:
                logger.info("Demucs已安装")
            return True
        except Exception as e:
            # Best-effort by design: callers treat False as "cannot proceed".
            logger.error(f"安装Demucs失败: {e}")
            return False

    def extract_vocals(self, input_file, model="htdemucs", use_gpu=False):
        """Extract the vocal stem from an audio file with the Demucs CLI.

        The stem is re-encoded to 48 kHz / 24-bit / stereo WAV under ``vocals/``.
        An existing output file is reused without re-running Demucs.

        Args:
            input_file: Path to the source audio file.
            model: Demucs model name (default ``htdemucs``, the high-quality model).
            use_gpu: Run on GPU when True; otherwise CUDA is disabled and CPU forced.

        Returns:
            Path to the vocals WAV file, or None on failure.
        """
        if not os.path.exists(input_file):
            logger.error(f"文件不存在: {input_file}")
            return None

        # Demucs must be importable before we shell out to it.
        if not self.install_demucs():
            return None

        logger.info(f"使用Demucs提取人声: {input_file}")

        # splitext keeps interior dots intact ("a.b.wav" -> "a.b"),
        # unlike split('.')[0], which would truncate that name to "a".
        base_name = os.path.splitext(os.path.basename(input_file))[0]
        vocals_path = os.path.join(self.vocals_dir, f"{base_name}_vocals.wav")

        # Reuse a previously extracted stem.
        if os.path.exists(vocals_path):
            logger.info(f"人声文件已存在: {vocals_path}")
            return vocals_path

        try:
            # Keep a copy of the untouched input for provenance.
            raw_copy = os.path.join(self.raw_dir, os.path.basename(input_file))
            if input_file != raw_copy and not os.path.exists(raw_copy):
                shutil.copy2(input_file, raw_copy)
                logger.info(f"已复制文件到 {raw_copy}")

            # Demucs output goes to a throwaway directory; only the converted
            # stem is kept.
            with tempfile.TemporaryDirectory() as tmp_dir:
                # Common arguments. The model is selected with "-n"; passing it
                # as a positional argument would make Demucs treat it as a track
                # path. We deliberately do NOT request --mp3 output: the stem is
                # re-encoded to lossless 24-bit WAV below, so a lossy mp3
                # intermediate would only degrade quality (and the lookup below
                # expects "vocals.wav").
                cmd = [
                    "demucs", "--two-stems=vocals",
                    "-o", tmp_dir,
                    "--filename", "{track}/{stem}.{ext}",
                    "-n", model,
                ]
                env = os.environ.copy()
                if use_gpu:
                    logger.info(f"使用GPU模式处理音频: model={model}")
                else:
                    logger.info(f"使用CPU模式处理音频: model={model}")
                    # Force CPU via both the CLI and the environment; a single
                    # shift keeps processing fast. (The former "--no-cuda" flag
                    # is not a recognized Demucs option.)
                    cmd += ["--device", "cpu", "--shifts", "1"]
                    env["CUDA_VISIBLE_DEVICES"] = ""
                cmd.append(input_file)

                logger.info(f"执行命令: {' '.join(cmd)}")
                subprocess.run(cmd, check=True, env=env)

                # Demucs writes <tmp>/<model>/<track>/vocals.wav; if the model
                # directory is named differently, fall back to whatever single
                # directory was actually created.
                model_dir = os.path.join(tmp_dir, model)
                if not os.path.isdir(model_dir):
                    dirs = os.listdir(tmp_dir)
                    if dirs:
                        model_dir = os.path.join(tmp_dir, dirs[0])

                vocals_tmp = os.path.join(model_dir, base_name, "vocals.wav")
                if not os.path.exists(vocals_tmp):
                    # The track directory may be named differently from base_name.
                    track_dirs = os.listdir(model_dir)
                    if track_dirs:
                        vocals_tmp = os.path.join(model_dir, track_dirs[0], "vocals.wav")

                if os.path.exists(vocals_tmp):
                    logger.info(f"将人声转换为48kHz, 24位, 立体声...")
                    y, sr = librosa.load(vocals_tmp, sr=48000, mono=False)
                    # Duplicate a mono channel so the output is always stereo.
                    if y.ndim == 1:
                        y = np.array([y, y])
                    # soundfile expects (frames, channels), hence the transpose.
                    sf.write(vocals_path, y.T, sr, subtype='PCM_24')
                    logger.info(f"人声提取成功: {vocals_path}")
                    return vocals_path
                logger.error(f"未找到生成的人声文件")

        except Exception as e:
            logger.error(f"提取人声时出错: {e}")
            import traceback
            logger.error(traceback.format_exc())

        return None

    def segment_audio(self, input_file, output_prefix=None, min_segment_length=1.0, max_segment_length=15.0):
        """Split an audio file into sentence-like segments using an energy gate.

        Args:
            input_file: Path to the audio file (typically an extracted vocal stem).
            output_prefix: Prefix for segment filenames; defaults to the input's stem.
            min_segment_length: Segments shorter than this (seconds) are dropped.
            max_segment_length: Segments longer than this (seconds) are split evenly.

        Returns:
            List of written segment file paths, or None on failure.
        """
        if not os.path.exists(input_file):
            logger.error(f"文件不存在: {input_file}")
            return None

        logger.info(f"分割音频文件: {input_file}")

        # Derive the filename prefix for the segments.
        if output_prefix:
            prefix = output_prefix
        else:
            # splitext preserves dots inside the name (see extract_vocals).
            prefix = os.path.splitext(os.path.basename(input_file))[0]

        logger.info("加载音频文件...")
        try:
            y, sr = librosa.load(input_file, sr=48000, mono=False)
            # Normalize to stereo so downstream slicing is always 2-D.
            if len(y.shape) == 1:
                y = np.array([y, y])
            logger.info(f"成功加载音频: 采样率={sr}Hz, 声道数={'立体声' if len(y.shape) > 1 else '单声道'}")
        except Exception as e:
            logger.error(f"加载音频文件失败: {e}")
            return None

        logger.info("使用能量阈值分割音频...")
        try:
            # Analyze a mono mixdown; the cut points apply to both channels.
            mono_y = np.mean(y, axis=0) if len(y.shape) > 1 else y

            # Short-time RMS energy over 25 ms frames with a 10 ms hop.
            frame_length = int(sr * 0.025)
            hop_length = int(sr * 0.010)
            energy = librosa.feature.rms(y=mono_y, frame_length=frame_length, hop_length=hop_length)[0]

            # Adaptive gate: mean energy plus a fraction of the spread.
            energy_threshold = np.mean(energy) + 0.25 * np.std(energy)
            logger.info(f"能量阈值: {energy_threshold}")

            # Boolean speech/non-speech decision per frame.
            speech_frames = energy > energy_threshold

            # Map frame indices back to seconds.
            frame_times = librosa.frames_to_time(np.arange(len(speech_frames)), sr=sr, hop_length=hop_length)

            # Walk the gate output and collect (start, end) speech runs,
            # padding each side by 300 ms so onsets/offsets are not clipped.
            segments = []
            in_speech = False
            segment_start = 0
            padding = 0.3

            for i, is_speech in enumerate(speech_frames):
                if is_speech and not in_speech:
                    # Rising edge: speech starts.
                    in_speech = True
                    segment_start = max(0, frame_times[i] - padding)
                elif not is_speech and in_speech:
                    # Falling edge: speech ends.
                    in_speech = False
                    segment_end = min(frame_times[i] + padding, len(mono_y) / sr)
                    segments.append((segment_start, segment_end))

            # Close a segment still open at end-of-file.
            if in_speech:
                segment_end = min(len(mono_y) / sr, frame_times[-1] + padding)
                segments.append((segment_start, segment_end))

            # Enforce the min/max duration constraints.
            refined_segments = self._refine_segments(segments, min_length=min_segment_length, max_length=max_segment_length)
            logger.info(f"检测到{len(refined_segments)}个语音分段")

            # Write each segment as a 24-bit WAV, numbered from 001.
            segment_files = []
            for i, (start, end) in enumerate(refined_segments, 1):
                start_sample = int(start * sr)
                end_sample = int(end * sr)
                segment_y = y[:, start_sample:end_sample]

                output_file = os.path.join(self.segments_dir, f"{prefix}_segment_{i:03d}.wav")
                sf.write(output_file, segment_y.T, sr, subtype='PCM_24')
                segment_files.append(output_file)
                logger.info(f"保存分段到 {output_file}")

            logger.info(f"音频已分段为{len(segment_files)}个片段")
            return segment_files

        except Exception as e:
            logger.error(f"处理音频时出错: {e}")
            import traceback
            logger.error(traceback.format_exc())
            return None

    def process_file(self, input_file, output_prefix=None, min_segment_length=1.0, max_segment_length=15.0, use_gpu=False, model="htdemucs"):
        """Full pipeline: extract vocals from ``input_file``, then segment them.

        Args:
            input_file: Path to the source audio file.
            output_prefix: Prefix for segment filenames; defaults to the input's stem.
            min_segment_length: Minimum segment duration (seconds).
            max_segment_length: Maximum segment duration (seconds).
            use_gpu: Forwarded to :meth:`extract_vocals`.
            model: Demucs model name, forwarded to :meth:`extract_vocals`
                (new keyword; previously the model choice could not be forwarded).

        Returns:
            List of segment file paths, or None on failure.
        """
        if not os.path.exists(input_file):
            logger.error(f"文件不存在: {input_file}")
            return None

        logger.info(f"处理文件: {input_file}")

        if output_prefix is None:
            output_prefix = os.path.splitext(os.path.basename(input_file))[0]

        # Stage 1: isolate the vocal stem.
        vocals_file = self.extract_vocals(input_file, model=model, use_gpu=use_gpu)
        if not vocals_file:
            logger.error("人声提取失败，无法继续处理")
            return None

        # Stage 2: cut the stem into sentence segments.
        return self.segment_audio(vocals_file, output_prefix, min_segment_length, max_segment_length)

    def _refine_segments(self, segments, min_length=1.0, max_length=15.0):
        """Apply duration constraints to raw (start, end) segments.

        Segments shorter than ``min_length`` are dropped; segments longer than
        ``max_length`` are split into equal parts no longer than ``max_length``.

        Returns:
            List of (start, end) tuples in seconds.
        """
        refined = []
        for start, end in segments:
            duration = end - start

            if duration < min_length:
                # Too short to be a useful clip.
                continue

            if duration <= max_length:
                refined.append((start, end))
            else:
                # Split evenly into the fewest parts that each fit max_length.
                n_parts = int(np.ceil(duration / max_length))
                part_duration = duration / n_parts

                for i in range(n_parts):
                    part_start = start + i * part_duration
                    part_end = start + (i + 1) * part_duration
                    # Guard against float drift past the true end.
                    if part_end > end:
                        part_end = end
                    refined.append((part_start, part_end))

        return refined


def main():
    """CLI entry point: parse arguments, then extract vocals and (optionally) segment."""
    parser = argparse.ArgumentParser(description="使用Demucs提取人声并分割音频")
    parser.add_argument("--input", required=True, help="输入音频文件路径")
    parser.add_argument("--output_dir", default="demucs_processed", help="输出目录")
    parser.add_argument("--extract_only", action="store_true", help="只提取人声，不分割")
    parser.add_argument("--min_length", type=float, default=1.0, help="最小分段长度（秒）")
    parser.add_argument("--max_length", type=float, default=15.0, help="最大分段长度（秒）")
    parser.add_argument("--model", default="htdemucs", help="Demucs模型名称")
    parser.add_argument("--use_gpu", action="store_true", help="使用GPU进行处理")
    opts = parser.parse_args()

    # When running on CPU, hide all CUDA devices so the GPU is never touched.
    if opts.use_gpu:
        logger.info("使用GPU模式")
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        logger.info("使用CPU模式")

    worker = DemucsProcessor(opts.output_dir)

    if opts.extract_only:
        # Extraction-only mode: stop after the vocal stem is produced.
        vocals = worker.extract_vocals(opts.input, model=opts.model, use_gpu=opts.use_gpu)
        if vocals:
            logger.info(f"人声提取成功: {vocals}")
        else:
            logger.error("人声提取失败")
        return

    # Full pipeline: vocals plus sentence segmentation.
    pieces = worker.process_file(
        opts.input,
        min_segment_length=opts.min_length,
        max_segment_length=opts.max_length,
        use_gpu=opts.use_gpu,
    )
    if pieces:
        logger.info(f"处理完成，已生成{len(pieces)}个分段")
    else:
        logger.error("处理失败")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()