# Copyright (c) 2024 Xiaomi Corporation

"""
并行合并版说话人日志和语音识别系统
功能：
1. 使用多进程并行处理多个音频文件。
2. 通过设置 CUDA_VISIBLE_DEVICES 环境变量，将任务分配到不同的 GPU (GPU 0 和 GPU 1)。
3. 在每个进程中，先进行说话人日志，将音频分割为不同说话人的片段。
4. 对每个说话人片段进行语音识别。
5. 【重要】所有文件处理完毕后，由主进程统一输出结果并保存到指定文件夹。

注意：此版本在 Linux 上运行，推荐使用 'forkserver' 模式以安全利用 CUDA 加速。
"""

from pathlib import Path
import time
import numpy as np
import sherpa_onnx
import soundfile as sf
import librosa
from tqdm import tqdm
import os
import multiprocessing
import psutil
try:
    import pynvml
except ImportError:
    # 如果没有安装 pynvml，则设置一个占位符，不影响主流程运行
    pynvml = None
    print("警告: 未安装 pynvml 库。将无法获取 GPU 占用率和显存统计信息。请运行 'pip install pynvml' 安装。")


# --- 辅助函数 (与原版基本一致) ---

def resample_audio(audio: np.ndarray, sample_rate: int, target_sample_rate: int):
    """
    Resample *audio* to *target_sample_rate* when the rates differ.

    Returns:
        A ``(audio, sample_rate)`` tuple. The original array is returned
        unchanged (same object) when no resampling is required.
    """
    if sample_rate == target_sample_rate:
        return audio, sample_rate
    resampled = librosa.resample(audio, orig_sr=sample_rate, target_sr=target_sample_rate)
    return resampled, target_sample_rate


def init_speaker_diarization(num_speakers: int = -1, cluster_threshold: float = 0.7):
    """
    Build the offline speaker-diarization pipeline.

    Note: this must be called inside the worker process so the model
    configuration is created there (after GPU pinning).

    Args:
        num_speakers: known number of speakers, or -1 to auto-cluster.
        cluster_threshold: clustering threshold used when auto-clustering.

    Returns:
        A configured ``sherpa_onnx.OfflineSpeakerDiarization`` instance.

    Raises:
        RuntimeError: if the configuration fails validation (e.g. a model
            file is missing).
    """
    seg_model_path = "/opt/speakr-ASR/model/Speakr/sherpa-onnx-pyannote-segmentation-3-0/model.onnx"
    emb_model_path = (
        "/opt/speakr-ASR/model/Speakr/3dspeaker_speech_campplus_sv_zh-cn_16k-common.onnx"
    )
    # Alternative embedding model:
    # "/opt/speakr-ASR/model/Speakr/wespeaker_zh_cnceleb_resnet34_LM.onnx"

    segmentation_cfg = sherpa_onnx.OfflineSpeakerSegmentationModelConfig(
        pyannote=sherpa_onnx.OfflineSpeakerSegmentationPyannoteModelConfig(
            model=seg_model_path
        ),
    )
    embedding_cfg = sherpa_onnx.SpeakerEmbeddingExtractorConfig(model=emb_model_path)
    clustering_cfg = sherpa_onnx.FastClusteringConfig(
        num_clusters=num_speakers, threshold=cluster_threshold
    )

    config = sherpa_onnx.OfflineSpeakerDiarizationConfig(
        segmentation=segmentation_cfg,
        embedding=embedding_cfg,
        clustering=clustering_cfg,
        min_duration_on=0.3,
        min_duration_off=0.5,
    )
    if not config.validate():
        raise RuntimeError(
            "请检查说话人日志配置并确保所有必需文件都存在"
        )

    return sherpa_onnx.OfflineSpeakerDiarization(config)


def init_asr_recognizer():
    """
    Initialise the offline speech recognizer, auto-selecting the model type
    from keywords in the configured model path.

    Returns:
        A ``sherpa_onnx.OfflineRecognizer`` built for the matched model type
        (Whisper / Paraformer / Telespeech-CTC / SenseVoice), all using the
        CUDA provider.

    Raises:
        RuntimeError: if no known model keyword matches ``model_path``.
    """
    # ** 1. Model path configuration **
    # To enable a model, set the corresponding path(s) to valid files and
    # leave the others as empty strings "".
    # Paraformer is enabled by default.

    # --- Whisper (needs three files: encoder, decoder, tokens) ---
    whisper_decoder = "/opt/speakr-ASR/model/ASR/sherpa-onnx-whisper-large-v3/large-v3-decoder.int8.onnx"
    whisper_encoder = "/opt/speakr-ASR/model/ASR/sherpa-onnx-whisper-large-v3/large-v3-encoder.int8.onnx"
    whisper_tokens = "/opt/speakr-ASR/model/ASR/sherpa-onnx-whisper-large-v3/large-v3-tokens.txt"

    # --- CTC/SenseVoice/Paraformer (need a model file and a tokens file) ---
    # To switch models, change only the two paths below.
    model_path = "/opt/speakr-ASR/model/ASR/sherpa-onnx-paraformer-zh-2024-03-09/model.onnx"
    # Alternative model paths:
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-paraformer-trilingual-zh-cantonese-en/model.onnx"
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-whisper-large-v3/large-v3-decoder.int8.onnx"
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2025-09-09/model.onnx"
    #
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-telespeech-ctc-zh-2024-06-04/model.onnx"
    tokens_path = "/opt/speakr-ASR/model/ASR/sherpa-onnx-paraformer-zh-2024-03-09/tokens.txt"
    # Matching tokens paths:
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-paraformer-trilingual-zh-cantonese-en/tokens.txt"
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-whisper-large-v3/large-v3-tokens.txt"
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-sense-voice-zh-en-ja-ko-yue-2025-09-09/tokens.txt"
    #
    # "/opt/speakr-ASR/model/ASR/sherpa-onnx-telespeech-ctc-zh-2024-06-04/tokens.txt"

    # ** 2. Automatic model-selection logic **
    model_name = Path(model_path).parts[-2].lower() # use the model folder name as the keyword

    # --- Check Whisper first, since it requires three files ---
    if "whisper" in model_name and whisper_encoder and whisper_decoder and whisper_tokens:
        print(f"[ASR Init] 初始化 Whisper 模型: {whisper_encoder}")
        return sherpa_onnx.OfflineRecognizer.from_whisper(
            encoder=whisper_encoder,
            decoder=whisper_decoder,
            tokens=whisper_tokens,
            num_threads=2,
            decoding_method='greedy_search',
            debug=False,
            language='zh',
            task='transcribe',
            provider="cuda",
        )

    # --- Check Paraformer ---
    elif "paraformer" in model_name:
        print(f"[ASR Init] 初始化 Paraformer 模型: {model_path}")
        return sherpa_onnx.OfflineRecognizer.from_paraformer(
            paraformer=model_path,
            tokens=tokens_path,
            num_threads=2,
            sample_rate=16000,
            # feature_dim=80,
            decoding_method='greedy_search',
            debug=False,
            provider="cuda",
        )

    # --- Check Telespeech CTC ---
    # NOTE(review): the bare "ctc" keyword matches before the SenseVoice
    # branch below; a model folder containing both "sense-voice" and "ctc"
    # would be mis-routed here — confirm folder naming before relying on it.
    elif "telespeech" in model_name or "ctc" in model_name:
        print(f"[ASR Init] 初始化 Telespeech CTC 模型: {model_path}")
        return sherpa_onnx.OfflineRecognizer.from_telespeech_ctc(
            model=model_path,
            tokens=tokens_path,
            debug=False, # defaults to False
            provider="cuda"
        )

    # --- Check SenseVoice (non-CTC) ---
    elif "sense-voice" in model_name:
        print(f"[ASR Init] 初始化 Sense Voice (非CTC) 模型: {model_path}")
        return sherpa_onnx.OfflineRecognizer.from_sense_voice(
            model=model_path,
            tokens=tokens_path,
            language="zh",
            use_itn=True,
            debug=False,
            provider="cuda",
        )
    else:
        # Fallback error handling: nothing matched.
        raise RuntimeError(f"未知的语音识别模型配置。请检查模型路径和关键字。路径: {model_path}")


# --- 保持其他辅助函数不变 ---

def create_progress_callback(filename):
    """
    Create a diarization progress callback backed by a per-file tqdm bar.

    The bar is created lazily on the first invocation (when the total chunk
    count is first known) and closed once every chunk has been processed.

    Args:
        filename: path of the audio file; its basename labels the bar.

    Returns:
        A ``callback(num_processed_chunk, num_total_chunks) -> int`` suitable
        for ``OfflineSpeakerDiarization.process``.
    """
    pbar = None

    def _bar_position() -> int:
        # Pool workers carry a non-empty _identity tuple (1-based worker id)
        # used to stack the bars. In the main process _identity is an empty
        # tuple, so indexing [0] unguarded would raise IndexError — fall
        # back to position 0 there.
        identity = multiprocessing.current_process()._identity
        return identity[0] if identity else 0

    def callback(num_processed_chunk: int, num_total_chunks: int) -> int:
        nonlocal pbar
        if pbar is None:
            # Use the file name as the description to tell the bars of
            # different processes apart.
            pbar = tqdm(
                total=num_total_chunks,
                desc=f"[{Path(filename).name}] 说话人日志",
                unit="chunk",
                position=_bar_position(),
            )
        pbar.update(1)
        if num_processed_chunk >= num_total_chunks:
            pbar.close()
        return 0

    return callback


def extract_audio_segment(audio: np.ndarray, sample_rate: int, start_time: float, end_time: float) -> np.ndarray:
    """
    Slice out the samples between *start_time* and *end_time* (in seconds).

    Returns a view of *audio* covering ``[start_time, end_time)``; empty if
    the window lies outside the array.
    """
    lo = int(start_time * sample_rate)
    hi = int(end_time * sample_rate)
    return audio[lo:hi]


def recognize_audio_segment(recognizer, audio_segment: np.ndarray, sample_rate: int) -> str:
    """
    Decode one audio segment with the given offline recognizer.

    Args:
        recognizer: an offline recognizer exposing ``create_stream`` /
            ``decode_stream`` (e.g. ``sherpa_onnx.OfflineRecognizer``).
        audio_segment: mono float samples of the segment.
        sample_rate: sample rate of *audio_segment* in Hz.

    Returns:
        The recognised text with surrounding whitespace stripped.
    """
    decode_stream = recognizer.create_stream()
    decode_stream.accept_waveform(sample_rate, audio_segment)
    recognizer.decode_stream(decode_stream)
    text = decode_stream.result.text
    return text.strip()


def format_time(seconds):
    """Render a time value as a fixed-width 6.2f string so columns align."""
    return "{:6.2f}".format(seconds)


def print_stats(result_data: dict, output_dir: Path):
    """
    Print per-file processing statistics and save the transcript to
    *output_dir* (executed in the main process after all workers finish).

    Args:
        result_data: dict produced by ``process_single_audio``; expected keys
            are 'file_idx', 'total_files', 'wave_filename', 'all_segments',
            'audio_duration', 'file_processing_time', 'cpu_percent',
            'memory_mb', 'gpu_id', and optionally 'gpu_util_percent' /
            'gpu_used_vram_mb' (default to 'N/A' when NVML was unavailable).
        output_dir: directory the transcript file is written into (created
            if it does not exist).
    """
    # Unpack the mandatory fields.
    file_idx = result_data['file_idx']
    total_files = result_data['total_files']
    wave_filename = Path(result_data['wave_filename'])
    all_segments = result_data['all_segments']
    audio_duration = result_data['audio_duration']
    file_processing_time = result_data['file_processing_time']
    cpu_percent = result_data['cpu_percent']
    memory_mb = result_data['memory_mb']
    gpu_id = result_data['gpu_id']

    # Optional GPU statistics (absent / 'N/A' when pynvml is missing).
    gpu_util_percent = result_data.get('gpu_util_percent', 'N/A')
    gpu_used_vram_mb = result_data.get('gpu_used_vram_mb', 'N/A')

    # Real-time factor: processing time / audio duration.
    # NOTE(review): raises ZeroDivisionError for a zero-length audio file —
    # upstream only passes non-empty results, but worth confirming.
    rtf = file_processing_time / audio_duration

    print("\n" + "=" * 80)
    print(f"处理完成文件 {file_idx}/{total_files} | GPU ID: {gpu_id} | 文件: {wave_filename.name}")
    print("=" * 80)

    # Print the full transcript, one line per diarized segment.
    print(f"完整转录结果 ({len(all_segments)} 段):")
    for segment in all_segments:
        start_formatted = format_time(segment['start'])
        end_formatted = format_time(segment['end'])
        print(f"  {start_formatted}-{end_formatted}s | {segment['speaker']}: {segment['text']}")

    # Per-file resource statistics.
    print("-" * 30)
    print(f"音频总时长: {audio_duration:.2f}秒")
    print(f"处理时间: {file_processing_time:.2f}秒")
    print(f"实时率(RTF): {rtf:.3f}")
    print(f"进程CPU占用: {cpu_percent:.2f}%")
    print(f"进程内存占用: {memory_mb:.2f} MB")

    # GPU statistics; both values are set together by the worker, so a
    # numeric util implies a numeric VRAM figure.
    if gpu_util_percent != 'N/A':
        print(f"分配GPU {gpu_id} 占用率: {gpu_util_percent}%")
        print(f"分配GPU {gpu_id} VRAM占用: {gpu_used_vram_mb:.2f} MB")

    # Aggregate segment count and total speaking time per speaker.
    speaker_stats = {}
    for seg in all_segments:
        speaker = seg['speaker']
        if speaker not in speaker_stats:
            speaker_stats[speaker] = {'count': 0, 'total_duration': 0.0}
        speaker_stats[speaker]['count'] += 1
        speaker_stats[speaker]['total_duration'] += (seg['end'] - seg['start'])

    print("\n说话人统计:")
    for speaker, stats in speaker_stats.items():
        print(f"  {speaker}: {stats['count']}个片段, 总时长: {stats['total_duration']:.2f}秒")
    print("-" * 30)

    # Persist the transcript and statistics to a per-file text file.
    output_dir.mkdir(parents=True, exist_ok=True)
    output_filename = output_dir / f"transcript_{wave_filename.stem}_gpu{gpu_id}.txt"
    try:
        with open(output_filename, 'w', encoding='utf-8') as f:
            f.write(f"文件: {wave_filename}\n")
            f.write(f"GPU ID: {gpu_id}\n")
            f.write(f"音频时长: {audio_duration:.2f}秒\n")
            f.write(f"处理时间: {file_processing_time:.2f}秒\n")
            f.write(f"RTF: {rtf:.3f}\n")
            f.write(f"进程CPU占用: {cpu_percent:.2f}%\n")
            f.write(f"进程内存占用: {memory_mb:.2f} MB\n")

            # Mirror the GPU statistics into the file when available.
            if gpu_util_percent != 'N/A':
                f.write(f"分配GPU {gpu_id} 占用率: {gpu_util_percent}%\n")
                f.write(f"分配GPU {gpu_id} VRAM占用: {gpu_used_vram_mb:.2f} MB\n")

            f.write(f"检测到的说话人数量: {len(set(seg['speaker'] for seg in all_segments))}\n")
            f.write(f"成功识别的片段数量: {len(all_segments)}\n\n")
            f.write("转录结果:\n")
            for segment in all_segments:
                start_formatted = format_time(segment['start'])
                end_formatted = format_time(segment['end'])
                f.write(f"{start_formatted}-{end_formatted}s | {segment['speaker']}: {segment['text']}\n")
        print(f"结果已保存到: {output_filename}")
    except Exception as e:
        # Best-effort save: a write failure is reported but not fatal.
        print(f"保存结果文件失败: {e}")


# --- 核心并行处理逻辑 ---

def process_single_audio(wave_filename: Path, gpu_id: int, file_idx: int, total_files: int) -> dict:
    """
    Full pipeline for one audio file (runs in its own worker process):
    GPU pinning -> NVML setup -> model init -> load/resample audio ->
    speaker diarization -> per-segment ASR -> resource statistics.

    Args:
        wave_filename: path of the audio file to process.
        gpu_id: GPU index this worker is pinned to via CUDA_VISIBLE_DEVICES.
        file_idx: 1-based index of this file within the batch.
        total_files: total number of files in the batch.

    Returns:
        A picklable dict of results/statistics for ``print_stats``, or an
        empty dict on any failure.
    """

    # Pin this process to a single GPU. Must happen before any CUDA
    # initialisation (i.e. before the models are created below).
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

    print(f"[Process {os.getpid()}] 正在处理文件: {Path(wave_filename).name} (分配 GPU: {gpu_id})")

    file_start_time = time.time()
    REQUIRED_SAMPLE_RATE = 16000 # default sample rate expected by the ASR/diarization models

    # Defaults used when GPU statistics cannot be collected.
    gpu_util_percent = 'N/A'
    gpu_used_vram_mb = 'N/A'

    # 1. Initialise NVML for GPU monitoring (optional; skipped if pynvml missing).
    if pynvml:
        try:
            pynvml.nvmlInit()
            # NOTE(review): gpu_id indexes the global NVML device list, while
            # CUDA_VISIBLE_DEVICES remaps this process's CUDA device 0 to the
            # same physical GPU — confirm the indices actually line up.
            handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        except pynvml.NVMLError as e:
            print(f"[Process {os.getpid()} - WARNING] NVML 初始化或获取句柄失败: {e}")
            handle = None
        except Exception as e:
            print(f"[Process {os.getpid()} - WARNING] NVML 遇到未知错误: {e}")
            handle = None
    else:
        handle = None

    # 2. Initialise the models (must happen here, inside the worker process).
    try:
        sd = init_speaker_diarization()
        # The ASR model uses the CUDA provider.
        recognizer = init_asr_recognizer()
    except Exception as e:
        print(f"[Process {os.getpid()} - ERROR] 模型初始化失败: {e}")
        # Make sure NVML is cleaned up on this early-exit path too.
        # NOTE(review): if nvmlInit() succeeded but getting the handle failed,
        # handle is None and nvmlShutdown() is skipped here.
        if pynvml and handle:
            try:
                pynvml.nvmlShutdown()
            except pynvml.NVMLError:
                pass
        return {}

    # 3. Read and resample the audio.
    try:
        audio, original_sample_rate = sf.read(wave_filename, dtype="float32", always_2d=True)
        audio = audio[:, 0]  # keep the first channel only
        # Duration is invariant under resampling, so compute it up front.
        audio_duration = len(audio) / original_sample_rate

        # --- Force-resample to 16 kHz ---
        if original_sample_rate != REQUIRED_SAMPLE_RATE:
            print(f"[Process {os.getpid()}] 音频采样率 {original_sample_rate} Hz, 正在重采样至 {REQUIRED_SAMPLE_RATE} Hz...")
            audio, current_sample_rate = resample_audio(audio, original_sample_rate, REQUIRED_SAMPLE_RATE)
        else:
            current_sample_rate = original_sample_rate

        # Warn if the (resampled) rate still disagrees with what the
        # diarization model expects. This only happens when
        # REQUIRED_SAMPLE_RATE differs from the model's expected rate;
        # processing continues with current_sample_rate regardless.
        if current_sample_rate != sd.sample_rate:
            print(f"[Process {os.getpid()} - WARNING] 警告: 实际处理采样率 {current_sample_rate} Hz (重采样后) 与说话人日志系统期望采样率 {sd.sample_rate} Hz 不一致。请检查模型配置。")

    except Exception as e:
        print(f"[Process {os.getpid()} - ERROR] 读取或重采样音频文件失败: {e}")
        if pynvml and handle:
            try:
                pynvml.nvmlShutdown()
            except pynvml.NVMLError:
                pass
        return {}

    # 4. Speaker diarization followed by per-segment speech recognition.
    try:
        # Diarize the (possibly resampled) audio; results sorted by start time.
        diarization_result = sd.process(audio, callback=create_progress_callback(wave_filename)).sort_by_start_time()
        all_segments = []

        with tqdm(total=len(diarization_result), desc=f"[{Path(wave_filename).name}] 语音识别", unit="segment", position=multiprocessing.current_process()._identity[0]) as pbar_asr:
            for i, segment in enumerate(diarization_result):
                segment_audio = extract_audio_segment(audio, current_sample_rate, segment.start, segment.end)
                if segment_audio.size > 0:
                    transcript = recognize_audio_segment(recognizer, segment_audio, current_sample_rate)
                    all_segments.append({
                        'index': i + 1,
                        'start': segment.start,
                        'end': segment.end,
                        'speaker': f"speaker_{segment.speaker:02d}",
                        'text': transcript
                    })
                pbar_asr.update(1)

    except Exception as e:
        # On any processing error, return with an empty transcript rather
        # than crashing the worker; statistics are still collected below.
        print(f"\n[Process {os.getpid()} - ERROR] 处理音频内容时出错: {e}")
        all_segments = []

    all_segments.sort(key=lambda x: x['start'])
    file_end_time = time.time()
    file_processing_time = file_end_time - file_start_time

    # 5. Sample this process's CPU/memory usage.
    # NOTE: cpu_percent(interval=0.1) blocks for 0.1 s to measure.
    process = psutil.Process(os.getpid())
    cpu_percent = process.cpu_percent(interval=0.1)
    memory_mb = process.memory_info().rss / (1024 ** 2)

    # 6. Sample GPU utilisation and VRAM via NVML (point-in-time snapshot).
    if pynvml and handle:
        try:
            # Instantaneous utilisation/memory of the assigned GPU.
            gpu_util_info = pynvml.nvmlDeviceGetUtilizationRates(handle)
            gpu_memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)

            gpu_util_percent = gpu_util_info.gpu
            gpu_used_vram_mb = gpu_memory_info.used / (1024 ** 2)
        except pynvml.NVMLError as e:
            print(f"[Process {os.getpid()} - WARNING] NVML 读取统计信息失败: {e}")
        finally:
            # Always release NVML.
            try:
                pynvml.nvmlShutdown()
            except pynvml.NVMLError:
                pass


    # 7. Return the results (Path converted to str so the dict pickles
    # cleanly back to the main process).
    return {
        'file_idx': file_idx,
        'total_files': total_files,
        'wave_filename': str(wave_filename),
        'all_segments': all_segments,
        'audio_duration': audio_duration,
        'file_processing_time': file_processing_time,
        'cpu_percent': cpu_percent,
        'memory_mb': memory_mb,
        'gpu_id': gpu_id,
        'gpu_util_percent': gpu_util_percent,
        'gpu_used_vram_mb': gpu_used_vram_mb,
    }


def main_parallel():
    """
    Entry point: configure the batch, fan the files out to a process pool
    (one GPU per task via round-robin), then print and save all results
    from the main process.
    """
    # --- Configuration ---
    audio_dir = Path("/opt/speakr-ASR/audio/test-666/9")
    # Directory where transcript files are written.
    output_dir = Path("/opt/speakr-ASR/transcripts")
    audio_exts = {".wav", ".mp3", ".flac", ".ogg", ".m4a"}
    num_gpus = 1 # number of available GPUs (RTX 4090)

    # Number of parallel worker processes.
    # NOTE(review): an earlier comment claimed this was set to 4, but the
    # value is 1 — raise it to actually parallelise across files.
    num_processes = 1

    # Recursively collect audio files under audio_dir (all subdirectories).
    audio_files = [f for f in audio_dir.rglob("*") if f.suffix.lower() in audio_exts]

    if not audio_files:
        print(f"错误: 没有找到任何音频文件! 目录={audio_dir}")
        return

    print("\n" + "#" * 90)
    print(f"| 检测到 {len(audio_files)} 个音频文件。将使用 {num_processes} 个进程并行处理 (分配到 {num_gpus} 块 GPU)。 |")
    print("#" * 90)

    total_files = len(audio_files)

    # Build the task list: (file path, assigned GPU id, 1-based index, total).
    tasks = []
    for i, file in enumerate(audio_files):
        # Round-robin the files across the available GPUs (ids start at 0).
        gpu_id = i % num_gpus
        tasks.append((file, gpu_id, i + 1, total_files))

    overall_start_time = time.time()

    # Fan the tasks out to a pool of worker processes.
    all_results = []
    try:
        with multiprocessing.Pool(processes=num_processes) as pool:
            # starmap blocks until every task finishes and returns the
            # per-file result dicts in task order.
            all_results = pool.starmap(process_single_audio, tasks)

        overall_end_time = time.time()
        overall_processing_time = overall_end_time - overall_start_time

        # --- Overall statistics (failed files return {} and contribute 0) ---
        total_audio_duration = sum(r.get('audio_duration', 0) for r in all_results if r)
        # Guard against division by zero when every file failed.
        overall_rtf = overall_processing_time / total_audio_duration if total_audio_duration > 0 else 0

        # --- Emit all per-file results from the main process ---
        print("\n" + "*" * 90)
        print("| 所有音频处理完成。正在统一输出结果... |")
        print("*" * 90)

        # Print and save each non-empty result.
        for result in all_results:
            if result:
                print_stats(result, output_dir)

        # --- Batch summary (including overall RTF) ---
        print("\n" + "=" * 90)
        print(f"所有 {total_files} 个文件处理完成!")
        print(f"总耗时: {overall_processing_time:.2f} 秒 (使用 {num_processes} 个并行进程).")
        print(f"总音频时长: {total_audio_duration:.2f} 秒")
        print(f"总实时率 (Overall RTF): {overall_rtf:.3f}")
        print(f"转录结果已统一保存到目录: {output_dir.resolve()}") # print the absolute path
        print("=" * 90)

    except Exception as e:
        print(f"\n主进程发生错误: {e}")
        print("请检查模型路径和 GPU 驱动/CUDA 环境配置。")


if __name__ == "__main__":
    # NOTE(review): on Linux, 'forkserver' (or 'spawn') is the safer
    # multiprocessing start method when worker processes initialise CUDA;
    # the default 'fork' can conflict with an already-initialised CUDA
    # context in the parent. A guarded
    #   multiprocessing.set_start_method('forkserver')
    # call used to live here (wrapped in try/except RuntimeError for the
    # "already set" case) — re-enable it if CUDA errors appear in workers.
    main_parallel()