#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
带角色分离的音频转录解决方案
基于silero-vad预分段 + CAM++说话人分离 + FunASR精确识别
"""

import os
import sys
import argparse
import json
import re
import librosa
import numpy as np
import torch
import torchaudio
import time
from pathlib import Path
from collections import defaultdict
from funasr import AutoModel
import threading
import concurrent.futures
from queue import Queue

# Fix module import path issues when run as a script
if __name__ == "__main__":
    # Add the project root directory to the Python path so that
    # `scripts.utils` below resolves without installation
    project_root = Path(__file__).parent.parent
    if str(project_root) not in sys.path:
        sys.path.insert(0, str(project_root))
from scripts.utils import get_cache_dir

# silero-vad导入
from silero_vad import load_silero_vad, read_audio, get_speech_timestamps

# ModelScope pipeline for speaker diarization
try:
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks
    MODELSCOPE_AVAILABLE = True
except ImportError as e:
    print("ModelScope未安装或缺少依赖，角色分离功能将不可用")
    print(f"错误信息: {e}")
    MODELSCOPE_AVAILABLE = False

# PyAnnote for speaker diarization
try:
    from pyannote.audio import Pipeline
    PYANNOTE_AVAILABLE = True
    print("[INFO] PyAnnote已安装，将使用更高准确度的说话人分离模型")
except ImportError as e:
    print("PyAnnote未安装，将回退到CAM++模型")
    print(f"错误信息: {e}")
    PYANNOTE_AVAILABLE = False

def to_srt_time(sec):
    """Convert a duration in seconds to an SRT timestamp string (HH:MM:SS,mmm)."""
    whole = int(sec)
    hours, rem = divmod(whole, 3600)
    minutes, seconds = divmod(rem, 60)
    millis = int((sec % 1) * 1000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{millis:03d}"

def preprocess_audio_enhanced(audio_path):
    """
    Enhanced audio preprocessing.

    Loads the input (extracting the audio track when given a video container),
    downmixes to mono, resamples to 16 kHz and peak-normalizes. Falls back to
    a plain librosa load on any failure.

    Args:
        audio_path: path to an audio or video file.

    Returns:
        tuple: (numpy waveform, sample rate)
    """
    print("开始增强音频预处理...")

    try:
        video_suffixes = ('.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm')

        if audio_path.lower().endswith(video_suffixes):
            print("[INFO] 检测到视频文件，正在提取音频...")
            # librosa can decode the audio track of a video container directly
            data, sr = librosa.load(audio_path, sr=16000, mono=True)
            waveform = torch.from_numpy(data).float().unsqueeze(0)
        else:
            # Pure audio file: load with torchaudio
            waveform, sr = torchaudio.load(audio_path)

            # Downmix multi-channel audio to mono
            if waveform.shape[0] > 1:
                waveform = torch.mean(waveform, dim=0, keepdim=True)

            # Resample to 16 kHz (required by silero-vad)
            if sr != 16000:
                waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
                sr = 16000

        # Peak-normalize the volume
        peak = torch.max(torch.abs(waveform))
        if peak > 0:
            waveform = waveform / peak

        print(f"[OK] 增强音频预处理完成 (采样率: {sr}Hz, 时长: {waveform.shape[1]/sr:.2f}s)")
        return waveform.squeeze().numpy(), sr

    except Exception as e:
        print(f"[WARN] 增强预处理失败，使用基本处理: {e}")
        fallback, sr = librosa.load(audio_path, sr=16000)
        return fallback, sr

def speaker_diarization_segmentation_pyannote(audio_path, min_speakers=1, max_speakers=8):
    """
    Speaker diarization with PyAnnote (higher accuracy).

    Args:
        audio_path: audio file path
        min_speakers: minimum number of speakers
        max_speakers: maximum number of speakers

    Returns:
        list: [(start_time, end_time, speaker_id), ...] speaker-attributed
              speech segments; falls back to CAM++ (or returns []) on failure.
    """
    if not PYANNOTE_AVAILABLE:
        print("[ERROR] PyAnnote不可用，无法进行说话人分离")
        return []

    print("[INFO] 使用PyAnnote进行说话人分离（高精度模式）...")

    try:
        print("[INFO] 尝试初始化PyAnnote pipeline...")

        # Plan 1: pretrained 3.1 pipeline (may require HuggingFace authentication).
        # Local name deliberately avoids shadowing the module-level modelscope
        # `pipeline` import.
        try:
            diar_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
            print("[INFO] 成功加载PyAnnote预训练模型")
        except Exception as e1:
            print(f"[WARN] 无法加载预训练模型: {e1}")

            # Plan 2: older alternative pipeline name
            try:
                diar_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization")
                print("[INFO] 成功加载PyAnnote替代模型")
            except Exception as e2:
                print(f"[WARN] 无法加载替代模型: {e2}")
                raise Exception("无法加载任何PyAnnote模型，请检查HuggingFace认证或网络连接")

        # Run on GPU when available
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        diar_pipeline.to(device)

        # BUG FIX: the previous code passed num_speakers=max_speakers, which
        # forced the pipeline to output exactly max_speakers (8 by default)
        # speakers.  Honor the documented min/max range instead.
        diarization = diar_pipeline(
            audio_path, min_speakers=min_speakers, max_speakers=max_speakers
        )

        # Convert the annotation into the unified (start, end, speaker) format
        segments = []
        speaker_ids = set()

        for turn, _, speaker in diarization.itertracks(yield_label=True):
            speaker_id = str(speaker)  # PyAnnote uses string speaker labels
            segments.append((turn.start, turn.end, speaker_id))
            speaker_ids.add(speaker_id)

        print(f"[OK] PyAnnote说话人分离完成，识别到 {len(speaker_ids)} 个说话人")
        for speaker_id in sorted(speaker_ids):
            print(f"  说话人 {speaker_id}: 已识别")

        return segments

    except Exception as e:
        print(f"[ERROR] PyAnnote说话人分离失败: {e}")
        print("[INFO] 将回退到CAM++模型")
        return speaker_diarization_segmentation_campp(audio_path, min_speakers, max_speakers)

def speaker_diarization_segmentation_campp(audio_path, min_speakers=1, max_speakers=8):
    """
    Speaker diarization with CAM++ (fallback option).

    Args:
        audio_path: audio file path
        min_speakers: minimum number of speakers
        max_speakers: maximum number of speakers

    Returns:
        list: [(start_time, end_time, speaker_id), ...] speaker-attributed
              speech segments, or [] on failure.
    """
    if not MODELSCOPE_AVAILABLE:
        print("[ERROR] ModelScope不可用，无法进行说话人分离")
        return []

    print("[INFO] 使用CAM++进行说话人分离（备选方案）...")

    try:
        # Build the diarization pipeline
        diar = pipeline(
            task=Tasks.speaker_diarization,
            model='damo/speech_campplus_speaker-diarization_common'
        )

        # Run diarization
        raw_result = diar({
            'audio_in': audio_path,
            'min_speakers': min_speakers,
            'max_speakers': max_speakers
        })

        print(f"[OK] CAM++说话人分离完成，识别到 {len(set(seg['spk_id'] for seg in raw_result))} 个说话人")

        # Convert into the unified (start, end, speaker) format
        parsed = []
        for item in raw_result:
            seg_start = item['start_time']
            seg_end = item['end_time']
            spk = item['spk_id']
            parsed.append((seg_start, seg_end, spk))
            print(f"  说话人 {spk}: [{seg_start:.1f}s - {seg_end:.1f}s]")

        return parsed

    except Exception as e:
        print(f"[ERROR] CAM++说话人分离失败: {e}")
        return []

def speaker_diarization_segmentation(audio_path, min_speakers=1, max_speakers=8, model_type="pyannote"):
    """
    Generic speaker-diarization dispatcher (PyAnnote or CAM++).

    Args:
        audio_path: audio file path
        min_speakers: minimum number of speakers
        max_speakers: maximum number of speakers
        model_type: backend selector ("pyannote" or "cam++")

    Returns:
        list: [(start_time, end_time, speaker_id), ...], or [] if no backend
              is available.
    """
    # Honor an explicit, available backend choice first
    if model_type == "pyannote" and PYANNOTE_AVAILABLE:
        return speaker_diarization_segmentation_pyannote(audio_path, min_speakers, max_speakers)
    if model_type == "cam++" and MODELSCOPE_AVAILABLE:
        return speaker_diarization_segmentation_campp(audio_path, min_speakers, max_speakers)

    # Automatic fallback: prefer PyAnnote, then CAM++
    if PYANNOTE_AVAILABLE:
        print("[INFO] 首选PyAnnote模型可用，使用PyAnnote")
        return speaker_diarization_segmentation_pyannote(audio_path, min_speakers, max_speakers)
    if MODELSCOPE_AVAILABLE:
        print("[INFO] 回退到CAM++模型")
        return speaker_diarization_segmentation_campp(audio_path, min_speakers, max_speakers)

    print("[ERROR] 没有可用的说话人分离模型")
    return []

def advanced_vad_segmentation_with_speakers(audio, sr, speaker_segments, min_speech_duration=0.3, max_speech_duration=20.0, min_silence_duration=300):
    """
    Advanced VAD segmentation guided by speaker-diarization results.

    Re-runs silero-vad inside every diarized speaker turn, maps the resulting
    chunks back to absolute timestamps, then post-processes: same-speaker
    chunks separated by < 2 s are merged (when the merged chunk stays within
    max_speech_duration) and over-long chunks are split into equal parts.

    Args:
        audio: audio samples (numpy array or torch tensor)
        sr: sample rate
        speaker_segments: diarization result [(start_time, end_time, speaker_id), ...]
        min_speech_duration: minimum speech-chunk duration (seconds)
        max_speech_duration: maximum speech-chunk duration (seconds)
        min_silence_duration: minimum silence gap (milliseconds)

    Returns:
        list: [(start_time, end_time, speaker_id), ...] refined speech
              segments; [] on failure.
    """
    print("[INFO] 结合说话人信息进行高级VAD分段...")

    if not speaker_segments:
        print("[WARN] 没有说话人分离结果，使用传统VAD分段")
        return advanced_vad_segmentation_traditional(audio, sr, min_speech_duration, max_speech_duration, min_silence_duration)

    # Second-pass VAD output, one entry per speech chunk within a speaker turn
    processed_segments = []

    try:
        # Load the silero-vad model
        model = load_silero_vad()
        model.eval()

        # silero-vad expects a torch tensor
        if isinstance(audio, np.ndarray):
            audio_tensor = torch.from_numpy(audio).float()
        else:
            audio_tensor = audio.float()

        for spk_start, spk_end, speaker_id in speaker_segments:
            # Slice this speaker turn out of the full waveform
            start_sample = int(spk_start * sr)
            end_sample = int(spk_end * sr)
            speaker_audio = audio_tensor[start_sample:end_sample]

            # VAD within the turn (timestamps are relative to the slice)
            speech_timestamps = get_speech_timestamps(
                speaker_audio,
                model,
                sampling_rate=sr,
                min_speech_duration_ms=int(min_speech_duration * 1000),
                min_silence_duration_ms=min_silence_duration,
                return_seconds=True
            )

            # Convert slice-relative timestamps back to absolute time
            for timestamp in speech_timestamps:
                abs_start = spk_start + timestamp['start']
                abs_end = spk_start + timestamp['end']
                processed_segments.append((abs_start, abs_end, speaker_id))

        # Post-process: merge short same-speaker gaps, split over-long chunks.
        # NOTE(review): assumes processed_segments is time-ordered, which holds
        # when the diarizer yields turns in chronological order — confirm for
        # each backend.
        final_segments = []
        i = 0

        while i < len(processed_segments):
            current_start, current_end, current_speaker = processed_segments[i]

            # Try to merge with the following segment
            if i < len(processed_segments) - 1:
                next_start, next_end, next_speaker = processed_segments[i + 1]
                gap = next_start - current_end

                # Merge when the same speaker continues within 2 seconds
                if next_speaker == current_speaker and gap < 2.0:
                    merged_duration = next_end - current_start
                    if merged_duration <= max_speech_duration:
                        final_segments.append((current_start, next_end, current_speaker))
                        i += 2  # skip the segment we just merged in
                        continue

            # Split the current segment if it exceeds the maximum duration
            duration = current_end - current_start
            if duration > max_speech_duration:
                # Split the long segment into equal parts
                num_parts = int(np.ceil(duration / max_speech_duration))
                part_duration = duration / num_parts

                for part in range(num_parts):
                    part_start = current_start + part * part_duration
                    part_end = min(part_start + part_duration, current_end)
                    final_segments.append((part_start, part_end, current_speaker))
            else:
                final_segments.append((current_start, current_end, current_speaker))

            i += 1

        print(f"[OK] 结合说话人信息的分段完成: {len(final_segments)} 个语音段")

        # Print per-speaker statistics
        if final_segments:
            speakers = set(seg[2] for seg in final_segments)
            print(f"📈 分段统计: {len(speakers)} 个说话人")
            for speaker in sorted(speakers):
                speaker_segments_count = [seg for seg in final_segments if seg[2] == speaker]
                total_duration = sum(end - start for start, end, _ in speaker_segments_count)
                print(f"  说话人 {speaker}: {len(speaker_segments_count)} 段, 总时长 {total_duration:.1f}s")

        return final_segments

    except Exception as e:
        print(f"[ERROR] 结合说话人信息的分段失败: {e}")
        return []

def advanced_vad_segmentation_traditional(audio, sr, min_speech_duration=0.3, max_speech_duration=20.0, min_silence_duration=300):
    """
    Traditional advanced VAD segmentation (used when diarization is unavailable).

    Runs silero-vad over the whole waveform, merges adjacent chunks separated
    by less than 2 s (when the merged chunk stays within max_speech_duration)
    and splits chunks longer than max_speech_duration into equal parts.  Every
    segment is attributed to the placeholder speaker "spk_0".

    Args:
        audio: audio samples (numpy array or torch tensor)
        sr: sample rate
        min_speech_duration: minimum speech-chunk duration (seconds)
        max_speech_duration: maximum speech-chunk duration (seconds)
        min_silence_duration: minimum silence gap (milliseconds)

    Returns:
        list: [(start_time, end_time, "spk_0"), ...]; [] on failure.
    """
    print("[INFO] 使用传统silero-vad进行高级语音分段...")

    try:
        # Load the silero-vad model
        model = load_silero_vad()
        model.eval()

        # silero-vad expects a torch tensor
        if isinstance(audio, np.ndarray):
            audio_tensor = torch.from_numpy(audio).float()
        else:
            audio_tensor = audio.float()

        # Get speech timestamps over the whole waveform (in seconds)
        speech_timestamps = get_speech_timestamps(
            audio_tensor,
            model,
            sampling_rate=sr,
            min_speech_duration_ms=int(min_speech_duration * 1000),
            min_silence_duration_ms=min_silence_duration,
            return_seconds=True
        )

        # Post-process: merge short gaps, split over-long chunks
        processed_segments = []
        i = 0

        while i < len(speech_timestamps):
            current_seg = speech_timestamps[i]

            # Try to merge with the following segment
            if i < len(speech_timestamps) - 1:
                next_seg = speech_timestamps[i + 1]
                gap = next_seg['start'] - current_seg['end']

                # Merge when the gap is shorter than 2 seconds
                if gap < 2.0:
                    merged_start = current_seg['start']
                    merged_end = next_seg['end']
                    merged_duration = merged_end - merged_start

                    if merged_duration <= max_speech_duration:
                        # Merge these two segments
                        processed_segments.append((merged_start, merged_end, "spk_0"))
                        i += 2  # skip the segment we just merged in
                        continue

            # Split the current segment if it exceeds the maximum duration
            duration = current_seg['end'] - current_seg['start']
            if duration > max_speech_duration:
                # Split the long segment into equal parts
                num_parts = int(np.ceil(duration / max_speech_duration))
                part_duration = duration / num_parts

                for part in range(num_parts):
                    start_time = current_seg['start'] + part * part_duration
                    end_time = min(start_time + part_duration, current_seg['end'])
                    processed_segments.append((start_time, end_time, "spk_0"))
            else:
                processed_segments.append((current_seg['start'], current_seg['end'], "spk_0"))

            i += 1

        print(f"[OK] 传统分段完成: {len(processed_segments)} 个语音段")
        return processed_segments

    except Exception as e:
        print(f"[ERROR] 传统VAD分段失败: {e}")
        return []

def extract_speaker_audio_segment(audio_path, start_time, end_time, speaker_id, sr=16000, thread_id=None):
    """
    Extract a speaker-attributed audio slice into a temporary WAV file
    (thread-safe).

    Args:
        audio_path: path of the original audio file
        start_time: slice start (seconds)
        end_time: slice end (seconds)
        speaker_id: speaker label, embedded in the temp filename
        sr: target sample rate
        thread_id: optional identifier used to make the filename unique per
            worker; defaults to the current thread id

    Returns:
        str: path of the temporary WAV file, or None on failure.
    """
    try:
        # PERF FIX: decode only the requested region via librosa's
        # offset/duration parameters.  The previous code loaded and resampled
        # the ENTIRE file for every segment, making extraction O(file length)
        # per segment.
        segment, _ = librosa.load(
            audio_path, sr=sr, offset=start_time, duration=end_time - start_time
        )

        # Build a thread-unique temp filename so concurrent workers never
        # write to the same file.
        if thread_id is None:
            thread_id = threading.get_ident()
        temp_file = f"temp_segment_{speaker_id}_{thread_id}_{start_time:.1f}_{end_time:.1f}.wav"

        import soundfile as sf
        sf.write(temp_file, segment, sr)

        return temp_file

    except Exception as e:
        print(f"[ERROR] 角色音频片段提取失败: {e}")
        return None

# Thread-local storage so each worker thread creates and reuses exactly one
# FunASR model instance (see _get_or_create_model)
thread_local_data = threading.local()

def _get_or_create_model():
    """
    Return the FunASR model instance bound to the current thread, creating it
    on first use.  Thread-local storage guarantees each thread loads the
    model exactly once.

    Returns:
        The thread's FunASR AutoModel instance.
    """
    cached = getattr(thread_local_data, 'model', None)
    if cached is None:
        print(f"funasr version: 1.2.7.")
        # Point FunASR at a thread-safe cache directory before loading
        os.environ["FUNASR_CACHE_DIR"] = get_cache_dir()

        # First use in this thread: build the model instance
        cached = AutoModel(
            model="paraformer-zh",
            vad_model="fsmn-vad",
            punc_model="ct-punc",
            disable_update=True
        )
        thread_local_data.model = cached
        print(f"[OK] 线程 {threading.get_ident()} 模型加载完成")

    return cached

def _worker_transcribe_speaker_segment(task_data):
    """
    Worker-thread function: transcribe one speaker-attributed audio segment.

    Args:
        task_data: (segment_index, start_time, end_time, speaker_id,
                    audio_path, hotword, thread_id)

    Returns:
        tuple: (segment_index, start_time, end_time, speaker_id, text,
                error_msg) — error_msg is "" on success; text is "" on
                failure.  Never raises.
    """
    segment_index, start_time, end_time, speaker_id, audio_path, hotword, thread_id = task_data

    try:
        # Get (or lazily create) this thread's model instance
        model = _get_or_create_model()

        # Extract the audio slice (thread-safe temp filename)
        temp_file = extract_speaker_audio_segment(audio_path, start_time, end_time, speaker_id, thread_id=thread_id)

        if not temp_file:
            return (segment_index, start_time, end_time, speaker_id, "", "音频片段提取失败")

        try:
            # Run recognition
            result = model.generate(
                input=temp_file,
                batch_size_s=60,
                hotword=hotword,
                return_dict=True,
                sentence_timestamps=True,
                word_timestamps=False
            )
        except Exception as e:
            return (segment_index, start_time, end_time, speaker_id, "", f"模型推理出错: {e}")
        finally:
            # ROBUSTNESS: single cleanup point — the original duplicated the
            # os.remove call in both the success and exception paths; finally
            # guarantees removal on every path.
            if os.path.exists(temp_file):
                os.remove(temp_file)

        if result and result[0] and "text" in result[0]:
            text = result[0]["text"].strip()
            if text:  # keep only non-empty text
                return (segment_index, start_time, end_time, speaker_id, text, "")
            return (segment_index, start_time, end_time, speaker_id, "", "识别文本为空")
        return (segment_index, start_time, end_time, speaker_id, "", "识别失败")

    except Exception as e:
        return (segment_index, start_time, end_time, speaker_id, "", f"线程处理出错: {e}")

def transcribe_speaker_segments_multithread(audio_path, segments, hotword="", max_workers=2):
    """
    Multithreaded speaker-aware FunASR transcription.

    Args:
        audio_path: path of the original audio
        segments: speech segments [(start_time, end_time, speaker_id), ...]
        hotword: hotword string passed to the recognizer
        max_workers: number of worker threads

    Returns:
        list: [(start_time, end_time, speaker_id, text), ...] successful
              transcriptions in original segment order.
    """
    print(f"[INFO] 开始多线程带角色ASR识别 ({max_workers} 线程)...")

    if not segments:
        print("[WARN] 没有音频段需要识别")
        return []

    total = len(segments)

    # Build one task tuple per segment; the synthetic thread tag keeps temp
    # filenames unique across workers
    tasks = [
        (idx, seg_start, seg_end, spk, audio_path, hotword, f"thread_{idx % max_workers}")
        for idx, (seg_start, seg_end, spk) in enumerate(segments)
    ]

    indexed_results = []
    done = 0
    failures = 0

    # Fan the tasks out over a thread pool and collect as they finish
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        pending = {
            pool.submit(_worker_transcribe_speaker_segment, task): task[0]
            for task in tasks
        }

        for fut in concurrent.futures.as_completed(pending):
            seg_idx, seg_start, seg_end, spk, text, err = fut.result()

            done += 1
            pct = done / total * 100

            if err:
                failures += 1
                print(f"  [WARN] 片段 {seg_idx+1}/{total} 失败: {err}")
            else:
                indexed_results.append((seg_idx, seg_start, seg_end, spk, text))
                print(f"  [OK] 片段 {seg_idx+1}/{total} 完成 [{seg_start:.1f}s-{seg_end:.1f}s] {spk}: {text[:30]}{'...' if len(text) > 30 else ''}")

            # Progress report
            print(f"    [INFO] 进度: {pct:.1f}% ({done}/{total})")

    # Restore original segment order, then drop the index column
    indexed_results.sort(key=lambda item: item[0])
    final_results = [(s, e, spk, txt) for _, s, e, spk, txt in indexed_results]

    ok = len(final_results)
    print(f"\n[OK] 多线程带角色ASR识别完成: {ok}/{total} 个片段成功 (成功率: {ok/total*100:.1f}%)")

    if failures > 0:
        print(f"[WARN] 失败片段数: {failures}")

    return final_results

def transcribe_speaker_segments(audio_path, segments, hotword="", use_multithread=True, max_workers=2):
    """
    Speaker-aware FunASR transcription (single- or multi-threaded).

    Args:
        audio_path: path of the original audio
        segments: speech segments [(start_time, end_time, speaker_id), ...]
        hotword: hotword string passed to the recognizer
        use_multithread: use the threaded path when there is more than one segment
        max_workers: worker-thread count for the threaded path (new parameter;
            the default of 2 preserves the previous behavior)

    Returns:
        list: [(start_time, end_time, speaker_id, text), ...]
    """
    if use_multithread and len(segments) > 1:
        # GENERALIZATION: max_workers is now configurable instead of hard-coded
        return transcribe_speaker_segments_multithread(audio_path, segments, hotword, max_workers=max_workers)

    # Single-threaded fallback path
    print(f"[INFO] 开始单线程带角色ASR识别...")

    # Point FunASR at a writable cache directory
    cache_dir = get_cache_dir()
    os.environ["FUNASR_CACHE_DIR"] = cache_dir

    # Load the FunASR model once for the whole batch
    model = AutoModel(
        model="paraformer-zh",
        vad_model="fsmn-vad",
        punc_model="ct-punc",
        disable_update=True
    )

    results = []

    for i, (start_time, end_time, speaker_id) in enumerate(segments):
        print(f"  处理片段 {i+1}/{len(segments)} [{start_time:.1f}s - {end_time:.1f}s] {speaker_id}")

        temp_file = None
        try:
            # Extract the audio slice
            temp_file = extract_speaker_audio_segment(audio_path, start_time, end_time, speaker_id)

            if temp_file:
                # Run recognition
                result = model.generate(
                    input=temp_file,
                    batch_size_s=60,
                    hotword=hotword,
                    return_dict=True,
                    sentence_timestamps=True,
                    word_timestamps=False
                )

                if result and result[0] and "text" in result[0]:
                    text = result[0]["text"].strip()
                    if text:  # keep only non-empty text
                        results.append((start_time, end_time, speaker_id, text))
                        print(f"    [OK] 识别成功: {text[:30]}{'...' if len(text) > 30 else ''}")
                    else:
                        print(f"    [WARN] 识别文本为空")
                else:
                    print(f"    [ERROR] 识别失败")

        except Exception as e:
            print(f"    [ERROR] 处理片段出错: {e}")
        finally:
            # BUG FIX: the original only removed the temp file on the success
            # path, leaking it whenever model.generate raised
            if temp_file and os.path.exists(temp_file):
                os.remove(temp_file)

    print(f"[OK] 单线程带角色ASR识别完成: {len(results)}/{len(segments)} 个片段成功")
    return results

def save_speaker_results(results, output_name, output_dir):
    """
    Persist speaker-attributed recognition results in five formats:
    timestamped text, SRT subtitles, per-speaker grouped text, a detailed
    JSON dump and a per-speaker statistics report.

    Args:
        results: recognition results [(start_time, end_time, speaker_id, text), ...]
        output_name: output filename prefix
        output_dir: output directory (must already exist)
    """
    print("[INFO] 保存带角色信息的识别结果...")

    # 1. Timestamped text with speaker labels
    txt_output = os.path.join(output_dir, f"{output_name}_speaker_separated.txt")
    with open(txt_output, 'w', encoding='utf-8') as f:
        for start_time, end_time, speaker_id, text in results:
            f.write(f"[{to_srt_time(start_time)}] {speaker_id}: {text}\n")

    # 2. SRT subtitles with speaker labels
    srt_output = os.path.join(output_dir, f"{output_name}_speaker_separated.srt")
    with open(srt_output, 'w', encoding='utf-8') as f:
        for idx, (start_time, end_time, speaker_id, text) in enumerate(results, 1):
            f.write(f"{idx}\n")
            f.write(f"{to_srt_time(start_time)} --> {to_srt_time(end_time)}\n")
            f.write(f"[{speaker_id}] {text}\n\n")

    # 3. Text grouped by speaker
    speaker_groups = defaultdict(list)
    for start_time, end_time, speaker_id, text in results:
        speaker_groups[speaker_id].append({
            'start_time': start_time,
            'end_time': end_time,
            'text': text
        })

    speaker_text_output = os.path.join(output_dir, f"{output_name}_by_speaker.txt")
    with open(speaker_text_output, 'w', encoding='utf-8') as f:
        for speaker_id in sorted(speaker_groups.keys()):
            f.write(f"=== 说话人 {speaker_id} ===\n")
            for segment in speaker_groups[speaker_id]:
                f.write(f"[{to_srt_time(segment['start_time'])}] {segment['text']}\n")
            f.write("\n")

    # 4. Detailed JSON dump
    json_output = os.path.join(output_dir, f"{output_name}_speaker_separated.json")
    json_data = {
        "speakers": list(speaker_groups.keys()),
        "total_segments": len(results),
        "segments": [
            {
                "start_time": start,
                "end_time": end,
                "speaker_id": speaker,
                "text": text
            }
            for start, end, speaker, text in results
        ],
        "speaker_statistics": {
            speaker_id: {
                "segment_count": len(segments),
                "total_duration": sum(seg['end_time'] - seg['start_time'] for seg in segments),
                "total_words": sum(len(seg['text']) for seg in segments)
            }
            for speaker_id, segments in speaker_groups.items()
        }
    }
    with open(json_output, 'w', encoding='utf-8') as f:
        json.dump(json_data, f, ensure_ascii=False, indent=2)

    # 5. Per-speaker statistics report
    stats_output = os.path.join(output_dir, f"{output_name}_speaker_report.txt")
    with open(stats_output, 'w', encoding='utf-8') as f:
        f.write("=== 说话人统计报告 ===\n\n")
        total_duration = sum(end - start for start, end, _, _ in results)

        for speaker_id in sorted(speaker_groups.keys()):
            segments = speaker_groups[speaker_id]
            speaker_duration = sum(seg['end_time'] - seg['start_time'] for seg in segments)
            speaker_words = sum(len(seg['text']) for seg in segments)
            # ROBUSTNESS: guard the share computation — total_duration can be 0
            # when every segment has zero length, which previously raised
            # ZeroDivisionError
            time_share = speaker_duration / total_duration * 100 if total_duration > 0 else 0.0

            f.write(f"说话人 {speaker_id}:\n")
            f.write(f"  发言段数: {len(segments)}\n")
            f.write(f"  发言时长: {speaker_duration:.1f}秒 ({speaker_duration/60:.1f}分钟)\n")
            f.write(f"  时间占比: {time_share:.1f}%\n")
            f.write(f"  总字符数: {speaker_words}\n")
            f.write(f"  平均段长: {speaker_duration/len(segments):.1f}秒\n\n")

    print(f"[OK] 带角色结果保存完成:")
    print(f"[INFO] 带角色时间戳文本: {txt_output}")
    print(f"[INFO] 带角色SRT字幕: {srt_output}")
    print(f"[INFO] 按角色分组文本: {speaker_text_output}")
    print(f"[INFO] 详细JSON: {json_output}")
    print(f"[INFO] 角色统计报告: {stats_output}")

def speaker_diarization_transcribe_audio(input_file, output_dir="output", hotword="", use_multithread=True,
                                        enable_speaker_diarization=True, min_speakers=1, max_speakers=8,
                                        min_speech_duration=0.2, max_speech_duration=30.0, min_silence_duration=200,
                                        diarization_model="pyannote"):
    """
    Main entry point for diarization-aware audio transcription.

    Pipeline: preprocess audio -> (optional) speaker diarization -> VAD
    segmentation -> speaker-aware ASR -> save results + statistics.

    Args:
        input_file: input file path
        output_dir: output directory
        hotword: hotword string passed to the recognizer
        use_multithread: use multithreaded ASR
        enable_speaker_diarization: run speaker diarization
        min_speakers: minimum number of speakers
        max_speakers: maximum number of speakers
        min_speech_duration: minimum speech-chunk duration (seconds)
        max_speech_duration: maximum speech-chunk duration (seconds)
        min_silence_duration: minimum silence gap (milliseconds)
        diarization_model: diarization backend ("pyannote" or "cam++")

    Returns:
        bool: True on success, False on failure.
    """
    print(f"开始带角色分离的音频处理: {input_file}")

    # Record the processing start time
    processing_start_time = time.time()

    # Validate the input file
    if not os.path.exists(input_file):
        print(f"[ERROR] 文件不存在: {input_file}")
        return False

    # Create the output directory
    os.makedirs(output_dir, exist_ok=True)

    # Derive the output filename prefix
    input_path = Path(input_file)
    output_name = input_path.stem

    try:
        # Step 1: audio preprocessing
        processed_audio, sr = preprocess_audio_enhanced(input_file)

        # Step 2: speaker diarization (if enabled)
        speaker_segments = []
        if enable_speaker_diarization:
            print(f"[INFO] 执行角色分离，说话人数量范围: {min_speakers}-{max_speakers}")
            print(f"[INFO] 使用分离模型: {diarization_model}")
            speaker_segments = speaker_diarization_segmentation(input_file, min_speakers, max_speakers, diarization_model)

            if not speaker_segments:
                print("[WARN] 角色分离失败，将使用传统VAD分段")
                enable_speaker_diarization = False

        # Step 3: audio segmentation
        if enable_speaker_diarization and speaker_segments:
            print("[INFO] 使用角色分离信息进行VAD分段...")
            segments = advanced_vad_segmentation_with_speakers(
                processed_audio, sr, speaker_segments,
                min_speech_duration, max_speech_duration, min_silence_duration
            )
        else:
            print("[INFO] 使用传统VAD分段...")
            segments = advanced_vad_segmentation_traditional(
                processed_audio, sr, min_speech_duration, max_speech_duration, min_silence_duration
            )
            # Traditional segmentation already yields (start, end, "spk_0")
            # triples, so no re-wrapping is needed

        if not segments:
            print("[ERROR] 音频分段失败，无法继续")
            return False

        # Step 4: speaker-aware ASR
        print(f"[INFO] 处理模式: {'多线程' if use_multithread else '单线程'}")
        results = transcribe_speaker_segments(input_file, segments, hotword, use_multithread)

        if not results:
            print("[ERROR] ASR识别失败")
            return False

        # Step 5: save speaker-attributed results
        save_speaker_results(results, output_name, output_dir)

        # Record the end time and total processing duration
        processing_end_time = time.time()
        total_processing_time = processing_end_time - processing_start_time

        # Summary statistics
        speakers = set(speaker_id for _, _, speaker_id, _ in results)
        # BUG FIX: span must run to the END of the last segment
        # (segments[-1][1]); the original used its start (segments[-1][0]),
        # understating the covered duration by the last segment's length
        total_duration = segments[-1][1] - segments[0][0] if segments else 0
        success_rate = len(results) / len(segments) * 100 if segments else 0

        print(f"\n[INFO] 处理统计:")
        print(f"  识别说话人数: {len(speakers)}")
        print(f"  总音频时长: {total_duration:.1f}秒 ({total_duration/60:.1f}分钟)")
        print(f"  分段数量: {len(segments)}")
        print(f"  识别成功率: {success_rate:.1f}%")
        print(f"  角色分离: {'启用' if enable_speaker_diarization else '禁用'}")
        print(f"\n[INFO] 时间统计:")
        print(f"  总处理耗时: {total_processing_time:.1f}秒 ({total_processing_time/60:.1f}分钟)")
        # ROBUSTNESS: also guard against a zero processing time
        if total_duration > 0 and total_processing_time > 0:
            speed_multiplier = total_duration / total_processing_time
            print(f"  处理速度: {speed_multiplier:.2f}x 实时速度")

        return True

    except Exception as e:
        print(f"[ERROR] 带角色分离的转录失败: {e}")
        return False

def main():
    """CLI entry point: parse arguments, print the run configuration and
    drive speaker_diarization_transcribe_audio; exits with status 1 on
    failure."""
    parser = argparse.ArgumentParser(description="带角色分离的音频转录工具（CAM++ + silero-vad + FunASR）")
    parser.add_argument("input_file", help="输入音频/视频文件路径")
    parser.add_argument("-o", "--output", default="output", help="输出目录 (默认: output)")
    parser.add_argument("--hotword", default="", help="热词（专业术语）")
    parser.add_argument("--single-thread", action="store_true", help="使用单线程模式（默认使用多线程）")
    parser.add_argument("--disable-speaker-diarization", action="store_true", help="禁用角色分离功能")
    parser.add_argument("--min-speakers", type=int, default=1, help="最少说话人数量（默认1）")
    parser.add_argument("--max-speakers", type=int, default=8, help="最多说话人数量（默认8）")
    parser.add_argument("--min-speech-duration", type=float, default=0.2, help="最小语音段时长（秒，默认0.2）")
    parser.add_argument("--max-speech-duration", type=float, default=30.0, help="最大语音段时长（秒，默认30.0）")
    parser.add_argument("--min-silence-duration", type=int, default=200, help="最小静音间隔（毫秒，默认200）")
    parser.add_argument("--diarization-model", type=str, default="pyannote", choices=["pyannote", "cam++"], help="说话人分离模型选择 (默认: pyannote)")

    args = parser.parse_args()

    use_multithread = not args.single_thread
    # BUG FIX: diarization was gated on MODELSCOPE_AVAILABLE only, so having
    # just PyAnnote (the preferred, default backend) installed incorrectly
    # disabled diarization.  Either backend suffices.
    enable_speaker_diarization = (not args.disable_speaker_diarization
                                  and (PYANNOTE_AVAILABLE or MODELSCOPE_AVAILABLE))

    print("=" * 60)
    print("带角色分离的音频转录工具（PyAnnote/CAM++ + silero-vad + FunASR）")
    print("=" * 60)
    print(f"多线程处理: {'启用' if use_multithread else '禁用'}")
    if use_multithread:
        print("   使用 2 个工作线程进行并行处理")
    print(f"角色分离: {'启用' if enable_speaker_diarization else '禁用'}")
    if enable_speaker_diarization:
        print(f"   分离模型: {args.diarization_model} (PyAnnote推荐，更高准确度)")
        print(f"   说话人数量: {args.min_speakers}-{args.max_speakers} 人")
    print(f"优化参数:")
    print(f"   最小语音段: {args.min_speech_duration}秒")
    print(f"   最大语音段: {args.max_speech_duration}秒")
    print(f"   最小静音间隔: {args.min_silence_duration}毫秒")
    print("=" * 60)

    # Record the overall start time
    overall_start_time = time.time()

    success = speaker_diarization_transcribe_audio(
        input_file=args.input_file,
        output_dir=args.output,
        hotword=args.hotword,
        use_multithread=use_multithread,
        enable_speaker_diarization=enable_speaker_diarization,
        min_speakers=args.min_speakers,
        max_speakers=args.max_speakers,
        min_speech_duration=args.min_speech_duration,
        max_speech_duration=args.max_speech_duration,
        min_silence_duration=args.min_silence_duration,
        diarization_model=args.diarization_model
    )

    # Record the overall end time
    overall_end_time = time.time()
    total_overall_time = overall_end_time - overall_start_time

    if success:
        print("\n[OK] 带角色分离的转录成功完成！")
        print(f"📁 结果文件保存在: {args.output}")
        print(f"\n🕐 整体处理耗时: {total_overall_time:.1f}秒 ({total_overall_time/60:.1f}分钟)")
        print("\n💡 输出文件说明:")
        print("   *_speaker_separated.txt - 带时间戳和角色的文本")
        print("   *_speaker_separated.srt - 带角色的SRT字幕")
        print("   *_by_speaker.txt - 按角色分组的文本")
        print("   *_speaker_separated.json - 完整的JSON数据")
        print("   *_speaker_report.txt - 角色统计报告")
    else:
        print(f"\n[ERROR] 带角色分离的转录失败 (总耗时: {total_overall_time:.1f}秒)")
        sys.exit(1)

# Script entry point
if __name__ == "__main__":
    main()