#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
音频预分段+ASR精确时间戳解决方案
使用silero-vad进行精确的音频边界检测
"""

import os
import sys
import argparse
import json
import re
import librosa
import numpy as np
import torch
import torchaudio
import time
from pathlib import Path
from collections import defaultdict
from funasr import AutoModel
import threading
import concurrent.futures
from queue import Queue

# Fix module import path when executed directly as a script
if __name__ == "__main__":
    # Prepend the project root so `scripts.utils` resolves regardless of CWD
    project_root = Path(__file__).parent.parent
    if str(project_root) not in sys.path:
        sys.path.insert(0, str(project_root))

from scripts.utils import get_cache_dir

# silero-vad imports
from silero_vad import load_silero_vad, read_audio, get_speech_timestamps

def to_srt_time(sec):
    """Convert a duration in seconds to the SRT timestamp format HH:MM:SS,mmm."""
    whole, frac = divmod(sec, 1)
    total_seconds = int(whole)
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    millis = int(frac * 1000)  # truncated, matching SRT millisecond precision
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{millis:03d}"

def preprocess_audio_enhanced(audio_path):
    """
    Enhanced audio preprocessing.

    Loads the input (extracting the audio track from video containers via
    librosa, or loading pure audio via torchaudio), mixes down to mono,
    resamples to 16 kHz (required by silero-vad) and peak-normalizes.
    On any failure it falls back to a plain 16 kHz librosa load.

    Returns:
        tuple: (1-D numpy waveform, sample rate)
    """
    print("🔧 开始增强音频预处理...")

    video_suffixes = ('.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv', '.webm')

    try:
        if audio_path.lower().endswith(video_suffixes):
            print("🎬 检测到视频文件，正在提取音频...")
            # librosa decodes the audio track directly from the video container
            samples, sr = librosa.load(audio_path, sr=16000, mono=True)
            waveform = torch.from_numpy(samples).float().unsqueeze(0)
        else:
            # Pure audio file: load with torchaudio
            waveform, sr = torchaudio.load(audio_path)

            # Mix down to mono if multi-channel
            if waveform.shape[0] > 1:
                waveform = torch.mean(waveform, dim=0, keepdim=True)

            # Resample to 16 kHz (silero-vad requirement)
            if sr != 16000:
                waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)
                sr = 16000

        # Peak normalization (skipped for all-zero audio to avoid div-by-zero)
        peak = torch.max(torch.abs(waveform))
        if peak > 0:
            waveform = waveform / peak

        print(f"✅ 增强音频预处理完成 (采样率: {sr}Hz, 时长: {waveform.shape[1]/sr:.2f}s)")
        return waveform.squeeze().numpy(), sr

    except Exception as e:
        # Best-effort fallback: basic librosa load, no normalization
        print(f"⚠️ 增强预处理失败，使用基本处理: {e}")
        y, sr = librosa.load(audio_path, sr=16000)
        return y, sr

def advanced_vad_segmentation(audio, sr, min_speech_duration=0.3, max_speech_duration=20.0, min_silence_duration=300):
    """
    Advanced speech segmentation with silero-vad (tuned version).

    Args:
        audio: audio samples (1-D numpy array or torch tensor)
        sr: sample rate in Hz
        min_speech_duration: minimum speech-segment length in seconds
            (default 0.3; lower it to capture shorter utterances)
        max_speech_duration: maximum speech-segment length in seconds
            (default 20.0; longer segments are split into equal parts)
        min_silence_duration: minimum silence gap in milliseconds (default 300)

    Returns:
        list: [(start_time, end_time), ...] speech segments in seconds;
        an empty list when VAD fails.
    """
    print("🎯 使用silero-vad进行高级语音分段（优化参数）...")

    try:
        # Load the silero-vad model and put it in inference mode
        model = load_silero_vad()
        model.eval()

        # get_speech_timestamps expects a torch tensor
        if isinstance(audio, np.ndarray):
            audio_tensor = torch.from_numpy(audio).float()
        else:
            audio_tensor = audio.float()

        # Raw speech timestamps in seconds, using the tuned parameters
        speech_timestamps = get_speech_timestamps(
            audio_tensor,
            model,
            sampling_rate=sr,
            min_speech_duration_ms=int(min_speech_duration * 1000),
            min_silence_duration_ms=min_silence_duration,  # parameterized silence gap
            return_seconds=True
        )

        # Post-processing: merge segments separated by short gaps and split
        # segments that exceed max_speech_duration.
        processed_segments = []
        i = 0

        while i < len(speech_timestamps):
            current_seg = speech_timestamps[i]

            # Try to merge with the following segment when the gap is short
            if i < len(speech_timestamps) - 1:
                next_seg = speech_timestamps[i + 1]
                gap = next_seg['start'] - current_seg['end']

                # Gaps under 2 s are merged (lenient, to reduce fragmentation),
                # but only if the merged segment stays within the length cap.
                if gap < 2.0:
                    merged_start = current_seg['start']
                    merged_end = next_seg['end']
                    merged_duration = merged_end - merged_start

                    if merged_duration <= max_speech_duration:
                        processed_segments.append((merged_start, merged_end))
                        i += 2  # consume both segments
                        continue

            # Split segments that are too long into equal parts
            duration = current_seg['end'] - current_seg['start']
            if duration > max_speech_duration:
                num_parts = int(np.ceil(duration / max_speech_duration))
                part_duration = duration / num_parts

                for part in range(num_parts):
                    start_time = current_seg['start'] + part * part_duration
                    end_time = min(start_time + part_duration, current_seg['end'])
                    processed_segments.append((start_time, end_time))
            else:
                processed_segments.append((current_seg['start'], current_seg['end']))

            i += 1

        print(f"✅ 优化分段完成: {len(processed_segments)} 个语音段")

        # Report basic statistics about the resulting segmentation
        if processed_segments:
            durations = [end - start for start, end in processed_segments]
            avg_duration = np.mean(durations)
            min_duration = np.min(durations)
            max_duration = np.max(durations)
            print(f"📈 分段统计: 平均时长 {avg_duration:.2f}s, 最短 {min_duration:.2f}s, 最长 {max_duration:.2f}s")

        return processed_segments

    except Exception as e:
        # Callers treat an empty list as "VAD failed"
        print(f"❌ silero-vad分段失败: {e}")
        return []

# Single-entry cache of the most recently decoded audio file so that every
# segment extraction does not re-decode the whole file (previously O(file)
# work per segment). The lock makes concurrent worker-thread access safe.
_audio_load_lock = threading.Lock()
_audio_load_cache = {}  # maps (audio_path, sr) -> 1-D numpy waveform

def _load_full_audio(audio_path, sr):
    """Decode the whole file once (thread-safe) and memoize the waveform."""
    key = (audio_path, sr)
    with _audio_load_lock:
        if key not in _audio_load_cache:
            # Keep memory bounded: retain only the latest file's waveform
            _audio_load_cache.clear()
            _audio_load_cache[key] = librosa.load(audio_path, sr=sr)[0]
        return _audio_load_cache[key]

def extract_audio_segment(audio_path, start_time, end_time, sr=16000, thread_id=None):
    """
    Extract an audio segment to a temporary WAV file (thread-safe version).

    Args:
        audio_path: original audio file path
        start_time: segment start in seconds
        end_time: segment end in seconds
        sr: sample rate
        thread_id: thread identifier used to build a unique temp filename;
            defaults to the current thread's ident

    Returns:
        str: temporary WAV file path, or None on failure.
    """
    try:
        # Performance fix: load/decode the file once and slice from the
        # cached waveform instead of reloading it for every segment.
        wav = _load_full_audio(audio_path, sr)

        # Convert times to sample indices
        start_sample = int(start_time * sr)
        end_sample = int(end_time * sr)

        # Slice out the requested segment
        segment = wav[start_sample:end_sample]

        # Thread-safe unique temp filename
        if thread_id is None:
            thread_id = threading.get_ident()
        temp_file = f"temp_segment_{thread_id}_{start_time:.1f}_{end_time:.1f}.wav"

        import soundfile as sf
        sf.write(temp_file, segment, sr)

        return temp_file

    except Exception as e:
        print(f"❌ 音频片段提取失败: {e}")
        return None

# Thread-local storage so each worker thread creates and reuses exactly one
# model instance (see _get_or_create_model)
thread_local_data = threading.local()

def _get_or_create_model():
    """
    Get or lazily create the FunASR model instance for the current thread.

    Thread-local storage guarantees each worker thread builds the model at
    most once and then reuses it across all its segments.

    Returns:
        The per-thread FunASR ``AutoModel`` instance.
    """
    if not hasattr(thread_local_data, 'model'):
        # NOTE(review): hardcoded version string — may drift from the actually
        # installed funasr package; confirm or derive it dynamically.
        print(f"funasr version: 1.2.7.")
        # Point FunASR at a thread-safe cache directory before model creation
        cache_dir = get_cache_dir()
        os.environ["FUNASR_CACHE_DIR"] = cache_dir

        # First touch in this thread: build the pipeline (ASR model plus the
        # configured VAD and punctuation models), with auto-update disabled.
        thread_local_data.model = AutoModel(
            model="paraformer-zh",
            vad_model="fsmn-vad",
            punc_model="ct-punc",
            disable_update=True
        )
        print(f"✅ 线程 {threading.get_ident()} 模型加载完成")

    return thread_local_data.model

def _worker_transcribe_segment(task_data):
    """
    Worker function: transcribe a single audio segment.

    Args:
        task_data: (segment_index, start_time, end_time, audio_path, hotword, thread_id)

    Returns:
        tuple: (segment_index, start_time, end_time, text, error_msg) —
        ``error_msg`` is "" on success, and ``text`` is "" on any failure.
    """
    segment_index, start_time, end_time, audio_path, hotword, thread_id = task_data

    try:
        # Per-thread cached model (created once per thread)
        model = _get_or_create_model()

        # Extract this segment to a temp WAV (thread-safe filename)
        temp_file = extract_audio_segment(audio_path, start_time, end_time, thread_id=thread_id)

        if not temp_file:
            return (segment_index, start_time, end_time, "", "音频片段提取失败")

        try:
            # Run recognition (larger batch size to improve throughput)
            result = model.generate(
                input=temp_file,
                batch_size_s=60,
                hotword=hotword,
                return_dict=True,
                sentence_timestamps=True,
                word_timestamps=False
            )
        except Exception as e:
            return (segment_index, start_time, end_time, "", f"模型推理出错: {e}")
        finally:
            # Always remove the temp WAV, whether inference succeeded or not
            # (previously the cleanup code was duplicated on both paths)
            if os.path.exists(temp_file):
                os.remove(temp_file)

        if result and result[0] and "text" in result[0]:
            text = result[0]["text"].strip()
            if text:  # keep only non-empty transcriptions
                return (segment_index, start_time, end_time, text, "")
            return (segment_index, start_time, end_time, "", "识别文本为空")
        return (segment_index, start_time, end_time, "", "识别失败")

    except Exception as e:
        return (segment_index, start_time, end_time, "", f"线程处理出错: {e}")

def transcribe_segments_with_funasr_multithread(audio_path, segments, hotword="", max_workers=2):
    """
    Multithreaded FunASR recognition over pre-segmented audio.

    Args:
        audio_path: original audio file path
        segments: list of speech segments [(start_time, end_time), ...]
        hotword: hotword string passed to the model
        max_workers: maximum number of worker threads

    Returns:
        list: [(start_time, end_time, text), ...] in original segment order,
        containing only successfully transcribed segments.
    """
    print(f"🚀 开始多线程ASR识别 ({max_workers} 线程)...")

    if not segments:
        print("⚠️ 没有音频段需要识别")
        return []

    total = len(segments)

    # Build the task tuples consumed by _worker_transcribe_segment
    tasks = [
        (idx, seg_start, seg_end, audio_path, hotword, f"thread_{idx % max_workers}")
        for idx, (seg_start, seg_end) in enumerate(segments)
    ]

    indexed_results = []
    done = 0
    failures = 0

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Fan out every segment to the pool
        futures = [pool.submit(_worker_transcribe_segment, task) for task in tasks]

        # Consume results as they complete, reporting progress along the way
        for future in concurrent.futures.as_completed(futures):
            seg_idx, seg_start, seg_end, text, error = future.result()

            done += 1
            progress = done / total * 100

            if error:
                failures += 1
                print(f"  ⚠️ 片段 {seg_idx+1}/{total} 失败: {error}")
            else:
                indexed_results.append((seg_idx, seg_start, seg_end, text))
                print(f"  ✓ 片段 {seg_idx+1}/{total} 完成 [{seg_start:.1f}s-{seg_end:.1f}s] {text[:30]}{'...' if len(text) > 30 else ''}")

            print(f"    📊 进度: {progress:.1f}% ({done}/{total})")

    # Restore original segment order, then drop the index column
    indexed_results.sort(key=lambda item: item[0])
    final_results = [(seg_start, seg_end, text) for _, seg_start, seg_end, text in indexed_results]

    success_count = len(final_results)
    print(f"\n✅ 多线程ASR识别完成: {success_count}/{total} 个片段成功 (成功率: {success_count/total*100:.1f}%)")

    if failures > 0:
        print(f"⚠️ 失败片段数: {failures}")

    return final_results

def transcribe_segments_with_funasr(audio_path, segments, hotword="", use_multithread=True):
    """
    Run FunASR recognition over audio segments (single- or multi-threaded).

    Args:
        audio_path: original audio file path
        segments: list of speech segments [(start_time, end_time), ...]
        hotword: hotword string passed to the model
        use_multithread: use the multithreaded path when there is more than
            one segment

    Returns:
        list: [(start_time, end_time, text), ...] successfully transcribed
        segments.
    """
    if use_multithread and len(segments) > 1:
        return transcribe_segments_with_funasr_multithread(audio_path, segments, hotword, max_workers=2)

    # Single-threaded fallback path
    print(f"🎙️ 开始单线程ASR识别...")

    # Point FunASR at the cache directory before model creation
    cache_dir = get_cache_dir()
    os.environ["FUNASR_CACHE_DIR"] = cache_dir

    # Load the FunASR pipeline (ASR + VAD + punctuation models)
    model = AutoModel(
        model="paraformer-zh",
        vad_model="fsmn-vad",
        punc_model="ct-punc",
        disable_update=True
    )

    results = []

    for i, (start_time, end_time) in enumerate(segments):
        print(f"  处理片段 {i+1}/{len(segments)} [{start_time:.1f}s - {end_time:.1f}s]")

        try:
            # Extract this segment to a temp WAV
            temp_file = extract_audio_segment(audio_path, start_time, end_time)
            if not temp_file:
                continue

            try:
                # Run recognition (larger batch size to improve throughput)
                result = model.generate(
                    input=temp_file,
                    batch_size_s=60,
                    hotword=hotword,
                    return_dict=True,
                    sentence_timestamps=True,
                    word_timestamps=False
                )
            finally:
                # Bug fix: previously the temp WAV leaked when generate()
                # raised, because cleanup only ran on the success path.
                if os.path.exists(temp_file):
                    os.remove(temp_file)

            if result and result[0] and "text" in result[0]:
                text = result[0]["text"].strip()
                if text:  # keep only non-empty transcriptions
                    results.append((start_time, end_time, text))
                    print(f"    ✓ 识别成功: {text[:30]}{'...' if len(text) > 30 else ''}")
                else:
                    print(f"    ⚠️ 识别文本为空")
            else:
                print(f"    ❌ 识别失败")

        except Exception as e:
            print(f"    ❌ 处理片段出错: {e}")

    print(f"✅ 单线程ASR识别完成: {len(results)}/{len(segments)} 个片段成功")
    return results

def enhanced_transcribe_audio(input_file, output_dir="output", hotword="", use_multithread=True,
                             min_speech_duration=0.2, max_speech_duration=30.0, min_silence_duration=200):
    """
    Enhanced audio transcription entry point (tuned version).

    Pipeline: preprocess audio -> silero-vad segmentation -> per-segment
    FunASR recognition -> JSON result file + processing statistics.

    Args:
        input_file: input audio/video file path
        output_dir: output directory (created if missing)
        hotword: hotword string passed to the ASR model
        use_multithread: transcribe segments in parallel when True
        min_speech_duration: minimum speech-segment length in seconds (default 0.2)
        max_speech_duration: maximum speech-segment length in seconds (default 30.0)
        min_silence_duration: minimum silence gap in milliseconds (default 200)

    Returns:
        bool: True on success, False on any failure.
    """
    print(f"🎵 开始增强音频处理（优化版）: {input_file}")

    # Wall-clock start for the efficiency statistics below
    processing_start_time = time.time()

    # Validate input
    if not os.path.exists(input_file):
        print(f"❌ 文件不存在: {input_file}")
        return False

    # Ensure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Output files are named after the input file's stem
    input_path = Path(input_file)
    output_name = input_path.stem

    try:
        # Step 1: audio preprocessing (mono, 16 kHz, normalized)
        processed_audio, sr = preprocess_audio_enhanced(input_file)

        # Step 2: tuned VAD segmentation
        print(f"🔧 优化参数: 最小语音段={min_speech_duration}s, 最大语音段={max_speech_duration}s, 最小静音间隔={min_silence_duration}ms")
        segments = advanced_vad_segmentation(processed_audio, sr, min_speech_duration, max_speech_duration, min_silence_duration)

        if not segments:
            print("❌ VAD分段失败，无法继续")
            return False

        # Step 3: per-segment ASR recognition
        print(f"🔧 处理模式: {'多线程' if use_multithread else '单线程'}")
        results = transcribe_segments_with_funasr(input_file, segments, hotword, use_multithread)

        if not results:
            print("❌ ASR识别失败")
            return False

        # Step 4: save the detailed JSON result (including run parameters)
        json_output = os.path.join(output_dir, f"{output_name}.json")
        json_data = {
            "file": input_file,
            "parameters": {
                "min_speech_duration": min_speech_duration,
                "max_speech_duration": max_speech_duration,
                "min_silence_duration": min_silence_duration,
                "use_multithread": use_multithread
            },
            "total_segments": len(segments),
            "successful_segments": len(results),
            "segments": [
                {
                    "start_time": start,
                    "end_time": end,
                    "text": text
                }
                for start, end, text in results
            ]
        }
        with open(json_output, 'w', encoding='utf-8') as f:
            json.dump(json_data, f, ensure_ascii=False, indent=2)

        print(f"\n📊 优化版转录完成!")
        print(f"📋 JSON结果（包含参数信息）: {json_output}")

        # Total wall-clock processing time
        processing_end_time = time.time()
        total_processing_time = processing_end_time - processing_start_time

        # Bug fix: "total audio duration" is now the real audio length,
        # not the span between the first and last speech segment — the
        # real-time-speed figures below depend on it being accurate.
        total_duration = len(processed_audio) / sr if sr else 0
        # Bug fix: average segment length is the mean of the segment
        # lengths, not the speech span divided by the segment count.
        avg_segment_duration = sum(end - start for start, end in segments) / len(segments) if segments else 0
        success_rate = len(results) / len(segments) * 100 if segments else 0

        # Efficiency figures (guard against division by zero)
        efficiency_ratio = total_processing_time / total_duration if total_duration > 0 else 0
        speed_multiplier = total_duration / total_processing_time if total_processing_time > 0 else 0

        print(f"\n📈 处理统计:")
        print(f"  总音频时长: {total_duration:.1f}秒 ({total_duration/60:.1f}分钟)")
        print(f"  分段数量: {len(segments)}")
        print(f"  识别成功率: {success_rate:.1f}%")
        print(f"  平均段长: {avg_segment_duration:.1f}秒")
        print(f"  VAD参数: min={min_speech_duration}s, max={max_speech_duration}s, silence={min_silence_duration}ms")
        print(f"\n⏱️ 时间统计:")
        print(f"  总处理耗时: {total_processing_time:.1f}秒 ({total_processing_time/60:.1f}分钟)")
        print(f"  处理效率比值: {efficiency_ratio:.2f} (处理时间/音频时长)")
        print(f"  处理速度: {speed_multiplier:.2f}x 实时速度")

        return True

    except Exception as e:
        # Print the full traceback so failures are diagnosable, then report
        import traceback
        traceback.print_exc()
        print(f"❌ 增强转录失败: {e}")
        return False

def main():
    """CLI entry point: parse command-line options and run the transcription pipeline."""
    parser = argparse.ArgumentParser(description="增强音频转录工具（优化版 - 使用silero-vad预分段）")
    parser.add_argument("input_file", help="输入音频/视频文件路径")
    parser.add_argument("-o", "--output", default="output", help="输出目录 (默认: output)")
    parser.add_argument("--hotword", default="", help="热词（专业术语）")
    parser.add_argument("--single-thread", action="store_true", help="使用单线程模式（默认使用多线程）")
    parser.add_argument("--min-speech-duration", type=float, default=0.2, help="最小语音段时长（秒，默认0.2）")
    parser.add_argument("--max-speech-duration", type=float, default=30.0, help="最大语音段时长（秒，默认30.0）")
    parser.add_argument("--min-silence-duration", type=int, default=200, help="最小静音间隔（毫秒，默认200）")
    opts = parser.parse_args()

    multithreaded = not opts.single_thread
    rule = "=" * 60

    # Banner with the effective configuration
    print(rule)
    print("🎵 增强音频转录工具（优化版 - silero-vad + FunASR）")
    print(rule)
    print(f"🚀 多线程处理: {'启用' if multithreaded else '禁用'}")
    if multithreaded:
        print("   使用 2 个工作线程进行并行处理")
    print(f"🎛️ 优化参数:")
    print(f"   最小语音段: {opts.min_speech_duration}秒")
    print(f"   最大语音段: {opts.max_speech_duration}秒")
    print(f"   最小静音间隔: {opts.min_silence_duration}毫秒")
    print(rule)

    # Time the entire run, including model loading
    started_at = time.time()

    ok = enhanced_transcribe_audio(
        input_file=opts.input_file,
        output_dir=opts.output,
        hotword=opts.hotword,
        use_multithread=multithreaded,
        min_speech_duration=opts.min_speech_duration,
        max_speech_duration=opts.max_speech_duration,
        min_silence_duration=opts.min_silence_duration
    )

    elapsed = time.time() - started_at

    if ok:
        print("\n✅ 优化版转录成功完成！")
        print(f"📁 结果文件保存在: {opts.output}")
        print(f"\n🕐 整体处理耗时: {elapsed:.1f}秒 ({elapsed/60:.1f}分钟)")
        print("\n💡 如果仍然有语音遗漏，可以尝试调整以下参数:")
        print("   --min-speech-duration 0.2  # 降低最小语音段时长")
        print("   --min-silence-duration 200  # 降低最小静音间隔")
        print("   --max-speech-duration 30.0  # 增加最大语音段时长")
    else:
        print(f"\n❌ 优化版转录失败 (总耗时: {elapsed:.1f}秒)")
        sys.exit(1)

# Standard script entry guard: run the CLI only when executed directly
if __name__ == "__main__":
    main()