#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
混合策略音频转录系统
结合多种VAD方法和识别策略，提供最佳时间戳精度
"""

import os
import sys
import argparse
import json
import re
import librosa
import numpy as np
import torch
import torchaudio
import soundfile as sf
from pathlib import Path
from collections import defaultdict
from funasr import AutoModel
from scripts.utils import get_cache_dir

# silero-vad导入
from silero_vad import load_silero_vad, get_speech_timestamps

def to_srt_time(sec):
    """Convert a non-negative duration in seconds to SRT format HH:MM:SS,mmm."""
    whole = int(sec)
    hours = whole // 3600
    minutes = (whole % 3600) // 60
    seconds = whole % 60
    millis = int((sec % 1) * 1000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d},{millis:03d}"

class HybridVAD:
    """Hybrid VAD strategy combining silero-vad and energy-based segmentation.

    Models are loaded lazily via load_models(); a model that fails to load
    stays None and the corresponding strategy is skipped or returns [].
    """

    def __init__(self):
        self.silero_model = None  # silero-vad model, or None if unavailable
        self.funasr_model = None  # FunASR AutoModel, or None if unavailable

    def load_models(self):
        """Load silero-vad and FunASR; failures are logged and tolerated."""
        print("🔄 加载混合VAD模型...")

        # Neural VAD (silero).
        try:
            self.silero_model = load_silero_vad()
            self.silero_model.eval()
            print("✅ silero-vad加载成功")
        except Exception as e:
            print(f"⚠️ silero-vad加载失败: {e}")
            self.silero_model = None

        # ASR model with its own VAD and punctuation components.
        try:
            cache_dir = get_cache_dir()
            os.environ["FUNASR_CACHE_DIR"] = cache_dir

            self.funasr_model = AutoModel(
                model="paraformer-zh",
                vad_model="fsmn-vad",
                punc_model="ct-punc",
                disable_update=True
            )
            print("✅ FunASR模型加载成功")
        except Exception as e:
            print(f"⚠️ FunASR模型加载失败: {e}")
            self.funasr_model = None

    def silero_segment(self, audio, sr, min_duration=1.0, max_duration=15.0):
        """Segment speech with silero-vad.

        Args:
            audio: mono waveform as a 1-D numpy float array.
            sr: sample rate in Hz.
            min_duration: minimum speech length in seconds to keep.
            max_duration: segments longer than this are split evenly.

        Returns:
            list of (start, end) tuples in seconds; [] on failure or when the
            model is not loaded.
        """
        if not self.silero_model:
            return []

        try:
            audio_tensor = torch.from_numpy(audio).float()

            timestamps = get_speech_timestamps(
                audio_tensor,
                self.silero_model,
                sampling_rate=sr,
                min_speech_duration_ms=int(min_duration * 1000),
                min_silence_duration_ms=300,  # short silence tolerance keeps phrases together
                return_seconds=True
            )

            segments = [(seg['start'], seg['end']) for seg in timestamps]

            # Post-process: close small gaps first, then cap segment length.
            segments = self._merge_short_gaps(segments, min_gap=0.8)
            segments = self._split_long_segments(segments, max_duration)

            return segments

        except Exception as e:
            print(f"silero-vad分段失败: {e}")
            return []

    def librosa_segment(self, audio, sr, min_duration=1.0, max_duration=15.0):
        """Segment speech by short-time RMS energy thresholding.

        Args:
            audio: mono waveform as a 1-D numpy float array.
            sr: sample rate in Hz.
            min_duration: minimum speech length in seconds to keep.
            max_duration: segments longer than this are split evenly.

        Returns:
            list of (start, end) tuples in seconds; [] on failure.
        """
        try:
            # Short-time energy over 25 ms frames with 10 ms hop.
            frame_length = int(0.025 * sr)
            hop_length = int(0.01 * sr)

            rms = librosa.feature.rms(y=audio, frame_length=frame_length, hop_length=hop_length)[0]

            # Adaptive threshold relative to the median frame energy.
            threshold = np.median(rms) * 0.1

            speech_frames = rms > threshold
            frame_times = librosa.frames_to_time(np.arange(len(speech_frames)), sr=sr, hop_length=hop_length)

            # Walk the frame mask and collect contiguous speech runs.
            segments = []
            in_speech = False
            speech_start = 0

            for i, is_speech in enumerate(speech_frames):
                if is_speech and not in_speech:
                    speech_start = frame_times[i]
                    in_speech = True
                elif not is_speech and in_speech:
                    speech_end = frame_times[i]
                    duration = speech_end - speech_start

                    if duration >= min_duration:
                        segments.append((speech_start, speech_end))

                    in_speech = False

            # Flush a run that extends to the end of the audio.
            if in_speech:
                speech_end = frame_times[-1]
                if speech_end - speech_start >= min_duration:
                    segments.append((speech_start, speech_end))

            segments = self._split_long_segments(segments, max_duration)

            return segments

        except Exception as e:
            print(f"librosa分段失败: {e}")
            return []

    def _merge_short_gaps(self, segments, min_gap=0.8):
        """Merge consecutive segments whose gap is at most min_gap seconds."""
        if not segments:
            return []

        merged = []
        current_start, current_end = segments[0]

        for start, end in segments[1:]:
            if start - current_end <= min_gap:
                # Bridge the small silence by extending the current segment.
                current_end = end
            else:
                merged.append((current_start, current_end))
                current_start, current_end = start, end

        merged.append((current_start, current_end))
        return merged

    def _split_long_segments(self, segments, max_duration):
        """Split any segment longer than max_duration into equal parts."""
        result = []

        for start, end in segments:
            duration = end - start
            if duration > max_duration:
                # Divide evenly so every part stays under max_duration.
                num_parts = int(np.ceil(duration / max_duration))
                part_duration = duration / num_parts

                for i in range(num_parts):
                    part_start = start + i * part_duration
                    part_end = min(part_start + part_duration, end)
                    result.append((part_start, part_end))
            else:
                result.append((start, end))

        return result

    def hybrid_segment(self, audio, sr, strategy="auto"):
        """Segment audio using the requested VAD strategy.

        Args:
            audio: mono waveform as a 1-D numpy float array.
            sr: sample rate in Hz.
            strategy: "silero", "librosa", "auto", or "combined".

        Returns:
            list of (start, end) tuples in seconds; [] for unknown strategies.
        """
        if strategy == "silero" and self.silero_model:
            return self.silero_segment(audio, sr)
        elif strategy == "librosa":
            return self.librosa_segment(audio, sr)
        elif strategy == "auto":
            # Run both and keep whichever yields average segment lengths
            # closest to the 5.5 s sweet spot.
            silero_segments = self.silero_segment(audio, sr) if self.silero_model else []
            librosa_segments = self.librosa_segment(audio, sr)

            if silero_segments and librosa_segments:
                silero_avg = sum(end - start for start, end in silero_segments) / len(silero_segments)
                librosa_avg = sum(end - start for start, end in librosa_segments) / len(librosa_segments)

                # Epsilon guards against ZeroDivisionError when an average
                # segment length lands exactly on the 5.5 s target.
                silero_score = 1.0 / max(abs(silero_avg - 5.5), 1e-9)
                librosa_score = 1.0 / max(abs(librosa_avg - 5.5), 1e-9)

                if silero_score > librosa_score:
                    print(f"🎯 自动选择silero-vad (平均段长: {silero_avg:.1f}s)")
                    return silero_segments
                else:
                    print(f"🎯 自动选择librosa (平均段长: {librosa_avg:.1f}s)")
                    return librosa_segments
            # Only one (or neither) method produced segments: no comparison
            # possible, return whatever is non-empty.
            return silero_segments or librosa_segments
        elif strategy == "combined":
            # Union of both methods, with overlaps collapsed.
            silero_segments = self.silero_segment(audio, sr) if self.silero_model else []
            librosa_segments = self.librosa_segment(audio, sr)

            all_segments = sorted(silero_segments + librosa_segments)
            return self._merge_overlapping_segments(all_segments)
        else:
            return []

    def _merge_overlapping_segments(self, segments):
        """Merge overlapping or touching segments in a sorted list."""
        if not segments:
            return []

        merged = []
        current_start, current_end = segments[0]

        for start, end in segments[1:]:
            if start <= current_end:  # overlapping or adjacent
                current_end = max(current_end, end)
            else:
                merged.append((current_start, current_end))
                current_start, current_end = start, end

        merged.append((current_start, current_end))
        return merged

class HybridTranscriber:
    """Hybrid transcription pipeline: hybrid VAD segmentation + FunASR ASR."""

    def __init__(self):
        self.vad = HybridVAD()    # segmentation backend
        self.funasr_model = None  # shared with self.vad after initialize()

    def initialize(self):
        """Load all models.

        Returns:
            bool: True when FunASR loaded (it is required for recognition).
        """
        print("🚀 初始化混合转录系统...")
        self.vad.load_models()

        if not self.vad.funasr_model:
            return False

        self.funasr_model = self.vad.funasr_model
        return True

    def transcribe(self, input_file, output_dir="output", hotword="", vad_strategy="auto"):
        """Run the full pipeline on one file.

        Args:
            input_file: input audio/video file path.
            output_dir: directory for the result files.
            hotword: domain hotwords forwarded to FunASR.
            vad_strategy: "silero", "librosa", "auto", or "combined".

        Returns:
            bool: True on success, False on any failure.
        """
        print(f"🎵 开始混合转录: {input_file}")

        if not os.path.exists(input_file):
            print(f"❌ 文件不存在: {input_file}")
            return False

        os.makedirs(output_dir, exist_ok=True)

        input_path = Path(input_file)
        output_name = input_path.stem

        try:
            # Step 1: load once at 16 kHz and peak-normalize.
            print("🔧 音频预处理...")
            audio, sr = librosa.load(input_file, sr=16000)
            audio = librosa.util.normalize(audio)

            # Step 2: hybrid VAD segmentation.
            print(f"🎯 使用 {vad_strategy} 策略进行VAD分段...")
            segments = self.vad.hybrid_segment(audio, sr, vad_strategy)

            if not segments:
                print("❌ VAD分段失败")
                return False

            print(f"✅ VAD分段完成: {len(segments)} 个语音段")

            # Step 3: adaptive ASR. Pass the already-loaded, normalized
            # waveform so each segment does not re-read the whole file and
            # the normalization actually reaches the recognizer.
            results = self._adaptive_transcription(input_file, segments, hotword,
                                                   audio=audio, sr=sr)

            if not results:
                print("❌ ASR识别失败")
                return False

            # Step 4: post-process the recognized segments.
            enhanced_results = self._enhance_results(results, audio, sr)

            # Step 5: persist all output formats.
            self._save_results(enhanced_results, output_name, output_dir)

            self._print_quality_report(segments, enhanced_results, audio, sr)

            return True

        except Exception as e:
            print(f"❌ 混合转录失败: {e}")
            return False

    def _adaptive_transcription(self, audio_file, segments, hotword="", audio=None, sr=16000):
        """Recognize each VAD segment, adapting batch size to segment length.

        Args:
            audio_file: source file path (read only when `audio` is None).
            segments: list of (start, end) pairs in seconds.
            hotword: hotwords forwarded to FunASR.
            audio: optional preloaded waveform shared across segments.
            sr: sample rate of `audio` / extraction rate.

        Returns:
            list of (start, end, text) tuples for successful segments.
        """
        print("🎙️ 开始自适应ASR识别...")

        results = []

        for i, (start_time, end_time) in enumerate(segments):
            duration = end_time - start_time

            # Longer clips can use a larger recognition batch window.
            if duration < 3.0:
                batch_size = 10
            elif duration < 8.0:
                batch_size = 30
            else:
                batch_size = 60

            print(f"  片段 {i+1}/{len(segments)} [{start_time:.1f}s-{end_time:.1f}s] ({duration:.1f}s)")

            try:
                temp_file = self._extract_segment(audio_file, start_time, end_time,
                                                  sr=sr, audio=audio)

                if temp_file:
                    try:
                        result = self.funasr_model.generate(
                            input=temp_file,
                            batch_size_s=batch_size,
                            hotword=hotword,
                            return_dict=True,
                            sentence_timestamps=True,
                            word_timestamps=False
                        )
                    finally:
                        # Remove the temp clip even when generate() raises.
                        if os.path.exists(temp_file):
                            os.remove(temp_file)

                    if result and result[0] and "text" in result[0]:
                        text = result[0]["text"].strip()
                        if text:
                            results.append((start_time, end_time, text))
                            print(f"    ✓ {text[:40]}{'...' if len(text) > 40 else ''}")
                        else:
                            print(f"    ⚠️ 识别文本为空")
                    else:
                        print(f"    ❌ 识别失败")

            except Exception as e:
                print(f"    ❌ 处理出错: {e}")

        print(f"✅ ASR识别完成: {len(results)}/{len(segments)} 个片段成功")
        return results

    def _extract_segment(self, audio_file, start_time, end_time, sr=16000, audio=None):
        """Write one [start_time, end_time] slice to a temporary WAV file.

        Args:
            audio_file: source file, loaded from disk only when `audio` is
                None (avoids re-reading the full file for every segment).
            audio: optional preloaded waveform at sample rate `sr`.

        Returns:
            path of the temp WAV file, or None on failure.
        """
        try:
            if audio is None:
                audio, _ = librosa.load(audio_file, sr=sr)

            # Clamp indices to the waveform so rounding cannot overrun it.
            start_sample = max(0, int(start_time * sr))
            end_sample = min(len(audio), int(end_time * sr))

            segment = audio[start_sample:end_sample]

            temp_file = f"temp_segment_{start_time:.1f}_{end_time:.1f}.wav"
            sf.write(temp_file, segment, sr)

            return temp_file

        except Exception as e:
            print(f"    音频片段提取失败: {e}")
            return None

    def _enhance_results(self, results, audio, sr):
        """Clean text and adjust boundaries for every recognized segment."""
        print("🔧 结果增强处理...")

        enhanced = []

        for start_time, end_time, text in results:
            cleaned_text = self._clean_text(text)

            adjusted_start, adjusted_end = self._adjust_boundaries(
                start_time, end_time, cleaned_text, audio, sr
            )

            enhanced.append((adjusted_start, adjusted_end, cleaned_text))

        return enhanced

    def _clean_text(self, text):
        """Collapse repeated filler words and excess whitespace."""
        # Deduplicate consecutive repeated fillers (e.g. "嗯嗯嗯" -> "嗯").
        text = re.sub(r'(嗯|啊|呃|这个|那个)\1+', r'\1', text)

        # Collapse runs of whitespace to a single space.
        text = re.sub(r'\s{2,}', ' ', text)

        return text.strip()

    def _adjust_boundaries(self, start_time, end_time, text, audio, sr):
        """Enforce a minimal segment duration.

        A placeholder for finer audio-feature-based boundary refinement;
        currently it only pads segments shorter than 0.5 s.
        """
        min_duration = 0.5
        if end_time - start_time < min_duration:
            end_time = start_time + min_duration

        return start_time, end_time

    def _save_results(self, results, output_name, output_dir):
        """Write timestamped text, SRT, plain text, and JSON outputs."""
        print("💾 保存结果...")

        # Timestamped text.
        txt_output = os.path.join(output_dir, f"{output_name}_hybrid.txt")
        with open(txt_output, 'w', encoding='utf-8') as f:
            for start, end, text in results:
                f.write(f"[{to_srt_time(start)}] {text}\n")

        # SRT subtitles.
        srt_output = os.path.join(output_dir, f"{output_name}_hybrid.srt")
        with open(srt_output, 'w', encoding='utf-8') as f:
            for idx, (start, end, text) in enumerate(results, 1):
                f.write(f"{idx}\n")
                f.write(f"{to_srt_time(start)} --> {to_srt_time(end)}\n")
                f.write(f"{text}\n\n")

        # Plain text.
        clean_output = os.path.join(output_dir, f"{output_name}_hybrid_clean.txt")
        with open(clean_output, 'w', encoding='utf-8') as f:
            for _, _, text in results:
                f.write(f"{text}\n")

        # Detailed JSON.
        json_output = os.path.join(output_dir, f"{output_name}_hybrid.json")
        json_data = {
            "method": "hybrid_transcription",
            "segments": [
                {"start": start, "end": end, "text": text}
                for start, end, text in results
            ]
        }
        with open(json_output, 'w', encoding='utf-8') as f:
            json.dump(json_data, f, ensure_ascii=False, indent=2)

        print(f"  📝 带时间戳文本: {txt_output}")
        print(f"  📹 SRT字幕: {srt_output}")
        print(f"  📄 纯文本: {clean_output}")
        print(f"  📋 详细JSON: {json_output}")

    def _print_quality_report(self, segments, results, audio, sr):
        """Print coverage and success-rate statistics for the run."""
        print("\n📊 质量报告:")

        total_duration = len(audio) / sr
        recognized_duration = sum(end - start for start, end, _ in results)
        success_rate = len(results) / len(segments) * 100 if segments else 0
        # Guard against zero-length audio.
        coverage = recognized_duration / total_duration * 100 if total_duration else 0

        print(f"  总音频时长: {total_duration:.1f}秒")
        print(f"  VAD分段数: {len(segments)}")
        print(f"  识别成功数: {len(results)}")
        print(f"  识别成功率: {success_rate:.1f}%")
        print(f"  音频覆盖率: {coverage:.1f}%")

        if results:
            avg_duration = recognized_duration / len(results)
            print(f"  平均段长: {avg_duration:.1f}秒")

def main():
    """CLI entry point: parse arguments, build the transcriber, run one file."""
    parser = argparse.ArgumentParser(description="混合策略音频转录系统")
    parser.add_argument("input_file", help="输入音频/视频文件路径")
    parser.add_argument("-o", "--output", default="output", help="输出目录 (默认: output)")
    parser.add_argument("--hotword", default="", help="热词（专业术语）")
    parser.add_argument("--vad", choices=["silero", "librosa", "auto", "combined"],
                       default="auto", help="VAD策略 (默认: auto)")
    opts = parser.parse_args()

    banner = "=" * 60
    print(banner)
    print("🎵 混合策略音频转录系统")
    print(banner)

    # Abort early if the required models are unavailable.
    transcriber = HybridTranscriber()
    if not transcriber.initialize():
        print("❌ 系统初始化失败")
        sys.exit(1)

    ok = transcriber.transcribe(
        input_file=opts.input_file,
        output_dir=opts.output,
        hotword=opts.hotword,
        vad_strategy=opts.vad
    )

    if not ok:
        print("\n❌ 混合转录失败")
        sys.exit(1)

    print("\n✅ 混合转录成功完成！")
    print(f"📁 结果文件保存在: {opts.output}")

if __name__ == "__main__":
    main()