#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
音频转文字脚本
使用 FunASR 将音频/视频文件转换为带时间戳的文本
"""

import os
import sys
import argparse
import json
import re
import librosa
import numpy as np
from pathlib import Path
from funasr import AutoModel
from scripts.utils import get_cache_dir

def to_srt_time(sec):
    """Format a time in seconds as an SRT timestamp (``HH:MM:SS,mmm``).

    The total is converted to milliseconds once and rounded (rather than
    truncating each field separately), so float drift such as ``3.9999999``
    renders as ``00:00:04,000`` instead of ``00:00:03,999``.

    Args:
        sec (float): time offset in seconds (non-negative).

    Returns:
        str: SRT-formatted timestamp.
    """
    total_ms = int(round(sec * 1000))
    s, ms = divmod(total_ms, 1000)
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"

def preprocess_audio(audio_path):
    """
    Audio preprocessing: DC-offset removal, volume normalization and a
    light high-pass denoise.

    Args:
        audio_path (str): path to the audio file

    Returns:
        tuple: (preprocessed audio samples, sample rate)
    """
    print("🔧 开始音频预处理...")

    # Load exactly once at a fixed 16 kHz rate. Doing this OUTSIDE the try
    # block means a failed enhancement step falls back to this in-memory
    # signal instead of re-reading the file from disk (the previous fallback
    # called librosa.load a second time, and would itself crash if loading
    # was what failed in the first place).
    y, sr = librosa.load(audio_path, sr=16000)

    try:
        # Remove DC offset.
        processed = y - np.mean(y)

        # Peak-normalize the volume.
        processed = librosa.util.normalize(processed)

        # Simple denoise: 4th-order Butterworth high-pass at 80 Hz to strip
        # low-frequency rumble.
        from scipy import signal
        nyquist = sr / 2
        low_cutoff = 80  # Hz
        b, a = signal.butter(4, low_cutoff / nyquist, btype='high')
        processed = signal.filtfilt(b, a, processed)

        print(f"✅ 音频预处理完成 (采样率: {sr}Hz, 时长: {len(processed)/sr:.2f}s)")
        return processed, sr

    except Exception as e:
        # Best-effort enhancement: on any failure, keep the raw audio.
        print(f"⚠️ 音频预处理失败，使用原始音频: {e}")
        return y, sr

def detect_silence_boundaries(audio, sr, min_silence_duration=0.5, threshold_factor=0.1):
    """
    Locate silent stretches in the audio, usable as sentence boundaries.

    Args:
        audio: audio samples
        sr: sample rate
        min_silence_duration: shortest silence (seconds) worth reporting
        threshold_factor: fraction of the mean RMS used as the silence cutoff

    Returns:
        list: [(start_sec, end_sec), ...] silence segments
    """
    print("🔍 检测静音边界...")

    # Frame the signal: 25 ms analysis windows advancing in 10 ms hops.
    win = int(0.025 * sr)
    hop = int(0.01 * sr)
    energy = librosa.feature.rms(y=audio, frame_length=win, hop_length=hop)[0]

    # A frame counts as silent when its RMS drops below a fraction of the
    # mean energy (dynamic threshold).
    cutoff = np.mean(energy) * threshold_factor
    quiet_flags = energy < cutoff
    frame_times = librosa.frames_to_time(np.arange(len(energy)), sr=sr, hop_length=hop)

    # Walk the frames: open a segment on the first quiet frame, close it on
    # the next loud one, and keep only segments that last long enough.
    segments = []
    open_at = None  # start time of the currently-open silence, or None
    for t, is_quiet in zip(frame_times, quiet_flags):
        if is_quiet:
            if open_at is None:
                open_at = t
        elif open_at is not None:
            if t - open_at >= min_silence_duration:
                segments.append((open_at, t))
            open_at = None

    # A silence running to the end of the audio is closed at the last frame.
    if open_at is not None and frame_times[-1] - open_at >= min_silence_duration:
        segments.append((open_at, frame_times[-1]))

    print(f"✅ 检测到 {len(segments)} 个静音段")
    return segments

def improved_timestamp_mapping(timestamps, sentences, audio_duration=None):
    """
    Map recognizer timestamps onto split sentences.

    Args:
        timestamps: timestamp pairs [[start_ms, end_ms], ...]
        sentences: list of sentence strings
        audio_duration: total audio duration in seconds (optional; used as a
            fallback when there are fewer timestamps than sentences)

    Returns:
        list: [(start_sec, end_sec, sentence), ...]
    """
    print("🎯 执行改进的时间戳映射...")

    if not timestamps or not sentences:
        return []

    # Work in seconds throughout.
    timestamps_sec = [(start / 1000.0, end / 1000.0) for start, end in timestamps]

    result = []

    if len(timestamps_sec) >= len(sentences):
        # Enough timestamps: allocate them proportionally to sentence length
        # (character count is the duration proxy).
        sentence_lengths = [len(sentence) for sentence in sentences]
        total_length = sum(sentence_lengths)

        if total_length == 0:
            # All sentences are empty strings: pair them off one-to-one.
            return [(timestamps_sec[i][0], timestamps_sec[i][1], sentences[i])
                   for i in range(min(len(sentences), len(timestamps_sec)))]

        last = len(timestamps_sec) - 1
        timestamp_idx = 0

        for i, sentence in enumerate(sentences):
            # Bug fix: it is the consuming index (timestamp_idx), not the
            # sentence index, that can run off the end of timestamps_sec —
            # the old guard checked `i` and IndexError'd on exhaustion.
            if timestamp_idx > last:
                break

            if i < len(sentences) - 1:
                # Expected share of the total duration for this sentence.
                expected_ratio = sentence_lengths[i] / total_length
                expected_duration = timestamps_sec[-1][1] * expected_ratio

                start_time = timestamps_sec[timestamp_idx][0]
                end_time = start_time + expected_duration

                # Advance to the last timestamp that starts before the
                # estimated end of this sentence.
                while (timestamp_idx < last and
                       timestamps_sec[timestamp_idx + 1][0] < end_time):
                    timestamp_idx += 1

                result.append((start_time, timestamps_sec[timestamp_idx][1], sentence))
                timestamp_idx += 1
            else:
                # Last sentence: stretch it over up to 10 further timestamps.
                start_time = timestamps_sec[timestamp_idx][0]
                end_time = timestamps_sec[min(timestamp_idx + 10, last)][1]
                result.append((start_time, end_time, sentence))

    else:
        # Too few timestamps: spread sentences over the total duration,
        # weighted by character count.
        if audio_duration:
            total_duration = audio_duration
        else:
            total_duration = timestamps_sec[-1][1] if timestamps_sec else 3600

        sentence_lengths = [len(sentence) for sentence in sentences]
        total_length = sum(sentence_lengths)

        current_time = 0
        for i, sentence in enumerate(sentences):
            if total_length > 0:
                expected_ratio = sentence_lengths[i] / total_length
                duration = total_duration * expected_ratio
            else:
                duration = total_duration / len(sentences)

            start_time = current_time
            end_time = current_time + max(duration, 1.0)  # at least 1 second

            result.append((start_time, end_time, sentence))
            current_time = end_time

    print(f"✅ 时间戳映射完成: {len(result)} 个句子")
    return result

def _split_sentences(full_text):
    """Split recognized text into sentences on CJK punctuation.

    Punctuation stays attached to the sentence it ends; short fragments are
    merged into the next one, and the trailing remainder is flushed.
    """
    # The capture group keeps the punctuation tokens in the split result, so
    # they can be re-attached to the preceding sentence text.
    parts = re.split(r'([。！？，；：])', full_text)

    sentence_list = []
    current_sentence = ""

    for segment in parts:
        segment = segment.strip()
        if not segment:
            continue

        current_sentence += segment

        # Close the sentence on any punctuation mark once it is long enough.
        # Bug fix: the original tested `segment in ['。！？，；：']` — membership
        # in a ONE-ELEMENT list — which a single punctuation character never
        # matched, so the text was never actually split.
        if segment in '。！？，；：' and len(current_sentence) > 3:
            sentence_list.append(current_sentence.strip())
            current_sentence = ""
        elif len(current_sentence) > 100:  # guard against runaway sentences
            if segment in '。！？':
                sentence_list.append(current_sentence.strip())
                current_sentence = ""

    # Flush the trailing sentence, if any.
    if current_sentence.strip():
        sentence_list.append(current_sentence.strip())

    return sentence_list

def transcribe_audio(input_file, output_dir="output", hotword=""):
    """
    Transcribe an audio/video file with FunASR and write SRT/text outputs.

    Writes ``<stem>.json`` (raw result), ``<stem>.srt``, ``<stem>.txt``
    (timestamped lines) and ``<stem>_clean.txt`` (plain text) into
    *output_dir*.

    Args:
        input_file (str): path to the input audio/video file
        output_dir (str): output directory (created if missing)
        hotword (str): hotwords (domain terms) forwarded to the recognizer

    Returns:
        bool: True on success, False on any failure.
    """
    print(f"🎵 开始处理文件: {input_file}")

    # Validate the input before doing any expensive work.
    if not os.path.exists(input_file):
        print(f"❌ 文件不存在: {input_file}")
        return False

    os.makedirs(output_dir, exist_ok=True)

    # Base name (without extension) shared by all output files.
    input_path = Path(input_file)
    output_name = input_path.stem

    try:
        print("📥 正在加载模型...")

        # Point FunASR's model cache at the project cache directory.
        cache_dir = get_cache_dir()
        os.environ["FUNASR_CACHE_DIR"] = cache_dir

        # Preprocess audio (denoise / normalize); used for duration and
        # silence statistics — the model itself reads the original file.
        processed_audio, sr = preprocess_audio(input_file)

        # Silence boundaries feed the quality report below.
        silence_segments = detect_silence_boundaries(processed_audio, sr)

        # Load the model pipeline.
        model = AutoModel(
            model="paraformer-zh",      # Chinese main ASR model
            vad_model="fsmn-vad",       # voice activity detection
            punc_model="ct-punc",       # punctuation restoration
            disable_update=True         # skip the online update check
        )

        print("✅ 模型加载完成")
        print(f"🎯 开始识别音频，热词: {hotword if hotword else '无'}")

        # Run recognition with tuned parameters.
        result = model.generate(
            input=input_file,
            batch_size_s=150,           # smaller batch window for accuracy
            hotword=hotword,            # hotwords
            return_dict=True,           # ensure dict-shaped results
            sentence_timestamps=True,   # request sentence-level timestamps
            word_timestamps=False       # disabled so sentence_info is emitted
        )

        if not result or not result[0]:
            print("❌ 识别失败或结果为空")
            return False

        print("✅ 音频识别完成")

        result_data = result[0]

        # Always keep the raw JSON result for debugging / reprocessing.
        json_output = os.path.join(output_dir, f"{output_name}.json")
        with open(json_output, 'w', encoding='utf-8') as f:
            json.dump(result_data, f, ensure_ascii=False, indent=2)
        print(f"📄 原始结果已保存: {json_output}")

        # Generate the timestamped outputs.
        if "sentence_info" in result_data:
            # Preferred path: the model returned per-sentence timing.
            print("✅ 找到 sentence_info 字段，使用标准格式处理")

            # SRT subtitle file.
            srt_output = os.path.join(output_dir, f"{output_name}.srt")
            srt_lines = []

            for idx, sent in enumerate(result_data["sentence_info"], 1):
                start = sent["start"] / 1000.0  # ms -> s
                end = sent["end"] / 1000.0
                text = sent["text"]

                srt_lines.append(f"{idx}")
                srt_lines.append(f"{to_srt_time(start)} --> {to_srt_time(end)}")
                srt_lines.append(text)
                srt_lines.append("")  # blank separator line

            with open(srt_output, 'w', encoding='utf-8') as f:
                f.write('\n'.join(srt_lines))
            print(f"📹 SRT字幕已保存: {srt_output}")

            # Timestamped text file.
            txt_output = os.path.join(output_dir, f"{output_name}.txt")
            with open(txt_output, 'w', encoding='utf-8') as f:
                for sent in result_data["sentence_info"]:
                    f.write(f"[{to_srt_time(sent['start']/1000.0)}] {sent['text']}\n")
            print(f"📝 带时间戳文本已保存: {txt_output}")

            # Plain text (no timestamps).
            clean_txt_output = os.path.join(output_dir, f"{output_name}_clean.txt")
            with open(clean_txt_output, 'w', encoding='utf-8') as f:
                for sent in result_data["sentence_info"]:
                    f.write(f"{sent['text']}\n")
            print(f"📄 纯文本已保存: {clean_txt_output}")

        elif "timestamp" in result_data and "text" in result_data:
            # Fallback path: flat timestamp array + full text (current
            # FunASR output shape); split and map ourselves.
            print("✅ 找到 timestamp 字段，使用改进的时间戳映射算法")

            full_text = result_data["text"]
            timestamps = result_data["timestamp"]

            # Split the full text into sentences on punctuation.
            sentence_list = _split_sentences(full_text)

            # Map timestamps onto the sentences.
            audio_duration = len(processed_audio) / sr
            mapped_results = improved_timestamp_mapping(timestamps, sentence_list, audio_duration)

            # Write all three outputs.
            srt_output = os.path.join(output_dir, f"{output_name}.srt")
            srt_lines = []

            txt_output = os.path.join(output_dir, f"{output_name}.txt")
            clean_txt_output = os.path.join(output_dir, f"{output_name}_clean.txt")

            with open(txt_output, 'w', encoding='utf-8') as f_txt, \
                 open(clean_txt_output, 'w', encoding='utf-8') as f_clean:

                for idx, (start_sec, end_sec, sentence) in enumerate(mapped_results, 1):
                    # SRT entry.
                    srt_lines.append(f"{idx}")
                    srt_lines.append(f"{to_srt_time(start_sec)} --> {to_srt_time(end_sec)}")
                    srt_lines.append(sentence)
                    srt_lines.append("")  # blank separator line

                    # Timestamped text line.
                    f_txt.write(f"[{to_srt_time(start_sec)}] {sentence}\n")

                    # Plain text line.
                    f_clean.write(f"{sentence}\n")

            with open(srt_output, 'w', encoding='utf-8') as f:
                f.write('\n'.join(srt_lines))

            print(f"📹 SRT字幕已保存: {srt_output}")
            print(f"📝 带时间戳文本已保存: {txt_output}")
            print(f"📄 纯文本已保存: {clean_txt_output}")

            # Quality summary for the operator.
            print("\n📊 处理质量报告:")
            print(f"  原始时间戳数量: {len(timestamps)}")
            print(f"  分割句子数量: {len(sentence_list)}")
            print(f"  映射结果数量: {len(mapped_results)}")
            print(f"  检测静音段: {len(silence_segments)}")
            if timestamps:
                total_audio_duration = timestamps[-1][1] / 1000.0
                avg_sentence_duration = total_audio_duration / len(sentence_list) if sentence_list else 0
                print(f"  平均句子时长: {avg_sentence_duration:.2f}秒")

        else:
            # No timing information at all: save the bare text.
            print("⚠️ 未找到时间戳信息，仅保存文本")
            txt_output = os.path.join(output_dir, f"{output_name}.txt")
            with open(txt_output, 'w', encoding='utf-8') as f:
                f.write(result_data.get("text", "未识别到文本"))
            print(f"📝 文本已保存: {txt_output}")

        print("🎉 处理完成！")
        return True

    except Exception as e:
        # Top-level CLI boundary: report and signal failure to the caller.
        print(f"❌ 处理出错: {e}")
        return False

def main():
    """CLI entry point: parse arguments and run the transcription."""
    parser = argparse.ArgumentParser(description="使用FunASR转录音频/视频文件")
    parser.add_argument("input_file", help="输入音频/视频文件路径")
    parser.add_argument("-o", "--output", default="output", help="输出目录 (默认: output)")
    parser.add_argument("--hotword", default="", help="热词（专业术语，用逗号分隔）")
    args = parser.parse_args()

    banner = "=" * 60
    print(banner)
    print("🎵 FunASR 音频转录工具")
    print(banner)

    ok = transcribe_audio(
        input_file=args.input_file,
        output_dir=args.output,
        hotword=args.hotword
    )

    # Guard clause: bail out with a non-zero exit status on failure.
    if not ok:
        print("\n❌ 转录失败，请检查文件路径和网络连接")
        sys.exit(1)

    print("\n✅ 转录成功完成！")
    print(f"📁 结果文件保存在: {args.output}")

if __name__ == "__main__":
    main()