
import gradio as gr
import os
import re
import subprocess
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from datetime import timedelta
from pydub import AudioSegment
import uuid
import logging
import threading
import torch
from config import (
    DEFAULT_LANGUAGE,
    BATCH_SIZE_S,
    MERGE_VAD,
    MERGE_LENGTH_S,
    VAD_MAX_SEGMENT_MS,
    VAD_END_SILENCE_MS,
    ASR_VAD_MAX_SEGMENT_MS,
    TMP_DIR,
    MODELS_DIR,
    HF_CACHE_DIR,
    MS_CACHE_DIR,
    GRADIO_SERVER_PORT,
)

# Shared constants and in-process model caches.
SUPPORTED_VIDEO_EXTS = {".mp4", ".mkv", ".mov", ".avi"}
_ASR_MODELS: dict[int, AutoModel] = {}  # keyed by built-in VAD max-segment length (ms)
_VAD_MODELS: dict[tuple[int, int], AutoModel] = {}  # keyed by (max_segment_ms, end_silence_ms)
_ASR_LOCK = threading.Lock()  # guards lazy creation of ASR models
_VAD_LOCK = threading.Lock()  # guards lazy creation of VAD models

# Prefer the first CUDA device when available; otherwise fall back to CPU.
device: str = "cuda:0" if torch.cuda.is_available() else "cpu"

logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')

def _ensure_models_cache():
    """Redirect model download caches to project-local directories.

    Creates the HuggingFace / ModelScope cache directories and publishes them
    through the usual environment variables (only when the user has not
    already set them), so model downloads do not fill the system drive.
    """
    for cache_dir in (HF_CACHE_DIR, MS_CACHE_DIR):
        os.makedirs(cache_dir, exist_ok=True)
    # setdefault: respect any value the user exported before launch.
    for hf_var in ("HUGGINGFACE_HUB_CACHE", "TRANSFORMERS_CACHE", "HF_HOME"):
        os.environ.setdefault(hf_var, HF_CACHE_DIR)
    for ms_var in ("MODELSCOPE_CACHE", "MODELSCOPE_HOME"):
        os.environ.setdefault(ms_var, MS_CACHE_DIR)
    logging.info(f"模型缓存目录设置：HF={HF_CACHE_DIR} | MS={MS_CACHE_DIR}")

# Apply the cache redirection at import time, before any model is created.
_ensure_models_cache()


def ensure_tmp_dir() -> str:
    """Ensure the temporary working directory exists; return its path."""
    tmp_dir = TMP_DIR
    os.makedirs(tmp_dir, exist_ok=True)
    return tmp_dir


def is_video_file(path: str) -> bool:
    """Return True when *path* carries one of the supported video extensions."""
    _, ext = os.path.splitext(path)
    return ext.lower() in SUPPORTED_VIDEO_EXTS


def _ffmpeg_exe() -> str:
    local = os.path.join(os.getcwd(), "ffmpeg", "bin", "ffmpeg.exe")
    return local if os.path.exists(local) else "ffmpeg"


def extract_audio(input_path: str, out_wav: str = "temp_audio.wav", sr: int = 16000) -> str:
    """Extract the audio track of a media file to mono PCM16 WAV.

    Args:
        input_path: Source media file (video or audio).
        out_wav: Output WAV path; relative paths are placed under TMP_DIR.
        sr: Target sample rate in Hz (16 kHz by default).

    Returns:
        Path of the written WAV file.

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits non-zero.
    """
    ensure_tmp_dir()
    if not os.path.isabs(out_wav):
        out_wav = os.path.join(TMP_DIR, out_wav)
    command = [
        _ffmpeg_exe(),
        # Fix: without -y ffmpeg refuses to overwrite an existing output file
        # (and with captured stdio its interactive prompt cannot be answered),
        # so re-running with the default out_wav name used to fail.
        "-y",
        "-i", input_path,
        "-vn",                    # drop the video stream
        "-acodec", "pcm_s16le",   # uncompressed 16-bit PCM
        "-ar", str(sr),
        "-ac", "1",               # downmix to mono
        out_wav,
    ]
    subprocess.run(command, check=True, capture_output=True)
    return out_wav


def ensure_audio(input_path: str) -> tuple[str, str | None]:
    """Return a recognizable audio path plus a temp file to delete later (or None)."""
    if not is_video_file(input_path):
        # Already an audio file: use it directly, nothing to clean up.
        return input_path, None
    ensure_tmp_dir()
    temp_audio = os.path.join(TMP_DIR, f"temp_audio_{uuid.uuid4().hex}.wav")
    return extract_audio(input_path, temp_audio), temp_audio


def get_asr_model(asr_vad_max_segment_ms: int | None = None) -> AutoModel:
    """Return a cached ASR model, keyed by the built-in VAD max-segment length (ms)."""
    max_ms = int(asr_vad_max_segment_ms or ASR_VAD_MAX_SEGMENT_MS)
    model = _ASR_MODELS.get(max_ms)
    if model is not None:
        return model
    with _ASR_LOCK:
        # Re-check under the lock: another thread may have built it meanwhile.
        model = _ASR_MODELS.get(max_ms)
        if model is None:
            model = AutoModel(
                model="iic/SenseVoiceSmall",
                trust_remote_code=True,
                vad_model="fsmn-vad",
                vad_kwargs={"max_single_segment_time": max_ms},
                disable_update=True,
                device=device,
            )
            _ASR_MODELS[max_ms] = model
    return model


def get_vad_model(max_segment_ms: int | None = None, end_silence_ms: int | None = None) -> AutoModel:
    """Return a cached standalone VAD model for subtitle-mode segmentation.

    Instances are cached per (max_segment_ms, end_silence_ms) combination.
    """
    key = (
        int(max_segment_ms or VAD_MAX_SEGMENT_MS),
        int(end_silence_ms or VAD_END_SILENCE_MS),
    )
    model = _VAD_MODELS.get(key)
    if model is not None:
        return model
    with _VAD_LOCK:
        # Re-check under the lock: another thread may have built it meanwhile.
        model = _VAD_MODELS.get(key)
        if model is None:
            model = AutoModel(
                model="fsmn-vad",
                max_single_segment_time=key[0],
                max_end_silence_time=key[1],
                disable_update=True,
                device=device,
            )
            _VAD_MODELS[key] = model
    return model

def transcribe_audio(
    input_path: str,
    language: str | None = None,
    batch_size_s: int | None = None,
    merge_vad: bool | None = None,
    merge_length_s: int | None = None,
    asr_vad_max_segment_ms: int | None = None,
):
    """Transcribe a whole file in one pass (article mode).

    Video inputs are converted to a temporary WAV first; any parameter left
    as None falls back to the corresponding config default.

    Returns:
        The raw FunASR result list (res[0]["text"] holds the transcript).
    """
    audio_path, temp_audio = ensure_audio(input_path)
    try:
        model = get_asr_model(asr_vad_max_segment_ms)
        logging.info("开始整段识别（文章模式）")
        res = model.generate(
            input=audio_path,
            cache={},
            language=language or DEFAULT_LANGUAGE,
            use_itn=True,
            batch_size_s=int(batch_size_s or BATCH_SIZE_S),
            merge_vad=bool(merge_vad if merge_vad is not None else MERGE_VAD),
            merge_length_s=int(merge_length_s or MERGE_LENGTH_S),
            output_timestamp=True,
        )
    finally:
        # Fix: cleanup now runs even when model.generate raises, so the
        # extracted temp WAV no longer leaks on recognition errors.
        try:
            if temp_audio and os.path.exists(temp_audio):
                os.remove(temp_audio)
        except OSError:
            # Best-effort removal; a stale temp file is not fatal.
            pass
    return res

# The unused punctuation-restoration function was removed to avoid an import error on the missing TextExecutor.

def remove_unwanted_characters(text: str) -> str:
    """Strip characters outside the CJK/Latin/punctuation whitelist (mirrors api.py).

    Keeps CJK ideographs, kana, hangul, ASCII alphanumerics, whitespace and
    common Western/fullwidth punctuation; everything else is deleted.
    """
    disallowed = re.compile(r'[^\u4e00-\u9fff\u3040-\u309f\u30a0-\u30ff\uac00-\ud7af'
                            r'a-zA-Z0-9\s.,!@#$%^&*()_+\-=\[\]{};\'"\\|<>/?，。！｛｝【】；‘’“”《》、（）￥]+')
    return disallowed.sub('', text)


# SRT timestamps are formatted from milliseconds only; the seconds-based formatter was removed.


def ms_to_time_string(*, ms=0, seconds=None):
    """Format a duration as an SRT timestamp ``hh:mm:ss,mmm``.

    Accepts either ``ms=`` (milliseconds) or ``seconds=``; ``seconds`` takes
    precedence when given. Hours are not wrapped at 24: the previous
    implementation read ``timedelta.seconds``, which silently drops whole
    days, so any duration of 24 h or more formatted incorrectly.
    """
    if seconds is not None:
        total_ms = int(round(seconds * 1000))
    else:
        total_ms = int(ms)
    hours, rem = divmod(total_ms, 3_600_000)
    minutes, rem = divmod(rem, 60_000)
    secs, millis = divmod(rem, 1000)
    return f"{hours:02}:{minutes:02}:{secs:02},{millis:03}"


def generate_srt_api_style(
    input_path: str,
    language: str | None = None,
    vad_max_segment_ms: int | None = None,
    vad_end_silence_ms: int | None = None,
) -> tuple[str, str]:
    """Generate an SRT file api.py-style: external VAD segmentation, then per-segment ASR.

    Returns (srt_content, srt_file_path).
    NOTE(review): the output path is a fixed "output.srt" in the current
    working directory, so concurrent requests would overwrite each other —
    confirm whether single-user operation is assumed.
    """
    # If the input is a video, extract its audio track first.
    try:
        audio_path, temp_audio_path = ensure_audio(input_path)
    except Exception:
        # Extraction failed: still emit a well-formed one-entry SRT so the
        # UI has something to preview and download.
        fallback = "1\n00:00:00,000 --> 00:00:01,000\n音频抽取失败\n\n"
        with open("output.srt", "w", encoding="utf-8-sig") as f:
            f.write(fallback)
        return fallback, "output.srt"

    # ASR + VAD models (kept aligned with api.py as closely as possible).
    model = get_asr_model()
    vm = get_vad_model(vad_max_segment_ms, vad_end_silence_ms)

    # Speech segmentation; segment boundaries are in milliseconds.
    segments = vm.generate(input=audio_path)
    audiodata = AudioSegment.from_file(audio_path)

    srt_lines = []
    tmp_files = []
    if segments and segments[0].get('value'):
        for idx, seg in enumerate(segments[0]['value'], start=1):
            st_ms, ed_ms = int(seg[0]), int(seg[1])
            # Slice the segment and write it to a temp WAV for the ASR call.
            chunk = audiodata[st_ms:ed_ms]
            ensure_tmp_dir()
            filename = os.path.join(TMP_DIR, f"tmp_{st_ms}-{ed_ms}_{uuid.uuid4().hex}.wav")
            chunk.export(filename, format="wav")
            tmp_files.append(filename)
            res = model.generate(input=filename, language=language or DEFAULT_LANGUAGE, use_itn=True)
            text = remove_unwanted_characters(rich_transcription_postprocess(res[0]["text"]))
            srt_lines.append(
                f"{idx}\n{ms_to_time_string(ms=st_ms)} --> {ms_to_time_string(ms=ed_ms)}\n{text.strip()}\n"
            )
    else:
        # No usable segments: transcribe the whole file as a single subtitle.
        res = model.generate(input=audio_path, language=language or DEFAULT_LANGUAGE, use_itn=True)
        text = remove_unwanted_characters(rich_transcription_postprocess(res[0]["text"]))
        dur_ms = int(audiodata.duration_seconds * 1000)
        srt_lines.append(
            f"1\n00:00:00,000 --> {ms_to_time_string(ms=dur_ms)}\n{text.strip()}\n"
        )

    content = "".join(srt_lines)
    srt_path = "output.srt"
    with open(srt_path, "w", encoding="utf-8-sig") as f:
        f.write(content)

    # Best-effort cleanup of per-segment temp WAVs and the extracted audio.
    for t in tmp_files:
        try:
            if t and os.path.exists(t):
                os.remove(t)
        except Exception:
            pass
    try:
        if temp_audio_path and os.path.exists(temp_audio_path):
            os.remove(temp_audio_path)
    except Exception:
        pass

    return content, srt_path

def generate_article(res):
    """Return plain article text from a FunASR result, stripping <|...|> tags."""
    if not res:
        return ""
    return re.sub(r"<\|.*?\|>", "", res[0]["text"])

def process_audio(
    file,
    feature,
    language,
    batch_size_s,
    merge_vad,
    merge_length_s,
    asr_vad_max_segment_ms,
    vad_max_segment_ms,
    vad_end_silence_ms,
):
    """Gradio entry point: dispatch to subtitle or article generation.

    Returns a (preview_text, download_path) pair; download_path is None
    except in subtitle mode. Any exception is rendered as a traceback in
    the preview box so the UI itself never crashes.
    """
    try:
        if feature == "生成字幕":
            preview, file_path = generate_srt_api_style(
                file.name,
                language,
                int(vad_max_segment_ms or VAD_MAX_SEGMENT_MS),
                int(vad_end_silence_ms or VAD_END_SILENCE_MS),
            )
            return preview, file_path
        if feature == "生成文章":
            res = transcribe_audio(
                file.name,
                language,
                int(batch_size_s or BATCH_SIZE_S),
                bool(merge_vad if merge_vad is not None else MERGE_VAD),
                int(merge_length_s or MERGE_LENGTH_S),
                int(asr_vad_max_segment_ms or ASR_VAD_MAX_SEGMENT_MS),
            )
            return generate_article(res), None
        # Fix: an unrecognized feature previously fell through and returned
        # a bare None, breaking the two-output Gradio interface.
        return f"未知功能: {feature}", None
    except Exception:
        import traceback
        # Surface the full traceback in the preview textbox for debugging.
        return traceback.format_exc(), None

# Two outputs: preview text + downloadable file.
iface = gr.Interface(
    fn=process_audio,
    inputs=[
        gr.File(label="上传音视频文件"),
        gr.Radio(["生成字幕", "生成文章"], value="生成字幕", label="选择功能"),
        gr.Dropdown(["auto", "zh", "en", "ja", "ko"], value=DEFAULT_LANGUAGE, label="识别语言"),
        gr.Slider(30, 120, step=5, value=BATCH_SIZE_S, label="批处理窗口(BATCH_SIZE_S, 秒)：整段识别速度/内存权衡"),
        gr.Checkbox(value=MERGE_VAD, label="合并分段(MERGE_VAD)：提高连贯性，时间戳略粗"),
        gr.Slider(5, 60, step=1, value=MERGE_LENGTH_S, label="合并目标长度(MERGE_LENGTH_S, 秒)：文章模式段长"),
        gr.Slider(20000, 60000, step=1000, value=ASR_VAD_MAX_SEGMENT_MS, label="ASR内置VAD单段(ASR_VAD_MAX_SEGMENT_MS, 毫秒)：文章模式切分"),
        gr.Slider(15000, 60000, step=1000, value=VAD_MAX_SEGMENT_MS, label="字幕分段最大时长(VAD_MAX_SEGMENT_MS, 毫秒)：每行时长"),
        gr.Slider(100, 1000, step=50, value=VAD_END_SILENCE_MS, label="字幕段尾静音(VAD_END_SILENCE_MS, 毫秒)：停顿敏感度"),
    ],
    # Outputs must stay in sync with process_audio's (preview, file) pair.
    outputs=[
        gr.Textbox(label="字幕/文章预览", lines=20),
        gr.File(label="下载字幕文件（仅字幕模式）"),
    ],
    title="语音识别与处理",
    description=(
        "从音视频文件生成字幕或文章。参数说明：\n"
        "- 识别语言：auto 自动检测或固定为 zh/en/ja/ko。\n"
        "- 批处理窗口：文章模式的内部批处理时长，越大越快但更占内存。\n"
        "- 合并分段/目标长度：文章模式内置 VAD 的段合并与目标长度。\n"
        "- ASR内置VAD单段：文章模式内置 VAD 的最长片段时长。\n"
        "- 字幕分段最大时长/段尾静音：字幕模式外部 VAD 的分段粒度与停顿敏感度。"
    ),
)

if __name__ == "__main__":
    # Listen on 0.0.0.0 with the configured port, so the app is reachable
    # over the LAN and binds a predictable port.
    iface.launch(server_name="0.0.0.0", server_port=GRADIO_SERVER_PORT)
