from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
import time
from fastapi import APIRouter, UploadFile, File, Form, HTTPException
from pydantic import BaseModel
from typing import Optional
import os
import uuid

import text_string_nlp_deal 

# Router exposing the ASR endpoints; mounted by the application elsewhere.
asr_router = APIRouter()

# Module-level cache for the preloaded model (lazily populated on first use).
global_model = None

def load_model(model_dir="iic/SenseVoiceSmall", punc_model="ct-punc", device="cuda:0"):
    """
    Load the speech-recognition model and cache it in the module global.

    Args:
        model_dir: model directory or model name
        punc_model: punctuation-restoration model name
        device: device to run on, e.g. "cuda:0" or "cpu"

    Returns:
        The loaded model (also stored in ``global_model``).
    """
    global global_model

    print(f"正在加载模型 {model_dir}...")
    start_time = time.time()

    model = AutoModel(
        model=model_dir,
        vad_model="fsmn-vad",
        vad_kwargs={"max_single_segment_time": 30000},
        device=device,
        # BUG FIX: was the hard-coded literal "ct-punc", silently ignoring
        # the punc_model parameter callers pass in.
        punc_model=punc_model,
        disable_update=True,
    )

    end_time = time.time()
    print(f"模型加载完成，耗时 {end_time - start_time:.2f} 秒")

    # Cache for later calls (transcribe_audio / endpoint reuse it).
    global_model = model
    return model

def transcribe_audio(audio_path, model=None, language="auto", use_itn=True, 
                    batch_size_s=60, merge_vad=True, merge_length_s=15):
    """
    Transcribe an audio file to text.

    Args:
        audio_path: path to the audio file
        model: a preloaded model; if None, the cached global model is used
            (and loaded first if necessary)
        language: language code, one of "auto", "zn", "en", "yue", "ja",
            "ko", "nospeech"
            (NOTE(review): "zn" mirrors upstream FunASR examples; Chinese
            is conventionally "zh" — confirm against the model docs)
        use_itn: whether to apply inverse text normalization
        batch_size_s: batch size in seconds
        merge_vad: whether to merge VAD segments
        merge_length_s: merge length in seconds

    Returns:
        The post-processed transcription text.
    """
    global global_model
    
    # Lazily load the global model when no model was supplied and none
    # has been cached yet.
    if model is None:
        if global_model is None:
            global_model = load_model()
        model = global_model
    
    print(f"开始转录音频: {audio_path}")
    start_time = time.time()
    
    res = model.generate(
        input=audio_path,
        cache={},
        language=language,
        use_itn=use_itn,
        batch_size_s=batch_size_s,
        merge_vad=merge_vad,
        merge_length_s=merge_length_s,
    )
    
    # Only the first result's text is used; rich tags are converted to
    # plain text by FunASR's postprocess helper.
    text = rich_transcription_postprocess(res[0]["text"])
    # Strip <...> tags / bracketed content and normalize Chinese
    # punctuation via the project-local helper.
    text = text_string_nlp_deal.text_rm_brack_chinese_marks_punctuation_replace(text)
    
    end_time = time.time()
    print(f"转录完成，耗时 {end_time - start_time:.2f} 秒")
    
    return text

class TranscriptionResponse(BaseModel):
    """Response body for /transcribe: the text plus server-side duration."""

    # Transcribed, post-processed text.
    text: str
    # Wall-clock seconds spent transcribing on the server.
    duration: float

@asr_router.post("/transcribe", response_model=TranscriptionResponse)
async def transcribe_endpoint(
    audio_file: UploadFile = File(...),
    language: str = Form("auto"),
    use_itn: bool = Form(True),
    batch_size_s: int = Form(60),
    merge_vad: bool = Form(True),
    merge_length_s: int = Form(15)
):
    """
    API endpoint: accept an uploaded audio file and return its
    transcription together with the time spent transcribing.
    """
    global global_model

    # Make sure the shared model is ready before touching the upload.
    if global_model is None:
        global_model = load_model()

    # Save the upload under a unique name so concurrent requests
    # cannot collide on the same path.
    temp_dir = "temp_audio"
    os.makedirs(temp_dir, exist_ok=True)

    if audio_file.filename:
        suffix = os.path.splitext(audio_file.filename)[1]
    else:
        suffix = ".wav"
    saved_path = os.path.join(temp_dir, f"{uuid.uuid4()}{suffix}")

    try:
        # Persist the uploaded bytes to disk for the ASR backend.
        payload = await audio_file.read()
        with open(saved_path, "wb") as out_file:
            out_file.write(payload)

        # Run transcription and time it for the response.
        started = time.time()
        text = transcribe_audio(
            saved_path,
            language=language,
            use_itn=use_itn,
            batch_size_s=batch_size_s,
            merge_vad=merge_vad,
            merge_length_s=merge_length_s
        )
        elapsed = time.time() - started

        return TranscriptionResponse(text=text, duration=elapsed)

    except Exception as e:
        # Surface any failure as a 500 with the underlying reason.
        raise HTTPException(status_code=500, detail=f"转录失败: {str(e)}")

    finally:
        # Always remove the temporary audio file.
        if os.path.exists(saved_path):
            os.remove(saved_path)

# Preload the model when run as a script.
if __name__ == "__main__":
    # Preload the model once; subsequent calls reuse the cache.
    global_model = load_model(model_dir="iic/SenseVoiceSmall", punc_model="ct-punc", device="cuda:0")
    
    # Example: transcribe the English sample shipped with the model.
    # NOTE(review): assumes the funasr AutoModel exposes `model_path` —
    # confirm against the installed funasr version.
    example_audio = f"{global_model.model_path}/example/en.mp3"
    transcription = transcribe_audio(example_audio)
    print("\n转录结果:")
    print(transcription)
    
    # To transcribe other files, call transcribe_audio directly;
    # the model does not need to be reloaded. e.g.:
    # another_audio = "path/to/another/audio.wav"
    # another_transcription = transcribe_audio(another_audio)
    # print(another_transcription)