from faster_whisper import WhisperModel
from fastapi import APIRouter, File, UploadFile
from model import successMessage
from logger import logger
import tempfile
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Load the faster-whisper large-v3 model once at import time so every request
# reuses the same instance. The path is relative — this assumes the service is
# started from the project root (an absolute Windows path was used during
# development). CPU inference; no GPU required.
# TODO(review): consider making the model path configurable via env/settings.
model = WhisperModel("bigmodel/faster-whisper/large-v3", device="cpu")

router = APIRouter()


@router.post("/voice/upload",
             summary="语音识别大模型",
             description="将语音识别为文字,模型选择为: fast-whisper/large-v3")
async def voice_to_text(file: UploadFile = File(...)):
    """Transcribe an uploaded audio file to text with faster-whisper.

    The upload is spooled to a named temporary file (faster-whisper needs a
    filesystem path), transcribed, and the temp file is always removed.

    Returns:
        successMessage wrapping a list of "[start -> end] text" segment strings.

    Raises:
        Exception: re-raised after logging if transcription fails, so FastAPI
        returns a proper 500 instead of crashing on an unbound variable.
    """
    string_array = []

    # delete=False so the file survives the `with` block and can be read back
    # by its path; we are responsible for removing it ourselves.
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        content = await file.read()
        tmp_file.write(content)
    try:
        segments, info = model.transcribe(tmp_file.name)
        # transcribe() returns a *lazy* generator — decoding actually happens
        # while iterating, so the loop must stay inside the try block.
        for segment in segments:
            string_array.append("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
    except Exception as e:
        # Original code swallowed the error and then hit an UnboundLocalError
        # on `segments`; log and re-raise so the client gets a real 500.
        logger.error(e)
        raise
    finally:
        # Always clean up the temp file, success or failure.
        os.remove(tmp_file.name)
    return successMessage(string_array, "转换成功")
