import sys
import uvicorn
import json
import random
import string
import datetime
import uuid

from fastapi import FastAPI, HTTPException, Response
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from typing import List, Literal, Optional, Union

from faster_whisper import WhisperModel


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: no startup work; release CUDA memory on shutdown.

    Args:
        app: The FastAPI application this lifespan is attached to (unused).
    """
    yield
    # BUG FIX: `torch` was used here without ever being imported, so shutdown
    # raised NameError. Import it lazily and tolerate its absence — the
    # faster-whisper backend does not require torch to be installed.
    try:
        import torch
    except ImportError:
        return
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

# Application object, wired to the module's lifespan handler.
app = FastAPI(lifespan=lifespan)

# Fully permissive CORS policy: any origin, method and header, with credentials.
_cors_options = {
    "allow_origins": ["*"],
    "allow_credentials": True,
    "allow_methods": ["*"],
    "allow_headers": ["*"],
}
app.add_middleware(CORSMiddleware, **_cors_options)


import shutil
from pathlib import Path
from fastapi import HTTPException
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse

# Directory (relative to the working directory) where uploaded audio is saved.
UPLOAD_DIR = "files"
# Global faster-whisper model handle; assigned in the __main__ block before
# the server starts. Stays None if the module is imported without running it.
model = None

@app.post("/v1/audio/transcriptions")
async def transcriptions(file: UploadFile = File(...)):
    """OpenAI-compatible transcription endpoint.

    Persists the uploaded audio under UPLOAD_DIR with a timestamped unique
    name, transcribes it with the global faster-whisper model (Chinese), and
    returns the concatenated text.

    Args:
        file: Uploaded audio file (multipart form field).

    Returns:
        dict: ``{"text": <full transcription>}``.

    Raises:
        HTTPException: 503 if the model was never initialized (e.g. the
            module was served without running the __main__ block).
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model is not loaded")

    # Timestamp + short UUID keeps filenames unique and roughly sortable.
    filename = (
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-")
        + str(uuid.uuid4())[:8]
        + ".wav"
    )

    # BUG FIX: the path previously used the literal "(unknown)" instead of the
    # generated filename, so every upload overwrote the same file on disk.
    file_location = f"{UPLOAD_DIR}/{filename}"
    with open(file_location, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)

    # Language is hard-coded to Chinese to skip auto-detection.
    segments, info = model.transcribe(file_location, language="zh")
    text = ""
    for segment in segments:
        print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
        text += segment.text

    return {"text": text}

if __name__ == "__main__":
    # Ensure the upload directory exists before accepting requests.
    Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)
    # Load the faster-whisper model once, then serve on all interfaces.
    model = WhisperModel("/home/models/faster-whisper-large-v3")
    uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)

