from fastapi import FastAPI, File, UploadFile
from fastapi.responses import PlainTextResponse
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

app = FastAPI()

# Use the GPU with half precision when available; fall back to CPU with full precision.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model_id = "openai/whisper-large-v3"

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    cache_dir="./app.cache",
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id, cache_dir="./app.cache")

pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=30,
    batch_size=16,
    return_timestamps=True,
    torch_dtype=torch_dtype,
    device=device,
)


def speech_to_text(path_to_file: str) -> str:
    """
    Given the path to a .wav file, return its transcription.
    """
    result = pipe(path_to_file)
    return result["text"]


@app.get("/")
def root():
    return {"Hello": "World"}


@app.post("/upload/")
async def create_upload_file(file: UploadFile = File(...)):
    # Persist the uploaded audio to disk so the pipeline can read it by path.
    content = await file.read()
    with open("tempfile.wav", "wb") as f:
        f.write(content)
    response_text = speech_to_text("tempfile.wav")
    return PlainTextResponse(content=response_text, status_code=200)
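
# Example client call: a minimal sketch, assuming the app is served with
# `uvicorn main:app` on the default http://127.0.0.1:8000 and that
# "sample.wav" is a local audio file you supply. The multipart field name
# "file" must match the parameter name of create_upload_file above.
#
#   import requests
#
#   with open("sample.wav", "rb") as audio:
#       resp = requests.post(
#           "http://127.0.0.1:8000/upload/",
#           files={"file": ("sample.wav", audio, "audio/wav")},
#       )
#   print(resp.text)  # plain-text transcription returned by the endpoint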