# Mitchell Kilpatrick SE2022
# Change of model and versions
# 416188b
from fastapi import FastAPI
from pydantic import BaseModel
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Fine-tuned Gaelic -> IPA ByT5 checkpoint on the Hugging Face Hub.
MODEL_NAME = "MitchellKil/gaelic-ipa-byt5"
app = FastAPI() # fast api for better response times
print("Loading Gaelic → IPA model from HF")
# Prefer GPU when available; model and inputs must share this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Loaded once at import time so every request reuses the same weights
# (first run downloads from the Hub; later runs hit the local cache).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to(device)
# Inference only: disable dropout / training-mode layers.
model.eval()
print("Model loaded")
class TextRequest(BaseModel):
    """Request body for /predict: the Gaelic text to transliterate."""
    # Raw Gaelic input string; no length limit here — the tokenizer
    # truncates to 128 tokens downstream.
    text: str
def text_to_ipa(text: str) -> str:
    """Transliterate a Gaelic string to IPA with the fine-tuned ByT5 model.

    The task prefix must stay byte-identical to the one used at training
    time, otherwise the model sees an out-of-distribution prompt.
    """
    encoded = tokenizer(
        f"convert Gaelic to IPA: {text}",
        return_tensors="pt",
        truncation=True,      # cap long inputs at the training window
        max_length=128,
    ).to(device)              # inputs must live on the model's device

    # Greedy (deterministic) decoding; gradients are never needed here.
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_new_tokens=64,
            do_sample=False,
        )

    return tokenizer.decode(generated[0], skip_special_tokens=True).strip()
@app.post("/predict")
def predict(request: TextRequest):
    """Convert the submitted Gaelic text to IPA and return it as JSON."""
    return {"ipa": text_to_ipa(request.text)}