# entregable3 / app.py
# (Hugging Face Hub viewer residue removed: author ivmarin, commit 41beb64, 541 Bytes.)
from fastai.vision.all import *
import gradio as gr
import torch
import numpy as np
from torchvision import transforms
def predict(text):
    """Classify *text* with the TorchScript model stored in ``model.pth``.

    Parameters
    ----------
    text : str
        Raw input forwarded to the scripted model.  NOTE(review): the model
        is called with the string directly — presumably the scripted module
        performs its own tokenization; confirm against the export code.

    Returns
    -------
    str
        The predicted class index (or list of indices for a batched output),
        rendered as text for the Gradio ``"text"`` output component.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load lazily and cache on the function object so the expensive
    # torch.jit.load happens once, not on every request.
    model = getattr(predict, "_model", None)
    if model is None:
        # map_location keeps the load working on CPU-only hosts even if the
        # module was scripted on a GPU machine.
        model = torch.jit.load("model.pth", map_location=device)
        model.eval()
        predict._model = model
    model.to(device)
    with torch.no_grad():
        outputs = model(text)
        preds = torch.argmax(outputs, 1)
    # Gradio's "text" output expects a string, not a tensor repr.
    return str(preds.item() if preds.numel() == 1 else preds.tolist())
# Build the Gradio interface and launch the app.
demo = gr.Interface(fn=predict, inputs="text", outputs="text")
demo.launch()