import torch
import numpy as np
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and the fine-tuned IMDB sentiment model from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("juliensimon/autonlp-imdb-demo-hf-16622775")
model = AutoModelForSequenceClassification.from_pretrained("juliensimon/autonlp-imdb-demo-hf-16622775")


def predict(review):
    # Tokenize the review and run it through the model.
    inputs = tokenizer(review, padding=True, truncation=True, return_tensors="pt")
    outputs = model(**inputs)
    # Convert the raw logits into class probabilities.
    predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
    predictions = predictions.detach().numpy()[0]
    # Pick the most likely class: index 0 is negative, index 1 is positive.
    index = np.argmax(predictions)
    score = predictions[index]
    return "This review is {:.2f}% {}".format(100 * score, "negative" if index == 0 else "positive")


# Build a simple text-in, text-out Gradio interface and launch it.
iface = gr.Interface(fn=predict, inputs="text", outputs="text")
iface.launch()
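
# Optional: a quick local sanity check (an illustrative addition, not part of the
# original app). The sample review below is an arbitrary string chosen only for
# demonstration. Since launch() typically keeps the server running when executed
# as a script, call predict() before iface.launch() or in a separate session:
#
#   print(predict("I loved every minute of this film."))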