# app.py — masked-LM prompt-based sentiment demo (author: savasy, commit 2fe7971)
# Model and prompting setup: a multilingual BERT masked-LM is used to score
# the probability of "good" vs "bad" filling the [MASK] slot of a prompt.
from transformers import AutoModelForMaskedLM , AutoTokenizer
import torch
# HF Hub id of the masked language model used for prompting.
model_path="bert-base-multilingual-uncased"
# NOTE(review): this tokenizer is never used below — `prompting.tokenizer`
# is used instead. Kept for backward compatibility; consider removing.
tokenizer = AutoTokenizer.from_pretrained(model_path)
# load Prompting class
from prompt import Prompting
# Prompting wraps the masked LM and exposes compute_tokens_prob(); it loads
# its own tokenizer from the same model id.
prompting= Prompting(model=model_path)
# Prompt template appended to the input text; the mask token is where the
# model chooses between candidate fill words ("good" / "bad").
prompt= ". Because it was "+ prompting.tokenizer.mask_token +"."
def predict(text):
    """Classify the sentiment of *text* by masked-LM prompting.

    Appends the prompt template to *text* and compares the model's
    probability of "good" (vs "bad") in the mask slot against a baseline
    computed on the bare prompt alone.

    Returns a tuple ``(label_dict, score)`` where ``label_dict`` maps
    ``"POSITIVE"`` or ``"NEGATIVE"`` to a confidence in [0, 1] and
    ``score`` repeats that confidence for the numeric Gradio output.
    """
    # The baseline ("good" probability for the prompt with no input text)
    # does not depend on *text*, so compute it once and cache it on the
    # function instead of running an extra model forward pass per call.
    if not hasattr(predict, "_threshold"):
        predict._threshold = prompting.compute_tokens_prob(
            prompt, token_list1=["good"], token_list2=["bad"]
        )[0].item()
    threshold = predict._threshold

    res = prompting.compute_tokens_prob(
        text + prompt, token_list1=["good"], token_list2=["bad"]
    )
    score = res[0].item()

    if score > threshold:
        # Rescale the margin above the baseline into [0, 1].
        confidence = (score - threshold) / (1 - threshold)
        return {"POSITIVE": confidence}, confidence
    # Rescale the shortfall below the baseline into [0, 1].
    confidence = (threshold - score) / threshold
    return {"NEGATIVE": confidence}, confidence
import gradio as gr

# Build the web UI: one text input; outputs are the label dict and the
# numeric confidence returned by predict(). launch() blocks and serves.
interface = gr.Interface(fn=predict, inputs=["text"], outputs=["label", "number"])
iface = interface.launch()