|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, RobertaForQuestionAnswering |
|
|
|
# Load the pretrained Japanese extractive-QA tokenizer and model from the
# Hugging Face Hub (downloaded on first run, then served from the local cache).
tokenizer = AutoTokenizer.from_pretrained("tsmatz/roberta_qa_japanese")

model = RobertaForQuestionAnswering.from_pretrained("tsmatz/roberta_qa_japanese")
|
|
|
def answer(text, question):
    """Extract the answer span for *question* from the context *text*.

    Encodes the (question, context) pair, runs the extractive-QA model, and
    decodes the token span between the highest-scoring start and end logits.

    Args:
        text: Context passage to search for the answer.
        question: Question asked about the passage.

    Returns:
        The decoded answer string (empty if the predicted span is empty or
        inverted, i.e. end precedes start).
    """
    inputs = tokenizer(question, text, add_special_tokens=True, return_tensors="pt")
    input_ids = inputs["input_ids"].tolist()[0]

    # Inference only — disable gradient tracking to avoid building an
    # autograd graph (saves memory and time).
    with torch.no_grad():
        outputs = model(**inputs)

    # Highest-scoring start position; end is exclusive, hence the +1.
    # .item() turns the 0-dim tensors into plain ints for list slicing.
    answer_start = torch.argmax(outputs.start_logits).item()
    answer_end = torch.argmax(outputs.end_logits).item() + 1

    return tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])
    )
|
|
|
# Wire the QA function into a simple web UI: two text inputs
# (context passage + question) mapped to one text output (the answer).
demo = gr.Interface(
    fn=answer,
    inputs=[
        gr.Textbox(label="Text:", placeholder="Text...", lines=5),
        gr.Textbox(label="Question:", placeholder="Question...", lines=1),
    ],
    outputs="text",
)
demo.launch()