# qa_roberta / app.py
# Hugging Face Space by fsaglam2002 (commit 12ed8ed) — Turkish question-answering demo.
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import torch
import gradio as grad
import ast
# Turkish extractive question-answering model (BERT fine-tuned on a Turkish
# SQuAD-style dataset). Renamed from camelCase `_pretrainedModelName` to the
# PEP 8 UPPER_SNAKE_CASE constant convention; the name is only used below.
_PRETRAINED_MODEL_NAME = "savasy/bert-base-turkish-squad"
_tokenizer = AutoTokenizer.from_pretrained(_PRETRAINED_MODEL_NAME)
_model = AutoModelForQuestionAnswering.from_pretrained(_PRETRAINED_MODEL_NAME)
# The pipeline bundles tokenization, the model forward pass, and answer-span
# decoding into a single callable used by answer_question below.
_pipeline = pipeline("question-answering", model=_model, tokenizer=_tokenizer)
def answer_question(question, context):
    """Return the answer span the QA pipeline extracts from *context* for *question*.

    Both arguments are plain strings coming from the Gradio text inputs; the
    return value is the ``"answer"`` string from the pipeline's result dict
    (``None`` if the key is absent).
    """
    # Bug fix: the original built "{'question': '…', 'context':'…'}" by string
    # concatenation and parsed it with ast.literal_eval, which blows up with a
    # SyntaxError/ValueError as soon as the user's text contains a quote
    # character. The pipeline accepts the fields as keyword arguments directly,
    # so no string round-trip (and no ast import) is needed.
    response = _pipeline(question=question, context=context)
    return response.get("answer")
# Build the two-textbox UI (question, context) and start the Gradio server.
# Top-level launch is the standard entry point for a Hugging Face Space app.
grad.Interface(answer_question, inputs=["text", "text"], outputs=["text"]).launch()
# NOTE(review): the block below is dead code — an earlier variant of this app
# (pipeline built from the model name alone, raw dict returned) kept inside a
# module-level string literal. It has no runtime effect; consider deleting it
# and relying on version control instead.
'''
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as grad
import ast
#_model = "deepset/roberta-base-squad2"
_model = "savasy/bert-base-turkish-squad"
_pipeline = pipeline("question-answering", model = _model, tokenizer = _model)
def answer_question(question, context):
text = "{" + "'question': '"+question+"', 'context':'"+context+"'}"
di = ast.literal_eval(text)
response = _pipeline(di)
return response
grad.Interface(answer_question, inputs=["text", "text"], outputs="text").launch()
'''