work-qa / app.py
sundea's picture
Upload app.py
9083bb9
raw
history blame
813 Bytes
from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline
import gradio as gr
# Load a Chinese extractive question-answering model and its tokenizer from the
# Hugging Face Hub (downloaded on first run, cached afterwards), then wrap both
# in a `question-answering` pipeline.
# NOTE: these are module-level globals; `QA` is called by get_out() below, so
# the pipeline is built once at import time rather than per request.
model = AutoModelForQuestionAnswering.from_pretrained('uer/roberta-base-chinese-extractive-qa')
tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-chinese-extractive-qa')
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
def get_out(text1, text2):
    """Answer a question by extracting a span from the given context.

    Args:
        text1: The question to answer.
        text2: The context passage the answer is extracted from.

    Returns:
        str: The extracted answer text.
    """
    # The QA pipeline returns a dict with 'answer', 'score', 'start', 'end';
    # only the answer text is surfaced in the UI.
    res = QA({'question': text1, 'context': text2})
    return res['answer']
# Assemble the Gradio interface: the question input and trigger button share a
# row; the context input and the result output sit below them.
with gr.Blocks() as demo:
    with gr.Row():
        question_box = gr.Textbox(label='question')
        compute_btn = gr.Button('compute')
    context_box = gr.Textbox(label='context')
    answer_box = gr.Textbox(label='result')
    # Clicking the button feeds (question, context) through get_out and
    # writes the returned answer into the result textbox.
    compute_btn.click(fn=get_out, inputs=[question_box, context_box], outputs=answer_box)

demo.launch(server_port=9090)