import gradio as gr
from transformers import pipeline
# English extractive QA model (RoBERTa fine-tuned on SQuAD 2.0)
pp_en = pipeline("question-answering", model="deepset/roberta-base-squad2")
# Chinese machine reading comprehension model
pp_ch = pipeline("question-answering", model="luhua/chinese_pretrain_mrc_roberta_wwm_ext_large")
def qa_fn(ask, ctxt, model):
    # Pick the pipeline that matches the selected model
    pp = pp_en if model == "english" else pp_ch
    ret = pp(context=ctxt, question=ask)
    # The dict format of HighlightedText needs an "entity" label alongside the
    # "start"/"end" character offsets already returned by the pipeline
    ret["entity"] = "Answer"
    return {"text": ctxt, "entities": [ret]}, ret["answer"], ret["score"]
# Note on HighlightedText: it accepts two different output formats, a list of
# dicts or a list of tuples. See https://gradio.app/named_entity_recognition/ for details.
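# A minimal sketch of the two formats (illustrative values only, not produced by this app):
#   dict form:  {"text": "My name is Peppa", "entities": [{"entity": "Answer", "start": 11, "end": 16}]}
#   tuple form: [("My name is ", None), ("Peppa", "Answer")]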
samples = [
    # English example for the English model
    ["What is the name of George's older brother?", "I am Peppa Pig, I am George's older brother, and my home is in Beijing", "english"],
    # Chinese example for the Chinese model ("What is the URL of the library homepage?")
    ["图书馆主页的网址是多少啊?", "读者进入图书馆主页(http://lib.tjut.edu.cn)后,点击文献传递菜单,即可查看文献传递的具体流程步骤", "chinese"],
]
introStr = "A demo of using AI to automatically find the answer to a question in a passage, a more efficient and convenient new form of information retrieval."
titleStr = "Intelligent Question Answering Demo"
demo = gr.Interface(
    qa_fn,
    inputs=[
        gr.Textbox(label="Question", placeholder="Enter a question"),
        gr.Textbox(label="Context", lines=10, placeholder="Enter a passage of text"),
        gr.Radio(["english", "chinese"], label="Select a Model", value="english"),
    ],
    outputs=[
        gr.HighlightedText(label="Answer Location"),
        gr.Textbox(label="Answer"),
        gr.Number(label="Score"),
    ],
    examples=samples,
    description=introStr,
    title=titleStr,
)
demo.launch()