from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr

# Load the extractive question-answering model and its tokenizer from the Hub
model = AutoModelForQuestionAnswering.from_pretrained('sundea/Work-QA')
tokenizer = AutoTokenizer.from_pretrained('sundea/Work-QA')
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)


def get_out(text1, text2):
    """Return the answer span extracted from `text2` (context) for `text1` (question)."""
    QA_input = {'question': text1, 'context': text2}
    res = QA(QA_input)
    return res['answer']


# Example question/context pairs (in Chinese, matching the Chinese QA model).
# All three questions refer to the sentence "Li Li lives in Nanjing; he has a small
# dog named Diudiu whose fur is brown": where Li Li lives, what his dog is called,
# and what colour the dog is.
examples = [
    ['李理居住在哪', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
    ['李理的小狗叫什么', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
    ['李理的小狗是什么颜色的', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
]

# Build the Gradio interface: two text inputs (question, context), one text output
app = gr.Interface(
    fn=get_out,
    inputs=[gr.Textbox(label='question'), gr.Textbox(label='context')],
    outputs=gr.Textbox(label='answer'),
    title='Question Answering',
    examples=examples,
)

if __name__ == '__main__':
    app.launch()