Update app.py
Browse files
app.py
CHANGED
@@ -1,30 +1,55 @@
|
|
1 |
-
from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline
|
2 |
-
import gradio as gr
|
3 |
|
4 |
-
model = AutoModelForQuestionAnswering.from_pretrained('sundea/Work-QA')
|
5 |
-
tokenizer = AutoTokenizer.from_pretrained('sundea/Work-QA')
|
6 |
-
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
|
7 |
|
8 |
-
def get_out(text1,text2):
|
9 |
|
10 |
-
|
11 |
|
12 |
|
13 |
-
|
14 |
-
|
15 |
|
16 |
-
|
17 |
|
18 |
|
19 |
-
with gr.Blocks() as demo:
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
|
27 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
|
|
|
1 |
+
# from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline
|
2 |
+
# import gradio as gr
|
3 |
|
4 |
+
# model = AutoModelForQuestionAnswering.from_pretrained('sundea/Work-QA')
|
5 |
+
# tokenizer = AutoTokenizer.from_pretrained('sundea/Work-QA')
|
6 |
+
# QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
|
7 |
|
8 |
+
# def get_out(text1,text2):
|
9 |
|
10 |
+
# QA_input={'question':text1,'context':text2}
|
11 |
|
12 |
|
13 |
+
# res=QA(QA_input)
|
14 |
+
# # res['answer']
|
15 |
|
16 |
+
# return res['answer']
|
17 |
|
18 |
|
19 |
+
# with gr.Blocks() as demo:
|
20 |
+
# with gr.Row():
|
21 |
+
# question = gr.Textbox(label='question')
|
22 |
+
# greet_btn = gr.Button('compute')
|
23 |
+
# context=gr.Textbox(label='context')
|
24 |
+
# res=gr.Textbox(label='result')
|
25 |
+
# greet_btn.click(fn=get_out,inputs=[question,context],outputs=res)
|
26 |
|
27 |
+
# demo.launch()
|
28 |
+
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
|
29 |
+
import gradio as gr
|
30 |
+
|
31 |
+
# Load the fine-tuned extractive-QA checkpoint and its tokenizer from the
# Hugging Face Hub, then wrap both in a ready-to-call pipeline.
tokenizer = AutoTokenizer.from_pretrained('sundea/Work-QA')
model = AutoModelForQuestionAnswering.from_pretrained('sundea/Work-QA')
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
|
34 |
|
35 |
+
def get_out(text1, text2):
    """Run extractive question answering over a passage.

    Args:
        text1: The question string.
        text2: The context passage to search for the answer.

    Returns:
        The answer span extracted from the context.
    """
    # The question-answering pipeline expects a dict with exactly these keys.
    prediction = QA({'question': text1, 'context': text2})
    return prediction['answer']
|
39 |
+
|
40 |
+
# Example rows for the UI. Each row is ordered [question, context] to match
# the Interface's input order — the original had [context, question], which
# filled the question box with the passage and vice versa when clicked.
examples = [
    ['李理居住在哪', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
    ['李理的小狗叫什么', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
    ['李理的小狗是什么颜色的', '李理住在南京,他养了只小狗,名字叫丢丢,它是棕色毛色。'],
]
|
46 |
+
|
47 |
+
# 创建Gradio应用程序
|
48 |
+
# Build and launch the Gradio app.
# Fixes vs. the original:
#   1. gr.Interface is not a context manager — `with gr.Interface(...) as app:`
#      raises at runtime. Construct it, then call .launch().
#   2. gr.inputs.Textbox / gr.outputs.Textbox are the removed pre-3.x
#      namespaced components; use gr.Textbox directly.
app = gr.Interface(
    fn=get_out,
    inputs=[gr.Textbox(label='question'), gr.Textbox(label='context')],
    outputs=gr.Textbox(label='answer'),
    title='Question Answering',
    examples=examples,
)
app.launch()
|
54 |
|
55 |
|