Update app.py
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ import gradio as grad
|
|
4 |
text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
5 |
mdl = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
6 |
|
7 |
-
def text2text(answer,context):
|
8 |
input_text = "answer: %s context: %s </s>" % (answer, context)
|
9 |
features = text2text_tkn ([input_text], return_tensors='pt')
|
10 |
|
@@ -15,7 +15,7 @@ def text2text(answer,context):
|
|
15 |
response=text2text_tkn.decode(output[0])
|
16 |
return response
|
17 |
|
18 |
-
|
19 |
ans=grad.Textbox(lines=1, label="Answer")
|
20 |
out=grad.Textbox(lines=1, label="Genereated Question")
|
21 |
-
grad.Interface(text2text, inputs=[
|
|
|
4 |
text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
5 |
mdl = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
6 |
|
7 |
+
def text2text(context,answer):
|
8 |
input_text = "answer: %s context: %s </s>" % (answer, context)
|
9 |
features = text2text_tkn ([input_text], return_tensors='pt')
|
10 |
|
|
|
15 |
response=text2text_tkn.decode(output[0])
|
16 |
return response
|
17 |
|
18 |
+
context=grad.Textbox(lines=10, label="English", placeholder="Context")
|
19 |
ans=grad.Textbox(lines=1, label="Answer")
|
20 |
out=grad.Textbox(lines=1, label="Genereated Question")
|
21 |
+
grad.Interface(text2text, inputs=[context,ans], outputs=out).launch()
|