letrunglinh commited on
Commit
cc27b51
1 Parent(s): e025579

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -1
app.py CHANGED
@@ -1,3 +1,22 @@
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
- gr.Interface.load("models/letrunglinh/qa_pnc").launch()
 
1
import os

import gradio as gr
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering

# SECURITY: never commit real API tokens to source control (the original
# embedded a live HF token literal here). Read it from the environment;
# None is fine for public models.
AUTH_TOKEN = os.environ.get("HF_TOKEN")

MODEL_CHECKPOINT = "letrunglinh/qa_pnc"

# Build the tokenizer, ONNX model, and pipeline ONCE at import time.
# The original rebuilt (and potentially re-downloaded) all three inside
# question_answer on every single request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT, use_auth_token=AUTH_TOKEN)
model = ORTModelForQuestionAnswering.from_pretrained(MODEL_CHECKPOINT, from_transformers=True)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)


def question_answer(context: str, question: str):
    """Answer `question` using `context`.

    Returns:
        tuple[str, float]: the extracted answer span and its confidence score.
    """
    # Call the pipeline with a single example so it returns one dict.
    # (The original passed a one-element list, which makes the pipeline
    # return a *list* of dicts, so answers['answer'] raised TypeError.)
    result = qa_pipeline(question=question, context=context)
    return result["answer"], result["score"]


# `share` is an argument of launch(), not of the Interface constructor
# (the original passed share=True to gr.Interface, which is rejected or
# silently ignored depending on the gradio version).
gr.Interface(
    fn=question_answer,
    inputs=["text", "text"],
    outputs=["textbox", "textbox"],
).launch(share=True)