cshallah committed
Commit
0fdf9b7
1 Parent(s): e2172b4

Update app.py

Files changed (1): app.py (+10 -12)
app.py CHANGED
@@ -1,6 +1,9 @@
-# https://huggingface.co/transformers/main_classes/pipelines.html
-# https://huggingface.co/models?filter=conversational
-
+# Benchmarks: NT, Why is blood important?
+#model_name = "deepset/roberta-base-squad2" # 180
+#model_name = "deepset/deberta-v3-large-squad2" # est. 4X
+model_name = "deepset/tinyroberta-squad2" # 86
+#model_name = "deepset/minilm-uncased-squad2" # 96
+#model_name = "deepset/electra-base-squad2" # 185 (nice wordy results)
 
 
 # Install Dependences
@@ -9,22 +12,17 @@
 # !pip install ipywidgets
 # !pip install gradio # see setup for installing gradio
 
-# Import Dependencies
-from transformers import pipeline
 import gradio as gr
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
-# Create the Q&A pipeline
-nlp = pipeline('question-answering', model='deepset/roberta-base-squad2', tokenizer='deepset/roberta-base-squad2')
-#nlp = pipeline('question-answering', model='bert-large-uncased-whole-word-masking-finetuned-squad ', tokenizer='bert-large-uncased-whole-word-masking-finetuned-squad ')
-#nlp = pipeline("question-answering", model='distilbert-base-cased-distilled-squad')
-#nlp = pipeline("question-answering", model='distilbert-base-uncased-distilled-squad')
+nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
 
 def question_answer(context_filename, question):
     """Produce a NLP response based on the input text filename and question."""
     with open(context_filename) as f:
         context = f.read()
-    nlp_input = {'question': question, 'context': context}
-    result = nlp(nlp_input)
+    nlp_input = {'question': question, 'context': context}
+    result = nlp(nlp_input)
     return result['answer']
 
 demo = gr.Interface(
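
The numbers in the new benchmark comments ("NT, Why is blood important?") carry no stated unit. A minimal sketch of how such per-model timings could be reproduced, assuming a hypothetical context file nt.txt and wall-clock timing via time.perf_counter (neither is part of this commit):

import time
from transformers import pipeline

question = "Why is blood important?"
with open('nt.txt') as f:  # hypothetical context file, not in this commit
    context = f.read()

for model_name in [
    "deepset/roberta-base-squad2",
    "deepset/tinyroberta-squad2",
    "deepset/minilm-uncased-squad2",
    "deepset/electra-base-squad2",
]:
    # Build a fresh Q&A pipeline for each candidate model
    nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
    start = time.perf_counter()
    result = nlp({'question': question, 'context': context})
    elapsed = time.perf_counter() - start
    print(f"{model_name}: {elapsed:.2f}s -> {result['answer']}")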
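The diff cuts off at demo = gr.Interface(, so the actual arguments lie outside the changed hunks. For orientation only, a hedged sketch of how the question_answer handler above is typically wired in Gradio; the component choices here are assumptions, not the app's real configuration:

import gradio as gr

# Assumed wiring; question_answer is the handler defined earlier in app.py.
demo = gr.Interface(
    fn=question_answer,
    inputs=[gr.Textbox(label="Context filename"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)
demo.launch()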