juanfkurucz committed
Commit: 2d83c30
Parent: 54ac152

Change base model and remove title

Files changed (1): app.py (+1, -2)
app.py CHANGED
@@ -9,7 +9,7 @@ from transformers import AutoModelForQuestionAnswering, AutoTokenizer
 MAX_SEQUENCE_LENGTH = 512
 
 models = {
-    "Base model": "bert-large-uncased-whole-word-masking-finetuned-squad",
+    "Base model": "madlag/bert-large-uncased-whole-word-masking-finetuned-squadv2",
     "Pruned model": "madlag/bert-large-uncased-wwm-squadv2-x2.63-f82.6-d16-hybrid-v1",
     "Pruned ONNX Optimized FP16": "tryolabs/bert-large-uncased-wwm-squadv2-optimized-f16",
 }
@@ -85,7 +85,6 @@ output_inference_time = gr.Text(label="Inference time in seconds")
 
 demo = gr.Interface(
     inference,
-    title="Optimizing Transformers - Question Answering Demo",
     inputs=[model_field, input_text_field, input_question_field],
     outputs=[output_model, output_inference_time],
 )
 
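For context, here is a minimal sketch of how the new "Base model" entry from the models dict above can be loaded and queried with transformers. This is not the Space's actual inference() function; the question-answering pipeline helper and the example question/context strings are assumptions used purely for illustration.

# Minimal sketch: load the new "Base model" checkpoint and answer a question.
# Not the app's inference(); the pipeline helper below is illustrative only.
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_id = "madlag/bert-large-uncased-whole-word-masking-finetuned-squadv2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForQuestionAnswering.from_pretrained(model_id)
qa = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Placeholder question/context pair for demonstration.
result = qa(
    question="Which models does the demo compare?",
    context="The demo compares a base BERT model against pruned and ONNX-optimized variants.",
)
print(result["answer"], round(result["score"], 3))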