juanfkurucz committed
Commit
27f94c6
Parent: b89ed62

Upload optimizing transformers demo

Files changed (2)
  1. app.py +74 -0
  2. config.json +3 -0
app.py ADDED
@@ -0,0 +1,74 @@
+ import time
+
+ import gradio as gr
+ import torch
+ from huggingface_hub import hf_hub_download
+ from onnxruntime import InferenceSession
+ from transformers import AutoModelForQuestionAnswering, AutoTokenizer
+
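+ # Map the display names shown in the UI to their Hugging Face Hub repo ids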
+ models = {
+     "Base model": "bert-large-uncased-whole-word-masking-finetuned-squad",
+     "Pruned model": "madlag/bert-large-uncased-wwm-squadv2-x2.63-f82.6-d16-hybrid-v1",
+     "Pruned ONNX Optimized FP16": "tryolabs/bert-large-uncased-wwm-squadv2-optimized-f16",
+ }
+
+
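+ # Download the exported ONNX graph from the Hub and time a CPU inference run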
+ def run_ort_inference(model_name, inputs):
+     model_path = hf_hub_download(repo_id=models[model_name], filename="model.onnx")
+     sess = InferenceSession(model_path, providers=["CPUExecutionProvider"])
+     start_time = time.time()
+     output = sess.run(None, input_feed=inputs)
+     end_time = time.time()
+     return (output[0], output[1]), (end_time - start_time)
+
+
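+ # Baseline PyTorch path used for the non-ONNX models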
+ def run_normal_hf(model_name, inputs):
+     model = AutoModelForQuestionAnswering.from_pretrained(models[model_name])
+     # Start the timer after loading so only the forward pass is measured
+     start_time = time.time()
+     with torch.no_grad():
+         output = model(**inputs)
+     end_time = time.time()
+     return (output.start_logits, output.end_logits), (end_time - start_time)
+
+
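+ # Tokenize the inputs, dispatch to the right backend, and decode the answer span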
+ def inference(model_name, context, question):
+     tokenizer = AutoTokenizer.from_pretrained(models[model_name])
+     if model_name == "Pruned ONNX Optimized FP16":
+         # onnxruntime expects NumPy arrays, so tokenize with return_tensors="np"
+         inputs = dict(tokenizer(question, context, return_tensors="np"))
+         output, inference_time = run_ort_inference(model_name, inputs)
+         answer_start_scores, answer_end_scores = torch.tensor(output[0]), torch.tensor(
+             output[1]
+         )
+     else:
+         inputs = tokenizer(question, context, return_tensors="pt")
+         output, inference_time = run_normal_hf(model_name, inputs)
+         answer_start_scores, answer_end_scores = output
+
+     # Decode the span between the most likely start token and end token
+     input_ids = inputs["input_ids"].tolist()[0]
+     answer_start = torch.argmax(answer_start_scores)
+     answer_end = torch.argmax(answer_end_scores) + 1
+     answer = tokenizer.convert_tokens_to_string(
+         tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])
+     )
+
+     return answer, f"{inference_time:.4f}s"
+
+
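+ # Gradio UI: model selector plus context/question inputs, answer/time outputs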
+ model_field = gr.Dropdown(
+     choices=["Base model", "Pruned model", "Pruned ONNX Optimized FP16"],
+     value="Pruned ONNX Optimized FP16",
+     label="Model",
+ )
+ input_text_field = gr.Textbox(placeholder="Enter the text here", label="Text")
+ input_question_field = gr.Text(placeholder="Enter the question here", label="Question")
+
+ output_model = gr.Text(label="Model output")
+ output_inference_time = gr.Text(label="Inference time in seconds")
+
+
+ demo = gr.Interface(
+     inference,
+     title="Optimizing Transformers - Question Answering Demo",
+     inputs=[model_field, input_text_field, input_question_field],
+     outputs=[output_model, output_inference_time],
+ )
+
+ demo.launch()
config.json ADDED
@@ -0,0 +1,3 @@
+ {
+     "transformers_version": "4.5.1"
+ }