Saibo Geng committed on
Commit
b5b0c27
1 Parent(s): d7755a4

use gpt2-large and the optimum package for faster CPU inference

Browse files
Files changed (2) hide show
  1. app.py +2 -1
  2. requirements.txt +1 -0
app.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
5
  from transformers_cfg.grammar_utils import IncrementalGrammarConstraint
6
  from transformers_cfg.generation.logits_process import GrammarConstrainedLogitsProcessor
7
 
8
- MODEL_NAME = "gpt2"
9
 
10
  if __name__ == "__main__":
11
  # Define your model and your tokenizer
@@ -14,6 +14,7 @@ if __name__ == "__main__":
14
  if tokenizer.pad_token_id is None:
15
  tokenizer.pad_token_id = tokenizer.eos_token_id
16
  model.config.pad_token_id = model.config.eos_token_id
 
17
 
18
  # Define your color-coding labels; if prob > x, then label = y; Sorted in descending probability order!
19
  probs_to_label = [
 
5
  from transformers_cfg.grammar_utils import IncrementalGrammarConstraint
6
  from transformers_cfg.generation.logits_process import GrammarConstrainedLogitsProcessor
7
 
8
+ MODEL_NAME = "gpt2-large"
9
 
10
  if __name__ == "__main__":
11
  # Define your model and your tokenizer
 
14
  if tokenizer.pad_token_id is None:
15
  tokenizer.pad_token_id = tokenizer.eos_token_id
16
  model.config.pad_token_id = model.config.eos_token_id
17
+ model.to_bettertransformer()
18
 
19
  # Define your color-coding labels; if prob > x, then label = y; Sorted in descending probability order!
20
  probs_to_label = [
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  torch
 
2
  transformers>=4.26
3
  transformers-cfg==0.2.0
 
1
  torch
2
+ optimum
3
  transformers>=4.26
4
  transformers-cfg==0.2.0