Galuh Sahid committed on
Commit 2786013
1 Parent(s): d222a2c

remove cache params

Files changed (1)
app.py +2 -2
app.py CHANGED
@@ -19,13 +19,13 @@ MODELS = {

 headers = {}

-@st.cache()
+@st.cache(show_spinner=False)
 def load_gpt(model_type):
     model = GPT2LMHeadModel.from_pretrained(MODELS[model_type])

     return model

-@st.cache(hash_funcs={transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer: lambda _: None})
+@st.cache(show_spinner=False, hash_funcs={transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer: lambda _: None})
 def load_gpt_tokenizer(model_type):
     tokenizer = GPT2Tokenizer.from_pretrained(MODELS[model_type])

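For context, a minimal sketch of how cached loaders like the ones in this diff are typically wired into a Streamlit app. The MODELS mapping, widget labels, and generation settings below are illustrative assumptions and are not taken from app.py; only the two decorated loader functions mirror the diff.

# Minimal sketch, not the actual app.py.
import streamlit as st
import transformers
from transformers import GPT2LMHeadModel, GPT2Tokenizer

MODELS = {"GPT-2 (example)": "gpt2"}  # assumed display-name -> model-id mapping

@st.cache(show_spinner=False)
def load_gpt(model_type):
    # Heavy call: the model is downloaded/loaded once, then reused from the cache.
    return GPT2LMHeadModel.from_pretrained(MODELS[model_type])

# hash_funcs maps GPT2Tokenizer to a constant so st.cache does not try to hash
# the tokenizer object itself; show_spinner=False hides the "Running ..." spinner.
@st.cache(show_spinner=False,
          hash_funcs={transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer: lambda _: None})
def load_gpt_tokenizer(model_type):
    return GPT2Tokenizer.from_pretrained(MODELS[model_type])

model_type = st.selectbox("Model", list(MODELS))
prompt = st.text_area("Prompt", "Hello world")

if st.button("Generate"):
    model = load_gpt(model_type)               # cached after the first run
    tokenizer = load_gpt_tokenizer(model_type)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(input_ids, max_length=50, do_sample=True)
    st.write(tokenizer.decode(output[0], skip_special_tokens=True))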