Pippoz committed on
Commit
b1afa65
1 Parent(s): b8c36f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -10,7 +10,7 @@ import torch
10
  # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
11
  # return model, tokenizer
12
 
13
- @st.cache(suppress_st_warning=True)
14
  def define_model():
15
  generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
16
  return generator
@@ -28,6 +28,7 @@ generator = define_model()
28
 
29
  prompt= st.text_area('Your prompt here',
30
  '''Hello, I'm am conscious and''')
 
31
  answer = generator(prompt, max_length=100,no_repeat_ngram_size=3, early_stopping=True, num_beams=10)
32
 
33
  #answer = opt_model(prompt, model, tokenizer,)
 
10
  # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
11
  # return model, tokenizer
12
 
13
+ @st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
14
  def define_model():
15
  generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
16
  return generator
 
28
 
29
  prompt= st.text_area('Your prompt here',
30
  '''Hello, I'm am conscious and''')
31
+
32
  answer = generator(prompt, max_length=100,no_repeat_ngram_size=3, early_stopping=True, num_beams=10)
33
 
34
  #answer = opt_model(prompt, model, tokenizer,)