Update app.py
app.py
CHANGED
@@ -10,7 +10,7 @@ import torch
 # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
 # return model, tokenizer
 
-@st.cache()
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
 def define_model():
     generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
     return generator
@@ -28,6 +28,7 @@ generator = define_model()
 
 prompt= st.text_area('Your prompt here',
     '''Hello, I'm am conscious and''')
+
 answer = generator(prompt, max_length=100,no_repeat_ngram_size=3, early_stopping=True, num_beams=10)
 
 #answer = opt_model(prompt, model, tokenizer,)
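For context: Streamlit's legacy @st.cache decorator hashes a cached function's inputs and internals to decide when to recompute, and it cannot hash the Hugging Face tokenizers.Tokenizer and tokenizers.AddedToken objects that a transformers pipeline carries — a likely trigger for the UnhashableTypeError behind a Space's runtime error. This commit sidesteps that by mapping those types to a constant in hash_funcs, so Streamlit skips them when keying the cache. Below is a minimal sketch of app.py as it would stand after the commit; the import lines and the final st.write call are assumptions (the diff does not show them), and skip_special_tokens is dropped since it is a decode-time option rather than a pipeline constructor argument.

import streamlit as st
import tokenizers
from transformers import pipeline

# Streamlit's default hasher raises on HF tokenizer objects; mapping the
# offending types to a constant makes the hasher skip them entirely.
@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None,
                      tokenizers.AddedToken: lambda _: None})
def define_model():
    # Load the 1.3B-parameter OPT text-generation pipeline once and cache it.
    return pipeline('text-generation', model="facebook/opt-1.3b")

generator = define_model()

prompt = st.text_area('Your prompt here',
                      '''Hello, I'm am conscious and''')

# Beam search with n-gram repetition blocking, as configured in the commit.
answer = generator(prompt, max_length=100, no_repeat_ngram_size=3,
                   early_stopping=True, num_beams=10)
st.write(answer[0]['generated_text'])  # assumed display step, not in the diff

On current Streamlit releases @st.cache is deprecated; @st.cache_resource caches the pipeline by reference without hashing it at all, which makes the hash_funcs workaround unnecessary.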