Update app.py
app.py CHANGED
@@ -3,8 +3,11 @@ import time
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-
-
+@st.cache(allow_output_mutation=True)
+def define_model():
+    model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
+    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
+    return model, tokenizer
 
 @st.cache(allow_output_mutation=True)
 def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
@@ -13,9 +16,12 @@ def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
     answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
     return answer
 
+
+model, tokenizer = define_model()
+
 prompt= st.text_area('Your prompt here',
     '''Hello, I'm am conscious and''')
-
+
 answer = opt_model(prompt, model, tokenizer,)
 #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
 lst = ' '.join(answer)
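The point of the commit is to move model loading out of module top level into a cached `define_model()`, so only the first run pays the load cost. The diff elides the body of `opt_model` between the two hunks, so here is a minimal sketch of how the whole app.py plausibly reads after this commit. The `import streamlit as st` line, the tokenize/`generate` step, the final `st.write`, and the indentation are assumptions reconstructed around the lines the diff actually shows:

```python
# Sketch of app.py as it plausibly reads after this commit. Lines
# marked "assumed" are not visible in the diff and are reconstructed
# around the batch_decode() call that is.
import time

import streamlit as st  # assumed: the diff's st.* calls require it
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


@st.cache(allow_output_mutation=True)
def define_model():
    # Cache the ~26 GB fp16 OPT-13B weights so Streamlit reruns
    # (triggered by every widget interaction) do not reload them.
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-13b", torch_dtype=torch.float16
    ).cuda()
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
    return model, tokenizer


@st.cache(allow_output_mutation=True)
def opt_model(prompt, model, tokenizer, num_sequences=1, max_length=50):
    # assumed: tokenize the prompt and sample continuations; the diff
    # only shows the decoding step below.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
    generated_ids = model.generate(
        input_ids,
        do_sample=True,
        num_return_sequences=num_sequences,
        max_length=max_length,
    )
    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return answer


model, tokenizer = define_model()

prompt = st.text_area('Your prompt here',
                      '''Hello, I'm am conscious and''')
answer = opt_model(prompt, model, tokenizer)
lst = ' '.join(answer)
st.write(lst)  # assumed: the diff ends at the join
```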
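One caveat with the pattern this commit keeps: `opt_model` is itself wrapped in legacy `@st.cache`, which hashes every argument on each call, and hashing a 13B-parameter model plus its tokenizer is at best slow and at worst fails unless custom `hash_funcs` are supplied. On current Streamlit this split is usually expressed with `st.cache_resource` for the model and `st.cache_data` for the generation results. A hedged sketch of that port (not what this commit does):

```python
# Hypothetical port to current Streamlit (>= 1.18). st.cache_resource
# holds one unhashable singleton (the model) per process; st.cache_data
# caches results keyed on the hashable arguments only.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


@st.cache_resource
def define_model():
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-13b", torch_dtype=torch.float16
    ).cuda()
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
    return model, tokenizer


@st.cache_data
def opt_model(prompt, _model, _tokenizer, num_sequences=1, max_length=50):
    # The leading underscore tells Streamlit not to hash these
    # parameters; the cache key is (prompt, num_sequences, max_length).
    input_ids = _tokenizer(prompt, return_tensors="pt").input_ids.cuda()
    generated_ids = _model.generate(
        input_ids,
        do_sample=True,
        num_return_sequences=num_sequences,
        max_length=max_length,
    )
    return _tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```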