Pippoz committed
Commit 99b6634
1 Parent(s): 4946d76

Update app.py

Files changed (1)
  1. app.py +6 -2
app.py CHANGED
@@ -10,8 +10,11 @@ import torch
 # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
 # return model, tokenizer
 
-generator = pipeline('text-generation', model="facebook/opt-1.3b")
-answer = generator("Hello, I'm am conscious and")
+@st.cache(allow_output_mutation=True)
+def define_model(prompt):
+    generator = pipeline('text-generation', model="facebook/opt-1.3b")
+    answer = generator(prompt)
+    return answer
 
 
 #@st.cache(allow_output_mutation=True)
@@ -29,6 +32,7 @@ prompt= st.text_area('Your prompt here',
 
 #answer = opt_model(prompt, model, tokenizer,)
 #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
+answer = define_model(prompt)
 lst = answer[0]['generated_text']
 
 t = st.empty()
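
For reference, a minimal sketch of how app.py might read once this commit is applied, assuming a Streamlit app wrapping transformers' pipeline as the diff context suggests. Only the lines visible in the hunks are certain; the streamlit/transformers imports, the text_area arguments beyond the label, and the final rendering line are assumptions, not part of the commit.

# Sketch of app.py after this commit (assumed imports; only lines visible
# in the diff hunks are certain).
import streamlit as st                      # implied by the st.* calls in the diff
import torch                                # shown in the first hunk header
from transformers import pipeline           # implied by pipeline(...) in the diff


@st.cache(allow_output_mutation=True)
def define_model(prompt):
    # Build the OPT-1.3b text-generation pipeline and run it on the prompt,
    # caching the result so reruns with the same prompt skip regeneration.
    generator = pipeline('text-generation', model="facebook/opt-1.3b")
    answer = generator(prompt)
    return answer


# The original call passes further text_area arguments that are truncated
# in the diff; only the label is reproduced here.
prompt = st.text_area('Your prompt here')

answer = define_model(prompt)
lst = answer[0]['generated_text']

t = st.empty()
t.write(lst)   # hypothetical: the rendering step after st.empty() is not shown in the diff

One design note: because define_model takes the prompt as its argument, st.cache keys the cached result on the prompt text, so the pipeline is reconstructed whenever an uncached prompt arrives; caching the pipeline construction separately from generation would avoid reloading the model on every new prompt.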