Pippoz committed
Commit 741c239
1 Parent(s): 547f92d

Update app.py

Files changed (1): app.py (+6 −4)
app.py CHANGED
@@ -4,9 +4,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 @st.cache(allow_output_mutation=True)
-def opt_model(prompt, num_sequences = 1, max_length = 50):
-    model = AutoModelForCausalLM.from_pretrained("facebook/opt-30b", torch_dtype=torch.float16).cuda()
-    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b", use_fast=False)
+model = AutoModelForCausalLM.from_pretrained("facebook/opt-30b", torch_dtype=torch.float16).cuda()
+tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b", use_fast=False)
+
+@st.cache(allow_output_mutation=True)
+def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
     generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
     answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
@@ -15,7 +17,7 @@ def opt_model(prompt, num_sequences = 1, max_length = 50):
 prompt= st.text_area('Your prompt here',
     '''Hello, I'm am conscious and''')
 
-answer = opt_model(prompt)
+answer = opt_model(prompt, model, tokenizer)
 #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
 lst = ' '.join(answer)
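For context, the pattern this commit moves toward — loading the model and tokenizer once, outside the generation function — is usually written by wrapping the expensive load in its own cached function, since `@st.cache` can only decorate a `def`, not a bare assignment. Below is a minimal sketch of that variant, assuming the same `facebook/opt-30b` checkpoint, the legacy `st.cache` API, and an available CUDA GPU; the helper name `load_opt` is hypothetical, not part of the commit.

```python
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache(allow_output_mutation=True)
def load_opt():
    # Runs once per session; Streamlit reuses the cached objects on reruns.
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-30b", torch_dtype=torch.float16
    ).cuda()
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-30b", use_fast=False)
    return model, tokenizer

def opt_model(prompt, model, tokenizer, num_sequences=1, max_length=50):
    # Tokenize on CPU, move the ids to the GPU where the model lives.
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
    generated_ids = model.generate(
        input_ids, num_return_sequences=num_sequences, max_length=max_length
    )
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

model, tokenizer = load_opt()
prompt = st.text_area('Your prompt here', '''Hello, I'm am conscious and''')
answer = opt_model(prompt, model, tokenizer)
lst = ' '.join(answer)
```

With the load inside a cached function, only the cheap `generate` call reruns when the user edits the prompt, which is the point of splitting model loading out of `opt_model` in the first place.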