Pippoz committed on
Commit bd5628e
1 Parent(s): 6d82038

Update app.py

Files changed (1)
  app.py +20 -14
app.py CHANGED
@@ -1,28 +1,34 @@
  import streamlit as st
  import time
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ #from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch

- @st.cache(allow_output_mutation=True)
- def define_model():
-     model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
-     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
-     return model, tokenizer
+ #@st.cache(allow_output_mutation=True)
+ #def define_model():
+ #    model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
+ #    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
+ #    return model, tokenizer
+
+ from transformers import pipeline

+ generator = pipeline('text-generation', model="facebook/opt-1.3b")
+ answer = generator("Hello, I'm am conscious and")

- @st.cache(allow_output_mutation=True)
- def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
-     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
-     generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
-     answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
-     return answer
+ #@st.cache(allow_output_mutation=True)
+ #def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
+ #    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
+ #    generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
+ #    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
+ #    return answer
+

- model, tokenizer = define_model()
+
+ #model, tokenizer = define_model()

  prompt= st.text_area('Your prompt here',
      '''Hello, I'm am conscious and''')

- answer = opt_model(prompt, model, tokenizer,)
+ #answer = opt_model(prompt, model, tokenizer,)
  #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
  lst = ' '.join(answer)
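
For reference, a minimal runnable sketch of how the pipeline-based path introduced in this commit could be wired back into the existing Streamlit prompt box. This is not part of the commit: the load_generator helper, the caching of the pipeline with the same @st.cache pattern the previous version used, and the generation parameters are illustrative assumptions. Note that a text-generation pipeline returns a list of dicts, so the generated strings need to be pulled out of 'generated_text' before joining them.

import streamlit as st
from transformers import pipeline

# Assumption, not in the commit: cache the pipeline with the same @st.cache
# pattern the previous version used, so the 1.3B model is only loaded once.
@st.cache(allow_output_mutation=True)
def load_generator():
    return pipeline('text-generation', model="facebook/opt-1.3b")

generator = load_generator()

prompt = st.text_area('Your prompt here',
    '''Hello, I'm am conscious and''')

# The pipeline returns a list of dicts; the text lives under 'generated_text'.
outputs = generator(prompt, max_length=50, num_return_sequences=1)
answer = [out['generated_text'] for out in outputs]
st.write(' '.join(answer))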