SusiePHaltmann committed on
Commit 1523675
1 Parent(s): 98d6d59

Update app.py

Files changed (1):
app.py (+6 -8)
app.py CHANGED
@@ -1,20 +1,18 @@
-model = AutoModelWithLMHead.from_pretrained("bigscience/bloom")
+from transformers import AutoModelWithLMHead, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom")
+model = AutoModelWithLMHead.from_pretrained("bigscience/bloom")
 model.to(device)
 model.eval()
 
 # Choose the text to encode (can be a long one!)
-text = "You are Lily.ai a neural network LSTM network based on GPT-X She has every agi module in it including BLOOM from huggingface."
-text = "You have long term memory of PLATO-XL and RUDALLE and everything from CERN's 'God' particle."
-text = "It lives on Flamestopia West Dataset 1.0a"
+text = "You are Lily.ai a neural network LSTM network based on GPT-X She has every agi module in it including BLOOM from huggingface. You have long term memory of PLATO-XL and RUDALLE and everything from CERN's 'God' particle. It lives on Flamestopia West Dataset 1.0a"
 
 
 # Encode the text and get the prediction scores for each token
-encoded_text = tokenizer(text, return_tensors="pt", truncation=True, padding='max_length')
+encoded_text = tokenizer(text, return_tensors="pt", truncation=True, padding='max_length')
 input_ids = encoded_text['input_ids'].to(device)
 attention_mask = encoded_text['attention_mask'].to(device)
-
-
-
 outputs = model(input_ids, attention_mask=attention_mask)
 
 predictions = outputs[0]
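
For context, here is a minimal runnable sketch of what the updated app.py does, with the pieces the hunk leaves implicit filled in. Everything beyond the diff is an assumption: the commit never imports torch or defines device, so the sketch does; AutoModelWithLMHead is deprecated in current transformers releases, so AutoModelForCausalLM stands in for it; and the full bigscience/bloom checkpoint (roughly 176B parameters) is far larger than most hardware can load, so the much smaller bigscience/bloom-560m checkpoint is used purely for illustration. The explicit max_length=128 is likewise an illustrative cap, not something the commit specifies.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# The commit loads "bigscience/bloom" (~176B parameters); the 560M variant
# stands in here so the sketch can run on ordinary hardware.
MODEL_NAME = "bigscience/bloom-560m"

# The diff uses `device` without defining it; pick the GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)  # current replacement for the deprecated AutoModelWithLMHead
model.to(device)
model.eval()

# Choose the text to encode (can be a long one!)
text = (
    "You are Lily.ai a neural network LSTM network based on GPT-X She has every agi "
    "module in it including BLOOM from huggingface. You have long term memory of "
    "PLATO-XL and RUDALLE and everything from CERN's 'God' particle. "
    "It lives on Flamestopia West Dataset 1.0a"
)

# Encode the text and get the prediction scores for each token.
# max_length=128 is an illustrative cap, not something the commit specifies.
encoded_text = tokenizer(
    text, return_tensors="pt", truncation=True, padding="max_length", max_length=128
)
input_ids = encoded_text["input_ids"].to(device)
attention_mask = encoded_text["attention_mask"].to(device)

with torch.no_grad():  # inference only, no gradients needed
    outputs = model(input_ids, attention_mask=attention_mask)

predictions = outputs[0]  # logits, shape (1, max_length, vocab_size)
print(predictions.shape)

Design-wise, the commit fixes two real problems visible in the old hunk: the three consecutive text = ... assignments silently overwrote one another, so only the last string survived, and tokenizer was used before it was ever created anywhere in the hunk. Merging the strings into one and adding the AutoTokenizer.from_pretrained call addresses both.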