MultiTrickFox committed
Commit bbde354 · 1 Parent(s): 8e03a3e

Update README.md

Files changed (1)
  1. README.md +14 -3
README.md CHANGED
@@ -1,20 +1,32 @@
Bloom (2.5 B) Scientific Model fine tuned on Zen Knowledge


+
+ #####
+ ## Usage ##
+ #####
+
+
from transformers import AutoTokenizer, AutoModelForCausalLM

+
tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
model = AutoModelForCausalLM.from_pretrained("MultiTrickFox/bloom-2b5_Zen")

+ model.cuda()
+ tokenizer.pad_token_id = tokenizer.eos_token_id
+
+
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)

+
inp = [
"""Today""",
"""Yesterday"""
]

out = generator(
- inp,
+ inp.cuda(),
do_sample=True,

temperature=.6,
@@ -24,8 +36,7 @@ out = generator(

max_new_tokens=666,
max_time=60, # seconds
-
- pad_token_id = tokenizer.eos_token_id,
)

+
for o in out: print(o[0]['generated_text'])
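
The snippet as committed will not run as-is: pipeline is never imported, and inp is a plain Python list, so inp.cuda() raises an AttributeError (the text-generation pipeline tokenizes the prompts and handles device placement itself). A minimal runnable sketch, keeping the model ID and generation parameters from the README but importing pipeline explicitly and using the pipeline's documented device argument in place of model.cuda(), could look like this (assuming a CUDA GPU is available):

# Minimal sketch of the intended usage; not the author's exact snippet.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
model = AutoModelForCausalLM.from_pretrained("MultiTrickFox/bloom-2b5_Zen")

# As in the committed README: reuse the EOS token id for padding.
tokenizer.pad_token_id = tokenizer.eos_token_id

# device=0 places the model on the first CUDA GPU.
generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)

inp = [
    """Today""",
    """Yesterday""",
]

out = generator(
    inp,                 # plain strings; the pipeline tokenizes and moves tensors itself
    do_sample=True,
    temperature=.6,
    max_new_tokens=666,
    max_time=60,         # seconds
)

# With a list input, the pipeline returns one list of candidates per prompt.
for o in out:
    print(o[0]['generated_text'])

The final loop indexes o[0] because each element of out is itself a list of candidate generations for the corresponding prompt.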