---
datasets:
- WizardLM/WizardLM_evol_instruct_V2_196k
- Open-Orca/OpenOrca
language:
- en
---
# Writer/palmyra-20b-chat
---

# Usage

```py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import TextStreamer

model_name = "Writer/palmyra-20b-chat"

# Load the tokenizer and the model (half precision, spread across available devices)
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

prompt = "What is the meaning of life?"

# Chat template: a system preamble followed by USER / ASSISTANT turns
input_text = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions. "
    "USER: {prompt} "
    "ASSISTANT:"
)

model_inputs = tokenizer(input_text.format(prompt=prompt), return_tensors="pt").to(
    "cuda"
)

# Sampling configuration for generation
gen_conf = {
    "top_k": 20,
    "max_new_tokens": 2048,
    "temperature": 0.6,
    "do_sample": True,
    "eos_token_id": tokenizer.eos_token_id,
}

# Print decoded tokens to stdout as they are generated
streamer = TextStreamer(tokenizer)

# The model does not use token_type_ids, so drop them if the tokenizer returned any
if "token_type_ids" in model_inputs:
    del model_inputs["token_type_ids"]

all_inputs = {**model_inputs, **gen_conf}
_ = model.generate(**all_inputs, streamer=streamer)
```
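
If you want the reply back as a string rather than streamed to stdout, you can capture the generated ids and decode only the tokens that come after the prompt. The following is a minimal sketch reusing the `model`, `tokenizer`, `model_inputs`, and `gen_conf` objects from the snippet above; slicing at the prompt length is an assumption about how to post-process the output, not part of the model card itself.

```py
# Sketch: return the assistant reply as a string (no streaming).
# Reuses `model`, `tokenizer`, `model_inputs`, and `gen_conf` from the snippet above.
output_ids = model.generate(**model_inputs, **gen_conf)

# Keep only the newly generated tokens, i.e. everything after the prompt
prompt_len = model_inputs["input_ids"].shape[-1]
reply = tokenizer.decode(output_ids[0][prompt_len:], skip_special_tokens=True)
print(reply)
```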