Matt committed on
Commit 5e0557a
1 Parent(s): e544a7b

Update README with chat template examples

Files changed (1)
  1. README.md +22 -24
README.md CHANGED
@@ -111,36 +111,34 @@ if torch.cuda.is_available():
 else:
     torch.set_default_device("cpu")
 
-model = transformers.AutoModelForCausalLM.from_pretrained("microsoft/Orca-2-13b", device_map='auto')
+model = transformers.AutoModelForCausalLM.from_pretrained("./Orca-2-13b", device_map='auto')
 
 # https://github.com/huggingface/transformers/issues/27132
 # please use the slow tokenizer, since the fast and slow tokenizers produce different tokens
 tokenizer = transformers.AutoTokenizer.from_pretrained(
-    "microsoft/Orca-2-13b",
+    "./Orca-2-13b",
     use_fast=False,
 )
 
-system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
-user_message = "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?"
-
-prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
-
-inputs = tokenizer(prompt, return_tensors='pt')
-output_ids = model.generate(inputs["input_ids"],)
-answer = tokenizer.batch_decode(output_ids)[0]
+messages = [
+    {"role": "system", "content": "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."},
+    {"role": "user", "content": "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?"}
+]
+inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
+
+output_ids = model.generate(inputs)
+answer = tokenizer.decode(output_ids[0])
 
 print(answer)
 
 # This example continues showing how to add a second turn message by the user to the conversation
-second_turn_user_message = "Give me a list of the key points of your first answer."
-
-# we set add_special_tokens=False because we don't want to automatically add a bos_token between messages
-second_turn_message_in_markup = f"\n<|im_start|>user\n{second_turn_user_message}<|im_end|>\n<|im_start|>assistant"
-second_turn_tokens = tokenizer(second_turn_message_in_markup, return_tensors='pt', add_special_tokens=False)
-second_turn_input = torch.cat([output_ids, second_turn_tokens['input_ids']], dim=1)
+messages.append(
+    {"role": "user", "content": "Give me a list of the key points of your first answer."}
+)
+second_turn_input = tokenizer.apply_chat_template(messages, return_tensors='pt')
 
-output_ids_2 = model.generate(second_turn_input,)
-second_turn_answer = tokenizer.batch_decode(output_ids_2)[0]
+output_ids_2 = model.generate(second_turn_input)
+second_turn_answer = tokenizer.decode(output_ids_2[0])
 
 print(second_turn_answer)
 ```
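
For readers updating their own code, the key equivalence in this hunk: `apply_chat_template` renders the same ChatML markup the removed f-string assembled by hand, and `add_generation_prompt=True` is what appends the trailing `<|im_start|>assistant` tag. A minimal sketch to inspect the rendered prompt, assuming the `tokenizer` and `messages` objects from the snippet above (`tokenize=False` returns the string rather than token IDs):

```python
# Render the chat template to a string instead of token IDs to inspect it.
rendered = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(rendered)
# Expected markup, matching the removed f-string:
# <|im_start|>system
# {system message}<|im_end|>
# <|im_start|>user
# {user message}<|im_end|>
# <|im_start|>assistant
```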
@@ -209,16 +207,16 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(
     add_special_tokens=False,
 )
 
-system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
-user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."
-
-prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
+messages = [
+    {"role": "system", "content": "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."},
+    {"role": "user", "content": "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."}
+]
 
-inputs = tokenizer(prompt, return_tensors='pt')
+inputs = tokenizer.apply_chat_template(messages, return_tensors='pt')
 inputs = inputs.to(device)
 
-output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
-sequence_length = inputs["input_ids"].shape[1]
+output_ids = model.generate(inputs, max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
+sequence_length = inputs.shape[1]
 new_output_ids = output_ids[:, sequence_length:]
 answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
 final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"
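
Both sides of this hunk end by calling `should_filter_out`, which is defined outside the lines shown here. To run the snippet in isolation, a minimal, purely hypothetical stand-in (a keyword blocklist, not the README's actual filter) could look like:

```python
# Hypothetical stand-in for should_filter_out, only so the snippet runs
# standalone; the real filter is defined outside this diff.
BLOCKLIST = ["placeholder banned phrase"]  # illustrative terms only

def should_filter_out(text: str) -> bool:
    lowered = text.lower()
    return any(term in lowered for term in BLOCKLIST)
```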
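
One behavioral note on the second-turn example in the first hunk: the removed code concatenated the first call's `output_ids` (which include the model's answer) with the new user turn, while the new code re-renders the template from `messages`, which at that point holds only the system prompt and the two user messages. If the first answer should stay in the conversation history, one variant (a sketch under that assumption, not part of this commit) is to decode it and append it as an `assistant` message before the follow-up:

```python
# Variant of the second turn that carries the first answer forward.
# Assumes tokenizer, model, messages, inputs and output_ids from the first
# example, with messages still ending at the first user turn.
first_answer = tokenizer.decode(
    output_ids[0, inputs.shape[1]:], skip_special_tokens=True
)
messages.append({"role": "assistant", "content": first_answer})
messages.append(
    {"role": "user", "content": "Give me a list of the key points of your first answer."}
)

second_turn_input = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output_ids_2 = model.generate(second_turn_input)
second_turn_answer = tokenizer.decode(output_ids_2[0])
```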