zhenggq committed
Commit c54ebec
1 Parent(s): 7be09ca

Update README.md

Files changed (1): README.md (+19, -24)
README.md CHANGED
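In brief: the former `run_inference` helper is inlined into the README's example script, so model and tokenizer setup, tokenization, and generation now run as top-level code, and the final `print(answers)` becomes `print(answers[0])`.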
@@ -131,7 +131,7 @@ sequence_length = inputs["input_ids"].shape[1]
new_output_ids = output_ids[:, sequence_length:]
answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)

- print(answers)
+ print(answers[0])
```

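Note on the first hunk: `tokenizer.batch_decode` returns a list of strings, so `print(answers[0])` prints the generated answer itself rather than a one-element list.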
@@ -185,29 +185,18 @@ def should_filter_out(input_text, threshold=4):

    return max_score >= threshold

- def run_inference(model_path, inputs):
-     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-     model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
-     model.to(device)
-
-     tokenizer = transformers.AutoTokenizer.from_pretrained(
-         model_path,
-         model_max_length=4096,
-         padding_side="right",
-         use_fast=False,
-         add_special_tokens=False,
-     )
-     inputs = tokenizer(inputs, return_tensors='pt')
-     inputs = inputs.to(device)
-
-     output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
-     sequence_length = inputs["input_ids"].shape[1]
-     new_output_ids = output_ids[:, sequence_length:]
-     answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
-
-     return answers
-
model_path = 'microsoft/Orca-2-13b'
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
+ model.to(device)
+
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+     model_path,
+     model_max_length=4096,
+     padding_side="right",
+     use_fast=False,
+     add_special_tokens=False,
+ )

system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."
@@ -215,7 +204,13 @@ user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it wit
# We use Chat Markup Language https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/ai-services/openai/includes/chat-markup-language.md#working-with-chat-markup-language-chatml
prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"

- answers = run_inference(model_path, prompt)
+ inputs = tokenizer(prompt, return_tensors='pt')
+ inputs = inputs.to(device)
+
+ output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
+ sequence_length = inputs["input_ids"].shape[1]
+ new_output_ids = output_ids[:, sequence_length:]
+ answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"

print(final_output)
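Beyond moving code around, the last two hunks change where the model is loaded: `run_inference` previously created the model and tokenizer on every call, while the inlined version loads them once at top level. If a reusable helper is still wanted, here is a minimal sketch (not part of this commit; `generate_answers` is a hypothetical name) that accepts the already-loaded objects instead:

```python
def generate_answers(model, tokenizer, prompt, device, max_length=4096):
    """Hypothetical helper: reuse an already-loaded model and tokenizer
    rather than reloading them on every call as run_inference did."""
    # Tokenize the ChatML prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Greedy decoding, as in the README; with do_sample=False, sampling
    # parameters such as temperature have no effect.
    output_ids = model.generate(
        inputs["input_ids"], max_length=max_length, do_sample=False, use_cache=True
    )
    # Slice off the prompt tokens so only the newly generated text is decoded.
    new_output_ids = output_ids[:, inputs["input_ids"].shape[1]:]
    return tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
```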
 
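For reference, the example as it reads after this commit, assembled from the hunks above (a sketch: the `import torch` / `import transformers` lines and the body of `should_filter_out` live earlier in the README and are assumed here):

```python
import torch
import transformers

# Defined earlier in the README: should_filter_out(input_text, threshold=4)
# returns True when its max_score over the text reaches the threshold.

model_path = 'microsoft/Orca-2-13b'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = transformers.AutoModelForCausalLM.from_pretrained(model_path)
model.to(device)

tokenizer = transformers.AutoTokenizer.from_pretrained(
    model_path,
    model_max_length=4096,
    padding_side="right",
    use_fast=False,
    add_special_tokens=False,
)

system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."

# We use Chat Markup Language https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/ai-services/openai/includes/chat-markup-language.md#working-with-chat-markup-language-chatml
prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"

inputs = tokenizer(prompt, return_tensors='pt')
inputs = inputs.to(device)

output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
sequence_length = inputs["input_ids"].shape[1]
new_output_ids = output_ids[:, sequence_length:]
answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)

final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"

print(final_output)
```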