ink-pad committed
Commit 2581d39
1 Parent(s): 02ed997

Update README.md

Files changed (1):
  1. README.md +61 -2
README.md CHANGED
@@ -110,7 +110,7 @@ def get_probablities(logprobs):
 
     return probabilities
 
-model_path = "granite-guardian-3.1-2b"
+model_path = "ibm-granite/granite-guardian-3.1-2b"
 
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
@@ -186,7 +186,66 @@ with torch.no_grad():
 
 label, prob_of_risk = parse_output(output, input_len)
 print(f"# risk detected? : {label}") # Yes
-print(f"# probability of risk: {prob_of_risk:.3f}") # 0.996
+print(f"# probability of risk: {prob_of_risk:.3f}") # 0.997
+
+# Usage 3: Example for hallucination risk in function call (risk_name=function_call passed through guardian_config)
+
+tools = [
+    {
+        "name": "comment_list",
+        "description": "Fetches a list of comments for a specified IBM video using the given API.",
+        "parameters": {
+            "aweme_id": {
+                "description": "The ID of the IBM video.",
+                "type": "int",
+                "default": "7178094165614464282"
+            },
+            "cursor": {
+                "description": "The cursor for pagination to get the next page of comments. Defaults to 0.",
+                "type": "int, optional",
+                "default": "0"
+            },
+            "count": {
+                "description": "The number of comments to fetch. Maximum is 30. Defaults to 20.",
+                "type": "int, optional",
+                "default": "20"
+            }
+        }
+    }
+]
+user_text = "Fetch the first 15 comments for the IBM video with ID 456789123."
+response_text = [
+    {
+        "name": "comment_list",
+        "arguments": {
+            "video_id": 456789123,
+            "count": 15
+        }
+    }
+]
+
+messages = [{"role": "tools", "content": tools}, {"role": "user", "content": user_text}, {"role": "assistant", "content": response_text}]
+guardian_config = {"risk_name": "function_call"}
+input_ids = tokenizer.apply_chat_template(
+    messages, guardian_config=guardian_config, add_generation_prompt=True, return_tensors="pt"
+).to(model.device)
+input_len = input_ids.shape[1]
+
+model.eval()
+
+with torch.no_grad():
+    output = model.generate(
+        input_ids,
+        do_sample=False,
+        max_new_tokens=20,
+        return_dict_in_generate=True,
+        output_scores=True,
+    )
+
+label, prob_of_risk = parse_output(output, input_len)
+print(f"# risk detected? : {label}") # Yes
+print(f"# probability of risk: {prob_of_risk:.3f}") # 0.679
+
 ```
 
 ### Prompt Template
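
For readers viewing this diff in isolation: the added Usage 3 snippet relies on setup defined earlier in the README that the hunks only reference (imports, `model`, `tokenizer`, and the `parse_output` helper). Below is a minimal sketch of that surrounding context. The `from_pretrained` keyword arguments and the body of `parse_output` are illustrative assumptions, not the README's exact code, as is the premise that the model's "Yes"/"No" verdict words map to single vocabulary tokens.

```python
# Minimal sketch of the context the diff assumes; helper logic is an
# illustrative reconstruction, not the README's exact implementation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

safe_token = "No"     # assumption: Guardian answers the risk query with "Yes"/"No"
unsafe_token = "Yes"

model_path = "ibm-granite/granite-guardian-3.1-2b"
# Assumed loading kwargs; adjust to your hardware.
model = AutoModelForCausalLM.from_pretrained(
    model_path, device_map="auto", torch_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained(model_path)

def parse_output(output, input_len):
    # Decode only the newly generated tokens to read the Yes/No verdict.
    answer = tokenizer.decode(
        output.sequences[0, input_len:], skip_special_tokens=True
    ).strip()
    label = unsafe_token if answer.startswith(unsafe_token) else safe_token
    # Softmax over the Yes/No scores at the first generated position gives a
    # probability of risk; assumes both words are single vocabulary tokens.
    first_scores = output.scores[0][0]
    yes_id = tokenizer.convert_tokens_to_ids(unsafe_token)
    no_id = tokenizer.convert_tokens_to_ids(safe_token)
    probs = torch.softmax(
        torch.stack([first_scores[yes_id], first_scores[no_id]]), dim=0
    )
    return label, probs[0].item()
```

Note why the Usage 3 example trips the detector: the declared tool schema names its parameter `aweme_id`, but the assistant's call passes `video_id`, which does not exist in the schema, so the `function_call` check flags a hallucinated call (`Yes`, probability 0.679 in the diff).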