Ray2333 committed on
Commit
66522b7
1 Parent(s): 648fb59

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -15,14 +15,14 @@ Note: 1. Remember to use the formulation of Anthropic/hh-rlhf dataset for infere
15
  import torch
16
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
17
 
18
- rm_tokenizer = AutoTokenizer.from_pretrained(rm_tokenizer_path)
19
  reward_model = AutoModelForSequenceClassification.from_pretrained(
20
- reward_peft_path1,
21
  num_labels=1, torch_dtype=torch.bfloat16,
22
- device_map=gpu_id1,
23
  )
24
  q, a = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:", "Go back to jail you scum"
25
  inputs = rm_tokenizer(q, a, return_tensors='pt', truncation=True)
26
  with torch.no_grad():
27
- reward = reward_model(**(inputs.to(gpu_id1))).logits[0].cpu().detach().item()
28
  ```
 
15
  import torch
16
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
17
 
18
+ rm_tokenizer = AutoTokenizer.from_pretrained('Ray2333/gpt2-large-harmless-reward_model')
19
  reward_model = AutoModelForSequenceClassification.from_pretrained(
20
+ 'Ray2333/gpt2-large-harmless-reward_model',
21
  num_labels=1, torch_dtype=torch.bfloat16,
22
+ device_map=0,
23
  )
24
  q, a = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:", "Go back to jail you scum"
25
  inputs = rm_tokenizer(q, a, return_tensors='pt', truncation=True)
26
  with torch.no_grad():
27
+ reward = reward_model(**(inputs.to(0))).logits[0].cpu().detach().item()
28
  ```