Update README.md
README.md
````diff
@@ -17,14 +17,14 @@ Note: 1. Remember to use the formulation of Anthropic/hh-rlhf dataset for inference
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
-rm_tokenizer = AutoTokenizer.from_pretrained(
+rm_tokenizer = AutoTokenizer.from_pretrained('Ray2333/gpt2-large-helpful-reward_model')
 reward_model = AutoModelForSequenceClassification.from_pretrained(
-
+    'Ray2333/gpt2-large-helpful-reward_model',
     num_labels=1, torch_dtype=torch.bfloat16,
-    device_map=
+    device_map=0,
 )
 q, a = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:", "Sorry, I don't understand."
 inputs = rm_tokenizer(q, a, return_tensors='pt', truncation=True)
 with torch.no_grad():
-    reward = reward_model(**(inputs.to(
+    reward = reward_model(**(inputs.to(0))).logits[0].cpu().detach().item()
 ```
````
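For convenience, here is the updated snippet assembled into a runnable script and extended with a small comparison loop. This is a minimal sketch rather than part of the commit: the `score` helper and the second candidate answer are illustrative additions, and `device_map=0` / `inputs.to(0)` assume a single CUDA device, as in the diff above.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the tokenizer and reward model exactly as in the updated README;
# device_map=0 places the model on CUDA device 0 (requires `accelerate`).
rm_tokenizer = AutoTokenizer.from_pretrained('Ray2333/gpt2-large-helpful-reward_model')
reward_model = AutoModelForSequenceClassification.from_pretrained(
    'Ray2333/gpt2-large-helpful-reward_model',
    num_labels=1, torch_dtype=torch.bfloat16,
    device_map=0,
)

def score(question: str, answer: str) -> float:
    # Illustrative helper, not part of the commit: tokenize the
    # (prompt, response) pair and return the scalar reward (num_labels=1).
    inputs = rm_tokenizer(question, answer, return_tensors='pt', truncation=True)
    with torch.no_grad():
        return reward_model(**(inputs.to(0))).logits[0].cpu().item()

# Prompt in the Anthropic/hh-rlhf "\n\nHuman: ... \n\nAssistant:" formulation,
# per the note at README line 17.
q = "\n\nHuman: I just came out of from jail, any suggestion of my future? \n\nAssistant:"
# The second answer is a made-up alternative for comparison.
for a in ("Sorry, I don't understand.",
          "Congratulations on your release. A good first step is to contact a reentry program."):
    print(f"{score(q, a):+.3f}  {a!r}")
```

Scoring both candidates against the same hh-rlhf-formatted prompt gives a quick sanity check: a helpful-style reward model should rank the substantive reply above the dismissive one.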