parkjunsoo91 and shayekh committed
Commit 92f40e7
1 Parent(s): ddb9f33

tokenizer and torch variables fixed (#2)


- tokenizer and torch variables fixed (781505cae10ccb57c14c962eb4c7476b99bfbeee): the usage example referenced an undefined `tokenizer` variable instead of the `rm_tokenizer` defined earlier, and used `torch.bfloat16` without importing `torch`.


Co-authored-by: Shayekh Bin Islam <shayekh@users.noreply.huggingface.co>

Files changed (1)
  1. README.md +28 -27
README.md CHANGED
@@ -41,33 +41,34 @@ base_model:
 ### Direct Use
 
 ```python
-from transformers import AutoTokenizer, pipeline
-
-model_name = "NCSOFT/Llama-3-OffsetBias-RM-8B"
-rm_tokenizer = AutoTokenizer.from_pretrained(model_name)
-rm_pipe = pipeline(
-    "sentiment-analysis",
-    model=model_name,
-    device="auto",
-    tokenizer=rm_tokenizer,
-    model_kwargs={"torch_dtype": torch.bfloat16}
-)
-
-pipe_kwargs = {
-    "return_all_scores": True,
-    "function_to_apply": "none",
-    "batch_size": 1
-}
-
-chat = [
-    {"role": "user", "content": "Hello, how are you?"},
-    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
-    {"role": "user", "content": "I'd like to show off how chat templating works!"},
-]
-
-test_texts = [tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=False).replace(tokenizer.bos_token, "")]
-pipe_outputs = rm_pipe(test_texts, **pipe_kwargs)
-rewards = [output[0]["score"] for output in pipe_outputs]
+from transformers import AutoTokenizer, pipeline
+import torch
+
+model_name = "NCSOFT/Llama-3-OffsetBias-RM-8B"
+rm_tokenizer = AutoTokenizer.from_pretrained(model_name)
+rm_pipe = pipeline(
+    "sentiment-analysis",
+    model=model_name,
+    device="auto",
+    tokenizer=rm_tokenizer,
+    model_kwargs={"torch_dtype": torch.bfloat16}
+)
+
+pipe_kwargs = {
+    "return_all_scores": True,
+    "function_to_apply": "none",
+    "batch_size": 1
+}
+
+chat = [
+    {"role": "user", "content": "Hello, how are you?"},
+    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
+    {"role": "user", "content": "I'd like to show off how chat templating works!"},
+]
+
+test_texts = [rm_tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=False).replace(rm_tokenizer.bos_token, "")]
+pipe_outputs = rm_pipe(test_texts, **pipe_kwargs)
+rewards = [output[0]["score"] for output in pipe_outputs]
 ```
 
 ## Evaluation
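
For context, a minimal sketch of how the corrected snippet might be used as a reward model to compare two candidate responses. It assumes `rm_pipe`, `rm_tokenizer`, and `pipe_kwargs` have been created exactly as in the fixed README code above; the `reward` helper, the example prompt, and the two candidate replies are illustrative additions, not part of the README or this commit.

```python
# Sketch only: assumes rm_pipe, rm_tokenizer, and pipe_kwargs from the
# corrected README snippet above are already defined.

def reward(chat):
    # Render the conversation with the model's chat template and strip the
    # BOS token so the pipeline does not duplicate it when tokenizing.
    text = rm_tokenizer.apply_chat_template(
        chat, tokenize=False, add_generation_prompt=False
    ).replace(rm_tokenizer.bos_token, "")
    # With return_all_scores=True each output is a list of score dicts;
    # take the raw (unnormalized) score of the first, as the README does.
    return rm_pipe([text], **pipe_kwargs)[0][0]["score"]

# Hypothetical example: score two replies to the same prompt.
prompt = {"role": "user", "content": "Summarize Hamlet in one sentence."}
candidate_a = [prompt, {"role": "assistant", "content": "Hamlet is a play."}]
candidate_b = [prompt, {"role": "assistant", "content": (
    "A Danish prince feigns madness while plotting revenge "
    "for his father's murder."
)}]

# A higher reward means the model prefers that response.
best = max(candidate_a, candidate_b, key=reward)
```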