richingme commited on
Commit
f8f0e4e
·
verified ·
1 Parent(s): 32b2952

Upload BA agent post-training scripts

Browse files
README.md ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # BA Agent Post-Training Experiment
2
+
3
+ This directory is a standalone experiment workspace copied out of the main training path.
4
+
5
+ Training objective:
6
+
7
+ - Keep `data_insight` isolated from long-chain planning and report writing.
8
+ - Reuse one shared base model with role-specific LoRA adapters.
9
+ - Optimize evidence-grounded analysis and conclusion reliability before attempting long-report end-to-end RL.
10
+
11
+ Recommended order:
12
+
13
+ 1. Train `data_insight` with SFT.
14
+ 2. Train `writer` with SFT.
15
+ 3. Run `writer` DPO and reward modeling on BA preference data.
16
+ 4. Optionally run PPO with the reward adapter on prompt-only tasks.
17
+
18
+ Supported SFT schema:
19
+
20
+ ```json
21
+ {"system":"optional role prompt","prompt":"question or analysis context","response":"target answer"}
22
+ ```
23
+
24
+ Supported automatically:
25
+
26
+ - `instruction` + `input` + `output`
27
+ - `question` + `answer`
28
+ - `messages` / `conversations` where the final assistant turn is the target
29
+
30
+ Supported DPO / reward schema:
31
+
32
+ ```json
33
+ {"system":"optional role prompt","prompt":"same prompt shown to both candidates","chosen":"preferred answer","rejected":"dispreferred answer"}
34
+ ```
35
+
36
+ Supported automatically:
37
+
38
+ - `question` + `response_chosen` + `response_rejected`
39
+ - `Anthropic/hh-rlhf` style `chosen` / `rejected`
40
+ - `PKU-Alignment/PKU-SafeRLHF-*` style pairwise columns
41
+
42
+ Supported PPO prompt schema:
43
+
44
+ ```json
45
+ {"system":"optional role prompt","prompt":"generation prompt only"}
46
+ ```
47
+
48
+ Suggested role split:
49
+
50
+ - `data_insight`: facts, supported insights, evidence refs, uncertainty only
51
+ - `writer`: briefs, chatbot answers, and section drafts that consume structured evidence
52
+
53
+ Files in this experiment:
54
+
55
+ - `utils.py`: local copy of shared training helpers and arguments
56
+ - `data_adapter.py`: local schema normalizer for SFT / DPO / reward / PPO
57
+ - `sft.py`, `dpo.py`, `reward_model.py`, `ppo_multi_adapter.py`: experiment training entrypoints
58
+ - `merge_adapter.py`, `ma_ppo_config.py`, `ma_ppo_trainer.py`: copied dependencies needed by this experiment
59
+ - `data/*.sample.jsonl`: schema examples and smoke-test inputs
60
+ - `scripts/run_ba_role_*.sh`: standalone run scripts
61
+
62
+ Quick start:
63
+
64
+ ```bash
65
+ bash ./experiments/ba_agent_posttrain/scripts/run_ba_role_sft.sh \
66
+ ROLE_NAME=data_insight \
67
+ SFT_DATASET_NAME=./experiments/ba_agent_posttrain/data/data_insight_sft.sample.jsonl
68
+
69
+ bash ./experiments/ba_agent_posttrain/scripts/run_ba_role_sft.sh \
70
+ ROLE_NAME=writer \
71
+ SFT_DATASET_NAME=./experiments/ba_agent_posttrain/data/writer_sft.sample.jsonl
72
+
73
+ bash ./experiments/ba_agent_posttrain/scripts/run_ba_role_dpo.sh \
74
+ ROLE_NAME=writer \
75
+ PREFERENCE_DATASET_NAME=./experiments/ba_agent_posttrain/data/writer_preference.sample.jsonl
76
+ ```
77
+
78
+ For real runs, replace the sample files with your own:
79
+
80
+ - `data_insight_sft.jsonl`
81
+ - `writer_sft.jsonl`
82
+ - `writer_preference.jsonl`
83
+ - `writer_prompts.jsonl`
config/ds.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "train_micro_batch_size_per_gpu": "auto",
3
+ "zero_optimization": {
4
+ "stage": 1
5
+ }
6
+ }
data/data_insight_sft.sample.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"system":"You are a data insight specialist. Extract only supported facts, insight candidates, evidence references, and uncertainty.","prompt":"Business goal: explain why East China hotel room nights declined last week.\nTable snippet:\nregion,last_week,week_before,conversion_rate,avg_selling_price\nEast China,12430,13820,3.1%,418\nSouth China,15890,16020,3.7%,421\nLast week East China conversion rate was 3.1%; the week before it was 3.8%.\nReturn strict JSON with keys facts, insights, evidence_refs, uncertainties.","response":"{\"facts\":[\"East China room nights fell from 13,820 to 12,430 week over week.\",\"East China conversion rate dropped from 3.8% to 3.1%.\",\"Average selling price in East China was 418, close to South China at 421.\"],\"insights\":[\"The decline is consistent with weaker conversion, not an obvious pricing change.\"],\"evidence_refs\":[\"row:East China\",\"metric:conversion_rate\",\"metric:avg_selling_price\"],\"uncertainties\":[\"No traffic or channel mix data is provided, so root-cause attribution remains incomplete.\"]}"}
data/writer_preference.sample.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"system":"You are a business analysis writer. Use only supplied evidence and prefer reliable, evidence-grounded conclusions.","prompt":"Question: explain whether pricing caused the East China decline.\nEvidence:\n- East China room nights fell from 13,820 to 12,430 week over week.\n- Conversion rate dropped from 3.8% to 3.1%.\n- Average selling price stayed flat at 418.\nChoose the better answer for reliability.","chosen":"Pricing is not supported as the main cause from the current evidence. The stronger supported signal is a conversion decline, while traffic and channel data are still missing for a fuller diagnosis.","rejected":"The decline was clearly caused by aggressive price increases in East China, so the team should immediately cut price to recover demand."}
data/writer_prompts.sample.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"system":"You are a business analysis writer. Use only supplied evidence and keep unsupported claims out of the answer.","prompt":"Prepare a concise chatbot answer for an executive asking why East China room nights declined last week. Mention only what the evidence supports."}
data/writer_sft.sample.jsonl ADDED
@@ -0,0 +1 @@
 
 
1
+ {"system":"You are a business analysis writer. Use only supplied evidence. Separate facts from hypotheses when support is incomplete.","prompt":"Write a short executive brief using only the evidence below.\nEvidence:\n- East China room nights fell from 13,820 to 12,430 week over week.\n- Conversion rate dropped from 3.8% to 3.1%.\n- Average selling price stayed broadly flat at 418.\nRequirements:\n1. Keep to 3 bullets.\n2. Cite uncertainty explicitly.\n3. Do not claim a cause without support.","response":"- East China room nights declined 10.1% week over week, and the clearest supported change in the funnel is a conversion drop from 3.8% to 3.1%.\n- Pricing does not appear to be the primary driver from the available evidence because average selling price remained broadly flat at 418.\n- Additional traffic and channel-mix data is still needed before making a stronger causal conclusion."}
data_adapter.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import re
4
+ from pathlib import Path
5
+ from typing import Any, Dict, Iterable, Optional, Tuple
6
+
7
+ from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
8
+
9
+ from utils import SYSTEM_PROMPT
10
+
11
+
12
# Map of supported local file suffixes to the Hugging Face `datasets`
# loader name used to read them (".jsonl" reuses the generic "json" loader).
_LOCAL_DATASET_LOADERS = {
    ".json": "json",
    ".jsonl": "json",
    ".csv": "csv",
    ".parquet": "parquet",
}
18
+
19
+
20
def load_sft_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load an SFT dataset and normalize every row to system/prompt/response."""
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_sft_example(example, default_system_prompt=default_system_prompt)

    # Drop the original columns so only the normalized schema remains.
    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
31
+
32
+
33
def load_preference_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load a preference dataset and normalize rows to system/prompt/chosen/rejected."""
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_preference_example(example, default_system_prompt=default_system_prompt)

    # Replace the source columns with the normalized preference schema.
    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
44
+
45
+
46
def load_prompt_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load a prompt-only (PPO) dataset and normalize rows to system/prompt."""
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_prompt_example(example, default_system_prompt=default_system_prompt)

    # Only the normalized system/prompt columns survive the map.
    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
57
+
58
+
59
def load_dataset_split(dataset_name: str, dataset_sub_name: str = "", split: str = "train") -> Dataset:
    """Resolve a dataset reference to a single split, from local disk or the Hub."""
    local_path = Path(os.path.expanduser(dataset_name))
    if local_path.exists():
        # Local file or directory: load it, then pick the requested split.
        return _select_split(_load_local_dataset(local_path, split=split), split)

    # Hub dataset: forward the optional config name only when it is set.
    if not dataset_sub_name:
        return load_dataset(dataset_name, split=split)
    return load_dataset(dataset_name, dataset_sub_name, split=split)
68
+
69
+
70
def normalize_sft_example(example: Dict[str, Any], default_system_prompt: str = SYSTEM_PROMPT) -> Dict[str, str]:
    """Normalize one SFT row to the {"system", "prompt", "response"} schema.

    Raises:
        ValueError: when no supported schema matches the row's columns.
    """
    # Chat-style rows: flatten the transcript, targeting the last assistant turn.
    if "messages" in example or "conversations" in example:
        turns = _coerce_messages(example.get("messages", example.get("conversations")))
        if turns:
            system_text, prompt_text, response_text = _messages_to_sft_fields(turns, default_system_prompt)
            return {
                "system": system_text,
                "prompt": prompt_text,
                "response": response_text,
            }

    prompt = _get_first_value(example, ("prompt", "question"))
    response = _get_first_value(example, ("response", "answer", "output", "completion"))

    # Alpaca-style rows: instruction (+ optional input) -> output.
    if prompt is None and "instruction" in example and "output" in example:
        candidate_parts = (_stringify_text(example["instruction"]), _stringify_text(example.get("input")))
        prompt = " ".join(part for part in candidate_parts if part)
        response = _stringify_text(example["output"])

    if prompt is None or response is None:
        raise ValueError(
            "Unsupported SFT dataset schema. Expected prompt/response, question/answer, instruction/output, "
            f"or messages. Found columns: {sorted(example.keys())}"
        )

    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt),
        "response": _stringify_text(response),
    }
101
+
102
+
103
def normalize_preference_example(
    example: Dict[str, Any],
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dict[str, str]:
    """Normalize one preference row to {"system", "prompt", "chosen", "rejected"}.

    Branch priority:
      1. HH-RLHF transcripts: only chosen/rejected present, no explicit
         prompt/question column, so the prompt is recovered from the text.
      2. SafeRLHF pairwise columns (response_0/response_1 + better_response_id).
      3. Flat columns under a few common aliases.

    Raises:
        ValueError: when no supported schema matches the row's columns.
    """
    # HH-RLHF style: each side is a full transcript sharing the same prompt.
    if {"chosen", "rejected"}.issubset(example.keys()) and "prompt" not in example and "question" not in example:
        system_prompt, prompt, chosen = _extract_hh_prompt_and_response(
            _stringify_text(example["chosen"]),
            default_system_prompt=default_system_prompt,
        )
        _, rejected_prompt, rejected = _extract_hh_prompt_and_response(
            _stringify_text(example["rejected"]),
            default_system_prompt=default_system_prompt,
        )
        # Fall back to the rejected transcript's prompt if the chosen one is empty.
        prompt = prompt or rejected_prompt
        return {
            "system": system_prompt,
            "prompt": prompt,
            "chosen": chosen,
            "rejected": rejected,
        }

    # SafeRLHF style: pairwise responses ordered by safety/better_response_id.
    if {
        "prompt",
        "response_0",
        "response_1",
        "better_response_id",
    }.issubset(example.keys()):
        chosen, rejected = _select_pairwise_responses(example)
        return {
            "system": _extract_system_prompt(example, default_system_prompt),
            "prompt": _stringify_text(example["prompt"]),
            "chosen": chosen,
            "rejected": rejected,
        }

    # Flat columns, accepting the common alias spellings.
    prompt = _get_first_value(example, ("prompt", "question"))
    chosen = _get_first_value(example, ("chosen", "response_chosen", "preferred"))
    rejected = _get_first_value(example, ("rejected", "response_rejected", "dispreferred"))

    if prompt is None or chosen is None or rejected is None:
        raise ValueError(
            "Unsupported preference dataset schema. Expected prompt/chosen/rejected, "
            "question/response_chosen/response_rejected, SafeRLHF columns, or HH-RLHF chosen/rejected. "
            f"Found columns: {sorted(example.keys())}"
        )

    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt),
        "chosen": _stringify_text(chosen),
        "rejected": _stringify_text(rejected),
    }
155
+
156
+
157
def normalize_prompt_example(example: Dict[str, Any], default_system_prompt: str = SYSTEM_PROMPT) -> Dict[str, str]:
    """Normalize one prompt-only (PPO) row to the {"system", "prompt"} schema.

    Raises:
        ValueError: when no supported schema matches the row's columns.
    """
    # Chat transcripts: reuse the SFT flattening and discard the response part.
    if "messages" in example or "conversations" in example:
        turns = _coerce_messages(example.get("messages", example.get("conversations")))
        if turns:
            system_text, prompt_text, _ = _messages_to_sft_fields(turns, default_system_prompt)
            return {
                "system": system_text,
                "prompt": prompt_text,
            }

    # HH-RLHF rows carry the prompt inside the chosen transcript.
    has_preference_pair = {"chosen", "rejected"}.issubset(example.keys())
    if has_preference_pair and "prompt" not in example and "question" not in example:
        system_text, prompt_text, _ = _extract_hh_prompt_and_response(
            _stringify_text(example["chosen"]),
            default_system_prompt=default_system_prompt,
        )
        return {
            "system": system_text,
            "prompt": prompt_text,
        }

    prompt_value = _get_first_value(example, ("prompt", "question"))
    if prompt_value is None and "instruction" in example:
        # Alpaca style: join instruction and optional input with a space.
        candidate_parts = (_stringify_text(example["instruction"]), _stringify_text(example.get("input")))
        prompt_value = " ".join(part for part in candidate_parts if part)

    if prompt_value is None:
        raise ValueError(
            "Unsupported prompt dataset schema. Expected prompt/question/instruction, messages, or HH-RLHF chosen. "
            f"Found columns: {sorted(example.keys())}"
        )

    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt_value),
    }
196
+
197
+
198
def _load_local_dataset(dataset_path: Path, split: str):
    """Load a dataset from a local file or directory.

    A single file is loaded via its suffix-derived loader. A directory is
    first tried as a `save_to_disk` layout; failing that, exactly one data
    file is discovered inside it and loaded instead.
    """
    if dataset_path.is_file():
        return load_dataset(
            _loader_name_from_suffix(dataset_path.suffix),
            data_files={split: str(dataset_path)},
        )

    try:
        return load_from_disk(str(dataset_path))
    except (FileNotFoundError, ValueError):
        discovered_file = _discover_local_data_file(dataset_path, split=split)
        return load_dataset(
            _loader_name_from_suffix(discovered_file.suffix),
            data_files={split: str(discovered_file)},
        )
209
+
210
+
211
def _select_split(dataset, split: str) -> Dataset:
    """Return the requested split; fall back to the first split when absent."""
    if not isinstance(dataset, DatasetDict):
        # Already a single split.
        return dataset
    if split in dataset:
        return dataset[split]
    # Requested split missing: take whichever split comes first.
    return dataset[next(iter(dataset.keys()))]
218
+
219
+
220
def _discover_local_data_file(dataset_dir: Path, split: str) -> Path:
    """Pick the data file inside *dataset_dir* for *split*.

    Prefers a file named exactly after the split; otherwise the directory
    must contain exactly one file with a supported suffix.

    Raises:
        ValueError: when no file can be chosen unambiguously.
    """
    for extension in _LOCAL_DATASET_LOADERS:
        split_named_file = dataset_dir / f"{split}{extension}"
        if split_named_file.exists():
            return split_named_file

    discovered = []
    for extension in _LOCAL_DATASET_LOADERS:
        discovered += sorted(dataset_dir.glob(f"*{extension}"))

    if len(discovered) == 1:
        return discovered[0]

    raise ValueError(
        f"Could not infer dataset file under {dataset_dir}. "
        f"Expected {split}.jsonl/.json/.csv/.parquet or exactly one supported file."
    )
237
+
238
+
239
def _loader_name_from_suffix(suffix: str) -> str:
    """Map a file suffix (e.g. ".jsonl") to a `datasets` loader name.

    The lookup is case-insensitive so files like DATA.JSONL load the same
    way as data.jsonl (previously an uppercase suffix was rejected).

    Raises:
        ValueError: if the suffix is not a supported local format.
    """
    normalized_suffix = suffix.lower()
    if normalized_suffix not in _LOCAL_DATASET_LOADERS:
        raise ValueError(f"Unsupported local dataset format: {suffix}")
    return _LOCAL_DATASET_LOADERS[normalized_suffix]
243
+
244
+
245
def _extract_system_prompt(example: Dict[str, Any], default_system_prompt: str) -> str:
    """Return the row's own system prompt, or the default when absent/empty."""
    raw_value = _get_first_value(example, ("system", "system_prompt", "system_message"))
    cleaned = _stringify_text(raw_value)
    return cleaned if cleaned else default_system_prompt
249
+
250
+
251
+ def _get_first_value(example: Dict[str, Any], keys: Iterable[str]):
252
+ for key in keys:
253
+ if key in example and example[key] is not None:
254
+ return example[key]
255
+ return None
256
+
257
+
258
+ def _stringify_text(value: Any) -> str:
259
+ if value is None:
260
+ return ""
261
+ if isinstance(value, str):
262
+ return value.strip()
263
+ if isinstance(value, list):
264
+ parts = [_stringify_text(item) for item in value]
265
+ return "\n".join(part for part in parts if part).strip()
266
+ if isinstance(value, dict):
267
+ text_value = value.get("text")
268
+ if text_value is not None:
269
+ return _stringify_text(text_value)
270
+ content_value = value.get("content")
271
+ if content_value is not None:
272
+ return _stringify_text(content_value)
273
+ return json.dumps(value, ensure_ascii=False, sort_keys=True)
274
+ return str(value).strip()
275
+
276
+
277
+ def _coerce_messages(raw_messages: Any):
278
+ if raw_messages is None:
279
+ return None
280
+ if isinstance(raw_messages, str):
281
+ raw_messages = json.loads(raw_messages)
282
+ if not isinstance(raw_messages, list):
283
+ raise ValueError(f"Unsupported messages payload: {type(raw_messages)}")
284
+ return raw_messages
285
+
286
+
287
def _messages_to_sft_fields(messages, default_system_prompt: str) -> Tuple[str, str, str]:
    """Flatten a chat transcript into (system, prompt, response) fields.

    The final assistant turn becomes the response; system turns are joined
    into the system prompt; every other turn is rendered as a
    "Role: content" prompt line.

    Raises:
        ValueError: on an empty transcript, a transcript without an
            assistant turn, or an empty final assistant turn.
    """
    if not messages:
        raise ValueError("Empty messages payload.")

    def _role_of(message):
        # Accept "role", "from", or "speaker" as the role key. CONSISTENCY FIX:
        # target-turn selection previously scanned only role/from while the
        # rendering loop also accepted "speaker", so a speaker-keyed assistant
        # turn could never be picked as the response target.
        return _normalize_role(message.get("role", message.get("from", message.get("speaker"))))

    roles = [_role_of(message) for message in messages]
    assistant_indexes = [index for index, role in enumerate(roles) if role == "assistant"]
    if not assistant_indexes:
        raise ValueError("messages must contain at least one assistant turn.")

    final_assistant_index = assistant_indexes[-1]
    system_parts = []
    prompt_lines = []
    response = ""

    for index, (message, role) in enumerate(zip(messages, roles)):
        content = _stringify_text(message.get("content", message.get("value", message.get("text"))))
        if not content:
            # Empty turns contribute nothing to any field.
            continue
        if role == "system":
            system_parts.append(content)
        elif index == final_assistant_index:
            response = content
        else:
            prompt_lines.append(f"{_render_role(role)}: {content}")

    if not response:
        raise ValueError("The final assistant turn is empty.")

    system_prompt = "\n".join(system_parts).strip() or default_system_prompt
    prompt = "\n".join(prompt_lines).strip()
    return system_prompt, prompt, response
321
+
322
+
323
+ def _normalize_role(role: Optional[str]) -> str:
324
+ normalized_role = (role or "").strip().lower()
325
+ role_map = {
326
+ "human": "user",
327
+ "user": "user",
328
+ "assistant": "assistant",
329
+ "gpt": "assistant",
330
+ "bot": "assistant",
331
+ "system": "system",
332
+ "tool": "tool",
333
+ "function": "tool",
334
+ }
335
+ return role_map.get(normalized_role, normalized_role or "user")
336
+
337
+
338
+ def _render_role(role: str) -> str:
339
+ label_map = {
340
+ "user": "User",
341
+ "assistant": "Assistant",
342
+ "tool": "Tool",
343
+ }
344
+ return label_map.get(role, role.title())
345
+
346
+
347
+ def _extract_hh_prompt_and_response(text: str, default_system_prompt: str) -> Tuple[str, str, str]:
348
+ cleaned_text = text.lstrip()
349
+ chunks = re.split(r"\n\nAssistant:", cleaned_text)
350
+ if len(chunks) < 2:
351
+ raise ValueError("Invalid HH-RLHF transcript: missing assistant response.")
352
+
353
+ prompt_part = "\n\nAssistant:".join(chunks[:-1]).strip()
354
+ response = chunks[-1].strip()
355
+ prompt_lines = []
356
+ for block in re.split(r"\n\n", prompt_part):
357
+ current_block = block.strip()
358
+ if current_block.startswith("Human:"):
359
+ prompt_lines.append(f"User: {current_block[len('Human:'):].strip()}")
360
+ elif current_block.startswith("Assistant:"):
361
+ prompt_lines.append(f"Assistant: {current_block[len('Assistant:'):].strip()}")
362
+
363
+ return default_system_prompt, "\n".join(prompt_lines).strip(), response
364
+
365
+
366
def _select_pairwise_responses(example: Dict[str, Any]) -> Tuple[str, str]:
    """Order SafeRLHF-style paired responses as (chosen, rejected).

    When exactly one response is labeled safe, safety decides; otherwise
    the annotated better_response_id decides.
    """
    first = _stringify_text(example["response_0"])
    second = _stringify_text(example["response_1"])
    preferred_id = int(example["better_response_id"])

    if "is_response_0_safe" in example and "is_response_1_safe" in example:
        first_safe = bool(example["is_response_0_safe"])
        second_safe = bool(example["is_response_1_safe"])
        if first_safe != second_safe:
            # Exactly one side is safe: the safe response wins.
            return (first, second) if first_safe else (second, first)

    return (first, second) if preferred_id == 0 else (second, first)
dpo.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from trl import DPOTrainer, DPOConfig
3
+ import torch
4
+ from accelerate import Accelerator
5
+ from utils import (
6
+ ScriptArguments,
7
+ DEFINE_PAD_TOKEN,
8
+ create_peft,
9
+ format_prompt,
10
+ resolve_system_prompt,
11
+ )
12
+ from transformers import (
13
+ AutoTokenizer,
14
+ BitsAndBytesConfig,
15
+ HfArgumentParser,
16
+ AutoModelForCausalLM,
17
+ )
18
+ from data_adapter import load_preference_dataset
19
+
20
# Weights & Biases tracking identifiers for this run.
os.environ["WANDB_PROJECT"] = "ma-rlhf"
os.environ["WANDB_RUN_NAME"] = "dpo"

# Parse known ScriptArguments from the CLI; unrecognized flags are returned
# as remaining strings (return_remaining_strings=True) instead of raising.
parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]

# Unpack the parsed arguments into module-level names consumed by the
# functions defined below.
dataset_name = train_args.dataset_name
dataset_sub_name = train_args.dataset_sub_name
dataset_split = train_args.dataset_split
model_name = train_args.model_name
deepspeed_config_name = train_args.deepspeed_config_name
output_max_length = train_args.output_max_length
seq_length = train_args.seq_length
batch_size = train_args.batch_size
output_name = train_args.output_name
is_peft = train_args.use_QLora
is_use_flash_attention2 = train_args.use_flash_attention_2
num_train_epochs = train_args.num_train_epochs
beta = 0.1  # default DPO beta; NOTE(review): not referenced below — DPOConfig's own default applies
gradient_accumulation_steps = train_args.gradient_accumulation_steps
learning_rate = train_args.learning_rate
use_qlora_double_quant = train_args.use_qlora_double_quant
default_system_prompt = resolve_system_prompt(train_args.system_prompt)
43
+
44
+
45
def create_model_tokenizer(name):
    """Load the causal-LM policy model and its tokenizer for DPO training.

    Args:
        name: model identifier or path to load. BUG FIX: this parameter was
            previously ignored — the body loaded the module-level
            ``model_name`` instead (the sole caller passes ``model_name``,
            so existing behavior is unchanged).

    Returns:
        (model, tokenizer) tuple, with the pad token installed on both.
    """
    # QLoRA 4-bit quantization config; only applied when PEFT/QLoRA is enabled.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=use_qlora_double_quant,
    )

    # Pin each process to its own accelerator device for multi-GPU runs.
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)

    model = AutoModelForCausalLM.from_pretrained(
        name,
        quantization_config=bnb_config if is_peft else None,
        device_map=device_map,
        trust_remote_code=True,
        use_flash_attention_2=is_use_flash_attention2,
    )

    tokenizer = AutoTokenizer.from_pretrained(
        name,
        use_fast=True,
        model_max_length=seq_length,
        trust_remote_code=True,
    )

    # NOTE(review): adds a pad token without resizing the model embeddings —
    # safe when DEFINE_PAD_TOKEN already exists in the vocab; verify otherwise.
    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    model.pad_token_id = tokenizer.pad_token_id
    model.pad_token = tokenizer.pad_token

    return model, tokenizer
72
+
73
+
74
def create_dpo_datasets(datasets_name, dataset_sub_name, tokenizer):
    """Build the normalized DPO training dataset; the eval dataset is None.

    The tokenizer argument is not used here; it is kept for signature
    parity with the sibling training scripts.
    """
    preference_rows = load_preference_dataset(
        datasets_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )

    def _to_dpo_row(example):
        # Fold the per-row system prompt into the rendered prompt text.
        return {
            "prompt": format_prompt(example["prompt"], system_prompt=example["system"]),
            "chosen": example["chosen"],
            "rejected": example["rejected"],
        }

    return preference_rows.map(_to_dpo_row, remove_columns=["system"]), None
91
+
92
+
93
def train():
    """Run DPO fine-tuning end to end: load model and data, train, save."""
    # Causal-LM policy + tokenizer (the original comment said "sequence
    # classification", which does not match create_model_tokenizer's code).
    model, tokenizer = create_model_tokenizer(model_name)
    # NOTE(review): passes None for dataset_sub_name, ignoring the parsed
    # train_args.dataset_sub_name — confirm this is intended.
    train_datasets, test_datasets = create_dpo_datasets(
        dataset_name, None, tokenizer
    )

    # PEFT/LoRA config (None when QLoRA is disabled, per create_peft).
    peft_config = create_peft(is_peft)

    training_args = DPOConfig(
        output_dir=output_name,
        save_strategy='epoch',
        logging_steps=1,
        num_train_epochs=num_train_epochs,
        gradient_checkpointing=True,
        bf16=True,
        learning_rate=learning_rate,
        warmup_ratio=0.05,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        deepspeed=deepspeed_config_name,
        report_to='wandb',
        lr_scheduler_type='cosine',
        # max_steps=100,
        # loss_type: Literal[
        #     "sigmoid", "hinge", "ipo", "kto_pair", "bco_pair", "sppo_hard", "nca_pair", "robust"
        # ] = "sigmoid"
        loss_type='sigmoid',  # standard dpo
        dataset_num_proc=64,
        max_completion_length=output_max_length,
        # NOTE(review): prompt and completion share output_max_length — confirm
        # prompts are not truncated too aggressively for this data.
        max_prompt_length= output_max_length,
        max_length=seq_length,
    )

    # Second positional argument (ref model) is None; with a peft_config,
    # TRL handles the reference model internally — verify for the pinned
    # TRL version.
    trainer = DPOTrainer(
        model,
        None,
        args=training_args,
        train_dataset=train_datasets,
        peft_config=peft_config,
        processing_class=tokenizer,
    )

    trainer.train()
    trainer.save_model(output_name)
139
+
140
+
141
if __name__ == "__main__":
    # Script entrypoint: run a single DPO training pass.
    train()
ma_ppo_config.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import json
15
+ import os
16
+ import sys
17
+ import warnings
18
+ from dataclasses import dataclass, field
19
+ from typing import Literal, Optional
20
+
21
+ import numpy as np
22
+ import tyro
23
+ from transformers import is_wandb_available
24
+ from typing_extensions import Annotated
25
+
26
+ from trl.trainer.utils import exact_div
27
+
28
+ from trl.core import flatten_dict
29
+
30
+
31
# CLI-friendly alias: an optional dict whose command-line value is parsed
# from a JSON string by tyro (constructor=json.loads).
JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar="JSON", constructor=json.loads)]
32
+
33
+
34
+ @dataclass
35
+ class MultiAdapterPPOConfig:
36
+ r"""
37
+ Configuration class for the [`PPOTrainer`].
38
+
39
+ Using [`~transformers.HfArgumentParser`] we can turn this class into
40
+ [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
41
+ command line.
42
+
43
+ Parameters:
44
+ exp_name (`str`, *optional*, defaults to `os.path.basename(__file__)[: -len(".py")]`):
45
+ Name of this experiment.
46
+ seed (`int`, *optional*, defaults to `0`):
47
+ Random seed.
48
+ log_with (`Optional[Literal["wandb", "tensorboard"]]`, *optional*, defaults to `None`):
49
+ Log with either `"wandb"` or `"tensorboard"`. Check
50
+ [tracking](https://huggingface.co/docs/accelerate/usage_guides/tracking) for more details.
51
+ task_name (`Optional[str]`, *optional*, defaults to `None`):
52
+ Name of task to use - used only for tracking purposes.
53
+ model_name (`Optional[str]`, *optional*, defaults to `"gpt2"`):
54
+ Name of model to use - used only for tracking purposes.
55
+ query_dataset (`Optional[str]`, *optional*, defaults to `"stanfordnlp/imdb"`):
56
+ Name of dataset to query - used only for tracking purposes.
57
+ reward_model (`Optional[str]`, *optional*, defaults to `"sentiment-analysis:lvwerra/distilbert-imdb"`):
58
+ Reward model to use - used only for tracking purposes.
59
+ remove_unused_columns (`bool`, *optional*, defaults to `True`):
60
+ Remove unused columns from the dataset.
61
+ tracker_kwargs (`JSONDict`, *optional*, defaults to `{}`):
62
+ Keyword arguments for the tracker (e.g. `python ppo.py --tracker_kwargs='{"wandb": {"entity": "my_wandb_entity", "name": "my_exp_name"}}'`.
63
+ accelerator_kwargs (`JSONDict`, *optional*, defaults to `{}`):
64
+ Keyword arguments for the accelerator.
65
+ project_kwargs (`JSONDict`, *optional*, defaults to `{}`):
66
+ Keyword arguments for the accelerator project config (e.g. `logging_dir`).
67
+ tracker_project_name (`str`, *optional*, defaults to `"trl"`):
68
+ Name of project to use for tracking.
69
+ push_to_hub_if_best_kwargs (`JSONDict`, *optional*, defaults to `{}`):
70
+ Keyword arguments for pushing model to the hub during training (e.g. repo_id).
71
+ steps (`int`, *optional*, defaults to `20000`):
72
+ Number of training steps.
73
+ learning_rate (`float`, *optional*, defaults to `1.41e-5`):
74
+ Learning rate for the optimizer.
75
+ adap_kl_ctrl (`bool`, *optional*, defaults to `True`):
76
+ Use adaptive KL control, otherwise linear.
77
+ init_kl_coef (`Optional[float]`, *optional*, defaults to `0.2`):
78
+ Initial KL penalty coefficient (used for adaptive and linear control).
79
+ kl_penalty (`Literal["kl", "abs", "mse", "full"]`, *optional*, defaults to `"kl"`):
80
+ kl penalty options. Possible values are:
81
+
82
+ - `"kl"`: model_logp - ref_logp
83
+ - `"abs"`: abs(kl)
84
+ - `"mse"`: mean squared error mse(kl)
85
+ - `"full"`: the actual kl for all tokens in the distribution.
86
+
87
+ target (`float`, *optional*, defaults to `6.0`):
88
+ Target KL value for adaptive KL control.
89
+ horizon (`float`, *optional*, defaults to `10000.0`):
90
+ Horizon for adaptive KL control.
91
+ gamma (`float`, *optional*, defaults to `1.0`):
92
+ Gamma parameter for advantage calculation.
93
+ lam (`float`, *optional*, defaults to `0.95`):
94
+ Lambda parameter for advantage calculation.
95
+ cliprange (`float`, *optional*, defaults to `0.2`):
96
+ Range for clipping in PPO policy gradient loss.
97
+ cliprange_value (`float`, *optional*, defaults to `0.2`):
98
+ Range for clipping values in loss calculation.
99
+ vf_coef (`float`, *optional*, defaults to `0.1`):
100
+ Scaling factor for value loss.
101
+ batch_size (`int`, *optional*, defaults to `128`):
102
+ Number of samples per optimisation step.
103
+ forward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
104
+ DEPRECATED: use `mini_batch_size` instead, which does the same thing.
105
+ mini_batch_size (`int`, *optional*, defaults to `128`):
106
+ Number of samples optimized in each mini batch.
107
+ gradient_accumulation_steps (`int`, *optional*, defaults to `1`):
108
+ Number of gradient accumulation steps.
109
+ world_size (`Optional[int]`, *optional*, defaults to `None`):
110
+ Number of processes to use for distributed training.
111
+ ppo_epochs (`int`, *optional*, defaults to `4`):
112
+ Number of optimisation epochs per batch of samples.
113
+ optimize_device_cache (`bool`, *optional*, defaults to `False`):
114
+ Optimize device cache for slightly more memory-efficient training.
115
+ early_stopping (`bool`, *optional*, defaults to `False`):
116
+ Whether to stop the PPO optimization loop early is the KL too high.
117
+ target_kl (`float`, *optional*, defaults to `1.0`):
118
+ Stop early if we exceed this value by over 50%.
119
+ compare_steps (`int`, *optional*, defaults to `1`):
120
+ Compare the current step with the previous `compare_steps` steps.
121
+ ratio_threshold (`float`, *optional*, defaults to `10.0`):
122
+ Skip mini-batches with high PPO ratios that can cause loss spikes.
123
+ use_score_scaling (`bool`, *optional*, defaults to `False`):
124
+ Use score scaling.
125
+ use_score_norm (`bool`, *optional*, defaults to `False`):
126
+ Use score normalization. Only applicable if `use_score_scaling` is True.
127
+ score_clip (`Optional[float]`, *optional*, defaults to `None`):
128
+ Score clipping.
129
+ whiten_rewards (`bool`, *optional*, defaults to `False`):
130
+ Whiten the rewards before computing advantages.
131
+ is_encoder_decoder (`Optional[bool]`, *optional*, defaults to `None`):
132
+ When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
133
+ you need to specify if the model returned by the callable is an encoder-decoder model.
134
+ is_peft_model (`Optional[bool]`, *optional*, defaults to `None`):
135
+ Whether the model is a PEFT model.
136
+ backward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
137
+ Number of samples optimized in an `optimizer.step()` call.
138
+ global_backward_batch_size (`Optional[int]`, *optional*, defaults to `None`):
139
+ Effective `backward_batch_size` across all processes.
140
+ global_batch_size (`Optional[int]`, *optional*, defaults to `None`):
141
+ Effective `batch_size` across all processes.
142
+ dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`):
143
+ Number of processes to use for processing the dataset.
144
+ """
145
+
146
+ exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
147
+ seed: int = 0
148
+ log_with: Optional[Literal["wandb", "tensorboard"]] = None
149
+ task_name: Optional[str] = None
150
+ model_name: str = "gpt2"
151
+ query_dataset: str = "stanfordnlp/imdb"
152
+ reward_model: str = "sentiment-analysis:lvwerra/distilbert-imdb"
153
+ remove_unused_columns: bool = True
154
+ tracker_kwargs: JSONDict = field(default_factory=dict)
155
+ accelerator_kwargs: JSONDict = field(default_factory=dict)
156
+ project_kwargs: JSONDict = field(default_factory=dict)
157
+ tracker_project_name: str = "trl"
158
+ push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)
159
+ steps: int = 20000
160
+ learning_rate: float = 1.41e-5
161
+ adap_kl_ctrl: bool = True
162
+ init_kl_coef: float = 0.2
163
+ kl_penalty: Literal["kl", "abs", "mse", "full"] = "kl"
164
+ target: float = 6.0
165
+ horizon: float = 10000.0
166
+ gamma: float = 1.0
167
+ lam: float = 0.95
168
+ cliprange: float = 0.2
169
+ cliprange_value: float = 0.2
170
+ vf_coef: float = 0.1
171
+ batch_size: int = 128
172
+ forward_batch_size: Optional[int] = None
173
+ mini_batch_size: int = 128
174
+ gradient_accumulation_steps: int = 1
175
+ world_size: tyro.conf.Suppress[int] = None
176
+ ppo_epochs: int = 4
177
+ max_grad_norm: Optional[float] = None
178
+ optimize_cuda_cache: Optional[bool] = None
179
+ optimize_device_cache: bool = False
180
+ early_stopping: bool = False
181
+ target_kl: float = 1.0
182
+ compare_steps: int = 1
183
+ ratio_threshold: float = 10.0
184
+ use_score_scaling: bool = False
185
+ use_score_norm: bool = False
186
+ score_clip: Optional[float] = None
187
+ whiten_rewards: bool = False
188
+ gradient_checkpointing: bool = False
189
+ is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None
190
+ is_peft_model: Optional[tyro.conf.Suppress[bool]] = None
191
+ backward_batch_size: tyro.conf.Suppress[int] = None
192
+ global_backward_batch_size: Optional[tyro.conf.Suppress[int]] = None
193
+ global_batch_size: tyro.conf.Suppress[int] = None
194
+ dataset_num_proc: Optional[int] = None
195
+
196
+ if optimize_cuda_cache is not None:
197
+ warnings.warn(
198
+ "The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead."
199
+ )
200
+
201
+ if optimize_device_cache is True:
202
+ raise ValueError("Both `optimize_device_cache` and `optimize_cuda_cache` were provided")
203
+
204
+ optimize_device_cache = optimize_cuda_cache
205
+
206
+ def __post_init__(self):
207
+ warnings.warn(
208
+ "`PPOConfig` is deprecated and will be removed in the future. Please use `PPOv2Config` with `PPOv2Trainer` instead.",
209
+ FutureWarning,
210
+ )
211
+ if self.forward_batch_size is not None:
212
+ warnings.warn(
213
+ "Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization."
214
+ )
215
+ self.mini_batch_size = self.forward_batch_size
216
+
217
+ self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps
218
+ exact_div(
219
+ self.batch_size,
220
+ self.backward_batch_size,
221
+ "`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`",
222
+ )
223
+
224
+ # check if wandb is installed
225
+ if self.log_with == "wandb":
226
+ # raise error if wandb is not installed
227
+ if not is_wandb_available():
228
+ raise ImportError(
229
+ "Please install wandb to use wandb logging. You can do this by running `pip install wandb`."
230
+ )
231
+
232
+ self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))
233
+ assert self.kl_penalty in ["kl", "abs", "mse", "full"]
234
+
235
+ def to_dict(self):
236
+ output_dict = {}
237
+ for key, value in self.__dict__.items():
238
+ output_dict[key] = value
239
+ return flatten_dict(output_dict)
ma_ppo_trainer.py ADDED
@@ -0,0 +1,1654 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ import math
16
+ import os
17
+ import time
18
+ import typing
19
+ import warnings
20
+ from contextlib import nullcontext
21
+ from typing import Callable, List, Optional, Union, Dict
22
+ import random
23
+
24
+ import datasets
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from accelerate import Accelerator
29
+ from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available
30
+ from datasets import Dataset
31
+ from huggingface_hub import whoami
32
+ from packaging import version
33
+ from torch.optim import Adam
34
+ from transformers import (
35
+ DataCollatorForLanguageModeling,
36
+ PreTrainedTokenizer,
37
+ PreTrainedTokenizerBase,
38
+ PreTrainedTokenizerFast,
39
+ is_torch_npu_available,
40
+ is_torch_xpu_available,
41
+ )
42
+ from huggingface_hub import PyTorchModelHubMixin
43
+
44
+ from trl.core import (
45
+ # WANDB_PADDING,
46
+ PPODecorators,
47
+ # clip_by_value,
48
+ # convert_to_scalar,
49
+ # entropy_from_logits,
50
+ flatten_dict,
51
+ # logprobs_from_logits,
52
+ masked_mean,
53
+ masked_var,
54
+ masked_whiten,
55
+ # set_seed,
56
+ # stack_dicts,
57
+ # stats_to_np,
58
+ )
59
+ WANDB_PADDING = -1
60
+
61
+ from trl.trainer.utils import RunningMoments
62
+ from torch.nn.utils.rnn import pad_sequence
63
+
64
+ # from trl.import_utils import is_torch_greater_2_0
65
+ from trl.models import (
66
+ SUPPORTED_ARCHITECTURES,
67
+ PreTrainedModelWrapper,
68
+ create_reference_model,
69
+ unwrap_model_for_generation,
70
+ )
71
+ # from trl import BaseTrainer
72
+ from ma_ppo_config import MultiAdapterPPOConfig
73
+
74
+
75
# DeepSpeed is an optional dependency: import it only when the environment
# provides it, so the module stays importable without DeepSpeed.
if is_deepspeed_available():
    import deepspeed

# Model card pushed to the Hugging Face Hub for trained models.
# `{model_name}` and `{model_id}` placeholders are filled via `str.format`.
MODEL_CARD_TEMPLATE = """---
license: apache-2.0
library_name: transformers
tags:
- trl
- ppo
- transformers
- reinforcement-learning
---

# {model_name}

This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to
 guide the model outputs according to a value, function, or human feedback. The model can be used for text generation.

## Usage

To use this model for inference, first install the TRL library:

```bash
python -m pip install trl
```

You can then generate text as follows:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="{model_id}")
outputs = generator("Hello, my llama is cute")
```

If you want to use the model for training or to obtain the outputs from the value head, load the model as follows:

```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

tokenizer = AutoTokenizer.from_pretrained("{model_id}")
model = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
outputs = model(**inputs, labels=inputs["input_ids"])
```
"""
123
+
124
class BaseTrainer(PyTorchModelHubMixin):
    r"""
    Base class for all trainers - this base class implements the basic functions that we
    need for a trainer.

    A concrete trainer inheriting from this base is expected to provide:
    - step: takes in a batch of data and performs a step of training
    - loss: takes in a batch of data and returns the loss
    - compute_rewards: takes in a batch of data and returns the rewards
    - _build_models_and_tokenizer: builds the models and tokenizer
    - _build_dataset: builds the dataset
    Each user is expected to implement their own trainer class that inherits from this base
    if they want to use a new training algorithm.
    """

    def __init__(self, config):
        # Keep the run configuration around for subclasses to consume.
        self.config = config

    def step(self, *args):
        # Must be overridden: perform one optimization step on a batch.
        raise NotImplementedError("Not implemented")

    def loss(self, *args):
        # Must be overridden: compute the training loss for a batch.
        raise NotImplementedError("Not implemented")

    def compute_rewards(self, *args):
        # Must be overridden: compute rewards for a batch.
        raise NotImplementedError("Not implemented")

    def _save_pretrained(self, save_directory):
        # Must be overridden: serialize the trained model to `save_directory`.
        raise NotImplementedError("Not implemented")
153
+
154
+
155
class AdaptiveKLController:
    """
    Adaptive KL controller described in the paper:
    https://huggingface.co/papers/1909.08593

    Scales the KL coefficient up or down depending on how far the observed KL
    deviates from the target value.
    """

    def __init__(self, init_kl_coef, target, horizon):
        self.value = init_kl_coef
        self.target = target
        self.horizon = horizon

    def update(self, current, n_steps):
        # Relative deviation from the target, clamped to +/-20% per update.
        deviation = np.clip(current / self.target - 1, -0.2, 0.2)
        # Scale the coefficient proportionally to the number of steps consumed.
        self.value = self.value * (1 + deviation * n_steps / self.horizon)
171
+
172
+
173
class FixedKLController:
    """KL controller with a constant coefficient; `update` is a no-op."""

    def __init__(self, kl_coef):
        self.value = kl_coef

    def update(self, current, n_steps):
        # Fixed schedule: nothing to adapt.
        pass
181
+
182
+
183
+
184
def is_torch_greater_2_0() -> bool:
    """Return whether the installed torch version is at least 2.0.

    The previous implementation referenced the undefined name
    `_is_python_greater_3_8` (NameError at call time — the import providing it
    was removed) and compared version strings lexicographically, which
    misorders e.g. "10.0" < "2.0". Compare the numeric major component
    instead; `importlib.metadata` is always available on the Python versions
    torch supports.
    """
    from importlib.metadata import version as _dist_version

    torch_version = _dist_version("torch")
    # Versions look like "2.1.0" or "2.1.0+cu118"; the major part is numeric.
    major = int(torch_version.split(".", 1)[0])
    return major >= 2
194
+
195
+
196
def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor:
    """
    Tensor extension to torch.clamp
    https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713

    Note: composed as max(min(x, hi), lo), so when lo > hi the result is lo,
    which differs from `torch.clamp` — keep the composition.
    """
    upper_bounded = torch.min(x, tensor_max)
    return torch.max(upper_bounded, tensor_min)
203
+
204
def convert_to_scalar(stats: Dict) -> Dict:
    """
    Converts the stats from a flattened dict to single scalar dicts
    """
    scalar_stats = {}
    for key, value in stats.items():
        # Tensorboard ignores arrays and tensors, so unwrap single-element
        # tensors/arrays into plain Python scalars; leave everything else as-is.
        if isinstance(value, (torch.Tensor, np.ndarray)) and (
            value.ndim == 0 or (value.ndim == 1 and value.shape[0] == 1)
        ):
            value = value.item()
        scalar_stats[key] = value
    return scalar_stats
218
+
219
+
220
def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:
    """Calculate entropy from logits.

    Uses H = logsumexp(logits) - sum(softmax(logits) * logits) over the last
    dimension, which is numerically stabler than -sum(p * log p).
    """
    probs = F.softmax(logits, dim=-1)
    return torch.logsumexp(logits, dim=-1) - torch.sum(probs * logits, dim=-1)
225
+
226
+
227
def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool = True) -> torch.Tensor:
    """
    See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591

    Returns the full log-softmax when `gather=False`, otherwise the log-prob
    of each label token (shape: logits without the vocab dimension).
    """
    log_probs = F.log_softmax(logits, dim=2)
    if gather:
        return torch.gather(log_probs, 2, labels.unsqueeze(2)).squeeze(-1)
    return log_probs
237
+
238
+
239
def stack_dicts(stats_dicts: List[Dict]) -> Dict:
    """Stack the values of a dict.

    For every key of the first dict, flattens that key's tensor from each dict
    and pads them into one batch-first tensor (pad value: WANDB_PADDING).
    """
    stacked = {}
    for key in stats_dicts[0]:
        flattened = [torch.flatten(stats[key]) for stats in stats_dicts]
        stacked[key] = pad_sequence(flattened, batch_first=True, padding_value=WANDB_PADDING)
    return stacked
246
+
247
def stats_to_np(stats_dict: Dict) -> Dict:
    """Cast all torch.tensors in dict to numpy arrays."""
    converted = {}
    for key, value in stats_dict.items():
        if isinstance(value, torch.Tensor):
            tensor = value.detach().cpu()
            # numpy has no bfloat16: upcast before conversion.
            if tensor.dtype == torch.bfloat16:
                tensor = tensor.float()
            converted[key] = tensor.numpy()
        else:
            converted[key] = value
        # Normalize bare numeric scalars to float for downstream logging.
        if np.isscalar(converted[key]):
            converted[key] = float(converted[key])
    return converted
261
+
262
+
263
def set_seed(seed: int) -> None:
    """
    Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`.

    Args:
        seed (`int`): The seed to set.
    """
    # Seed every framework RNG first.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # Then seed all devices of whichever accelerator backend is present;
    # the CUDA call is a no-op when CUDA is unavailable.
    if is_torch_xpu_available():
        torch.xpu.manual_seed_all(seed)
    elif is_torch_npu_available():
        torch.npu.manual_seed_all(seed)
    else:
        torch.cuda.manual_seed_all(seed)
279
+
280
+
281
+
282
+ class MultiAdapterPPOTrainer(BaseTrainer):
283
+ """
284
+ The MultiAdapterPPOTrainer uses Proximal Policy Optimization to optimise language models.
285
+ Note, this trainer is heavily inspired by the original OpenAI learning to summarize work here:
286
+ https://github.com/openai/summarize-from-feedback
287
+
288
+ Attributes:
289
+ **config** (`PPOConfig`) -- Configuration object for MultiAdapterPPOTrainer. Check the documentation of `PPOConfig` for more
290
+ details.
291
+ **model** (`PreTrainedModelWrapper`) -- Model to be optimized, Hugging Face transformer model with a value head.
292
+ Check the documentation of `PreTrainedModelWrapper` for more details.
293
+ **ref_model** (`PreTrainedModelWrapper`, *optional*) -- Reference model to be used for KL penalty, Hugging Face
295
+ transformer model with a causal language modelling head. Check the documentation of `PreTrainedModelWrapper`
295
+ for more details. If no reference model is provided, the trainer will create a reference model with the same
296
+ architecture as the model to be optimized with shared layers.
297
+ **tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
298
+ data. Check the documentation of `transformers.PreTrainedTokenizer` and
299
+ `transformers.PreTrainedTokenizerFast` for more details.
300
+ **dataset** (Union[`torch.utils.data.Dataset`, `datasets.Dataset`], *optional*) -- PyTorch dataset or Hugging
301
+ Face dataset. This is used to create a PyTorch dataloader. If no dataset is provided, the dataloader must be
302
+ created outside the trainer users needs to design their own dataloader and make sure the batch
303
+ size that is used is the same as the one specified in the configuration object.
304
+ **optimizer** (`torch.optim.Optimizer`, *optional*) -- Optimizer to be used for training. If no optimizer is
305
+ provided, the trainer will create an Adam optimizer with the learning rate specified in the configuration
306
+ object.
307
+ **data_collator** (DataCollatorForLanguageModeling, *optional*) -- Data collator to be used for training and
308
+ passed along the dataloader
309
+ **num_shared_layers** (int, *optional*) -- Number of layers to be shared between the model and the reference
310
+ model, if no reference model is passed. If no number is provided, all the layers will be shared.
311
+ **lr_scheduler** (`torch.optim.lr_scheduler`, *optional*) -- Learning rate scheduler to be used for training.
312
+ """
313
+
314
+ _tag_names = ["trl", "ppo"]
315
+
316
    def __init__(
        self,
        config: Optional[MultiAdapterPPOConfig] = None,
        model: Optional[PreTrainedModelWrapper] = None,
        ref_model: Optional[PreTrainedModelWrapper] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        dataset: Optional[Union[torch.utils.data.Dataset, Dataset]] = None,
        optimizer: Optional[torch.optim.Optimizer] = None,
        data_collator: Optional[typing.Callable] = None,
        num_shared_layers: Optional[int] = None,
        lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
        training_data_collator: Optional[typing.Callable] = None,
    ):
        """
        Initialize MultiAdapterPPOTrainer.

        Args:
            config (`PPOConfig`):
                Configuration object for MultiAdapterPPOTrainer. Check the documentation of `PPOConfig` for more details.
            model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a value head.
            ref_model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a causal language modelling head. Used for KL penalty
            tokenizer (`transformers.PreTrainedTokenizerBase`):
                Hugging Face tokenizer
            dataset (Optional[Union[`torch.utils.data.Dataset`, `datasets.Dataset`]]):
                PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
                will be preprocessed by removing the columns that are not used by the model. If none is passed,
                a warning will be raised in a multi-GPU setting.
            optimizer (`Optional[torch.optim.Optimizer]`):
                Optimizer used for training. If `None`, the `Adam` is used as default.
            data_collator (Optional[function]):
                Data collator function that is going to be used for `prepare_dataloader` method. Note this collator
                is different from the one we use for training. Pass a valid `training_data_collator` instead.
            num_shared_layers (Optional[int]):
                Number of shared layers between the model and the reference model. If `None`, all layers are shared.
                used only if `ref_model` is `None`.
            lr_scheduler (`Optional[torch.optim.lr_scheduler]`):
                Learning rate scheduler used for training.
            training_data_collator (Optional[function]):
                Custom data collator used for training.
        """
        warnings.warn(
            "`MultiAdapterPPOTrainer` is deprecated and will be removed in trl v0.12. Please use `PPOv2Trainer` instead.",
            FutureWarning,
        )
        super().__init__(config)

        # initial seed for reproducible experiments
        set_seed(config.seed)

        # Step 0: check positional arguments validity
        if not isinstance(config, MultiAdapterPPOConfig):
            raise ValueError(f"config must be a PPOConfig, got {type(config)}")
        if not isinstance(tokenizer, (PreTrainedTokenizerBase)):
            raise ValueError(
                f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}"
            )
        if not isinstance(model, (SUPPORTED_ARCHITECTURES)):
            raise ValueError(
                f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}"
            )
        # Step 1: Initialize Accelerator
        self.accelerator = Accelerator(
            log_with=config.log_with,
            gradient_accumulation_steps=config.gradient_accumulation_steps,
            project_config=ProjectConfiguration(**config.project_kwargs),
            **config.accelerator_kwargs,
        )

        # Step 1.1 Runtime variables filled by the accelerator
        # NOTE(review): `config.backward_batch_size` is derived in
        # `PPOConfig.__post_init__`; it must have run before this point.
        config.world_size = self.accelerator.num_processes
        config.global_backward_batch_size = config.backward_batch_size * config.world_size
        config.global_batch_size = config.batch_size * config.world_size

        self.model = model
        self.model_params = filter(lambda p: p.requires_grad, self.model.parameters())
        self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder")
        self.is_peft_model = getattr(self.model, "is_peft_model", False)
        # Mirror model properties onto the config so they are logged with it.
        config.is_encoder_decoder = self.is_encoder_decoder
        config.is_peft_model = self.is_peft_model

        # Tensorboard cannot nest config dicts, so it gets the flat dict directly.
        is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard"
        self.accelerator.init_trackers(
            config.tracker_project_name,
            config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(),
            init_kwargs=config.tracker_kwargs,
        )
        self.is_using_text_environment = getattr(config, "use_text_environment", False)

        # Step 2: Resolve the reference model used for the KL penalty:
        # explicit ref_model > shared-layer copy > (PEFT) adapter-disabled base model.
        if isinstance(ref_model, SUPPORTED_ARCHITECTURES):
            self.ref_model = ref_model
            if num_shared_layers is not None:
                warnings.warn(
                    "num_shared_layers is ignored when ref_model is provided. Two different models are used for the "
                    "model and the reference model and no layers are shared.",
                    UserWarning,
                )
        elif ref_model is None and not self.is_peft_model:
            self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers)
        elif self.is_peft_model:
            self.ref_model = None
        else:
            raise ValueError(
                f"ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported "
                f"architectures are: {SUPPORTED_ARCHITECTURES} "
            )
        # Context manager that disables the PEFT adapter so the base model can
        # serve as the reference; a no-op context for non-PEFT models.
        self.optional_peft_ctx = (
            self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter
            if self.is_peft_model
            else nullcontext
        )

        if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)):
            raise ValueError(
                "tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast"
            )
        self.tokenizer = tokenizer

        if dataset is not None and not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset)):
            raise ValueError("dataset must be a torch.utils.data.Dataset or datasets.Dataset")
        elif dataset is None:
            warnings.warn(
                "No dataset is provided. Make sure to set config.batch_size to the correct value before training.",
                UserWarning,
            )
        self.dataset = dataset
        self._signature_columns = None
        if self.dataset is not None:
            self.dataloader = self.prepare_dataloader(self.dataset, data_collator)
        elif self.dataset is None and self.accelerator.num_processes > 1:
            warnings.warn(
                "No dataset is provided. In a multi-GPU setting, this will lead to an error. You should"
                " prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`"
                " and using `torch.utils.data.DataLoader`, or pass a dataset to the `MultiAdapterPPOTrainer`. Please "
                " refer to the documentation for more details.",
                UserWarning,
            )
            self.dataloader = None
        else:
            self.dataloader = None

        # Step 3: Initialize optimizer and data collator
        if training_data_collator is None:
            self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False)
        else:
            self.data_collator = training_data_collator
        if optimizer is None:
            # Only trainable parameters are optimized (relevant for PEFT/LoRA).
            self.optimizer = Adam(
                filter(lambda p: p.requires_grad, self.model.parameters()),
                lr=self.config.learning_rate,
            )
        else:
            self.optimizer = optimizer

        self.lr_scheduler = lr_scheduler
        if self.lr_scheduler is not None:
            # torch >= 2.0 renamed the public scheduler base class.
            lr_scheduler_class = (
                torch.optim.lr_scheduler._LRScheduler
                if not is_torch_greater_2_0()
                else torch.optim.lr_scheduler.LRScheduler
            )

            if not isinstance(self.lr_scheduler, lr_scheduler_class):
                raise ValueError(
                    "lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)"
                )

        # KL coefficient schedule: adaptive (moves toward config.target) or fixed.
        if self.config.adap_kl_ctrl:
            self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon)
        else:
            self.kl_ctl = FixedKLController(self.config.init_kl_coef)

        # Safety checkers for DS integration
        is_deepspeed_used = self.accelerator.distributed_type == "DEEPSPEED" and hasattr(
            self.accelerator.state, "deepspeed_plugin"
        )

        if config.gradient_checkpointing:
            self.model.gradient_checkpointing_enable()

            if hasattr(self.model, "enable_input_require_grads"):
                self.model.enable_input_require_grads()
            else:
                # For backward compatibility with older versions of transformers
                def make_inputs_require_grad(module, input, output):
                    output.requires_grad_(True)

                self.model.pretrained_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        # Hand everything to the accelerator (device placement, DDP wrapping, ...).
        (
            self.model,
            self.optimizer,
            self.data_collator,
            self.dataloader,
            self.lr_scheduler,
        ) = self.accelerator.prepare(
            self.model,
            self.optimizer,
            self.data_collator,
            self.dataloader,
            self.lr_scheduler,
        )
        if is_deepspeed_used:
            # Quantized models are already set on the correct device
            if not self.is_peft_model and not (
                getattr(self.ref_model.pretrained_model, "is_loaded_in_8bit", False)
                or getattr(self.ref_model.pretrained_model, "is_loaded_in_4bit", False)
            ):
                self.ref_model = self._prepare_deepspeed(self.ref_model)
        else:
            self.ref_model = self.accelerator.prepare(self.ref_model)

        # In a distributed setup, only logging needs to be performed on the main process
        # check: https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html
        # or: https://discuss.pytorch.org/t/use-distributed-data-parallel-correctly/82500/11
        self.is_distributed = self.accelerator.num_processes > 1

        # init the current step
        self.current_step = 0

        # init variables for pushing model to hub
        if config.push_to_hub_if_best_kwargs:
            if "repo_id" not in config.push_to_hub_if_best_kwargs:
                raise ValueError("You have to specify repo_id in order to push the model to the hub!")
            self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs
            self.compare_step = 0
            self.highest_reward = torch.tensor(-float("inf"))

        # post process for PP
        # NOTE(review): for sequential-parallel models, generation happens on
        # the first device of the pipeline — presumably why device 0 is forced.
        if not getattr(self.model, "is_sequential_parallel", False):
            self.current_device = self.accelerator.device
        else:
            if is_torch_xpu_available():
                self.current_device = torch.device("xpu:0")
            elif is_torch_npu_available():
                self.current_device = torch.device("npu:0")
            else:
                self.current_device = torch.device("cuda:0")

        PPODecorators.optimize_device_cache = self.config.optimize_device_cache

        # Running mean/std of rewards, used for score scaling/normalization.
        self.running = RunningMoments(self.accelerator)
559
+
560
+ def _filter_kwargs(self, kwargs, target_func):
561
+ """
562
+ filter the keyword arguments that are supported by the target function.
563
+
564
+ Args:
565
+ kwargs (dict):
566
+ Keyword arguments
567
+ target_func (function):
568
+ Target function
569
+ """
570
+ return {k: v for k, v in kwargs.items() if k in inspect.signature(target_func).parameters.keys()}
571
+
572
def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None):
    """
    Build the training dataloader.

    Args:
        dataset (Union[`torch.utils.data.Dataset`, `datasets.Dataset`]):
            PyTorch dataset or Hugging Face dataset. Hugging Face datasets are
            first stripped of the columns the model does not consume.
        data_collator (Optional[function]):
            Data collator function.

    Returns:
        `torch.utils.data.DataLoader`: PyTorch dataloader
    """
    if isinstance(dataset, Dataset):
        # Hugging Face datasets can carry extra columns the model cannot accept.
        dataset = self._remove_unused_columns(dataset)
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=self.config.batch_size,
        collate_fn=data_collator,
        shuffle=True,
        drop_last=True,
    )
596
+
597
+ # Adapted from transformers.Trainer._set_signature_columns_if_needed
598
+ def _set_signature_columns_if_needed(self):
599
+ if self._signature_columns is None:
600
+ # Inspect model forward signature to keep only the arguments it accepts.
601
+ signature = inspect.signature(self.model.forward)
602
+ self._signature_columns = list(signature.parameters.keys())
603
+ # label => sentiment | we need query and response for logging purpose
604
+ self._signature_columns += ["label", "query", "response"]
605
+
606
# Adapted from transformers.Trainer._remove_unused_columns
def _remove_unused_columns(self, dataset: "Dataset"):
    """Drop dataset columns the model's forward pass does not accept."""
    if not self.config.remove_unused_columns:
        return dataset
    self._set_signature_columns_if_needed()
    signature_columns = self._signature_columns

    ignored_columns = list(set(dataset.column_names) - set(signature_columns))

    if version.parse(datasets.__version__) < version.parse("1.4.0"):
        # Old `datasets` releases cannot drop columns; restrict the view instead.
        kept_columns = [name for name in signature_columns if name in dataset.column_names]
        dataset.set_format(
            type=dataset.format["type"],
            columns=kept_columns,
            format_kwargs=dataset.format["format_kwargs"],
        )
        return dataset
    return dataset.remove_columns(ignored_columns)
626
+
627
def generate(
    self,
    query_tensor: Union[torch.Tensor, List[torch.Tensor]],
    length_sampler: Optional[Callable] = None,
    batch_size: int = 4,
    return_prompt: bool = True,
    generate_ref_response: bool = False,
    **generation_kwargs,
):
    """
    Generate a response for the given query token tensor(s) with the policy model.

    Args:
        query_tensor (`torch.LongTensor`):
            Tensor of shape (`seq_len`) containing query tokens, or a list of such tensors.
        length_sampler (`Callable`, *optional*):
            Callable that returns the number of newly generated tokens.
        batch_size (`int`, *optional*):
            Batch size used for generation, defaults to `4`.
        return_prompt (`bool`, *optional*):
            If `False`, only the newly generated tokens are returned, defaults to `True`.
        generate_ref_response (`bool`, *optional*):
            If `True`, the reference response is also generated, defaults to `False`.
        generation_kwargs (dict[str, Any]):
            Keyword arguments forwarded to `model.generate`.

    Returns:
        `torch.LongTensor`: A tensor of shape (`batch_size`, `gen_len`) containing
        response tokens; a `(response, ref_response)` pair when
        `generate_ref_response` is set.
    """
    if generate_ref_response:
        # With a PEFT model the base model (adapters disabled) serves as reference.
        ref_model = self.model if self.is_peft_model else self.ref_model

    if isinstance(query_tensor, List):
        # Batched path: pad the queries together and generate in chunks.
        response = self._generate_batched(
            self.model,
            query_tensor,
            length_sampler=length_sampler,
            batch_size=batch_size,
            return_prompt=return_prompt,
            **generation_kwargs,
        )
        if generate_ref_response:
            ref_response = self._generate_batched(
                ref_model,
                query_tensor,
                length_sampler=length_sampler,
                batch_size=batch_size,
                return_prompt=return_prompt,
                **generation_kwargs,
            )
    else:
        if len(query_tensor.shape) == 2:
            raise ValueError(
                "query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)"
            )

        if length_sampler is not None:
            generation_kwargs["max_new_tokens"] = length_sampler()

        with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
            response = unwrapped_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs)

        if generate_ref_response:
            with unwrap_model_for_generation(
                ref_model, self.accelerator, is_peft_model=self.is_peft_model
            ) as unwrapped_model:
                ref_response = unwrapped_model.generate(
                    input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs
                )

        if not return_prompt and not self.is_encoder_decoder:
            # Decoder-only models echo the prompt first; slice it off.
            response = response[:, query_tensor.shape[0] :]
            if generate_ref_response:
                ref_response = ref_response[:, query_tensor.shape[0] :]

    if generate_ref_response:
        return response, ref_response
    return response
706
+
707
def _generate_batched(
    self,
    model: PreTrainedModelWrapper,
    query_tensors: List[torch.Tensor],
    length_sampler: Optional[Callable] = None,
    batch_size: int = 4,
    return_prompt: bool = True,
    pad_to_multiple_of: Optional[int] = None,
    remove_padding: bool = True,
    **generation_kwargs,
):
    """
    Generate responses for a list of query tensors, chunk by chunk.

    Queries are left-padded together (decoder-only models must continue the
    prompt from the right edge), generated in batches, then each output is
    stripped of padding and, optionally, of the prompt and trailing tokens
    after the first EOS.
    """
    outputs = []

    original_padding_side = self.tokenizer.padding_side
    if not self.is_encoder_decoder:
        # Decoder-only generation requires left padding.
        self.tokenizer.padding_side = "left"

    # In case we have fewer examples than the requested batch size.
    batch_size = min(len(query_tensors), batch_size)

    for chunk_start in range(0, len(query_tensors), batch_size):
        if length_sampler is not None:
            generation_kwargs["max_new_tokens"] = length_sampler()

        # Guard against a final chunk smaller than batch_size.
        chunk_end = min(len(query_tensors), chunk_start + batch_size)
        chunk = query_tensors[chunk_start:chunk_end]
        chunk_masks = [torch.ones_like(ids) for ids in chunk]

        padded_inputs = self.tokenizer.pad(
            {"input_ids": chunk, "attention_mask": chunk_masks},
            padding=True,
            max_length=None,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        ).to(self.current_device)

        with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model:
            generations = unwrapped_model.generate(**padded_inputs, **generation_kwargs)

        for generation, mask in zip(generations, padded_inputs["attention_mask"]):
            if not self.is_encoder_decoder:
                output = generation[(1 - mask).sum() :]  # remove left padding
            else:
                output = generation

            if not return_prompt and not self.is_encoder_decoder:
                output = output[(mask).sum() :]  # remove the prompt tokens

            if remove_padding and self.tokenizer.eos_token_id in output:
                # Truncate right after the first EOS token (keeping the EOS itself).
                eos_positions = torch.nonzero(output == self.tokenizer.eos_token_id, as_tuple=False)
                output = output[: eos_positions[0, 0].item() + 1]

            outputs.append(output)

    self.tokenizer.padding_side = original_padding_side
    return outputs
767
+
768
+ def _step_safety_checker(
769
+ self,
770
+ batch_size: int,
771
+ queries: List[torch.LongTensor],
772
+ responses: List[torch.LongTensor],
773
+ scores: List[torch.FloatTensor],
774
+ masks: Optional[List[torch.LongTensor]] = None,
775
+ ):
776
+ """
777
+ Check if the input data is valid for training.
778
+
779
+ Args:
780
+ batch_size (int):
781
+ Batch size from the config file.
782
+ queries (List[`torch.LongTensor`]):
783
+ List of tensors containing the encoded queries of shape (`query_length`)
784
+ responses (List[`torch.LongTensor`]):
785
+ List of tensors containing the encoded responses of shape (`response_length`)
786
+ scores (List[`torch.FloatTensor`]):
787
+ List of tensors containing the scores.
788
+ masks (List[`torch.LongTensor`], *optional*):
789
+ list of optional tensors containing the masks of shape (`response_length`)
790
+
791
+ Returns:
792
+ `tuple`: The input processed data.
793
+ """
794
+ for name, tensor_list in zip(["queries", "responses", "scores"], [queries, responses, scores]):
795
+ if not isinstance(tensor_list, list):
796
+ raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
797
+ if not isinstance(tensor_list[0], torch.Tensor):
798
+ raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
799
+ if batch_size is not None and len(tensor_list) != batch_size:
800
+ raise ValueError(
801
+ f"Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}"
802
+ )
803
+
804
+ # add queries, scores and responses on the correct device
805
+ queries = [tensor.to(self.current_device) for tensor in queries]
806
+ responses = [tensor.to(self.current_device) for tensor in responses]
807
+ scores = [tensor.to(self.current_device) for tensor in scores]
808
+ masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None
809
+
810
+ # squeeze scores if needed
811
+ for i, score in enumerate(scores):
812
+ if score.dim() > 1:
813
+ raise ValueError(f"Scores must be 1-dimensional - got {score.dim()} for {score}")
814
+ elif score.dim() == 1:
815
+ scores[i] = score.squeeze()
816
+
817
+ return queries, responses, scores, masks
818
+
819
@PPODecorators.empty_device_cache()
def step(
    self,
    queries: List[torch.LongTensor],
    responses: List[torch.LongTensor],
    scores: List[torch.FloatTensor],
    response_masks: Optional[List[torch.LongTensor]] = None,
):
    """
    Run a PPO optimisation step given a list of queries, model responses, and rewards.

    Args:
        queries (List[`torch.LongTensor`]):
            List of tensors containing the encoded queries of shape (`query_length`)
        responses (List[`torch.LongTensor`]):
            List of tensors containing the encoded responses of shape (`response_length`)
        scores (List[`torch.FloatTensor`]):
            List of tensors containing the scores.
        response_masks (List[`torch.FloatTensor`], *optional*)):
            List of tensors containing masks of the response tokens.

    Returns:
        `dict[str, Any]`: A summary of the training statistics
    """
    bs = self.config.batch_size

    queries, responses, scores, response_masks = self._step_safety_checker(
        bs, queries, responses, scores, response_masks
    )
    scores = torch.tensor(scores, device=self.current_device)
    if self.config.use_score_scaling:
        # Rescale scores by a running std (optionally centering by a running mean).
        scores_mean, scores_std = self.running.update(scores)
        tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device)
        score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps
        if self.config.use_score_norm:
            scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor
        else:
            scores /= score_scaling_factor

    if self.config.score_clip is not None:
        # Clip scores symmetrically, preserving the incoming dtype.
        scores_dtype = scores.dtype
        scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype)

    # Optionally push the model to the hub whenever a new best mean reward is seen.
    if hasattr(self, "highest_reward"):
        if self.compare_step % self.config.compare_steps == 0:
            curr_mean_reward = scores.mean()
            if curr_mean_reward > self.highest_reward:
                self.highest_reward = curr_mean_reward
                self.push_to_hub(**self.push_to_hub_kwargs)
        self.compare_step += 1

    timing = {}
    t0 = time.time()

    t = time.time()

    model_inputs = self.prepare_model_inputs(queries, responses)

    if self.is_distributed:
        # Pad inputs to the same length on every process before the forward pass.
        pad_first = self.tokenizer.padding_side == "left"

        model_inputs["input_ids"] = self.accelerator.pad_across_processes(
            model_inputs["input_ids"],
            dim=1,
            pad_index=self.tokenizer.pad_token_id,
            pad_first=pad_first,
        )
        model_inputs["attention_mask"] = self.accelerator.pad_across_processes(
            model_inputs["attention_mask"], dim=1, pad_index=0, pad_first=pad_first
        )
        if self.is_encoder_decoder:
            model_inputs["decoder_input_ids"] = self.accelerator.pad_across_processes(
                model_inputs["decoder_input_ids"],
                dim=1,
                pad_index=self.tokenizer.pad_token_id,
                pad_first=pad_first,
            )
            model_inputs["decoder_attention_mask"] = self.accelerator.pad_across_processes(
                model_inputs["decoder_attention_mask"],
                dim=1,
                pad_index=0,
                pad_first=pad_first,
            )

    model_inputs_names = list(model_inputs.keys())

    full_kl_penalty = self.config.kl_penalty == "full"

    with torch.no_grad():
        # Rollout statistics from the current policy and the reference model.
        all_logprobs, logits_or_none, values, masks = self.batched_forward_pass(
            self.model,
            queries,
            responses,
            model_inputs,
            response_masks=response_masks,
            return_logits=full_kl_penalty,
        )
        with self.optional_peft_ctx():
            ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass(
                self.model if self.is_peft_model else self.ref_model,
                queries,
                responses,
                model_inputs,
                return_logits=full_kl_penalty,
            )

    timing["time/ppo/forward_pass"] = time.time() - t

    with torch.no_grad():
        t = time.time()
        if full_kl_penalty:
            # Full KL needs distributions over the whole vocabulary.
            active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False)
            ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False)

            rewards, non_score_reward, kls = self.compute_rewards(
                scores, active_full_logprobs, ref_full_logprobs, masks
            )
        else:
            rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks)
        timing["time/ppo/compute_rewards"] = time.time() - t

        t = time.time()
        values, advantages, returns = self.compute_advantages(values, rewards, masks)
        timing["time/ppo/compute_advantages"] = time.time() - t

    # upcast to float32 to avoid dataset issues
    batch_dict = {
        "queries": queries,
        "responses": responses,
        "logprobs": all_logprobs.to(torch.float32),
        "values": values.to(torch.float32),
        "masks": masks,
        "advantages": advantages,
        "returns": returns,
    }
    batch_dict.update(model_inputs)

    t = time.time()
    all_stats = []
    early_stop = False
    for _ in range(self.config.ppo_epochs):
        if early_stop:
            break
        b_inds = np.random.permutation(bs)
        for backward_batch_start in range(0, bs, self.config.backward_batch_size):
            backward_batch_end = backward_batch_start + self.config.backward_batch_size
            backward_batch_inds = b_inds[backward_batch_start:backward_batch_end]

            for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size):
                mini_batch_end = mini_batch_start + self.config.mini_batch_size
                mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end]
                mini_batch_dict = {
                    "logprobs": batch_dict["logprobs"][mini_batch_inds],
                    "values": batch_dict["values"][mini_batch_inds],
                    "masks": batch_dict["masks"][mini_batch_inds],
                    # hacks: the queries and responses are ragged.
                    "queries": [batch_dict["queries"][i] for i in mini_batch_inds],
                    "responses": [batch_dict["responses"][i] for i in mini_batch_inds],
                    "advantages": batch_dict["advantages"][mini_batch_inds],
                    "returns": batch_dict["returns"][mini_batch_inds],
                }
                for k in model_inputs_names:
                    mini_batch_dict[k] = batch_dict[k][mini_batch_inds]
                with self.accelerator.accumulate(self.model):
                    model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names}

                    logprobs, logits, vpreds, _ = self.batched_forward_pass(
                        self.model,
                        mini_batch_dict["queries"],
                        mini_batch_dict["responses"],
                        model_inputs,
                        return_logits=True,
                    )
                    train_stats = self.train_minibatch(
                        mini_batch_dict["logprobs"],
                        mini_batch_dict["values"],
                        logprobs,
                        logits,
                        vpreds,
                        mini_batch_dict["masks"],
                        mini_batch_dict["advantages"],
                        mini_batch_dict["returns"],
                    )
                    all_stats.append(train_stats)

            # typically, early stopping is done at the epoch level
            if self.config.early_stopping:
                policykl = train_stats["policy/policykl"]
                early_stop = self._early_stop(policykl)
                if early_stop:
                    break

    timing["time/ppo/optimize_step"] = time.time() - t

    t = time.time()
    train_stats = stack_dicts(all_stats)

    # reshape advantages/ratios such that they are not averaged.
    train_stats["policy/advantages"] = torch.flatten(train_stats["policy/advantages"]).unsqueeze(0)
    train_stats["policy/ratio"] = torch.flatten(train_stats["policy/ratio"]).unsqueeze(0)

    stats = self.record_step_stats(
        scores=scores,
        logprobs=all_logprobs,
        ref_logprobs=ref_logprobs,
        non_score_reward=non_score_reward,
        train_stats=train_stats,
        kl_coef=self.kl_ctl.value,
        masks=masks,
        queries=queries,
        responses=responses,
        kls=kls,
    )
    # Gather/Reduce stats from all processes
    if self.is_distributed:
        stats = self.gather_stats(stats)
    stats = stats_to_np(stats)
    timing["time/ppo/calc_stats"] = time.time() - t
    stats["ppo/learning_rate"] = self.optimizer.param_groups[0]["lr"]

    # Update the KL control - multiply the batch_size by the number of processes
    self.kl_ctl.update(
        stats["objective/kl"],
        self.config.batch_size * self.accelerator.num_processes,
    )

    # Log the total ppo time
    timing["time/ppo/total"] = time.time() - t0
    stats.update(timing)

    # post-process stats for tensorboard and other loggers
    if self.config.log_with != "wandb":
        stats = convert_to_scalar(stats)

    if self.lr_scheduler is not None:
        self.lr_scheduler.step()

    return stats
1063
+
1064
+ def _early_stop(self, policykl):
1065
+ r"""
1066
+ Handles the early stopping logic. If the policy KL is greater than the target KL, then the gradient is zeroed and
1067
+ the optimization step is skipped.
1068
+ This also handles the multi-gpu case where the policy KL is averaged across all processes.
1069
+
1070
+ Args:
1071
+ policy_kl (torch.Tensor):
1072
+ the policy KL
1073
+
1074
+ Returns:
1075
+ `bool`: whether to early stop or not
1076
+ """
1077
+ early_stop = False
1078
+ if not self.config.early_stopping:
1079
+ return early_stop
1080
+
1081
+ if not self.is_distributed and policykl > 1.5 * self.config.target_kl:
1082
+ self.optimizer.zero_grad()
1083
+ early_stop = True
1084
+ elif self.is_distributed:
1085
+ import torch.distributed as dist
1086
+
1087
+ # Wait for all processes to finish
1088
+ dist.barrier()
1089
+
1090
+ # all gather the policykl
1091
+ dist.all_reduce(policykl, dist.ReduceOp.SUM)
1092
+ policykl /= self.accelerator.num_processes
1093
+
1094
+ if policykl > 1.5 * self.config.target_kl:
1095
+ self.optimizer.zero_grad()
1096
+ early_stop = True
1097
+ return early_stop
1098
+
1099
def gather_stats(self, stats):
    """
    Gather stats from all processes. Useful in the context of distributed training.

    Args:
        stats (dict[str, Any]):
            a dictionary of stats to be gathered. The stats should contain torch tensors.

    Returns:
        `dict[str, Any]`: A dictionary of stats with the tensors gathered.
    """
    import torch.distributed as dist

    # Wait for all processes to finish
    dist.barrier()

    for key, value in stats.items():
        if isinstance(value, torch.Tensor):
            # Sum the tensor across workers, then average.
            dist.all_reduce(value.to(self.accelerator.device), dist.ReduceOp.SUM)
            value /= self.accelerator.num_processes
            stats[key] = value
    return stats
1121
+
1122
def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor):
    """
    Collate queries and responses into padded tensors ready for a forward pass.

    Encoder-decoder models keep queries (encoder side) and responses (decoder
    side) separate; decoder-only models concatenate each query with its response.
    """

    def _collate(tensors):
        # Each sequence gets an all-ones attention mask; the collator pads them.
        features = [{"input_ids": ids, "attention_mask": torch.ones_like(ids)} for ids in tensors]
        return self.data_collator(features).to(self.current_device)

    if self.is_encoder_decoder:
        input_data = _collate(queries)
        decoder_inputs = _collate(responses)
        input_data["decoder_input_ids"] = decoder_inputs["input_ids"]
        input_data["decoder_attention_mask"] = decoder_inputs["attention_mask"]
    else:
        input_data = _collate([torch.cat([q, r]) for q, r in zip(queries, responses)])

    input_data.pop("labels", None)  # we don't want to compute LM losses
    return input_data
1142
+
1143
@PPODecorators.empty_device_cache()
def batched_forward_pass(
    self,
    model: PreTrainedModelWrapper,
    queries: torch.Tensor,
    responses: torch.Tensor,
    model_inputs: dict,
    return_logits: bool = False,
    response_masks: Optional[torch.Tensor] = None,
):
    """
    Calculate model outputs in multiple batches of `mini_batch_size`.

    Args:
        queries (`torch.LongTensor`):
            List of tensors containing the encoded queries, shape (`batch_size`, `query_length`)
        responses (`torch.LongTensor`):
            List of tensors containing the encoded responses, shape (`batch_size`, `response_length`)
        return_logits (`bool`, *optional*, defaults to `False`):
            Whether to return all_logits. Set to `False` if logits are not needed to reduce memory consumption.

    Returns:
        (tuple):
            - all_logprobs (`torch.FloatTensor`): per-token log probabilities,
              shape (`batch_size`, `response_length`)
            - logits (`torch.FloatTensor` or `None`): raw logits when requested
            - all_values (`torch.FloatTensor`): value-head outputs,
              shape (`batch_size`, `response_length`)
            - masks (`torch.LongTensor`): 1 on response-token positions only
    """
    bs = len(queries)
    fbs = self.config.mini_batch_size
    all_logprobs = []
    all_logits = []
    all_masks = []
    all_values = []

    model.eval()

    for i in range(math.ceil(bs / fbs)):
        input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
        query_batch = queries[i * fbs : (i + 1) * fbs]
        response_batch = responses[i * fbs : (i + 1) * fbs]
        if response_masks is not None:
            response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
        logits, _, values = model(**input_kwargs)

        if self.is_encoder_decoder:
            input_ids = input_kwargs["decoder_input_ids"]
            attention_mask = input_kwargs["decoder_attention_mask"]
        else:
            input_ids = input_kwargs["input_ids"]
            attention_mask = input_kwargs["attention_mask"]

        # logits at position t predict token t+1, so shift targets and masks by one.
        logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
        masks = torch.zeros_like(attention_mask)
        masks[:, :-1] = attention_mask[:, 1:]

        for j in range(len(query_batch)):
            if self.is_encoder_decoder:
                # Decoder sentence starts always in the index 1 after padding in the Enc-Dec Models
                start = 1
                end = attention_mask[j, :].sum() - 1
            else:
                start = len(query_batch[j]) - 1  # logprobs starts from the second query token
                if attention_mask[j, 0] == 0:  # offset left padding
                    start += attention_mask[j, :].nonzero()[0]
                end = start + len(response_batch[j])

            # Only response tokens contribute to the PPO loss.
            masks[j, :start] = 0
            masks[j, end:] = 0
            if response_masks is not None:
                masks[j, start:end] = masks[j, start:end] * response_masks_batch[j]

        if return_logits:
            all_logits.append(logits)
        else:
            del logits  # free memory eagerly when logits are not needed
        all_values.append(values)
        all_logprobs.append(logprobs)
        all_masks.append(masks)

    return (
        torch.cat(all_logprobs),
        torch.cat(all_logits)[:, :-1] if return_logits else None,
        torch.cat(all_values)[:, :-1],
        torch.cat(all_masks)[:, :-1],
    )
1230
+
1231
@PPODecorators.empty_device_cache()
def train_minibatch(
    self,
    old_logprobs: torch.FloatTensor,
    values: torch.FloatTensor,
    logprobs: torch.FloatTensor,
    logits: torch.FloatTensor,
    vpreds: torch.FloatTensor,
    mask: torch.LongTensor,
    advantages: torch.FloatTensor,
    returns: torch.FloatTensor,
):
    """
    Run one optimizer update on a single PPO minibatch.

    Args:
        old_logprobs (`torch.FloatTensor`):
            Rollout-policy log probabilities, shape [mini_batch_size, response_length]
        values (`torch.FloatTensor`):
            Rollout value-head outputs, shape [mini_batch_size, response_length]
        logprobs / logits / vpreds (`torch.FloatTensor`):
            Current-policy outputs for the same tokens.
        mask (`torch.LongTensor`):
            1 for response tokens that count toward the loss.
        advantages / returns (`torch.FloatTensor`):
            GAE advantages and discounted returns.

    Returns:
        train_stats (dict[str, `torch.Tensor`]):
            Dictionary of training statistics
    """
    self.model.train()
    policy_loss, value_loss, train_stats = self.loss(
        old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns
    )
    self.accelerator.backward(policy_loss + value_loss)
    if self.config.max_grad_norm is not None and self.accelerator.sync_gradients:
        self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm)
    self.optimizer.step()
    # we call optimizer.zero_grad() every time and let `accelerator` handle accumulation
    # see https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation#the-finished-code
    self.optimizer.zero_grad()
    return train_stats
1276
+
1277
def compute_rewards(
    self,
    scores: torch.FloatTensor,
    logprobs: torch.FloatTensor,
    ref_logprobs: torch.FloatTensor,
    masks: torch.LongTensor,
):
    """
    Compute per-token rewards from reward-model scores and a KL penalty.

    Args:
        scores (`torch.FloatTensor`):
            Scores from the reward model, shape (`batch_size`)
        logprobs (`torch.FloatTensor`):
            Log probabilities of the model, shape (`batch_size`, `response_length`)
        ref_logprobs (`torch.FloatTensor`):
            Log probabilities of the reference model, shape (`batch_size`, `response_length`)

    Returns:
        `torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`)
        `torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`)
        `torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`)
    """
    all_rewards, all_non_score_rewards, all_kls = [], [], []
    for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks):
        # Per-token KL penalty between policy and reference distributions.
        per_token_kl = self._kl_penalty(logprob, ref_logprob)
        all_kls.append(per_token_kl)
        non_score_reward = -self.kl_ctl.value * per_token_kl
        all_non_score_rewards.append(non_score_reward)
        # The scalar preference score is credited entirely to the last
        # non-masked (i.e. final response) token.
        per_token_reward = non_score_reward.clone()
        last_response_index = mask.nonzero()[-1]
        per_token_reward[last_response_index] += score
        all_rewards.append(per_token_reward)
    return torch.stack(all_rewards), torch.stack(all_non_score_rewards), torch.stack(all_kls)
1314
+
1315
+ def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor:
1316
+ if self.config.kl_penalty == "kl":
1317
+ return logprob - ref_logprob
1318
+
1319
+ if self.config.kl_penalty == "abs":
1320
+ return (logprob - ref_logprob).abs()
1321
+
1322
+ if self.config.kl_penalty == "mse":
1323
+ return 0.5 * (logprob - ref_logprob).square()
1324
+
1325
+ if self.config.kl_penalty == "full":
1326
+ # Flip is required due to this issue? :https://github.com/pytorch/pytorch/issues/57459
1327
+ return F.kl_div(ref_logprob, logprob, log_target=True, reduction="none").sum(-1)
1328
+
1329
+ raise NotImplementedError
1330
+
1331
def compute_advantages(
    self,
    values: torch.FloatTensor,
    rewards: torch.FloatTensor,
    mask: torch.FloatTensor,
):
    """
    Compute GAE (generalized advantage estimation) advantages and returns.

    Works backwards over the response:
    A_t = delta_t + gamma * lam * A_{t+1}, with
    delta_t = r_t + gamma * V_{t+1} - V_t.
    """
    gen_len = rewards.shape[-1]

    # Zero out padded / non-response positions before the recursion.
    values = values * mask
    rewards = rewards * mask

    if self.config.whiten_rewards:
        rewards = masked_whiten(rewards, mask, shift_mean=False)

    lastgaelam = 0
    advantages_reversed = []
    for t in reversed(range(gen_len)):
        nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
        delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t]
        lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam
        advantages_reversed.append(lastgaelam)
    advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1)

    returns = advantages + values
    # Normalize advantages to zero mean / unit variance over unmasked tokens.
    advantages = masked_whiten(advantages, mask)
    advantages = advantages.detach()
    return values, advantages, returns
1358
+
1359
    def loss(
        self,
        old_logprobs: torch.FloatTensor,
        values: torch.FloatTensor,
        logits: torch.FloatTensor,
        vpreds: torch.FloatTensor,
        logprobs: torch.FloatTensor,
        mask: torch.LongTensor,
        advantages: torch.FloatTensor,
        returns: torch.FloatTensor,
    ):
        """
        Calculate the PPO clipped-surrogate policy loss and the clipped value loss.

        Args:
            old_logprobs (`torch.FloatTensor`):
                Log probabilities recorded at rollout time, shape (`batch_size`, `response_length`)
            values (`torch.FloatTensor`):
                Value-head predictions recorded at rollout time, shape (`batch_size`, `response_length`)
            logits (`torch.FloatTensor`):
                Current policy logits, shape (`batch_size`, `response_length`, `vocab_size`)
            vpreds (`torch.FloatTensor`):
                Current value-head predictions, shape (`batch_size`, `response_length`)
            logprobs (`torch.FloatTensor`):
                Current policy log probabilities, shape (`batch_size`, `response_length`)
            mask (`torch.LongTensor`):
                1 for response tokens that contribute to the loss, 0 elsewhere,
                shape (`batch_size`, `response_length`)
            advantages (`torch.FloatTensor`):
                Whitened GAE advantages, shape (`batch_size`, `response_length`)
            returns (`torch.FloatTensor`):
                Value targets (advantages + rollout values), shape (`batch_size`, `response_length`)

        Returns:
            `tuple`: (policy loss, value loss scaled by `vf_coef`, flattened stats dict).
        """
        # Clip the new value predictions to a band around the rollout values,
        # then take the pessimistic (max) of clipped/unclipped squared error.
        vpredclipped = clip_by_value(
            vpreds,
            values - self.config.cliprange_value,
            values + self.config.cliprange_value,
        )

        vf_losses1 = (vpreds - returns) ** 2
        vf_losses2 = (vpredclipped - returns) ** 2
        vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask)
        vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask)

        # Importance-sampling ratio between the current policy and the rollout policy.
        ratio = torch.exp(logprobs - old_logprobs)

        # PPO clipped surrogate objective (negated because we minimize).
        pg_losses = -advantages * ratio
        pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange)

        pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask)
        pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask)

        loss = pg_loss + self.config.vf_coef * vf_loss

        # Safety valve: if the policy drifted too far from the rollout policy,
        # zero out all losses so this batch contributes no gradient ("skip").
        avg_ratio = masked_mean(ratio, mask).item()
        if avg_ratio > self.config.ratio_threshold:
            warnings.warn(
                f"The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. Skipping batch."
            )
            pg_loss = pg_loss * 0.0
            vf_loss = vf_loss * 0.0
            loss = loss * 0.0

        entropy = masked_mean(entropy_from_logits(logits), mask)

        # approxkl: squared-difference (second-order) KL estimator;
        # policykl: plain first-order estimator of KL(old || new).
        approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask)
        policykl = masked_mean(old_logprobs - logprobs, mask)

        return_mean, return_var = masked_mean(returns, mask), masked_var(returns, mask)
        value_mean, value_var = masked_mean(values, mask), masked_var(values, mask)

        stats = dict(
            loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()),
            policy=dict(
                entropy=entropy.detach(),
                approxkl=approxkl.detach(),
                policykl=policykl.detach(),
                clipfrac=pg_clipfrac.detach(),
                advantages=advantages.detach(),
                advantages_mean=masked_mean(advantages, mask).detach(),
                ratio=ratio.detach(),
            ),
            returns=dict(mean=return_mean.detach(), var=return_var.detach()),
            val=dict(
                vpred=masked_mean(vpreds, mask).detach(),
                error=masked_mean((vpreds - returns) ** 2, mask).detach(),
                clipfrac=vf_clipfrac.detach(),
                mean=value_mean.detach(),
                var=value_var.detach(),
            ),
        )
        return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats)
1447
+
1448
+ def record_step_stats(self, kl_coef: float, **data):
1449
+ """
1450
+ Record training step statistics.
1451
+
1452
+
1453
+ Args:
1454
+ kl_coef (`float`):
1455
+ KL coefficient
1456
+ data (`dict`):
1457
+ Dictionary of training step data
1458
+
1459
+ Returns:
1460
+ stats (`dict`):
1461
+ Dictionary of training step statistics
1462
+ """
1463
+ mask = data.pop("masks")
1464
+
1465
+ kls = data.pop("kls")
1466
+ kl_list = ((kls) * mask).sum(axis=-1)
1467
+ mean_kl = kl_list.mean()
1468
+ mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean()
1469
+
1470
+ mean_non_score_reward = masked_mean(
1471
+ data["non_score_reward"], mask
1472
+ ) # non_score_reward is size `batch_size`, `response_length`
1473
+ mean_scores = data["scores"].mean() # scores is size `batch_size`
1474
+ std_scores = data["scores"].std()
1475
+
1476
+ if mean_kl.item() < -1.0:
1477
+ # warn users
1478
+ warnings.warn(
1479
+ f"KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training."
1480
+ " sometimes this happens because the generation kwargs are not correctly set. Please make sure"
1481
+ " that the generation kwargs are set correctly, or review your training hyperparameters."
1482
+ )
1483
+
1484
+ stats = {
1485
+ "objective/kl": mean_kl,
1486
+ "objective/kl_dist": kl_list,
1487
+ "objective/logprobs": data["logprobs"],
1488
+ "objective/ref_logprobs": data["ref_logprobs"],
1489
+ "objective/kl_coef": kl_coef,
1490
+ "objective/entropy": mean_entropy,
1491
+ "ppo/mean_non_score_reward": mean_non_score_reward,
1492
+ "ppo/mean_scores": mean_scores,
1493
+ "ppo/std_scores": std_scores,
1494
+ }
1495
+
1496
+ # Log text properties
1497
+ query_lens = torch.tensor([len(query) for query in data["queries"]], dtype=torch.float)
1498
+ response_lens = torch.tensor([len(response) for response in data["responses"]], dtype=torch.float)
1499
+
1500
+ stats["tokens/queries_len_mean"] = torch.mean(query_lens).cpu().numpy().item()
1501
+ stats["tokens/queries_len_std"] = torch.std(query_lens).cpu().numpy().item()
1502
+ stats["tokens/queries_dist"] = query_lens.cpu().numpy()
1503
+ stats["tokens/responses_len_mean"] = torch.mean(response_lens).cpu().numpy().item()
1504
+ stats["tokens/responses_len_std"] = torch.std(response_lens).cpu().numpy().item()
1505
+ stats["tokens/responses_dist"] = response_lens.cpu().numpy()
1506
+
1507
+ for k, v in data["train_stats"].items():
1508
+ stats[f"ppo/{k}"] = torch.mean(v, axis=0)
1509
+ stats["ppo/val/var_explained"] = 1 - stats["ppo/val/error"] / stats["ppo/returns/var"]
1510
+ return stats
1511
+
1512
    def log_stats(
        self,
        stats: dict,
        batch: dict,
        rewards: List[torch.FloatTensor],
        columns_to_log: typing.Iterable[str] = ("query", "response"),
    ):
        """
        A function that logs all the training stats. Call it at the end of each epoch.

        Args:
            stats (dict[str, Any]):
                A dictionary of training stats.
            batch (dict[str, Any]):
                A dictionary of batch data, this contains the queries and responses.
            rewards (`List[torch.FloatTensor]`):
                A tensor of rewards.
            columns_to_log (`typing.Iterable[str]`, *optional*):
                Batch columns rendered into the wandb game-log table; each must
                exist in `batch`.
        """

        # all gather stats
        if not isinstance(rewards, torch.Tensor):
            rewards = torch.tensor(rewards).to(self.current_device)
        rewards = self.accelerator.gather(rewards).flatten()

        if self.config.log_with == "wandb":
            import wandb

            if any(column_to_log not in batch.keys() for column_to_log in columns_to_log):
                raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.")

            batch_list = [batch[column_to_log] for column_to_log in columns_to_log]
            # In distributed runs, collect every rank's rows so the main process
            # logs the full table, matching the gathered rewards above.
            if self.is_distributed:
                gathered_batch_list = []
                for b in batch_list:
                    flattened = gather_object(b)
                    gathered_batch_list.append(flattened)
                batch_list = gathered_batch_list

        # Log only if we are in the main process
        if self.accelerator.is_main_process:
            logs = {}

            # Log stats
            if "query" not in batch.keys() and "response" not in batch.keys():
                # warn the user that the game logs will not be logged
                warnings.warn(
                    "The game logs will not be logged because the batch does not contain the keys 'query' and "
                    "'response'. "
                )
            elif self.config.log_with == "wandb":
                # One table row per sample: the logged columns plus its scalar reward.
                table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())]
                logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)})

            logs.update(stats)

            # manually cast in fp32 for bf16 torch tensors
            for k, v in logs.items():
                if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16:
                    logs[k] = v.float()

            logs["env/reward_mean"] = torch.mean(rewards).cpu().numpy().item()
            logs["env/reward_std"] = torch.std(rewards).cpu().numpy().item()
            logs["env/reward_dist"] = rewards.cpu().numpy()

            if self.config.log_with == "tensorboard":
                # update the current step
                self.current_step += 1

            self.accelerator.log(
                logs,
                step=self.current_step if self.config.log_with == "tensorboard" else None,
            )
1584
+
1585
+ def create_model_card(self, path: str, model_name: Optional[str] = "TRL Model") -> None:
1586
+ """Creates and saves a model card for a TRL model.
1587
+
1588
+ Args:
1589
+ path (`str`): The path to save the model card to.
1590
+ model_name (`str`, *optional*): The name of the model, defaults to `TRL Model`.
1591
+ """
1592
+ try:
1593
+ user = whoami()["name"]
1594
+ # handle the offline case
1595
+ except Exception:
1596
+ warnings.warn("Cannot retrieve user information assuming you are running in offline mode.")
1597
+ return
1598
+
1599
+ if not os.path.exists(path):
1600
+ os.makedirs(path)
1601
+
1602
+ model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}")
1603
+ with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f:
1604
+ f.write(model_card_content)
1605
+
1606
+ def _save_pretrained(self, save_directory: str) -> None:
1607
+ self.accelerator.unwrap_model(self.model).save_pretrained(save_directory)
1608
+ self.tokenizer.save_pretrained(save_directory)
1609
+ self.create_model_card(save_directory)
1610
+
1611
+ def _show_tokens(self, tokens, masks):
1612
+ from rich import print
1613
+ from rich.text import Text
1614
+
1615
+ text = Text()
1616
+
1617
+ for _i, (token, mask) in enumerate(zip(tokens, masks)):
1618
+ if mask == 1:
1619
+ text.append(self.tokenizer.decode(token.item()), style="black on deep_sky_blue1")
1620
+ text.append(" ")
1621
+ else:
1622
+ text.append(self.tokenizer.decode(token.item()), style="black on cyan3")
1623
+ text.append(" ")
1624
+ print(text)
1625
+
1626
    def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
        """Initialize `model` in a DeepSpeed engine for inference-only use.

        Under ZeRO-3 the model is sharded like the active model (with bucket
        sizes scaled from its hidden size); under any other stage, ZeRO is
        disabled (stage 0) for this copy. Returns the engine in eval mode.
        """
        # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473
        deepspeed_plugin = self.accelerator.state.deepspeed_plugin
        config_kwargs = deepspeed_plugin.deepspeed_config
        if model is not None:
            if hasattr(model, "config"):
                # Some configs expose `hidden_sizes` (list); fall back to scalar `hidden_size`.
                hidden_size = (
                    max(model.config.hidden_sizes)
                    if getattr(model.config, "hidden_sizes", None)
                    else getattr(model.config, "hidden_size", None)
                )
                if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
                    # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0`
                    # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081
                    # NOTE(review): these dotted keys rely on DeepSpeed accepting a
                    # flattened "a.b" config form — confirm with the pinned DeepSpeed version.
                    config_kwargs.update(
                        {
                            "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
                            "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
                            "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
                        }
                    )

        # If ZeRO-3 is used, we shard both the active and reference model.
        # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0)
        if config_kwargs["zero_optimization"]["stage"] != 3:
            config_kwargs["zero_optimization"]["stage"] = 0
        model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
        model.eval()
        return model
merge_adapter.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from peft import PeftModel
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
3
+ import torch
4
+ from utils import ScriptArguments
5
+
6
# Parse CLI flags into the shared ScriptArguments dataclass; unknown flags are
# tolerated (return_remaining_strings=True) so this script can share a command
# line with the training scripts.
parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]

# Base checkpoint, LoRA adapter directory, and destination for merged weights.
base_model_name = train_args.base_model_name
model_name = train_args.model_name
merged_model_name = train_args.merged_model_name
12
+
13
+
14
def merge(model_base_name, model_adapter_name, model_merge_name):
    """Merge a LoRA adapter into its base model and save the standalone result.

    Args:
        model_base_name: path or hub id of the base model.
        model_adapter_name: directory of the trained LoRA adapter (also supplies the tokenizer).
        model_merge_name: output directory for the merged model and tokenizer.
    """
    # use cpu avoid gpu vram OOM
    # if cpu memory small, use swap
    base_model = AutoModelForCausalLM.from_pretrained(
        model_base_name,
        device_map='auto',
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,  # llama-7b base
    )
    print('load base model')

    tokenizer = AutoTokenizer.from_pretrained(
        model_adapter_name,
        trust_remote_code=True,
    )

    # Attach the adapter weights on top of the base model.
    adapter_model = PeftModel.from_pretrained(
        base_model,
        model_adapter_name,  # adapter
        device_map='auto',
        trust_remote_code=True,
    )
    print('load lora')

    # Fold the LoRA deltas into the base weights and drop the PEFT wrapper.
    merged_model = adapter_model.merge_and_unload()
    print('merge base model + lora model finish')

    merged_model.save_pretrained(model_merge_name)
    tokenizer.save_pretrained(model_merge_name)
    print('save model finish')
43
+
44
+
45
+ if __name__ == "__main__":
46
+ merge(base_model_name, model_name, merged_model_name)
47
+ print('------merge done!---------')
ppo_multi_adapter.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ this code required trl==0.11.0 and support multi-adapter LoRA training
3
+ '''
4
+
5
+ from codecs import BOM_BE
6
+ import re
7
+ import torch
8
+ import os
9
+ from trl import AutoModelForCausalLMWithValueHead, PPOTrainer, PPOConfig
10
+ from trl.core import LengthSampler
11
+ from transformers import AutoTokenizer, BitsAndBytesConfig, HfArgumentParser
12
+ from accelerate import Accelerator
13
+ from utils import (
14
+ create_model_tokenizer,
15
+ create_peft,
16
+ is_main_process,
17
+ ScriptArguments,
18
+ DEFINE_EOS_TOKEN,
19
+ DEFINE_PAD_TOKEN,
20
+ format_prompt,
21
+ resolve_system_prompt,
22
+ )
23
+ import time
24
+ from ma_ppo_config import MultiAdapterPPOConfig
25
+ from ma_ppo_trainer import MultiAdapterPPOTrainer
26
+ from data_adapter import load_prompt_dataset
27
+
28
+ os.environ["WANDB_PROJECT"] = "ma-rlhf"
29
+ os.environ["WANDB_RUN_NAME"] = "ppo"
30
+
31
+
32
+ # class MyPPOTrainer(PPOTrainer):
33
+ parser = HfArgumentParser(ScriptArguments)
34
+ train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]
35
+
36
+ dataset_name = train_args.dataset_name
37
+ dataset_sub_name = train_args.dataset_sub_name
38
+ dataset_split = train_args.dataset_split
39
+ model_name = train_args.model_name
40
+ rm_model_name = train_args.reward_model_name
41
+ deepspeed_config_name = train_args.deepspeed_config_name
42
+ batch_size = train_args.batch_size
43
+ mini_batch_size = train_args.mini_batch_size
44
+ ppo_epochs = train_args.ppo_epochs
45
+ output_max_length = train_args.output_max_length
46
+ seq_length = train_args.seq_length
47
+ output_name = train_args.output_name
48
+ is_peft = train_args.use_QLora
49
+ is_use_flash_attention2 = train_args.use_flash_attention_2
50
+
51
+ gradient_accumulation_steps = train_args.gradient_accumulation_steps
52
+ default_system_prompt = resolve_system_prompt(train_args.system_prompt)
53
+
54
def create_model_tokenizer(name, rm_model_name, peft_config):
    """Load the 4-bit policy model (value head + reward adapter) and its tokenizer.

    Args:
        name: path or hub id of the policy base model.
        rm_model_name: path of the reward-model LoRA adapter, attached via `reward_adapter`.
        peft_config: LoRA config for the trainable policy adapter.

    Returns:
        `(model, tokenizer)` ready for multi-adapter PPO training.
    """
    # QLoRA: quantize base weights to 4-bit NF4, compute in bf16.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16,
        # bnb_4bit_use_double_quant=True,
    )

    # Pin the whole model to this process's local GPU.
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)

    model = AutoModelForCausalLMWithValueHead.from_pretrained(
        name,
        quantization_config=bnb_config,
        peft_config=peft_config,
        reward_adapter=rm_model_name,
        device_map=device_map,  # 70b use 'auto' would auto shard parameter
        use_flash_attention_2=is_use_flash_attention2,
        trust_remote_code=True,
        # low_cpu_mem_usage=True,
    )

    # Load the tokenizer from the same checkpoint as the model. The original
    # read the global `model_name` here, silently ignoring the `name` parameter.
    tokenizer = AutoTokenizer.from_pretrained(
        name,
        # use_fast=True,
        trust_remote_code=True,
    )
    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    # Mirror the pad token onto the model (the original assigned pad_token_id twice).
    model.pad_token_id = tokenizer.pad_token_id
    model.pad_token = tokenizer.pad_token
    # model.config.pad_token_id = model.config.eos_token_id

    return model, tokenizer
87
+
88
+
89
def create_dataset(dataset_name, tokenizer):
    """Build the PPO prompt dataset: formatted query strings plus their token ids.

    Prompts whose tokenization reaches `seq_length` tokens are dropped, and the
    dataset is put into torch format for the PPO dataloader.
    """
    datasets = load_prompt_dataset(
        dataset_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )

    def _to_features(examples):
        # Format each prompt once and reuse it for both the text query and its
        # token ids (the original called format_prompt twice per example).
        queries = [
            format_prompt(question, system_prompt=system_prompt)
            for system_prompt, question in zip(examples["system"], examples["prompt"])
        ]
        return {
            "query": queries,
            "input_ids": [
                tokenizer(query, return_tensors="pt")["input_ids"][0]
                for query in queries
            ],
        }

    datasets = datasets.map(
        _to_features,
        batched=True,
        remove_columns=datasets.column_names,
    )

    # Keep only prompts that fit in the model context.
    datasets = datasets.filter(lambda x: len(x["input_ids"]) < seq_length, batched=False)
    datasets.set_format(type="torch")
    return datasets
117
+
118
+
119
def collator(examples):
    """Collate PPO examples into a batch dict of query strings and LongTensor input ids.

    Uses `torch.as_tensor` so inputs that are already tensors (after
    `set_format(type="torch")`) are not copied — `torch.tensor` on an existing
    tensor copies the data and emits a UserWarning. List inputs are still
    converted to `torch.long` tensors as before.
    """
    batch = {'query': [], 'input_ids': []}
    for example in examples:
        batch['query'].append(example['query'])
        batch['input_ids'].append(torch.as_tensor(example['input_ids'], dtype=torch.long))
    return batch
125
+
126
def train():
    """Run multi-adapter PPO: generate responses, score them with the reward
    adapter on the shared base model, and apply PPO steps, checkpointing
    periodically and at the end."""
    peft_config = create_peft(is_peft)
    # Policy model: causal LM with value head plus the reward adapter attached.
    model, tokenizer = create_model_tokenizer(
        model_name, rm_model_name, peft_config
    )  # returns the policy model (with value head) and its tokenizer

    dataset = create_dataset(dataset_name, tokenizer)
    print(dataset)

    # generation config
    generation_kwargs = {
        "min_length": -1,
        "max_new_tokens": output_max_length,
        "top_k": 0.0,  # NOTE(review): transformers expects an int; 0 disables top-k — confirm 0.0 is accepted
        "top_p": 1.0,
        "do_sample": True,
        "pad_token_id": tokenizer.pad_token_id,
        "eos_token_id": tokenizer.eos_token_id,
        "forced_eos_token_id": tokenizer.eos_token_id,  # class ForcedEOSTokenLogitsProcessor(LogitsProcessor) from transformers
        # "forced_eos_token_id": True,
    }
    # Currently unused: the length_sampler kwarg below is commented out.
    output_length_sampler = LengthSampler(128, output_max_length)

    config = MultiAdapterPPOConfig(
        log_with='wandb',
        learning_rate=1e-5,
        batch_size=batch_size,
        mini_batch_size=mini_batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        optimize_cuda_cache=True,
        early_stopping=True,
        target_kl=0.1,
        ppo_epochs=ppo_epochs,
        seed=0,
        init_kl_coef=0.2,
        adap_kl_ctrl=True,
        max_grad_norm=1.0,  # fix generate nan
    )

    trainer = MultiAdapterPPOTrainer(
        config,
        model,
        ref_model=None,  # share parameters
        tokenizer=tokenizer,
        dataset=dataset,
        data_collator=collator,
    )

    # Subtracted from every raw reward score; checkpoint every `save_freq` batches.
    reward_baseline = 0.0
    save_freq = 50

    # for epoch, batch in enumerate(trainer.dataloader):
    for epoch, batch in enumerate(trainer.dataloader):
        start_time = time.time()

        if epoch >= config.total_ppo_epochs:
            break

        question_tensors = batch["input_ids"]
        response_tensors = trainer.generate(
            question_tensors,
            return_prompt=False,
            # length_sampler=output_length_sampler,
            **generation_kwargs,
        )
        batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

        # Score each full (query + response) text with the reward adapter.
        texts = [q + r for q, r in zip(batch["query"], batch["response"])]
        rm_model = trainer.accelerator.unwrap_model(trainer.model)
        raw_rewards = []
        for text in texts:
            inputs = tokenizer(text, return_tensors='pt').to(trainer.accelerator.device)
            # [0,-1,0]: first sample, last token position, scalar reward logit.
            score = rm_model.compute_reward_score(**inputs)[0,-1,0] - reward_baseline
            raw_rewards.append(score)
        rewards = raw_rewards

        ## PPO Step
        stats = trainer.step(question_tensors, response_tensors, rewards)
        trainer.log_stats(stats, batch, rewards)

        if is_main_process():
            for text, reward in zip(texts, rewards):
                print('-----------------------------------')
                print(text)
                print(reward.item())
                print('-----------------------------------')
            print(f"step:{epoch}/all:{len(trainer.dataloader)},loss:{stats['ppo/loss/total']},mean_scores:{stats['ppo/mean_scores']}" )

        # Periodic checkpoint (skips epoch 0).
        if save_freq and epoch and epoch % save_freq == 0:
            trainer.save_pretrained(f'{output_name}_{epoch}')
            print(f'{output_name}_{epoch}')
        # break
    trainer.save_pretrained(output_name)
219
+
220
+ if __name__ == "__main__":
221
+ train()
reward_model.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import deepspeed
3
+ from trl import RewardTrainer, RewardConfig
4
+ import torch
5
+ from accelerate import Accelerator
6
+ from utils import (
7
+ ScriptArguments,
8
+ DEFINE_PAD_TOKEN,
9
+ format_prompt_answer,
10
+ maybe_distributed_barrier,
11
+ resolve_system_prompt,
12
+ )
13
+ from transformers import (
14
+ AutoTokenizer,
15
+ BitsAndBytesConfig,
16
+ HfArgumentParser,
17
+ AutoModelForSequenceClassification,
18
+ )
19
+ from data_adapter import load_preference_dataset
20
+ os.environ["WANDB_PROJECT"] = "ma-rlhf"
21
+ os.environ["WANDB_RUN_NAME"] = "reward_model"
22
+
23
+ parser = HfArgumentParser(ScriptArguments)
24
+ train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]
25
+
26
+ dataset_name = train_args.dataset_name
27
+ dataset_sub_name = train_args.dataset_sub_name
28
+ dataset_split = train_args.dataset_split
29
+ model_name = train_args.model_name
30
+ deepspeed_config_name = train_args.deepspeed_config_name
31
+ seq_length = train_args.seq_length
32
+ batch_size = train_args.batch_size
33
+ output_name = train_args.output_name
34
+ is_peft = train_args.use_QLora
35
+ is_use_flash_attention2 = train_args.use_flash_attention_2
36
+ num_train_epochs = train_args.num_train_epochs
37
+ gradient_accumulation_steps = train_args.gradient_accumulation_steps
38
+ learning_rate = train_args.learning_rate
39
+ default_system_prompt = resolve_system_prompt(train_args.system_prompt)
40
+
41
+
42
def create_model_tokenizer(name):
    """Load the 4-bit sequence-classification reward model and its tokenizer.

    Args:
        name: path or hub id of the base checkpoint.

    Returns:
        `(model, tokenizer)` with a pad token registered on both.
    """
    # QLoRA: 4-bit NF4 quantization with double quantization, bf16 compute.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
    )

    # Pin the whole model to this process's local GPU.
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)

    # num_labels=1: the single logit is the scalar reward score.
    # Load from the `name` parameter — the original read the global `model_name`
    # here (and for the tokenizer), silently ignoring the argument.
    model = AutoModelForSequenceClassification.from_pretrained(
        name,
        quantization_config=bnb_config,
        device_map=device_map,  # 70b use auto
        num_labels=1,
        use_cache=False,
        trust_remote_code=True,
    )

    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)

    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    # Mirror the pad token onto both the model and its config.
    model.pad_token_id = tokenizer.pad_token_id
    model.pad_token = tokenizer.pad_token
    model.config.pad_token_id = tokenizer.pad_token_id
    model.config.pad_token = tokenizer.pad_token

    return model, tokenizer
70
+
71
+
72
def create_reward_model_datasets(datasets_name, dataset_sub_name, tokenizer):
    """Load and tokenize the pairwise preference dataset for reward training.

    Args:
        datasets_name: dataset path or hub id passed to `load_preference_dataset`.
        dataset_sub_name: dataset configuration name. NOTE(review): this
            parameter shadows the module-level global of the same name, and
            `train()` passes None here — confirm the CLI value is meant to be ignored.
        tokenizer: tokenizer used to encode the chosen/rejected texts.

    Returns:
        `(train_dataset, None)` — no eval split is produced (see commented-out code).
    """
    train_dataset = load_preference_dataset(
        datasets_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )
    train_dataset = train_dataset.map(
        lambda examples: tokenize_reward_batch(examples, tokenizer),
        batched=True,
    )

    # Barriers keep distributed ranks in sync around the cached map/filter steps.
    maybe_distributed_barrier()
    # Drop pairs where either side exceeds the model's max sequence length.
    train_dataset = train_dataset.filter(
        lambda x: len(x["input_ids_chosen"]) <= seq_length
        and len(x["input_ids_rejected"]) <= seq_length
    )
    maybe_distributed_barrier()

    # eval_dataset = eval_dataset.map(
    #     preprocess_function_hhrlhf,
    #     batched=True,
    #     num_proc=8,
    # )

    # torch.distributed.barrier()
    # eval_dataset = eval_dataset.filter(
    #     lambda x: len(x["input_ids_chosen"]) <= seq_length
    #     and len(x["input_ids_rejected"]) <= seq_length
    # )

    # torch.distributed.barrier()

    # return train_dataset, eval_dataset
    return train_dataset, None
107
+
108
+
109
def tokenize_reward_batch(examples, tokenizer):
    """Tokenize a preference batch into chosen/rejected input ids and attention masks.

    Each example's system prompt + question is formatted with the chosen and the
    rejected answer separately, and both texts are encoded with truncation.
    """
    out = {
        "input_ids_chosen": [],
        "attention_mask_chosen": [],
        "input_ids_rejected": [],
        "attention_mask_rejected": [],
    }
    rows = zip(examples["system"], examples["prompt"], examples["chosen"], examples["rejected"])
    for system_prompt, prompt, response_chosen, response_rejected in rows:
        for label, answer in (("chosen", response_chosen), ("rejected", response_rejected)):
            encoded = tokenizer(
                format_prompt_answer(prompt, answer, system_prompt=system_prompt),
                truncation=True,
                padding="longest",
            )
            out[f"input_ids_{label}"].append(encoded["input_ids"])
            out[f"attention_mask_{label}"].append(encoded["attention_mask"])

    return out
131
+
132
+
133
def train():
    """Train the pairwise reward model with TRL's RewardTrainer and save it."""
    model, tokenizer = create_model_tokenizer(model_name)  # 4-bit sequence-classification reward model
    # NOTE(review): dataset_sub_name is passed as None, so the CLI value is ignored — confirm intended.
    train_datasets, test_datasets = create_reward_model_datasets(dataset_name, None, tokenizer)

    # PEFT
    # NOTE(review): `create_peft_reward_model` is not imported in this file
    # (the utils import lists other names) — confirm it is available, otherwise
    # this raises NameError at runtime.
    peft_config = create_peft_reward_model(is_peft)

    # ZERO stage3 use config like # https://github.com/huggingface/trl/issues/835
    reward_config = RewardConfig(
        output_dir=output_name,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        num_train_epochs=num_train_epochs,
        gradient_accumulation_steps=gradient_accumulation_steps,
        gradient_checkpointing=True,
        learning_rate=learning_rate,
        report_to="wandb",
        warmup_ratio=0.01,
        remove_unused_columns=True,
        optim="adamw_torch",
        logging_steps=1,
        max_length=seq_length,
        deepspeed=deepspeed_config_name,
        bf16=True,
        lr_scheduler_type='cosine',
        # evaluation_strategy="steps",
        # eval_steps=100,
        # max_steps=10,
    )

    trainer = RewardTrainer(
        model,
        args=reward_config,
        train_dataset=train_datasets,
        processing_class=tokenizer,
        peft_config=peft_config,
    )

    trainer.train()
    trainer.save_model(output_name)
173
+
174
+
175
+ if __name__ == "__main__":
176
+ train()
scripts/run_ba_role_dpo.sh ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# DPO stage for one BA agent role: train a DPO LoRA on top of the role's SFT
# checkpoint, then merge the adapter into a standalone full model.
set -euo pipefail

# Resolve the experiment root relative to this script's location.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exp_root="$(cd "${script_dir}/.." && pwd)"

# All knobs are overridable via environment variables.
role_name="${ROLE_NAME:-writer}"
deepspeed_config_name="${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}"
dataset_sub_name="${DATASET_SUB_NAME:-}"
dataset_split="${DATASET_SPLIT:-train}"
system_prompt="${SYSTEM_PROMPT:-}"

# Input (merged SFT model, preference JSONL) and output (LoRA + merged) paths.
output_root="${OUTPUT_ROOT:-${exp_root}/output/${role_name}}"
model_sft_full_path="${MODEL_SFT_FULL_PATH:-${output_root}/sft_full}"
preference_dataset_name="${PREFERENCE_DATASET_NAME:-${exp_root}/data/${role_name}_preference.jsonl}"
model_dpo_lora_path="${output_root}/dpo_lora"
model_dpo_full_path="${output_root}/dpo_full"

mkdir -p "${output_root}"

# The dataset may be a single JSONL file or a dataset directory.
if [ ! -f "${preference_dataset_name}" ] && [ ! -d "${preference_dataset_name}" ]; then
  echo "Missing preference dataset: ${preference_dataset_name}"
  echo "See ${exp_root}/README.md and the sample JSONL files."
  exit 1
fi

# Train the DPO LoRA adapter on the SFT checkpoint.
deepspeed "${exp_root}/dpo.py" \
  --dataset_name="${preference_dataset_name}" \
  --dataset_sub_name="${dataset_sub_name}" \
  --dataset_split="${dataset_split}" \
  --system_prompt="${system_prompt}" \
  --model_name="${model_sft_full_path}" \
  --output_name="${model_dpo_lora_path}" \
  --use_QLora="${USE_QLORA:-True}" \
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}" \
  --deepspeed_config_name="${deepspeed_config_name}" \
  --batch_size="${BATCH_SIZE:-8}" \
  --num_train_epochs="${NUM_TRAIN_EPOCHS:-2}" \
  --seq_length="${SEQ_LENGTH:-1024}" \
  --output_max_length="${OUTPUT_MAX_LENGTH:-512}" \
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-4}" \
  --learning_rate="${LEARNING_RATE:-2e-5}"

# Merge the DPO adapter into the SFT base to produce a standalone model.
python "${exp_root}/merge_adapter.py" \
  --base_model_name="${model_sft_full_path}" \
  --model_name="${model_dpo_lora_path}" \
  --merged_model_name="${model_dpo_full_path}"
scripts/run_ba_role_ppo.sh ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# PPO stage for one BA agent role: run multi-adapter PPO using the DPO-merged
# policy and the reward-model LoRA, then merge the PPO adapter into a full model.
set -euo pipefail

# Resolve the experiment root relative to this script's location.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exp_root="$(cd "${script_dir}/.." && pwd)"

# All knobs are overridable via environment variables.
role_name="${ROLE_NAME:-writer}"
deepspeed_config_name="${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}"
dataset_sub_name="${DATASET_SUB_NAME:-}"
dataset_split="${DATASET_SPLIT:-train}"
system_prompt="${SYSTEM_PROMPT:-}"

# Inputs (policy model, reward adapter, prompt JSONL) and outputs (LoRA + merged).
output_root="${OUTPUT_ROOT:-${exp_root}/output/${role_name}}"
policy_model_path="${POLICY_MODEL_PATH:-${output_root}/dpo_full}"
reward_model_name="${REWARD_MODEL_NAME:-${output_root}/reward_model_lora}"
prompt_dataset_name="${PROMPT_DATASET_NAME:-${exp_root}/data/${role_name}_prompts.jsonl}"
model_ppo_lora_path="${output_root}/ppo_lora"
model_ppo_full_path="${output_root}/ppo_full"

mkdir -p "${output_root}"

# The dataset may be a single JSONL file or a dataset directory.
if [ ! -f "${prompt_dataset_name}" ] && [ ! -d "${prompt_dataset_name}" ]; then
  echo "Missing PPO prompt dataset: ${prompt_dataset_name}"
  echo "See ${exp_root}/README.md and the sample JSONL files."
  exit 1
fi

# Run multi-adapter PPO (policy + reward share one quantized base model).
deepspeed "${exp_root}/ppo_multi_adapter.py" \
  --dataset_name="${prompt_dataset_name}" \
  --dataset_sub_name="${dataset_sub_name}" \
  --dataset_split="${dataset_split}" \
  --system_prompt="${system_prompt}" \
  --model_name="${policy_model_path}" \
  --reward_model_name="${reward_model_name}" \
  --output_name="${model_ppo_lora_path}" \
  --use_QLora="${USE_QLORA:-True}" \
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}" \
  --deepspeed_config_name="${deepspeed_config_name}" \
  --batch_size="${BATCH_SIZE:-8}" \
  --mini_batch_size="${MINI_BATCH_SIZE:-1}" \
  --ppo_epochs="${PPO_EPOCHS:-1}" \
  --output_max_length="${OUTPUT_MAX_LENGTH:-512}" \
  --seq_length="${SEQ_LENGTH:-512}" \
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-2}"

# Merge the PPO adapter into the policy base to produce a standalone model.
python "${exp_root}/merge_adapter.py" \
  --base_model_name="${policy_model_path}" \
  --model_name="${model_ppo_lora_path}" \
  --merged_model_name="${model_ppo_full_path}"
scripts/run_ba_role_reward.sh ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Reward-model stage for one BA agent role: train a reward LoRA on preference
# pairs, on top of the role's merged SFT checkpoint.
set -euo pipefail

# Resolve the experiment root relative to this script's location.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exp_root="$(cd "${script_dir}/.." && pwd)"

# All knobs are overridable via environment variables.
role_name="${ROLE_NAME:-writer}"
deepspeed_config_name="${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}"
dataset_sub_name="${DATASET_SUB_NAME:-}"
dataset_split="${DATASET_SPLIT:-train}"
system_prompt="${SYSTEM_PROMPT:-}"

# Input (base model, preference JSONL) and output (reward LoRA) paths.
output_root="${OUTPUT_ROOT:-${exp_root}/output/${role_name}}"
reward_base_model_path="${REWARD_BASE_MODEL_PATH:-${output_root}/sft_full}"
preference_dataset_name="${PREFERENCE_DATASET_NAME:-${exp_root}/data/${role_name}_preference.jsonl}"
reward_model_lora_path="${output_root}/reward_model_lora"

mkdir -p "${output_root}"

# The dataset may be a single JSONL file or a dataset directory.
if [ ! -f "${preference_dataset_name}" ] && [ ! -d "${preference_dataset_name}" ]; then
  echo "Missing preference dataset: ${preference_dataset_name}"
  echo "See ${exp_root}/README.md and the sample JSONL files."
  exit 1
fi

# Train the reward-model LoRA adapter.
deepspeed "${exp_root}/reward_model.py" \
  --dataset_name="${preference_dataset_name}" \
  --dataset_sub_name="${dataset_sub_name}" \
  --dataset_split="${dataset_split}" \
  --system_prompt="${system_prompt}" \
  --model_name="${reward_base_model_path}" \
  --seq_length="${SEQ_LENGTH:-1024}" \
  --batch_size="${BATCH_SIZE:-8}" \
  --output_name="${reward_model_lora_path}" \
  --use_QLora="${USE_QLORA:-True}" \
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}" \
  --deepspeed_config_name="${deepspeed_config_name}" \
  --num_train_epochs="${NUM_TRAIN_EPOCHS:-2}" \
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-4}" \
  --learning_rate="${LEARNING_RATE:-2e-5}"
scripts/run_ba_role_sft.sh ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# SFT a role-specific LoRA adapter on the shared base model, then merge the
# adapter into a standalone full checkpoint for downstream reward/PPO stages.
# Every knob is an environment variable with a default.
set -euo pipefail

# Resolve paths relative to this script so it can run from any CWD.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exp_root="$(cd "${script_dir}/.." && pwd)"

role_name="${ROLE_NAME:-data_insight}"
base_model_path="${BASE_MODEL_PATH:-meta-llama/Llama-3.1-8B}"
deepspeed_config_name="${DEEPSPEED_CONFIG_NAME:-${exp_root}/config/ds.json}"
dataset_sub_name="${DATASET_SUB_NAME:-}"
dataset_split="${DATASET_SPLIT:-train}"
system_prompt="${SYSTEM_PROMPT:-}"

output_root="${OUTPUT_ROOT:-${exp_root}/output/${role_name}}"
sft_dataset_name="${SFT_DATASET_NAME:-${exp_root}/data/${role_name}_sft.jsonl}"
model_sft_lora_path="${output_root}/sft_lora"
model_sft_full_path="${output_root}/sft_full"

mkdir -p "${output_root}"

# The dataset may be a single JSONL file or a directory of shards.
if ! { [ -f "${sft_dataset_name}" ] || [ -d "${sft_dataset_name}" ]; }; then
  echo "Missing SFT dataset: ${sft_dataset_name}"
  echo "See ${exp_root}/README.md and the sample JSONL files."
  exit 1
fi

# Stage 1: LoRA/QLoRA supervised fine-tuning.
deepspeed "${exp_root}/sft.py" \
  --dataset_name="${sft_dataset_name}" \
  --dataset_sub_name="${dataset_sub_name}" \
  --dataset_split="${dataset_split}" \
  --system_prompt="${system_prompt}" \
  --model_name="${base_model_path}" \
  --seq_length="${SEQ_LENGTH:-1024}" \
  --output_name="${model_sft_lora_path}" \
  --use_QLora="${USE_QLORA:-True}" \
  --batch_size="${BATCH_SIZE:-8}" \
  --use_flash_attention_2="${USE_FLASH_ATTENTION_2:-True}" \
  --deepspeed_config_name="${deepspeed_config_name}" \
  --num_train_epochs="${NUM_TRAIN_EPOCHS:-2}" \
  --gradient_accumulation_steps="${GRADIENT_ACCUMULATION_STEPS:-8}" \
  --learning_rate="${LEARNING_RATE:-1e-5}"

# Stage 2: merge the LoRA adapter into a full model for the next stages.
python "${exp_root}/merge_adapter.py" \
  --base_model_name="${base_model_path}" \
  --model_name="${model_sft_lora_path}" \
  --merged_model_name="${model_sft_full_path}"
sft.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ from trl import SFTTrainer, SFTConfig
4
+ from trl.trainer.utils import DataCollatorForCompletionOnlyLM
5
+ from accelerate import Accelerator
6
import random
random.seed(42)  # deterministic shuffling/sampling across runs

# Tag runs in Weights & Biases; picked up because SFTConfig uses report_to='wandb'.
os.environ["WANDB_PROJECT"] = "ma-rlhf"
os.environ["WANDB_RUN_NAME"] = "sft"
11
+
12
+ from transformers import (
13
+ AutoModelForCausalLM,
14
+ AutoTokenizer,
15
+ BitsAndBytesConfig,
16
+ HfArgumentParser,
17
+ )
18
+ from utils import (
19
+ ScriptArguments,
20
+ DEFINE_PAD_TOKEN,
21
+ create_peft,
22
+ formatting_prompt_response_func_batched,
23
+ resolve_system_prompt,
24
+ )
25
+ from data_adapter import load_sft_dataset
26
+
27
# Parse CLI flags into ScriptArguments; unknown flags are returned (and
# ignored) so the deepspeed launcher can pass its own arguments through.
parser = HfArgumentParser(ScriptArguments)
train_args: ScriptArguments = parser.parse_args_into_dataclasses(return_remaining_strings=True)[0]

dataset_name = train_args.dataset_name
# Bug fix: the original re-assigned dataset_sub_name = None after unpacking,
# silently discarding the --dataset_sub_name value passed by the launcher
# scripts. Keep the parsed value instead.
dataset_sub_name = train_args.dataset_sub_name
dataset_split = train_args.dataset_split
model_name = train_args.model_name
deepspeed_config_name = train_args.deepspeed_config_name
seq_length = train_args.seq_length
batch_size = train_args.batch_size
output_name = train_args.output_name
is_peft = train_args.use_QLora
use_flash_attention_2 = train_args.use_flash_attention_2
num_train_epochs = train_args.num_train_epochs
gradient_accumulation_steps = train_args.gradient_accumulation_steps
learning_rate = train_args.learning_rate

# Role system prompt: CLI override, else the module default.
default_system_prompt = resolve_system_prompt(train_args.system_prompt)
46
+
47
+
48
def create_datasets(dataset_name, dataset_sub_name):
    """Load the role's SFT dataset via the schema-normalizing adapter.

    Returns (train_dataset, None); no eval split is used here.
    """
    train_split = load_sft_dataset(
        dataset_name,
        dataset_sub_name=dataset_sub_name,
        split=dataset_split,
        default_system_prompt=default_system_prompt,
    )
    return train_split, None
56
+
57
+
58
def create_model_tokenizer(name):
    """Load the causal LM (optionally 4-bit quantized for QLoRA) and tokenizer.

    Args:
        name: model id or local path to load.

    Returns:
        (model, tokenizer) ready for SFT with gradient checkpointing enabled.
    """
    # NF4 quantization config; only applied when QLoRA is enabled.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
    )
    # Pin the whole model to this rank's device for the distributed launcher.
    device_map = {"": Accelerator().local_process_index}
    model = AutoModelForCausalLM.from_pretrained(
        name,  # bug fix: honor the `name` argument (was reading the global model_name)
        quantization_config=bnb_config if is_peft else None,
        device_map=device_map,
        use_flash_attention_2=use_flash_attention_2,  # some models (e.g. gpt2) do not support FA2
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        use_cache=False,  # KV cache is incompatible with gradient checkpointing
    )
    model.gradient_checkpointing_enable()

    tokenizer = AutoTokenizer.from_pretrained(
        name,
        trust_remote_code=True,
        padding_side='left',
        # model_max_length=1024
    )
    # Reuse a reserved special token as PAD. NOTE(review): no
    # resize_token_embeddings() follows, which assumes the token already
    # exists in the vocab — confirm for the chosen base model. config-side
    # pad is deliberately left at eos, mirroring the original behavior.
    tokenizer.add_special_tokens({'pad_token': DEFINE_PAD_TOKEN})
    model.pad_token_id = tokenizer.pad_token_id
    model.pad_token = tokenizer.pad_token
    model.config.pad_token_id = model.config.eos_token_id

    return model, tokenizer
86
+
87
+
88
def create_sft_datasets(datasets, tokenizer, format_func, seq_length=512):
    """Pass-through hook kept for pipeline symmetry.

    No tokenization happens here: SFTTrainer formats and tokenizes via its
    own `formatting_func`. Returns (datasets, None).
    """
    return datasets, None
90
+
91
def create_collator(tokenizer):
    """Build a completion-only collator that masks loss on everything before
    the "###Answer:" marker.

    ref https://github.com/huggingface/trl/blob/main/tests/test_data_collator_completion_only.py
    """
    marker = "###Answer:"
    # Drop the first encoded id: its value can depend on what precedes the
    # marker in context (leading-space token merging), so matching starts
    # from the second token. TODO(review): confirm for the tokenizer in use.
    marker_ids = tokenizer.encode(marker, add_special_tokens=False)[1:]
    return DataCollatorForCompletionOnlyLM(marker_ids, tokenizer=tokenizer)
101
+
102
+
103
def train():
    """End-to-end SFT: load model and data, configure TRL, train, save."""
    model, tokenizer = create_model_tokenizer(model_name)
    raw_datasets, _ = create_datasets(dataset_name, dataset_sub_name)
    format_fn = formatting_prompt_response_func_batched
    train_split, _ = create_sft_datasets(raw_datasets, tokenizer, format_fn, seq_length)
    data_collator = create_collator(tokenizer)
    lora_config = create_peft(is_peft)  # None when (Q)LoRA is disabled

    sft_config = SFTConfig(
        output_dir=output_name,
        save_strategy='epoch',
        logging_steps=1,
        num_train_epochs=num_train_epochs,
        gradient_checkpointing=True,
        bf16=True,
        learning_rate=learning_rate,
        warmup_ratio=0.1,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        gradient_accumulation_steps=gradient_accumulation_steps,
        deepspeed=deepspeed_config_name,
        report_to='wandb',
        lr_scheduler_type='cosine',
        max_seq_length=seq_length,
        # max_steps=10,
    )

    trainer = SFTTrainer(
        model,
        args=sft_config,
        train_dataset=train_split,
        peft_config=lora_config,
        data_collator=data_collator,
        formatting_func=format_fn,
    )
    trainer.train()
    trainer.save_model(output_name)
142
+
143
+
144
# Entry point: launched once per rank by `deepspeed sft.py ...` (see scripts/).
if __name__ == "__main__":
    train()
utils.py ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import re
3
+ import random
4
+
5
+ from transformers.utils import PaddingStrategy
6
+ from accelerate import Accelerator
7
+ from peft import LoraConfig, TaskType
8
+ from dataclasses import dataclass, field
9
+ from typing import Dict, Optional, Any, List, Union
10
+ from transformers import (
11
+ AutoModelForCausalLM,
12
+ AutoTokenizer,
13
+ BitsAndBytesConfig,
14
+ PreTrainedTokenizerBase,
15
+ HfArgumentParser,
16
+ )
17
+
18
# Legacy (Llama-2 style) special tokens, kept for reference:
# DEFINE_EOS_TOKEN = '''</s>'''
# DEFINE_BOS_TOKEN = '''<s>'''
# DEFINE_PAD_TOKEN = '''<pad>'''
# SYSTEM_PROMPT = '''You are a robot named "MA-RLHF", you are always friendly and answer questions。'''
# Llama-3 special tokens; a reserved special token is reused as the PAD token.
DEFINE_BOS_TOKEN = '''<|begin_of_text|>'''
DEFINE_EOS_TOKEN = '''<|end_of_text|>'''
DEFINE_PAD_TOKEN = '''<|reserved_special_token_0|>'''
SYSTEM_PROMPT = '''You are MA-RLHF Chatbot, you should friendly answer the question'''

# o1-style step-wise reasoning: a reserved token marks step boundaries and
# "Positive"/"Negative" are the labels used when scoring individual steps.
DEFINE_SEP_TOKEN = '''<|reserved_special_token_1|>''' # separator token marking the end of each reasoning step
DEFINE_POSITIVE_TOKEN = '''Positive'''
DEFINE_NEGATIVE_TOKEN= '''Negative'''
STEP_INSTRUCTION = '''Solve this math problem using step-by-step reasoning. Require that the output of each step ends with the "<|reserved_special_token_1|>" token.\n'''
PRM800K_STEP_INSTRUCTION = '''Solve this math problem using step-by-step reasoning. Require that the output of each step ends with the "<|reserved_special_token_1|>" token.\n'''
MATH_STEP_INSTRUCTION = '''Solve this math problem using step-by-step reasoning. \n'''
GSM8K_STEP_INSTRUCTION = '''Solve this math problem using step-by-step reasoning. Require that the output of each step ends with the "<|reserved_special_token_1|>" token.\n'''
# PRM_INSTRUCTION = '''Scoring step-by-step reasoning with predict "Positive" or "Negative" .\n'''
PRM_INSTRUCTION = '''Score each step under the 'Positive' and 'Negative' labels based on its correctness.\n'''
37
+
38
+
39
def is_main_process():
    """Return True on rank 0, or whenever torch.distributed is not active."""
    dist = torch.distributed
    if not (dist.is_available() and dist.is_initialized()):
        return True
    return dist.get_rank() == 0
45
+
46
+
47
def maybe_distributed_barrier():
    """Synchronize all ranks, but only when a process group is live."""
    dist = torch.distributed
    if dist.is_available() and dist.is_initialized():
        dist.barrier()
50
+
51
+
52
def create_model_tokenizer(name):
    """Load a 4-bit NF4-quantized causal LM plus its fast tokenizer."""
    # Keep the whole model on this rank's device.
    device_map = {"": Accelerator().local_process_index}
    print('device map: ', device_map)

    # QLoRA quantization settings.
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
    )

    model = AutoModelForCausalLM.from_pretrained(
        name,
        quantization_config=quant_config,
        device_map=device_map,
        # torch_dtype=torch.bfloat16,
        # use_flash_attention_2=True # gpt 2 not support flash attention2
    )

    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)

    return model, tokenizer
72
+
73
+
74
def create_peft(peft_flag: bool = False) -> LoraConfig:
    """Return the default causal-LM LoRA config, or None when PEFT is off.

    Args:
        peft_flag: enable LoRA/QLoRA when True.

    Returns:
        A LoraConfig targeting the library-default attention projections,
        or None (full fine-tuning).
    """
    if not peft_flag:  # idiom fix: was `peft_flag == False`
        return None
    return LoraConfig(
        r=64,
        lora_alpha=8,
        bias="none",
        # lora_dropout=0.05,
        task_type="CAUSAL_LM",
    )
87
+
88
+
89
def create_peft_lm_head(peft_flag: bool = False) -> LoraConfig:
    """LoRA config that additionally adapts lm_head.

    When newly added step tokens must be predicted, leaving lm_head
    un-adapted makes those tokens hard to emit, so lm_head is included
    in target_modules here. (Docstring translated from the original Chinese.)
    """
    if not peft_flag:  # idiom fix: was `peft_flag == False`
        return None
    return LoraConfig(
        r=64,
        lora_alpha=8,
        bias="none",
        # lora_dropout=0.05,
        task_type="CAUSAL_LM",
        target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj', 'lm_head'],
    )
106
+
107
def create_peft_prm_lm_head(peft_flag: bool = False) -> LoraConfig:
    """LoRA config for PRM training that also adapts lm_head.

    Same rationale as create_peft_lm_head: newly added step tokens are hard
    to predict unless lm_head is adapted too. Uses a smaller target set
    (q/k only) and no dropout. (Docstring translated from the original Chinese.)
    """
    if not peft_flag:  # idiom fix: was `peft_flag == False`
        return None
    return LoraConfig(
        r=64,
        lora_alpha=8,
        bias="none",
        lora_dropout=0,
        task_type="CAUSAL_LM",
        target_modules=['q_proj', 'k_proj', 'lm_head'],
    )
124
+
125
def create_peft_reward_model(peft_flag: bool = False) -> LoraConfig:
    """LoRA config for the pairwise reward model (sequence classification).

    Returns None when PEFT is disabled. The classification head ("scores")
    is trained fully alongside the LoRA layers via modules_to_save.
    """
    if not peft_flag:  # idiom fix: was `peft_flag == False`
        return None
    return LoraConfig(
        task_type=TaskType.SEQ_CLS,
        inference_mode=False,
        r=32,
        lora_alpha=8,
        bias="none",
        lora_dropout=0.05,
        modules_to_save=["scores"],
    )
140
+
141
+
142
@dataclass
class RewardDataCollatorWithPadding:
    """Collator for pairwise reward training.

    Each feature carries two tokenized candidates (suffix "_j" and "_k",
    conventionally preferred/dispreferred). The two sides are padded
    independently and returned side by side in one batch dict.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def _pad(self, side_features):
        # Shared padding call for either side of the pair.
        return self.tokenizer.pad(
            side_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        batch_j = self._pad(
            [
                {"input_ids": f["input_ids_j"], "attention_mask": f["attention_mask_j"]}
                for f in features
            ]
        )
        batch_k = self._pad(
            [
                {"input_ids": f["input_ids_k"], "attention_mask": f["attention_mask_k"]}
                for f in features
            ]
        )
        return {
            "input_ids_j": batch_j["input_ids"],
            "attention_mask_j": batch_j["attention_mask"],
            "input_ids_k": batch_k["input_ids"],
            "attention_mask_k": batch_k["attention_mask"],
            # NOTE(review): presumably consumed by a custom compute_loss in
            # the reward trainer — verify in reward_model.py.
            "return_loss": True,
        }
188
+
189
+
190
@dataclass
class ScriptArguments:
    """CLI arguments shared by the SFT / reward / PPO / merge entry points.

    Parsed with transformers.HfArgumentParser; fields a given script does
    not need are simply ignored. Only metadata help texts were corrected
    here — field names, order, and defaults are unchanged.
    """

    # --- model / checkpoint paths ---
    model_name: Optional[str] = field(default="", metadata={"help": "the model name"})

    base_model_name: Optional[str] = field(default="", metadata={"help": "pretrained base model (used when merging adapters)"})

    reward_model_name: Optional[str] = field(default="", metadata={"help": "the reward model name"})

    merged_model_name: Optional[str] = field(default="", metadata={"help": "output path for the base-model + LoRA merge"})

    output_name: Optional[str] = field(default="", metadata={"help": "output directory for the trained model/adapter"})

    lora_path: Optional[str] = field(default="", metadata={"help": "lora path"})

    # --- dataset ---
    dataset_name: Optional[str] = field(
        default="", metadata={"help": "dataset path or Hugging Face hub id"}
    )

    dataset_sub_name: Optional[str] = field(default="", metadata={"help": "hf dataset config name"})

    dataset_split: Optional[str] = field(default="train", metadata={"help": "dataset split"})

    deepspeed_config_name: Optional[str] = field(default="", metadata={"help": "path to the DeepSpeed config (e.g. ds.json)"})

    prompt: Optional[str] = field(default="", metadata={"help": "for test generation"})

    system_prompt: Optional[str] = field(default="", metadata={"help": "optional system prompt override"})

    # --- optimization / generation ---
    learning_rate: Optional[float] = field(
        default=5e-6, metadata={"help": "the learning rate"}
    )

    seq_length: Optional[int] = field(default=1024, metadata={"help": "context max length"})

    max_new_tokens: Optional[int] = field(default=128, metadata={"help": "max generate tokens"})

    output_max_length: Optional[int] = field(
        default=128, metadata={"help": "ppo maximum length for generation"}
    )

    mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"})

    batch_size: Optional[int] = field(default=8, metadata={"help": "the batch size"})

    ppo_epochs: Optional[int] = field(default=1, metadata={"help": "the number of ppo epochs"})

    num_train_epochs: Optional[int] = field(default=1, metadata={"help": "train epochs"})

    gradient_accumulation_steps: Optional[int] = field(
        default=1, metadata={"help": "gradient accumulation steps"}
    )

    early_stopping: Optional[bool] = field(
        default=False, metadata={"help": "whether to early stop"}
    )

    target_kl: Optional[float] = field(
        default=0.1, metadata={"help": "kl target for early stopping"}
    )

    seed: Optional[int] = field(default=0, metadata={"help": "the seed"})

    use_QLora: Optional[bool] = field(default=False, metadata={"help": "enable QLoRA (4-bit) training"})

    use_flash_attention_2: Optional[bool] = field(
        default=False, metadata={"help": "enable FlashAttention-2 (not supported by e.g. gpt2)"}
    )

    merge_checkpoint_type: Optional[str] = field(default='LM', metadata={"help": "type of checkpoint to merge"})

    use_qlora_double_quant: Optional[bool] = field(default=False, metadata={"help": "enable bitsandbytes nested (double) quantization"})

    step_generate: Optional[bool] = field(default=False, metadata={"help": "step generation"})
263
+
264
+
265
+
266
def resolve_system_prompt(system_prompt: Optional[str] = None) -> str:
    """Return the given prompt when non-empty, else the module default."""
    if system_prompt:
        return system_prompt
    return SYSTEM_PROMPT
268
+
269
+
270
def format_prompt_answer(question, answer, system_prompt: Optional[str] = None):
    '''Render one full training example: system + question + answer + EOS.'''
    sys_text = resolve_system_prompt(system_prompt)
    return (
        f"###System: {sys_text}\n"
        f"###Question: {question}\n"
        f"###Answer: {answer} {DEFINE_EOS_TOKEN}"
    )
274
+
275
+
276
def format_prompt(question, system_prompt: Optional[str] = None):
    '''Render a generation prompt that stops right before the answer text.'''
    sys_text = resolve_system_prompt(system_prompt)
    return f"###System: {sys_text}\n###Question: {question}\n###Answer: "
279
+
280
+
281
def formatting_prompt_response_func(example):
    """Format a single {system?, prompt, response} record into training text."""
    per_example_system = example.get("system")
    return format_prompt_answer(
        example["prompt"], example["response"], system_prompt=per_example_system
    )
287
+
288
+
289
def formatting_prompt_response_func_batched(example):
    """Batched variant: format every (system, prompt, response) triple.

    A missing "system" column falls back to None per row, which
    format_prompt_answer resolves to the module default prompt.
    """
    prompts = example["prompt"]
    responses = example["response"]
    systems = example.get("system", [None] * len(prompts))
    return [
        format_prompt_answer(prompt, response, system_prompt=system)
        for system, prompt, response in zip(systems, prompts, responses)
    ]
297
+
298
+
299
# Medical finetune data has no 'input' column — only 'instruction'/'output'.
def formatting_finetune_func(example):
    """Format an instruction/output record (no input field)."""
    return format_prompt_answer(example['instruction'], example['output'])
302
+
303
+
304
def formatting_alpaca_func_bached(example):
    """Batched alpaca formatting: instruction + input merged into the question.

    (Function name typo "bached" kept — it is part of the public interface.)
    """
    return [
        format_prompt_answer(f"{instruction} {extra}".strip(), target)
        for instruction, extra, target in zip(
            example["instruction"], example['input'], example['output']
        )
    ]
312
+
313
+
314
def formatting_alpaca_func(example):
    """Single-record alpaca formatting: instruction + input form the question."""
    question = f"{example['instruction']} {example['input']}".strip()
    return format_prompt_answer(question, example['output'])
319
+
320
+
321
def formatting_alpaca_chinese_func(example):
    """Format the *_zh columns of a bilingual alpaca record.

    Note: unlike format_prompt_answer, there is no space before the EOS
    token here, so the template is rendered inline.
    """
    question = f"{example['instruction_zh']} {example['input_zh']}"
    answer = example['output_zh']
    return f"###System: {SYSTEM_PROMPT}\n###Question: {question}\n###Answer: {answer}{DEFINE_EOS_TOKEN}"