TheJackBright committed on
Commit
c04333f
·
verified ·
1 Parent(s): d8ba5fb

Upload hf_grpo_train.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_grpo_train.py +275 -0
hf_grpo_train.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # dependencies = [
3
+ # "torch>=2.2.0",
4
+ # "transformers>=4.40.0",
5
+ # "trl>=1.2.0",
6
+ # "peft>=0.10.0",
7
+ # "accelerate>=0.27.0",
8
+ # "bitsandbytes>=0.43.0",
9
+ # "datasets>=2.18.0",
10
+ # "huggingface-hub>=0.22.0",
11
+ # "trackio",
12
+ # "pydantic>=2.0",
13
+ # "numpy>=1.24",
14
+ # "pandas>=2.0",
15
+ # "matplotlib>=3.8",
16
+ # "tqdm>=4.60",
17
+ # "networkx>=3.0",
18
+ # "scipy>=1.10",
19
+ # "fastapi>=0.100",
20
+ # "uvicorn>=0.22",
21
+ # "httpx>=0.24",
22
+ # "pyyaml>=6.0",
23
+ # ]
24
+ # ///
25
+ """
26
+ PolyGuard GRPO Training Job β€” runs on Hugging Face Jobs cloud GPU.
27
+
28
+ This script:
29
+ 1. Clones the project from GitHub
30
+ 2. Installs the polyguard-rl package
31
+ 3. Loads the SFT adapter from Hub (or base model if no adapter)
32
+ 4. Generates GRPO rollout prompts from the PolyGuard environment
33
+ 5. Runs GRPO training with 4 grouped reward functions
34
+ 6. Pushes the final adapter to the Hugging Face Hub
35
+
36
+ Submit via CLI:
37
+ hf jobs uv run \
38
+ --flavor a10g-large \
39
+ --timeout 4h \
40
+ --secrets HF_TOKEN \
41
+ "https://huggingface.co/TheJackBright/polyguard-training-scripts/resolve/main/hf_grpo_train.py"
42
+
43
+ Environment variables:
44
+ HF_TOKEN : HF write token (required, passed via --secrets)
45
+ SFT_MODEL_ID : SFT adapter on Hub (default: TheJackBright/polyguard-qwen-sft)
46
+ HUB_MODEL_ID : output GRPO model repo (default: TheJackBright/polyguard-qwen-grpo)
47
+ N_EPISODES : GRPO rollout episodes (default: 256)
48
+ EPOCHS : training epochs (default: 2)
49
+ BATCH_SIZE : per-device train batch size (default: 2)
50
+ GRAD_ACCUM : gradient accumulation steps (default: 8)
51
+ MAX_PROMPT_LEN : max prompt tokens (default: 512)
52
+ MAX_COMPLETION_LEN: max completion tokens (default: 512)
53
+ GROUP_SIZE : GRPO group size (default: 4)
54
+ """
55
+ from __future__ import annotations
56
+
57
+ import inspect
58
+ import json
59
+ import os
60
+ import subprocess
61
+ import sys
62
+ from pathlib import Path
63
+ from typing import Any, Callable, Dict, List, Optional
64
+
65
# ─── Config from environment ────────────────────────────────────────────────
# Every knob is overridable via environment variables so the same script can
# be resubmitted to HF Jobs with different settings and zero code changes.
_env = os.environ

# Hub / GitHub locations.
GITHUB_REPO = _env.get("GITHUB_REPO", "https://github.com/Vishwa-docs/Meta_PyTorch_Scalar_OpenEnv-Hackathon.git")
SFT_MODEL_ID = _env.get("SFT_MODEL_ID", "TheJackBright/polyguard-qwen-sft")
BASE_MODEL_NAME = _env.get("BASE_MODEL_NAME", "Qwen/Qwen2.5-1.5B-Instruct")
HUB_MODEL_ID = _env.get("HUB_MODEL_ID", "TheJackBright/polyguard-qwen-grpo")

# Integer-valued hyperparameters.
N_EPISODES = int(_env.get("N_EPISODES", "256"))
EPOCHS = int(_env.get("EPOCHS", "2"))
BATCH_SIZE = int(_env.get("BATCH_SIZE", "2"))
GRAD_ACCUM = int(_env.get("GRAD_ACCUM", "8"))
MAX_PROMPT_LEN = int(_env.get("MAX_PROMPT_LEN", "512"))
MAX_COMPLETION_LEN = int(_env.get("MAX_COMPLETION_LEN", "512"))
GROUP_SIZE = int(_env.get("GROUP_SIZE", "4"))

# Fixed settings (not env-configurable).
SEED = 42
OUTPUT_DIR = "/tmp/polyguard_grpo_output"
PROMPTS_PATH = "/tmp/polyguard_grpo_prompts.jsonl"
80
+
81
+ print("=" * 60)
82
+ print("PolyGuard GRPO Training on HF Jobs")
83
+ print(f" SFT checkpoint: {SFT_MODEL_ID}")
84
+ print(f" Hub output: {HUB_MODEL_ID}")
85
+ print(f" Episodes: {N_EPISODES}")
86
+ print(f" Epochs: {EPOCHS}")
87
+ print(f" Group size: {GROUP_SIZE}")
88
+ print("=" * 60)
89
+
90
+
91
# ─── Step 1: Clone repo and install polyguard-rl ────────────────────────────
# The project lives on GitHub, not on the Hub, so the job must fetch it at
# startup before the training modules can be imported.
print("\n[1/5] Cloning project from GitHub...")
clone_dir = Path("/tmp/polyguard_project")
if not clone_dir.exists():
    # Shallow clone: only the latest commit is needed for training.
    subprocess.run(
        ["git", "clone", "--depth=1", GITHUB_REPO, str(clone_dir)],
        check=True,
    )
else:
    print(" Already cloned.")

polyguard_rl_dir = clone_dir / "polyguard-rl"
print("\n[2/5] Installing polyguard-rl...")  # fixed: was an f-string with no placeholders
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-e", str(polyguard_rl_dir), "--quiet"],
    check=True,
)
# Make the package importable immediately (pip -e may not be on sys.path yet)
# and run from inside the project so relative data paths resolve.
if str(polyguard_rl_dir) not in sys.path:
    sys.path.insert(0, str(polyguard_rl_dir))
os.chdir(polyguard_rl_dir)
111
+
112
+
113
# ─── Step 2: Build GRPO prompt dataset ──────────────────────────────────────
print(f"\n[3/5] Building GRPO rollout dataset ({N_EPISODES} episodes)...")

# Imported here rather than at the top: these modules only exist after the
# clone + pip install performed in step 1.
from app.env.env_core import PolyGuardEnv  # noqa: E402
from app.models.policy.prompt_templates import build_planner_prompt  # noqa: E402
from app.training.openenv_wrapper import PolyGuardEnvWrapper  # noqa: E402

env = PolyGuardEnv()
prompts: List[Dict[str, Any]] = []

# One prompt per episode; each episode uses a distinct, reproducible seed so
# the whole rollout set is deterministic for a given SEED.
for ep_idx in range(N_EPISODES):
    seed_i = SEED + ep_idx
    obs, _ = env.reset(seed=seed_i)
    prompt_text = build_planner_prompt(obs)
    prompts.append({
        "prompt": prompt_text,
        "seed": seed_i,
        "episode": ep_idx,
        # NOTE(review): assumes obs may expose a `difficulty` attribute;
        # "medium" is the fallback when it does not — confirm against env.
        "difficulty": getattr(obs, "difficulty", "medium"),
    })

print(f" Built {len(prompts)} GRPO prompts.")
# Persist prompts for debugging/reproducibility. Explicit encoding so the
# output does not depend on the container's locale (fixed: was missing).
with open(PROMPTS_PATH, "w", encoding="utf-8") as f:
    for row in prompts:
        f.write(json.dumps(row) + "\n")
138
+
139
+
140
# ─── Step 3: Load model from SFT checkpoint ─────────────────────────────────
print(f"\n[4/5] Loading SFT model from {SFT_MODEL_ID}...")

# Heavy ML imports are deferred until after the cheap rollout step so an
# environment failure surfaces before model download starts.
import torch
from datasets import Dataset
from huggingface_hub import HfApi  # NOTE(review): imported but unused in this file
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import GRPOConfig, GRPOTrainer

# Prefer bf16 when the GPU supports it, otherwise fall back to fp16.
dtype = torch.bfloat16 if (torch.cuda.is_available() and torch.cuda.is_bf16_supported()) else torch.float16
print(f" CUDA: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f" GPU: {torch.cuda.get_device_name(0)}")

# Tokenizer is taken from the SFT repo so special tokens / chat template
# match the checkpoint being further trained.
tokenizer = AutoTokenizer.from_pretrained(SFT_MODEL_ID, trust_remote_code=True)
if tokenizer.pad_token is None:
    # Some bases (e.g. Qwen) ship without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

# Try loading SFT adapter, fall back to base model
try:
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
    model = PeftModel.from_pretrained(base_model, SFT_MODEL_ID)
    print(" Loaded SFT adapter on top of base model.")
except Exception as e:
    # Deliberate broad catch: any failure (missing repo, auth, bad adapter)
    # degrades to GRPO training from the plain base model instead of aborting.
    print(f" SFT load failed ({e}), falling back to base model...")
    model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL_NAME,
        torch_dtype=dtype,
        device_map="auto",
        trust_remote_code=True,
    )
177
+
178
+
179
# ─── Step 4: Build grouped reward function ──────────────────────────────────
# Shared environment wrapper used by every reward group to score rollouts.
env_wrapper = PolyGuardEnvWrapper(score_timeout_s=30.0)

def build_polyguard_reward_fn(group_name: str) -> Callable:
    """Create the scoring callable for a single reward group."""
    # Local import: app.training only becomes importable after step 1's install.
    from app.training.grpo_rewards import make_group_reward_fn
    return make_group_reward_fn(env_wrapper=env_wrapper, group_name=group_name)

# TRL GRPO expects a single reward function that returns List[float]
# We combine the 4 reward groups into a weighted aggregate
from app.training.grpo_rewards import REWARD_GROUPS  # noqa: E402

# One scoring callable per declared reward group.
_group_fns: Dict[str, Callable] = {}
for _group in REWARD_GROUPS:
    _group_fns[_group] = build_polyguard_reward_fn(_group)
191
+
192
def combined_reward_fn(
    prompts: List[str],
    completions: List[str],
    **kwargs,
) -> List[float]:
    """Weighted aggregate of all reward groups, one score per completion.

    Each group's rewards are scaled by its weight (groups not listed below
    default to 0.25) and summed element-wise; the totals are rounded to
    three decimals and clamped into [0.001, 0.999].
    """
    group_weights = {
        "format_schema": 0.20,
        "clinical_safety": 0.45,
        "process_grounding": 0.20,
        "anti_hack": 0.15,
    }
    totals = [0.0 for _ in prompts]
    for group_name, reward_fn in _group_fns.items():
        weight = group_weights.get(group_name, 0.25)
        scores = reward_fn(prompts=prompts, completions=completions, **kwargs)
        for idx, score in enumerate(scores):
            totals[idx] += weight * float(score)
    # Clamp to [0.001, 0.999]
    return [min(0.999, max(0.001, round(total, 3))) for total in totals]
212
+
213
+
214
# ─── Step 5: GRPO Training ──────────────────────────────────────────────────
# The trainer only needs the raw prompt text; shuffle for varied batches.
prompt_rows = [{"prompt": p["prompt"]} for p in prompts]
ds = Dataset.from_list(prompt_rows).shuffle(seed=SEED)

# Superset of GRPOConfig options — filtered below against the installed TRL
# version so the script keeps working across TRL releases.
grpo_config_kwargs: Dict[str, Any] = {
    "output_dir": OUTPUT_DIR,
    "num_train_epochs": EPOCHS,
    "per_device_train_batch_size": BATCH_SIZE,
    "gradient_accumulation_steps": GRAD_ACCUM,
    "learning_rate": 1e-5,
    "bf16": dtype == torch.bfloat16,
    "fp16": dtype == torch.float16,
    "logging_steps": 5,
    "save_steps": 50,
    "save_total_limit": 2,
    "seed": SEED,
    "report_to": ["trackio"],
    "run_name": "polyguard-grpo-qwen",
    "project": "polyguard-training",
    "push_to_hub": True,
    "hub_model_id": HUB_MODEL_ID,
    "hub_strategy": "every_save",
    "num_generations": GROUP_SIZE,
    "max_prompt_length": MAX_PROMPT_LEN,
    "max_completion_length": MAX_COMPLETION_LEN,
    "temperature": 0.9,
    "beta": 0.1,
}

# Keep only the kwargs this TRL version's GRPOConfig actually accepts.
supported = set(inspect.signature(GRPOConfig).parameters)
filtered_kwargs = {key: val for key, val in grpo_config_kwargs.items() if key in supported}
grpo_config = GRPOConfig(**filtered_kwargs)
244
+
245
# Build the trainer kwargs defensively: newer TRL renamed `tokenizer` to
# `processing_class`, so probe the constructor signature first.
accepted_params = set(inspect.signature(GRPOTrainer).parameters)

trainer_kwargs: Dict[str, Any] = {
    "model": model,
    "args": grpo_config,
    "train_dataset": ds,
    "reward_funcs": combined_reward_fn,
}
if "processing_class" in accepted_params:
    trainer_kwargs["processing_class"] = tokenizer
elif "tokenizer" in accepted_params:
    trainer_kwargs["tokenizer"] = tokenizer

# Drop anything the installed GRPOTrainer does not accept.
trainer = GRPOTrainer(**{name: value for name, value in trainer_kwargs.items() if name in accepted_params})
258
+
259
+ print(f"\n GRPO dataset size: {len(ds)}")
260
+ print(f" Group size: {GROUP_SIZE}")
261
+ print(f" Starting GRPO training...\n")
262
+
263
+ train_result = trainer.train()
264
+
265
+ print("\n GRPO training complete.")
266
+ print(f" Steps: {train_result.global_step}")
267
+
268
+ print(f"\n[5/5] Pushing GRPO model to Hub: {HUB_MODEL_ID}...")
269
+ trainer.push_to_hub()
270
+ tokenizer.push_to_hub(HUB_MODEL_ID)
271
+
272
+ print("\n" + "=" * 60)
273
+ print("GRPO training complete!")
274
+ print(f"Model saved to: https://huggingface.co/{HUB_MODEL_ID}")
275
+ print("=" * 60)