stmasson committed (verified)
Commit 9f82e21 · 1 Parent(s): d66cd5b

Upload scripts/eval_prompt_comparison.py with huggingface_hub

Files changed (1)
  1. scripts/eval_prompt_comparison.py +342 -0
scripts/eval_prompt_comparison.py ADDED
@@ -0,0 +1,342 @@
+ # /// script
+ # dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "evalplus", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
+ # ///
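+ # The block above is PEP 723 inline script metadata, so a PEP 723-aware runner
+ # (e.g. `uv run scripts/eval_prompt_comparison.py`) can resolve the dependencies
+ # automatically; a plain `python` invocation assumes they are already installed.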
+
+ """
+ Prompt Comparison: Direct Code vs Reasoning
+ Tests whether a "code only" prompt improves the fine-tuned model's HumanEval score.
+
+ VERSION: 1.0
+ """
+
+ import os
+ import re
+ import json
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel
+ from datasets import load_dataset
+ from tqdm import tqdm
+ from huggingface_hub import HfApi
+
+ print("=" * 60)
+ print("PROMPT COMPARISON TEST")
+ print("Direct Code vs Reasoning Prompt")
+ print("Benchmark: HumanEval")
+ print("=" * 60)
+
+ # Configuration
+ BASE_MODEL = "mistralai/Devstral-Small-2505"
+ FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
+ OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
+ TEMPERATURE = 0.1
+ MAX_NEW_TOKENS = 512
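+ # NOTE: TEMPERATURE > 0 means do_sample=True in the generation calls below, so
+ # decoding is stochastic and pass@1 figures can vary slightly between runs.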
+
+ # Check GPU
+ print(f"\nGPU available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     print(f"GPU: {torch.cuda.get_device_name(0)}")
+     print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
+
+ # 4-bit quantization config
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_use_double_quant=True,
+ )
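+ # 4-bit NF4 with double quantization and bf16 compute trades a little precision for a
+ # much smaller memory footprint, so the evaluation should fit on a single GPU
+ # (assumption: the card has enough VRAM for the quantized Devstral weights).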
+
+ def load_humaneval():
+     """Load the HumanEval problems (via the EvalPlus humanevalplus dataset)"""
+     print("\nLoading HumanEval dataset...")
+     dataset = load_dataset("evalplus/humanevalplus", split="test")
+     print(f"Loaded {len(dataset)} problems")
+     return dataset
+
+ def load_model(model_name, adapter_name=None):
+     """Load model with optional LoRA adapter"""
+     print(f"\nLoading model: {model_name}")
+     if adapter_name:
+         print(f"With adapter: {adapter_name}")
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         quantization_config=bnb_config,
+         device_map="auto",
+         trust_remote_code=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     if adapter_name:
+         print("Loading LoRA adapter...")
+         model = PeftModel.from_pretrained(model, adapter_name)
+         model = model.merge_and_unload()
+         print("Adapter merged")
+
+     model.eval()
+     return model, tokenizer
+
+ def extract_python_code(text):
+     """Extract Python code from model output"""
+     pattern = r'```python\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     pattern = r'```\s*(.*?)\s*```'
+     matches = re.findall(pattern, text, re.DOTALL)
+     if matches:
+         return matches[-1].strip()
+
+     return text.strip()
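+ # extract_python_code() above falls back in order: the last ```python fenced block,
+ # then the last generic ``` block, then the raw stripped text when the model answered
+ # without any markdown fence at all.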
+
+ def generate_direct_prompt(model, tokenizer, prompt):
+     """Generate with DIRECT CODE prompt - no reasoning, just code"""
+     instruct_prompt = f"<s>[INST] Complete this Python function. Output ONLY the function body code, no explanations or markdown:\n\n{prompt}[/INST]"
+
+     inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS,
+             temperature=TEMPERATURE,
+             do_sample=True if TEMPERATURE > 0 else False,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     raw = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     completion = extract_python_code(raw)
+
+     # If the model returned a full function, keep only its body
+     if completion.strip().startswith("def "):
+         lines = completion.split('\n')
+         body_lines = []
+         in_function = False
+         for line in lines:
+             if line.strip().startswith("def "):
+                 in_function = True
+                 continue
+             if in_function:
+                 body_lines.append(line)
+         if body_lines:
+             completion = '\n'.join(body_lines)
+     elif completion == raw.strip():
+         # No code fence was found: keep the raw text so leading indentation survives
+         completion = raw
+
+     # Stop at boundaries
+     for stop in ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]:
+         if stop in completion:
+             completion = completion[:completion.index(stop)]
+
+     return completion
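+ # The stop strings above are a heuristic cut-off: anything emitted after a new
+ # top-level def/class, a __main__ guard, or a triple blank line is discarded so that
+ # stray extra code does not end up in the HumanEval completion.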
+
+ def generate_reasoning_prompt(model, tokenizer, prompt):
+     """Generate with REASONING prompt - original approach"""
+     instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\nComplete the following function:\n{prompt}\n[/INST]"
+
+     inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=MAX_NEW_TOKENS * 2,
+             temperature=TEMPERATURE,
+             do_sample=True if TEMPERATURE > 0 else False,
+             pad_token_id=tokenizer.pad_token_id,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+     code = extract_python_code(full_response)
+
+     if "def " in code:
+         lines = code.split('\n')
+         result_lines = []
+         in_function = False
+         for line in lines:
+             if line.strip().startswith("def "):
+                 in_function = True
+                 continue
+             if in_function:
+                 result_lines.append(line)
+         if result_lines:
+             return '\n'.join(result_lines)
+
+     return code
+
+ def simple_syntax_check(code):
+     """Basic syntax validation"""
+     try:
+         compile(code, '<string>', 'exec')
+         return True
+     except SyntaxError:
+         return False
+
+ def evaluate_samples(samples, dataset):
+     """Lightweight check: each completion must compile and define the expected
+     entry point (the HumanEval unit tests are not executed)."""
+     results = {"passed": 0, "failed": 0, "error": 0}
+
+     for sample in samples:
+         task_id = sample["task_id"]
+         completion = sample["completion"]
+
+         # Look up the corresponding problem by task_id
+         problem = None
+         for p in dataset:
+             if p["task_id"] == task_id:
+                 problem = p
+                 break
+
+         if problem is None:
+             results["error"] += 1
+             continue
+
+         full_code = problem["prompt"] + completion
+
+         if not simple_syntax_check(full_code):
+             results["failed"] += 1
+             continue
+
+         try:
+             exec_globals = {}
+             exec(full_code, exec_globals)
+             entry_point = problem.get("entry_point", task_id.split("/")[-1])
+             if entry_point in exec_globals:
+                 results["passed"] += 1
+             else:
+                 results["failed"] += 1
+         except Exception:
+             results["error"] += 1
+
+     total = len(samples)
+     pass_rate = results["passed"] / total if total > 0 else 0
+
+     return {
+         "pass@1": pass_rate,
+         "passed": results["passed"],
+         "failed": results["failed"],
+         "error": results["error"],
+         "total": total
+     }
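+ # evaluate_samples() above is a definition-level smoke test, so the "pass@1" it reports
+ # is not an official score; for that, the samples would have to go through the EvalPlus
+ # test harness (e.g. the `evalplus.evaluate` entry point shipped with the evalplus package).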
+
+ def main():
+     dataset = load_humaneval()
+
+     # Load fine-tuned model
+     print("\n" + "=" * 60)
+     print("LOADING FINE-TUNED MODEL")
+     print("=" * 60)
+     model, tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
+
+     results = {}
+
+     # Test 1: Direct prompt
+     print("\n" + "=" * 60)
+     print("TEST 1: DIRECT CODE PROMPT")
+     print("'Output ONLY the code, no explanations'")
+     print("=" * 60)
+     direct_samples = []
+     for problem in tqdm(dataset, desc="Direct Prompt"):
+         task_id = problem["task_id"]
+         prompt = problem["prompt"]
+         try:
+             completion = generate_direct_prompt(model, tokenizer, prompt)
+             direct_samples.append({"task_id": task_id, "completion": completion})
+         except Exception as e:
+             print(f"Error on {task_id}: {e}")
+             direct_samples.append({"task_id": task_id, "completion": "# Error"})
+
+     results["direct"] = evaluate_samples(direct_samples, dataset)
+     print(f"\nDirect Prompt Results: pass@1 = {results['direct']['pass@1']*100:.2f}%")
+
+     # Test 2: Reasoning prompt
+     print("\n" + "=" * 60)
+     print("TEST 2: REASONING PROMPT (original)")
+     print("'Solve with detailed reasoning'")
+     print("=" * 60)
+     reasoning_samples = []
+     for problem in tqdm(dataset, desc="Reasoning Prompt"):
+         task_id = problem["task_id"]
+         prompt = problem["prompt"]
+         try:
+             completion = generate_reasoning_prompt(model, tokenizer, prompt)
+             reasoning_samples.append({"task_id": task_id, "completion": completion})
+         except Exception as e:
+             print(f"Error on {task_id}: {e}")
+             reasoning_samples.append({"task_id": task_id, "completion": "# Error"})
+
+     results["reasoning"] = evaluate_samples(reasoning_samples, dataset)
+     print(f"\nReasoning Prompt Results: pass@1 = {results['reasoning']['pass@1']*100:.2f}%")
+
+     # Summary
+     print("\n" + "=" * 60)
+     print("PROMPT COMPARISON SUMMARY - HumanEval")
+     print("=" * 60)
+     print(f"\n{'Prompt Type':<30} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
+     print("-" * 60)
+     print(f"{'Direct Code Prompt':<30} {results['direct']['pass@1']*100:>9.2f}% {results['direct']['passed']:>8} {results['direct']['failed']:>8}")
+     print(f"{'Reasoning Prompt (original)':<30} {results['reasoning']['pass@1']*100:>9.2f}% {results['reasoning']['passed']:>8} {results['reasoning']['failed']:>8}")
+
+     # Difference in percentage points (positive = direct prompt is better)
+     improvement = (results['direct']['pass@1'] - results['reasoning']['pass@1']) * 100
+     sign = "+" if improvement >= 0 else ""
+     print(f"\n{'Improvement (Direct vs Reasoning):':<30} {sign}{improvement:>9.2f}%")
+
+     # Reference
+     print(f"\n{'Reference - Base Model (v2):':<30} {'82.93%':>10}")
+     print(f"{'Reference - Reasoning (v2):':<30} {'62.20%':>10}")
+
+     # Save results
+     output = {
+         "benchmark": "HumanEval",
+         "experiment": "Prompt Comparison",
+         "model": FINETUNED_ADAPTER,
+         "results": {
+             "direct_prompt": {
+                 "pass@1": float(results['direct']['pass@1']),
+                 "passed": results['direct']['passed'],
+                 "failed": results['direct']['failed'],
+                 "total": results['direct']['total']
+             },
+             "reasoning_prompt": {
+                 "pass@1": float(results['reasoning']['pass@1']),
+                 "passed": results['reasoning']['passed'],
+                 "failed": results['reasoning']['failed'],
+                 "total": results['reasoning']['total']
+             },
+             "improvement": float(improvement),
+             "base_model_reference": 0.8293,
+             "reasoning_reference": 0.6220
+         },
+         "samples": {
+             "direct": direct_samples[:3],
+             "reasoning": reasoning_samples[:3]
+         }
+     }
+
+     with open("eval_prompt_comparison.json", "w") as f:
+         json.dump(output, f, indent=2)
+     print("\nResults saved to eval_prompt_comparison.json")
+
+     try:
+         api = HfApi()
+         api.upload_file(
+             path_or_fileobj="eval_prompt_comparison.json",
+             path_in_repo="eval_prompt_comparison.json",
+             repo_id=OUTPUT_REPO,
+             repo_type="model",
+         )
+         print(f"Results uploaded to {OUTPUT_REPO}")
+     except Exception as e:
+         print(f"Could not upload results: {e}")
+
+     print("\n" + "=" * 60)
+     print("EVALUATION COMPLETE")
+     print("=" * 60)
+
+ if __name__ == "__main__":
+     main()