# training-scripts/scripts/eval_humaneval_hf.py
# /// script
# dependencies = ["transformers>=4.46.0", "torch", "peft", "bitsandbytes", "accelerate", "datasets", "evalplus", "tqdm", "protobuf", "sentencepiece", "mistral-common>=1.5.0", "huggingface_hub"]
# ///
"""
HumanEval Evaluation: Base Devstral vs Fine-tuned Alizee-Coder
Runs on HF Jobs with GPU support
VERSION: 2.0 - Proper code extraction for both base and fine-tuned models
"""
import os
import re
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi
print("=" * 60)
print("EVALUATION: Devstral-Small vs Alizee-Coder-Devstral")
print("Benchmark: HumanEval (via EvalPlus)")
print("=" * 60)
# Configuration
BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
NUM_SAMPLES_PER_PROBLEM = 1
TEMPERATURE = 0.1
MAX_NEW_TOKENS = 512
# Check GPU
print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

def load_humaneval():
    """Load the HumanEval+ problem set from EvalPlus"""
    print("\nLoading HumanEval dataset...")
    dataset = load_dataset("evalplus/humanevalplus", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset
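
# Sanity note: the rows are expected to follow the HumanEval schema (task_id, prompt,
# entry_point, ...). A cheap assertion before a long GPU run could be, for example:
#   assert {"task_id", "prompt", "entry_point"} <= set(load_humaneval()[0].keys())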

def load_model(model_name, adapter_name=None):
    """Load model with optional LoRA adapter"""
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
    )
    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        # Merge the adapter into the base weights for faster inference
        model = model.merge_and_unload()
        print("Adapter merged")
    model.eval()
    return model, tokenizer

def extract_python_code(text):
    """Extract Python code from model output"""
    # Try ```python blocks first
    pattern = r'```python\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()
    # Try anonymous ``` blocks
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()
    # No fenced block: return the text as-is
    return text.strip()
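
# Illustrative example (not executed): for a chat-style reply such as
#   "Sure!\n```python\ndef add(a, b):\n    return a + b\n```"
# extract_python_code returns "def add(a, b):\n    return a + b".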

def generate_completion_base(model, tokenizer, prompt):
    """Generate code completion for BASE model (handles chat-like responses)"""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    raw_completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    # Try to extract code from ```python blocks (if the model generates a chat-like response)
    completion = extract_python_code(raw_completion)
    # If the extracted code looks like a full function, keep just the body
    if completion.strip().startswith("def "):
        lines = completion.split('\n')
        body_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                body_lines.append(line)
        if body_lines:
            completion = '\n'.join(body_lines)
    # If there was no code block, use the raw completion
    elif completion == raw_completion.strip():
        completion = raw_completion
    # Stop at the next function/class boundary
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in completion:
            completion = completion[:completion.index(stop)]
    return completion
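
# Optional refinement (a sketch, not wired in above): the stop strings are only applied
# after generation finishes. A StoppingCriteria can cut generation short at the same
# boundaries and save tokens. StopOnSubstrings is our own helper name, not a transformers
# class; decoding the generated tail at every step adds some overhead.
from transformers import StoppingCriteria, StoppingCriteriaList

class StopOnSubstrings(StoppingCriteria):
    """Stop generation once any stop string appears in the newly generated text (batch size 1 assumed)."""
    def __init__(self, tokenizer, stop_strings, prompt_len):
        self.tokenizer = tokenizer
        self.stop_strings = stop_strings
        self.prompt_len = prompt_len

    def __call__(self, input_ids, scores, **kwargs):
        new_text = self.tokenizer.decode(input_ids[0][self.prompt_len:], skip_special_tokens=True)
        return any(s in new_text for s in self.stop_strings)

# Usage sketch:
#   criteria = StoppingCriteriaList([StopOnSubstrings(tokenizer, ["\ndef ", "\nclass "], inputs["input_ids"].shape[1])])
#   model.generate(**inputs, stopping_criteria=criteria, ...)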

def generate_completion_finetuned(model, tokenizer, prompt, problem_text):
    """Generate code completion for FINE-TUNED model (Instruct format)"""
    instruct_prompt = f"<s>[INST] Solve this programming problem with detailed reasoning:\n\n{problem_text}\n\nComplete the following function:\n{prompt}\n[/INST]"
    inputs = tokenizer(instruct_prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,  # more tokens to leave room for reasoning
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)
    # Extract just the function body if we got the full function back
    if "def " in code:
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)
    return code
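
# Alternative prompt construction (a sketch, unused above): if the adapter's tokenizer
# ships a chat template, apply_chat_template builds the [INST] wrapper and special tokens
# itself, which avoids the hand-written "<s>" prefix above possibly duplicating the BOS
# token the tokenizer already adds. build_instruct_inputs is our own helper name.
def build_instruct_inputs(tokenizer, problem_text, prompt, device):
    user_msg = (
        "Solve this programming problem with detailed reasoning:\n\n"
        f"{problem_text}\n\nComplete the following function:\n{prompt}"
    )
    if getattr(tokenizer, "chat_template", None):
        input_ids = tokenizer.apply_chat_template(
            [{"role": "user", "content": user_msg}],
            add_generation_prompt=True,
            return_tensors="pt",
        )
        return input_ids.to(device)
    # Fallback: same hand-rolled [INST] format as generate_completion_finetuned above
    return tokenizer(f"<s>[INST] {user_msg}\n[/INST]", return_tensors="pt").input_ids.to(device)
# Usage sketch: model.generate(input_ids=build_instruct_inputs(...), max_new_tokens=...)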

def evaluate_model(model, tokenizer, dataset, model_name, is_finetuned=False):
    """Generate completions for every HumanEval problem and return them as samples"""
    print(f"\nEvaluating {model_name}...")
    samples = []
    for problem in tqdm(dataset, desc=f"Generating ({model_name})"):
        task_id = problem["task_id"]
        prompt = problem["prompt"]
        for _ in range(NUM_SAMPLES_PER_PROBLEM):
            try:
                if is_finetuned:
                    completion = generate_completion_finetuned(model, tokenizer, prompt, prompt)
                else:
                    completion = generate_completion_base(model, tokenizer, prompt)
                samples.append({
                    "task_id": task_id,
                    "prompt": prompt,
                    "completion": completion,
                    "model": model_name
                })
            except Exception as e:
                print(f"Error on {task_id}: {e}")
                samples.append({
                    "task_id": task_id,
                    "prompt": prompt,
                    "completion": "# Error during generation",
                    "model": model_name
                })
    return samples

def simple_syntax_check(code):
    """Basic syntax validation (compiles the code without executing it)"""
    try:
        compile(code, '<string>', 'exec')
        return True
    except SyntaxError:
        return False

def evaluate_samples(samples, dataset):
    """Lightweight scoring: syntax check plus executing the completion and verifying that
    the entry-point function is defined. The official HumanEval unit tests are not run,
    so the reported "pass@1" is an optimistic proxy."""
    results = {"passed": 0, "failed": 0, "error": 0}
    detailed = []
    for sample in samples:
        task_id = sample["task_id"]
        completion = sample["completion"]
        # Find the matching problem
        problem = None
        for p in dataset:
            if p["task_id"] == task_id:
                problem = p
                break
        if problem is None:
            results["error"] += 1
            continue
        # Combine prompt + completion into a full candidate solution
        full_code = problem["prompt"] + completion
        # Syntax check
        if not simple_syntax_check(full_code):
            results["failed"] += 1
            detailed.append({"task_id": task_id, "status": "syntax_error"})
            continue
        # Execute the candidate and check that the entry-point function exists
        try:
            exec_globals = {}
            exec(full_code, exec_globals)
            # Get entry point
            entry_point = problem.get("entry_point", task_id.split("/")[-1])
            if entry_point in exec_globals:
                results["passed"] += 1
                detailed.append({"task_id": task_id, "status": "passed"})
            else:
                results["failed"] += 1
                detailed.append({"task_id": task_id, "status": "missing_function"})
        except Exception as e:
            results["error"] += 1
            detailed.append({"task_id": task_id, "status": "runtime_error", "error": str(e)[:100]})
    total = len(samples)
    pass_rate = results["passed"] / total if total > 0 else 0
    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
        "detailed": detailed[:10]  # first 10 for inspection
    }
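
# Stricter scoring (a sketch, not used by main below): evaluate_samples only checks that
# the entry-point function gets defined, so its "pass@1" is an optimistic proxy. Assuming
# each EvalPlus row carries the standard HumanEval "test" field defining check(candidate),
# the real unit tests could be run like this. This executes untrusted model output, so it
# should only be run in a sandboxed environment; the timeout guards against infinite loops.
import multiprocessing

def _run_check(full_code, test_code, entry_point, queue):
    try:
        env = {}
        exec(full_code, env)             # define the candidate solution
        exec(test_code, env)             # define check(candidate)
        env["check"](env[entry_point])   # raises AssertionError on a failing test
        queue.put("passed")
    except Exception as e:
        queue.put(f"failed: {type(e).__name__}")

def run_official_tests(problem, completion, timeout=15.0):
    """Hypothetical helper: run the problem's own check() against prompt + completion."""
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(
        target=_run_check,
        args=(problem["prompt"] + completion, problem["test"], problem["entry_point"], queue),
    )
    proc.start()
    proc.join(timeout)
    if proc.is_alive():
        proc.terminate()
        proc.join()
        return "timeout"
    return queue.get() if not queue.empty() else "failed: no result"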

def main():
    # Load dataset
    dataset = load_humaneval()
    results = {}
    all_samples = {}

    # Evaluate base model
    print("\n" + "=" * 60)
    print("EVALUATING BASE MODEL")
    print("=" * 60)
    base_model, base_tokenizer = load_model(BASE_MODEL)
    base_samples = evaluate_model(base_model, base_tokenizer, dataset, "Devstral-Small-Base", is_finetuned=False)
    results["base"] = evaluate_samples(base_samples, dataset)
    all_samples["base"] = base_samples
    print(f"\nBase Model Results: pass@1 = {results['base']['pass@1']*100:.2f}%")

    # Free GPU memory before loading the fine-tuned variant
    del base_model
    torch.cuda.empty_cache()

    # Evaluate fine-tuned model
    print("\n" + "=" * 60)
    print("EVALUATING FINE-TUNED MODEL")
    print("=" * 60)
    ft_model, ft_tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)
    ft_samples = evaluate_model(ft_model, ft_tokenizer, dataset, "Alizee-Coder-Devstral", is_finetuned=True)
    results["finetuned"] = evaluate_samples(ft_samples, dataset)
    all_samples["finetuned"] = ft_samples
    print(f"\nFine-tuned Model Results: pass@1 = {results['finetuned']['pass@1']*100:.2f}%")

    # Summary
    print("\n" + "=" * 60)
    print("COMPARISON SUMMARY")
    print("=" * 60)
    print(f"\n{'Model':<40} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 70)
    print(f"{'Devstral-Small-2505 (Base)':<40} {results['base']['pass@1']*100:>9.2f}% {results['base']['passed']:>8} {results['base']['failed']:>8}")
    print(f"{'Alizee-Coder-Devstral (Fine-tuned)':<40} {results['finetuned']['pass@1']*100:>9.2f}% {results['finetuned']['passed']:>8} {results['finetuned']['failed']:>8}")
    improvement = (results['finetuned']['pass@1'] - results['base']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<40} {sign}{improvement:>9.2f}%")

    # Save results
    output = {
        "benchmark": "HumanEval",
        "base_model": BASE_MODEL,
        "finetuned_model": FINETUNED_ADAPTER,
        "results": {
            "base": {
                "pass@1": float(results['base']['pass@1']),
                "passed": results['base']['passed'],
                "failed": results['base']['failed'],
                "total": results['base']['total']
            },
            "finetuned": {
                "pass@1": float(results['finetuned']['pass@1']),
                "passed": results['finetuned']['passed'],
                "failed": results['finetuned']['failed'],
                "total": results['finetuned']['total']
            },
            "improvement": float(improvement)
        },
        "samples": {
            "base": base_samples[:5],  # first 5 samples for inspection
            "finetuned": ft_samples[:5]
        }
    }

    # Save locally
    with open("eval_results_humaneval.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nResults saved to eval_results_humaneval.json")

    # Upload the results file to the model repo
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_results_humaneval.json",
            path_in_repo="eval_results_humaneval.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"Results uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload results: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)

if __name__ == "__main__":
    main()