"""
HumanEval Evaluation v3 LITE: Direct Code Prompt

Reduced dependencies, minimal storage usage.
"""

import os
import re
import json
import gc

# Redirect all Hugging Face caches to /tmp so model downloads do not consume persistent
# storage. These are set before transformers / huggingface_hub are imported below so the
# libraries pick up the redirected paths.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HOME"] = "/tmp/hf_home"
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
from datasets import load_dataset
from tqdm import tqdm
from huggingface_hub import HfApi

print("=" * 60)
print("EVALUATION v3 LITE: Direct Code Prompt Test")
print("Benchmark: HumanEval")
print("=" * 60)

BASE_MODEL = "mistralai/Devstral-Small-2505"
FINETUNED_ADAPTER = "stmasson/alizee-coder-devstral-1-small"
OUTPUT_REPO = "stmasson/alizee-coder-devstral-1-small"
TEMPERATURE = 0.1       # near-greedy sampling
MAX_NEW_TOKENS = 512    # generation budget per completion (doubled for the reasoning prompt)

print(f"\nGPU available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    print(f"Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
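
# 4-bit NF4 quantization with double quantization roughly quarters weight memory relative to
# fp16, at some cost in precision; bfloat16 is used as the compute dtype. (This is a general
# property of bitsandbytes NF4, not a measurement from this script.)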
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
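

# HumanEval: 164 hand-written Python problems. The "openai/openai_humaneval" dataset exposes
# them as a single "test" split with "prompt", "entry_point", and "test" fields per problem.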
def load_humaneval():
    """Load HumanEval dataset"""
    print("\nLoading HumanEval dataset...")
    dataset = load_dataset("openai/openai_humaneval", split="test")
    print(f"Loaded {len(dataset)} problems")
    return dataset


def load_model(model_name, adapter_name=None):
    """Load model with optional LoRA adapter"""
    print(f"\nLoading model: {model_name}")
    if adapter_name:
        print(f"With adapter: {adapter_name}")

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        low_cpu_mem_usage=True,
    )

    if adapter_name:
        print("Loading LoRA adapter...")
        model = PeftModel.from_pretrained(model, adapter_name)
        # Fold the adapter weights into the base model so generation does not pay the LoRA
        # indirection; with a 4-bit base this may introduce small rounding differences.
        model = model.merge_and_unload()
        print("Adapter merged")

    model.eval()

    gc.collect()
    torch.cuda.empty_cache()

    return model, tokenizer
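

# Illustrative behaviour of the extractor below (typical model output shapes, for reference):
#   "...```python\nreturn x + 1\n```..."  ->  "return x + 1"   (last fenced block wins)
#   "return x + 1"                        ->  "return x + 1"   (no fences: raw text, stripped)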
def extract_python_code(text):
    """Extract Python code from model output"""
    # Prefer explicitly tagged ```python blocks.
    pattern = r'```python\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # Fall back to any fenced block.
    pattern = r'```\s*(.*?)\s*```'
    matches = re.findall(pattern, text, re.DOTALL)
    if matches:
        return matches[-1].strip()

    # No fences: assume the whole output is code.
    return text.strip()
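

# Prompting note: the [INST] ... [/INST] wrapper used below follows the Mistral instruct
# template. The literal "<s>" is kept from the original prompt string; most Mistral tokenizers
# also prepend a BOS token automatically, so it is likely redundant but is left unchanged here.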
def generate_completion_direct(model, tokenizer, prompt):
    """Generate code with a DIRECT CODE prompt (no reasoning)"""
    instruct_prompt = f"""<s>[INST] Complete this Python function. Output ONLY the function body code, no explanations:

{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens (everything after the prompt).
    raw_completion = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    completion = extract_python_code(raw_completion)

    # If the model re-emitted the full "def ..." line, keep only the function body so the
    # completion can be appended directly to the HumanEval prompt.
    if completion.strip().startswith("def "):
        lines = completion.split('\n')
        body_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                body_lines.append(line)
        if body_lines:
            completion = '\n'.join(body_lines)
    elif completion == raw_completion.strip():
        # No fenced block was found: keep the raw decoding, preserving its indentation.
        completion = raw_completion

    # Trim anything generated past the target function.
    stop_tokens = ["\ndef ", "\nclass ", "\nif __name__", "\n\n\n"]
    for stop in stop_tokens:
        if stop in completion:
            completion = completion[:completion.index(stop)]

    return completion


def generate_completion_reasoning(model, tokenizer, prompt):
    """Generate code with a REASONING prompt (original approach)"""
    instruct_prompt = f"""<s>[INST] Solve this programming problem with detailed reasoning:

Complete the following function:
{prompt}[/INST]"""

    inputs = tokenizer(instruct_prompt, return_tensors="pt", truncation=True, max_length=2048).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS * 2,  # extra budget for reasoning before the code
            temperature=TEMPERATURE,
            do_sample=TEMPERATURE > 0,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    full_response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
    code = extract_python_code(full_response)

    # As in generate_completion_direct, drop a re-emitted "def ..." line and return the body.
    if "def " in code:
        lines = code.split('\n')
        result_lines = []
        in_function = False
        for line in lines:
            if line.strip().startswith("def "):
                in_function = True
                continue
            if in_function:
                result_lines.append(line)
        if result_lines:
            return '\n'.join(result_lines)

    return code
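

# Illustrative: simple_syntax_check("def f():\n    return 1") -> True;
# simple_syntax_check("def f(:") -> False (the SyntaxError is caught).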
def simple_syntax_check(code):
    """Basic syntax validation"""
    try:
        compile(code, '<string>', 'exec')
        return True
    except SyntaxError:
        return False
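

# The scorer below is a lightweight proxy: it only checks that each completion compiles and
# defines the expected entry point. For reference-quality pass@1, the samples could instead be
# written to JSONL and scored with OpenAI's human-eval harness (evaluate_functional_correctness),
# which actually executes the hidden unit tests.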
def evaluate_samples(samples, dataset):
    """Lightweight scoring: count a sample as passed if prompt + completion compiles and
    defines the expected entry point. HumanEval's hidden unit tests are NOT executed, so the
    reported "pass@1" is an optimistic proxy rather than the official metric."""
    results = {"passed": 0, "failed": 0, "error": 0}

    dataset_dict = {p["task_id"]: p for p in dataset}

    for sample in samples:
        task_id = sample["task_id"]
        completion = sample["completion"]

        problem = dataset_dict.get(task_id)
        if problem is None:
            results["error"] += 1
            continue

        full_code = problem["prompt"] + completion

        if not simple_syntax_check(full_code):
            results["failed"] += 1
            continue

        try:
            # Executes model-generated code in-process; use an isolated environment
            # if the outputs are untrusted.
            exec_globals = {}
            exec(full_code, exec_globals)
            entry_point = problem.get("entry_point", task_id.split("/")[-1])
            if entry_point in exec_globals:
                results["passed"] += 1
            else:
                results["failed"] += 1
        except Exception:
            results["error"] += 1

    total = len(samples)
    pass_rate = results["passed"] / total if total > 0 else 0

    return {
        "pass@1": pass_rate,
        "passed": results["passed"],
        "failed": results["failed"],
        "error": results["error"],
        "total": total,
    }


def main():
    dataset = load_humaneval()

    print("\n" + "=" * 60)
    print("LOADING FINE-TUNED MODEL")
    print("=" * 60)
    model, tokenizer = load_model(BASE_MODEL, FINETUNED_ADAPTER)

    results = {}

    print("\n" + "=" * 60)
    print("TEST 1: DIRECT CODE PROMPT")
    print("=" * 60)
    direct_samples = []
    for problem in tqdm(dataset, desc="Direct Prompt"):
        try:
            completion = generate_completion_direct(model, tokenizer, problem["prompt"])
            direct_samples.append({
                "task_id": problem["task_id"],
                "completion": completion,
            })
        except Exception:
            # Record a placeholder so every task is still counted in the results.
            direct_samples.append({
                "task_id": problem["task_id"],
                "completion": "# Error",
            })
    results["direct"] = evaluate_samples(direct_samples, dataset)
    print(f"\nDirect Prompt: pass@1 = {results['direct']['pass@1']*100:.2f}%")

    print("\n" + "=" * 60)
    print("TEST 2: REASONING PROMPT")
    print("=" * 60)
    reasoning_samples = []
    for problem in tqdm(dataset, desc="Reasoning Prompt"):
        try:
            completion = generate_completion_reasoning(model, tokenizer, problem["prompt"])
            reasoning_samples.append({
                "task_id": problem["task_id"],
                "completion": completion,
            })
        except Exception:
            reasoning_samples.append({
                "task_id": problem["task_id"],
                "completion": "# Error",
            })
    results["reasoning"] = evaluate_samples(reasoning_samples, dataset)
    print(f"\nReasoning Prompt: pass@1 = {results['reasoning']['pass@1']*100:.2f}%")

    print("\n" + "=" * 60)
    print("PROMPT COMPARISON - HumanEval")
    print("=" * 60)
    print(f"\n{'Prompt Type':<25} {'pass@1':>10} {'Passed':>8} {'Failed':>8}")
    print("-" * 55)
    print(f"{'Direct Code':<25} {results['direct']['pass@1']*100:>9.2f}% {results['direct']['passed']:>8} {results['direct']['failed']:>8}")
    print(f"{'Reasoning':<25} {results['reasoning']['pass@1']*100:>9.2f}% {results['reasoning']['passed']:>8} {results['reasoning']['failed']:>8}")

    # Improvement of the direct prompt over the reasoning prompt, in percentage points.
    improvement = (results['direct']['pass@1'] - results['reasoning']['pass@1']) * 100
    sign = "+" if improvement >= 0 else ""
    print(f"\n{'Improvement:':<25} {sign}{improvement:>9.2f}%")
    # Hardcoded reference score for the base model, used only for display.
    print(f"{'Base Model Reference:':<25} {'82.93%':>10}")

    output = {
        "benchmark": "HumanEval",
        "experiment": "Prompt Comparison",
        "results": {
            "direct": results["direct"],
            "reasoning": results["reasoning"],
            "improvement": float(improvement),
        },
    }

    with open("eval_prompt_comparison.json", "w") as f:
        json.dump(output, f, indent=2)
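
    # Uploading requires an authenticated Hugging Face session with write access to
    # OUTPUT_REPO (e.g. an HF_TOKEN environment variable or a prior `huggingface-cli login`).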
    try:
        api = HfApi()
        api.upload_file(
            path_or_fileobj="eval_prompt_comparison.json",
            path_in_repo="eval_prompt_comparison.json",
            repo_id=OUTPUT_REPO,
            repo_type="model",
        )
        print(f"\nResults uploaded to {OUTPUT_REPO}")
    except Exception as e:
        print(f"Could not upload: {e}")

    print("\n" + "=" * 60)
    print("EVALUATION COMPLETE")
    print("=" * 60)


if __name__ == "__main__":
    main()