Can't reproduce the results on HumanEval

#18
by JingyaoLi - opened

Hello, may I ask how you conducted the evaluation on HumanEval? I tried both methods described in your Hugging Face blog post, code completion and code infilling, on HumanEval, but I only get 22% and 25% with CodeLlama-7B-Instruct, which is far from the reported 35%. My code is below.

import torch
import transformers

args.max_length = 1024

if args.task == 'code-infilling':
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.model_path)
    model = transformers.AutoModelForCausalLM.from_pretrained(
        args.model_path,
        torch_dtype=torch.float16,
    ).to("cuda")
    for task_id in problems:
        # Insert <FILL_ME> so the model fills in the function body
        prompt = problems[task_id]['question'] + '<FILL_ME>\n    return result'
        input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda")
        output = model.generate(input_ids, max_new_tokens=args.max_length)
        output = output[0].to("cpu")
        filling = tokenizer.decode(output[input_ids.shape[1]:], skip_special_tokens=True)
        completion = prompt.replace("<FILL_ME>", filling)

elif args.task == 'code-completion':
    tokenizer = transformers.AutoTokenizer.from_pretrained(args.model_path)
    pipeline = transformers.pipeline(
        "text-generation",
        model=args.model_path,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    for task_id in problems:
        prompt = problems[task_id]['question']
        completion = pipeline(
            prompt,
            do_sample=True,
            temperature=0.2,
            top_p=0.9,
            num_return_sequences=1,
            eos_token_id=tokenizer.eos_token_id,
            max_length=args.max_length,
        )[0]['generated_text'].strip()
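For reference, my scoring step is roughly the following: a minimal sketch built on the official openai/human-eval harness (`read_problems`, `write_jsonl`, and the `evaluate_functional_correctness` CLI). The `truncate_at_stop` helper and the Codex-style stop-word list are my own additions, not something from your blog post, so please correct me if your evaluation post-processes the generations differently.

```python
from human_eval.data import read_problems, write_jsonl

# Stop sequences from the Codex paper; text after the first one found is dropped.
STOP_WORDS = ["\nclass", "\ndef", "\n#", "\nif", "\nprint"]

def truncate_at_stop(text: str) -> str:
    # Hypothetical helper, not from the blog post: cut the raw generation at the
    # earliest stop sequence so trailing code doesn't break execution.
    cut = len(text)
    for stop in STOP_WORDS:
        idx = text.find(stop)
        if idx != -1:
            cut = min(cut, idx)
    return text[:cut]

problems = read_problems()  # each entry has 'task_id', 'prompt', 'entry_point', ...

# `completions` maps task_id -> raw model output collected in the loops above.
completions = {}
samples = [
    {"task_id": task_id, "completion": truncate_at_stop(completions[task_id])}
    for task_id in completions
]
write_jsonl("samples.jsonl", samples)
# Scored afterwards with the official CLI:
#   evaluate_functional_correctness samples.jsonl
```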
