import logging

from lm_eval import tasks, evaluator, utils

from src.backend.manage_requests import EvalRequest

logging.getLogger("openai").setLevel(logging.WARNING)


def run_evaluation(eval_request: EvalRequest, task_names, num_fewshot, batch_size, device, no_cache=True, limit=None):
    """Run the lm-eval-harness on the model described by `eval_request` and return the results dict."""
    if limit:
        print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")

    # Expand task name patterns (e.g. wildcards) against the full harness task registry.
    task_names = utils.pattern_match(task_names, tasks.ALL_TASKS)
    print(f"Selected Tasks: {task_names}")

    results = evaluator.simple_evaluate(
        model="hf-causal-experimental",  # "hf-causal"
        model_args=eval_request.get_model_args(),
        tasks=task_names,
        num_fewshot=num_fewshot,
        batch_size=batch_size,
        device=device,
        no_cache=no_cache,
        limit=limit,
        write_out=True,
        output_base_path="logs",
    )

    # Record the evaluated model's metadata alongside the harness config.
    results["config"]["model_dtype"] = eval_request.precision
    results["config"]["model_name"] = eval_request.model
    results["config"]["model_sha"] = eval_request.revision

    print(evaluator.make_table(results))

    return results