Commit: Upload eval_lfm_baseline.py with huggingface_hub
File changed: eval_lfm_baseline.py (+2 −2)
@@ -17,9 +17,9 @@ tasks = "leaderboard|mmlu|5,leaderboard|hellaswag|0,leaderboard|arc_challenge|25

 cmd = [
     sys.executable, "-m", "lighteval", "accelerate",
-    "--<removed line truncated in extraction; likely an earlier --model-args value>
+    "--model-args", "pretrained=LiquidAI/LFM2.5-1.2B-Instruct,trust_remote_code=True",
     "--tasks", tasks,
-    "--<removed line truncated in extraction; likely an earlier --output-dir value>
+    "--output-dir", "./eval_results_baseline",
 ]

 print(f"Running: {' '.join(cmd)}")