Add local run support
Files changed:
- yourbench_space/app.py (+5 -1)
- yourbench_space/evaluation.py (+7 -2)
yourbench_space/app.py

@@ -144,7 +144,11 @@ def enable_button(files):
 def run_evaluation_pipeline(oauth_token: gr.OAuthToken | None, org_name, eval_name, config_name="lighteval"):
     eval_ds_name = f"{org_name}/{eval_name}"
     repo_id = f"{org_name}/leaderboard_yourbench_{eval_ds_name.replace('/', '_')}"
-
+
+    if os.environ.get("SYSTEM") == "spaces":
+        folder_path = str(PATH / "yourbench_space" / "leaderboard_space")
+    else:
+        folder_path = str(PATH.parent / "yourbench_space" / "leaderboard_space")
 
     new_env = os.environ.copy()
     if oauth_token:
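The app.py change picks the leaderboard_space folder based on the SYSTEM environment variable, which Hugging Face Spaces sets to "spaces"; in a local checkout the folder sits one level above PATH. Below is a minimal standalone sketch of the same switch, assuming PATH is a module-level pathlib.Path pointing at the app root (its exact definition is not shown in this diff):

import os
from pathlib import Path

# Assumption: app.py defines PATH as a Path to the app root; a stand-in value
# is used here so the sketch runs on its own.
PATH = Path(__file__).resolve().parent

def resolve_leaderboard_folder(base: Path = PATH) -> str:
    # Hugging Face Spaces exports SYSTEM=spaces; anything else is a local run.
    if os.environ.get("SYSTEM") == "spaces":
        return str(base / "yourbench_space" / "leaderboard_space")
    # Locally, yourbench_space sits one level above the app directory.
    return str(base.parent / "yourbench_space" / "leaderboard_space")

if __name__ == "__main__":
    print(resolve_leaderboard_folder())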
yourbench_space/evaluation.py

@@ -14,7 +14,12 @@ OUTPUT_DIR = "/data" if ON_SPACES else "." # TODO: fix the space folder
 
 def create_eval_file(eval_ds_name: str):
     task_name = eval_ds_name.replace("/", "_")
-
+
+    if os.environ.get("SYSTEM") == "spaces":
+        template_path = Path("/home/user/app/yourbench_space/lighteval_task/yourbench_task.py")
+    else:
+        template_path = Path("yourbench_space/lighteval_task/yourbench_task.py")
+
     subprocess.run(["lighteval", "tasks", "create", str(template_path), task_name, eval_ds_name])
 
 
@@ -40,7 +45,7 @@ async def run_evaluations(eval_ds_name: str, org: str, custom_env=None) -> list:
         "lighteval",
         "endpoint",
         "inference-providers",
-        f"
+        f"model_name={model_name},provider={provider}",
         f"custom|{task_name}|0|0",
         "--custom-tasks",
         f"custom_{task_name}_task.py",
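Taken together, create_eval_file now resolves the lighteval task template path for the current environment before shelling out to `lighteval tasks create`, and run_evaluations passes the model and provider to the `lighteval endpoint inference-providers` command as a single `model_name=...,provider=...` argument. The sketch below assembles that command using only arguments visible in this diff; build_eval_command is a hypothetical helper, and the model_name/provider values are placeholders:

import os
from pathlib import Path

def resolve_template_path() -> Path:
    # Same environment switch as create_eval_file: absolute path on Spaces,
    # repo-relative path for a local run.
    if os.environ.get("SYSTEM") == "spaces":
        return Path("/home/user/app/yourbench_space/lighteval_task/yourbench_task.py")
    return Path("yourbench_space/lighteval_task/yourbench_task.py")

def build_eval_command(task_name: str, model_name: str, provider: str) -> list[str]:
    # Mirrors the argument list run_evaluations builds after this change.
    return [
        "lighteval",
        "endpoint",
        "inference-providers",
        f"model_name={model_name},provider={provider}",
        f"custom|{task_name}|0|0",
        "--custom-tasks",
        f"custom_{task_name}_task.py",
    ]

if __name__ == "__main__":
    eval_ds_name = "my-org/my-eval"  # placeholder dataset name
    task_name = eval_ds_name.replace("/", "_")
    print("template:", resolve_template_path())
    # create_eval_file would first run:
    #   lighteval tasks create <template_path> <task_name> <eval_ds_name>
    print(" ".join(build_eval_command(task_name, "my-org/my-model", "my-provider")))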