import os
from huggingface_hub import HfApi
# clone / pull the lmeh eval data
HF_TOKEN = os.environ.get("HF_TOKEN", None)
REPO_ID = "open-llm-leaderboard/open_llm_leaderboard"  # the leaderboard Space itself
QUEUE_REPO = "open-llm-leaderboard/requests"  # pending evaluation requests (submission queue)
AGGREGATED_REPO = "open-llm-leaderboard/contents"  # aggregated results displayed in the leaderboard
VOTES_REPO = "open-llm-leaderboard/votes"  # community votes on submitted models
HF_HOME = os.getenv("HF_HOME", ".")
# Check HF_HOME write access
print(f"Initial HF_HOME set to: {HF_HOME}")
if not os.access(HF_HOME, os.W_OK):
    print(f"No write access to HF_HOME: {HF_HOME}. Resetting to current directory.")
    HF_HOME = "."
    os.environ["HF_HOME"] = HF_HOME
else:
    print("Write access confirmed for HF_HOME")
VOTES_PATH = os.path.join(HF_HOME, "model-votes")  # local working copy of the votes data
EVAL_REQUESTS_PATH = os.path.join(HF_HOME, "eval-queue")  # local working copy of the request queue
# Rate limit variables
RATE_LIMIT_PERIOD = 7  # length of the submission window (days)
RATE_LIMIT_QUOTA = 5  # submissions allowed per user within the window
HAS_HIGHER_RATE_LIMIT = []  # users/orgs exempt from the default quota
API = HfApi(token=HF_TOKEN)  # shared Hub API client, authenticated with HF_TOKEN when available
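
# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption about usage, not part of the original
# file): one plausible way the constants above get used is to mirror the
# request queue locally before reading pending submissions. The use of
# snapshot_download and the "dataset" repo_type are assumptions about how
# these repositories are hosted.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from huggingface_hub import snapshot_download

    # Pull (or update) a local copy of the evaluation request queue.
    snapshot_download(
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        local_dir=EVAL_REQUESTS_PATH,
        token=HF_TOKEN,
    )
    print(f"Request queue synced to {EVAL_REQUESTS_PATH}")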