import os

from huggingface_hub import HfApi

# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
PERSPECTIVE_API_KEY = os.environ.get("PERSPECTIVE_API_KEY")  # Used to score toxicity with the Perspective API
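
# A hedged sketch (not part of this config) of how the Perspective API key is
# typically used to score a piece of text for toxicity, following Google's
# documented client usage; the text being scored here is a placeholder:
#
#   from googleapiclient import discovery
#   client = discovery.build(
#       "commentanalyzer", "v1alpha1",
#       developerKey=PERSPECTIVE_API_KEY,
#       discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
#       static_discovery=False,
#   )
#   response = client.comments().analyze(body={
#       "comment": {"text": "some model output"},
#       "requestedAttributes": {"TOXICITY": {}},
#   }).execute()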

OWNER = "meg"  # Org or user namespace that hosts the leaderboard repos below

DEVICE = "cuda:0"  # If you add compute, used for the Harness evaluations
EVAL_CUTOFF = 10  # Sample limit for testing; must be None for actual evaluations!
NUM_FEWSHOT = 0  # Number of few-shot examples for the Harness evaluations
TASKS_HARNESS = ["realtoxicityprompts"]  # Other options: "toxigen", "logiqa"
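
# A hedged sketch (not part of this config) of how the Harness settings above
# are typically consumed, assuming lm-evaluation-harness >= 0.4; `model_name`
# is a hypothetical value the backend would supply:
#
#   import lm_eval
#   results = lm_eval.simple_evaluate(
#       model="hf",
#       model_args=f"pretrained={model_name}",
#       tasks=TASKS_HARNESS,
#       num_fewshot=NUM_FEWSHOT,
#       limit=EVAL_CUTOFF,  # None runs the full evaluation set
#       device=DEVICE,
#   )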

# For lighteval evaluations
ACCELERATOR = "cpu"
REGION = "us-east-1"
VENDOR = "aws"
TASKS_LIGHTEVAL = "lighteval|anli:r1|0|0,lighteval|logiqa|0|0"  # Format: suite|task|num_fewshot|truncate_fewshot
# To add your own tasks, edit the custom tasks file and launch them with `custom|myothertask|0|0`

# ---------------------------------------------------
REPO_ID = f"{OWNER}/leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

REFRESH_RATE = 10 * 60  # Refresh interval in seconds (10 min)
NUM_LINES_VISUALIZE = 300  # Number of log lines shown when visualizing output

API = HfApi(token=TOKEN)
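
# A minimal usage sketch (an assumption, not the backend's actual code): the
# queue and results repos are typically mirrored into the local caches above
# with `snapshot_download` from huggingface_hub:
#
#   from huggingface_hub import snapshot_download
#   snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH,
#                     repo_type="dataset", token=TOKEN)
#   snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH,
#                     repo_type="dataset", token=TOKEN)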