meg-huggingface commited on
Commit
971bce4
1 Parent(s): 2b38d40

Adding CPU support (float32) and some additional comments.

Browse files
Files changed (2) hide show
  1. app.py +2 -2
  2. src/envs.py +15 -5
app.py CHANGED
@@ -27,7 +27,7 @@ from src.display.utils import (
27
  WeightType,
28
  Precision
29
  )
30
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
31
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
32
  from src.submission.submit import add_new_eval
33
 
@@ -311,7 +311,7 @@ with demo:
311
  choices=[i.value.name for i in Precision if i != Precision.Unknown],
312
  label="Precision",
313
  multiselect=False,
314
- value="float16",
315
  interactive=True,
316
  )
317
  weight_type = gr.Dropdown(
 
27
  WeightType,
28
  Precision
29
  )
30
+ from src.envs import API, DEVICE, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
31
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
32
  from src.submission.submit import add_new_eval
33
 
 
311
  choices=[i.value.name for i in Precision if i != Precision.Unknown],
312
  label="Precision",
313
  multiselect=False,
314
+ value="float16" if DEVICE != "cpu" else "float32",
315
  interactive=True,
316
  )
317
  weight_type = gr.Dropdown(
src/envs.py CHANGED
@@ -4,21 +4,31 @@ from huggingface_hub import HfApi
4
 
5
  # Info to change for your repository
6
  # ----------------------------------
7
- TOKEN = os.environ.get("TOKEN") # A read/write user token for a user that has access to your org
8
 
9
- OWNER = "Bias-Leaderboard" # Change to your org - don't forget to create a results and request file
10
- DEVICE = "cuda:0" # "cpu" # cuda:0 if you add compute
 
 
 
 
 
 
11
  LIMIT = 20 # !!!! Should be None for actual evaluations!!!
12
  # ----------------------------------
13
 
 
 
 
14
  REPO_ID = f"{OWNER}/leaderboard"
 
15
  QUEUE_REPO = f"{OWNER}/requests"
 
16
  RESULTS_REPO = f"{OWNER}/results"
17
 
18
- # If you setup a cache later, just change HF_HOME
19
  CACHE_PATH=os.getenv("HF_HOME", ".")
20
 
21
- # Local caches
22
  EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
23
  EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
24
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
 
4
 
5
  # Info to change for your repository
6
  # ----------------------------------
 
7
 
8
+ # A read/write user access token for a user in the org
9
+ # You can create these at https://huggingface.co/settings/tokens
10
+ # This must be set as a 'Secret' in the leaderboard Space settings.
11
+ TOKEN = os.environ.get("TOKEN")
12
+
13
+ # Change to your org name
14
+ OWNER = "Bias-Leaderboard"
15
+ DEVICE = "cuda:0" # "cpu" or "cuda:0" if you add compute
16
  LIMIT = 20 # !!!! Should be None for actual evaluations!!!
17
  # ----------------------------------
18
 
19
+ # Define some input/output variables.
20
+ # Don't forget to create a results and requests Dataset for your org
21
+ # Leaderboard Space
22
  REPO_ID = f"{OWNER}/leaderboard"
23
+ # Leaderboard input Dataset
24
  QUEUE_REPO = f"{OWNER}/requests"
25
+ # Leaderboard output Dataset
26
  RESULTS_REPO = f"{OWNER}/results"
27
 
28
+ # If you set up a cache, set HF_HOME.
29
  CACHE_PATH=os.getenv("HF_HOME", ".")
30
 
31
+ # Local caches to read previously-submitted/computed results
32
  EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
33
  EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
34
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")