lukehinds committed
Commit 3f502ea · 1 Parent(s): 37ba2c7

Improved logging

app.py CHANGED
@@ -48,14 +48,14 @@ def restart_space():
 
 ### Space initialisation
 try:
-    print(EVAL_REQUESTS_PATH)
+    logger.info(f"Downloading evaluation requests from {QUEUE_REPO} to {EVAL_REQUESTS_PATH}")
     snapshot_download(
         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
     )
 except Exception:
     restart_space()
 try:
-    print(EVAL_RESULTS_PATH)
+    logger.info(f"Downloading evaluation results from {RESULTS_REPO} to {EVAL_RESULTS_PATH}")
     snapshot_download(
         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
     )
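The new logger.info() calls assume a module-level logger already configured earlier in app.py; that setup is not part of this hunk. A minimal sketch of what it could look like (the logger name and format are assumptions, not taken from this commit):

import logging

# Hypothetical logger setup for app.py; the logger.info() calls in the hunk
# above only require that a `logger` object exists at module scope.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s - %(message)s",
)
logger = logging.getLogger(__name__)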
src/about.py CHANGED
@@ -81,4 +81,3 @@ CITATION_BUTTON_TEXT = r"""
   note={Online resource for evaluating LLM security aspects}
 }
 """
-
src/display/utils.py CHANGED
@@ -1,8 +1,6 @@
 from dataclasses import dataclass, make_dataclass
 from enum import Enum
 
-import pandas as pd
-
 from src.about import Tasks
 
 def fields(raw_class):
src/envs.py CHANGED
@@ -7,7 +7,7 @@ from huggingface_hub import HfApi
 TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
 
 OWNER = "stacklok"
-REPO_ID = "secure-code-leaderboard"
+REPO_ID = "llm_security_leaderboard"
 # ----------------------------------
 
 REPO_ID = f"{OWNER}/{REPO_ID}"
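For context, the renamed constant is immediately folded into the full repository path a few lines later, so the Space now targets stacklok/llm_security_leaderboard. A minimal sketch of the resulting values (derived only from the lines shown in this hunk):

OWNER = "stacklok"
REPO_ID = "llm_security_leaderboard"

# Reassignment shown in the diff context above; the composed path is what
# Hub API calls such as the restart_space() helper would use.
REPO_ID = f"{OWNER}/{REPO_ID}"  # "stacklok/llm_security_leaderboard"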
src/leaderboard/read_evals.py CHANGED
@@ -19,7 +19,7 @@ class EvalResult:
     """
     eval_name: str # org_model_precision (uid)
     full_model: str # org/model (path on hub)
-    org: str
+    org: str
     model: str
     results: dict
     rank : int = 0
@@ -29,7 +29,7 @@ class EvalResult:
     model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
     weight_type: WeightType = WeightType.Original # Original or Adapter
     revision: str = "" # commit hash, "" if main
-    architecture: str = "Unknown"
+    architecture: str = "Unknown"
     license: str = "?"
     likes: int = 0
     num_params: int = 0
@@ -116,7 +116,7 @@ class EvalResult:
             self.num_params = request.get("params", 0)
             self.date = request.get("submitted_time", "")
         except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+            logging.warning(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
@@ -126,7 +126,7 @@
             average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
         else:
             average = 0.0
-
+
         data_dict = {
             "eval_name": self.eval_name, # not a column, just a save name
             AutoEvalColumn.rank.name: self.rank,
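Unlike app.py, this file sends the warning through the root logger via logging.warning(); that only works if the module imports logging, which this hunk does not show. A small sketch of an equivalent, slightly more conventional alternative using a named module logger (an assumption for illustration, not what the commit does):

import logging

# Module-level logger keyed to the module name; log records then carry
# "src.leaderboard.read_evals" instead of "root" in the output.
logger = logging.getLogger(__name__)
logger.warning("Could not find request file for %s/%s", "some-org", "some-model")  # placeholder values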
utils/create_datasets.py CHANGED
@@ -1,12 +1,10 @@
1
- from huggingface_hub import HfApi, create_repo
 
2
 
3
  # Authenticate with Hugging Face token
4
  api = HfApi()
5
  api.create_repo(repo_id="stacklok/requests", repo_type="dataset")
6
 
7
- # Example: Push dataset files
8
- from huggingface_hub import HfApi
9
- from pathlib import Path
10
 
11
  api.upload_folder(
12
  folder_path=Path("path_to_local_dataset"),
 
1
+ from huggingface_hub import HfApi
2
+ from pathlib import Path
3
 
4
  # Authenticate with Hugging Face token
5
  api = HfApi()
6
  api.create_repo(repo_id="stacklok/requests", repo_type="dataset")
7
 
 
 
 
8
 
9
  api.upload_folder(
10
  folder_path=Path("path_to_local_dataset"),
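After this cleanup the script reads roughly as below. Only folder_path is visible in the hunk above; the repo_id and repo_type arguments to upload_folder() are assumptions added so the sketch is self-contained, and "path_to_local_dataset" is the placeholder from the original file.

from huggingface_hub import HfApi
from pathlib import Path

# Authenticate with Hugging Face token
api = HfApi()
api.create_repo(repo_id="stacklok/requests", repo_type="dataset")

# Push the local dataset files to the repository created above.
api.upload_folder(
    folder_path=Path("path_to_local_dataset"),
    repo_id="stacklok/requests",  # assumption: target the repo created above
    repo_type="dataset",          # assumption: not visible in this hunk
)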