rusticluftig committed on
Commit
f908089
1 Parent(s): 1f67d0f

Get the latest score per miner, instead of the oldest

Browse files
Files changed (1) hide show
  1. utils.py +15 -13
utils.py CHANGED
@@ -1,4 +1,3 @@
1
-
2
  import os
3
  import math
4
  import time
@@ -137,16 +136,18 @@ def get_subnet_data(
137
  return result
138
 
139
 
140
-
141
-
142
  def get_wandb_runs(project: str, filters: Dict[str, Any]) -> List:
143
- """Get the latest runs from Wandb, retrying infinitely until we get them."""
 
 
 
144
  while True:
145
  api = wandb.Api(api_key=WANDB_TOKEN)
146
  runs = list(
147
  api.runs(
148
  project,
149
  filters=filters,
 
150
  )
151
  )
152
  if len(runs) > 0:
@@ -160,6 +161,12 @@ def get_scores(
160
  uids: List[int],
161
  wandb_runs: List,
162
  ) -> Dict[int, Dict[str, Optional[float]]]:
 
 
 
 
 
 
163
  result = {}
164
  previous_timestamp = None
165
  # Iterate through the runs until we've processed all the uids.
@@ -170,9 +177,9 @@ def get_scores(
170
  all_uid_data = data["uid_data"]
171
  timestamp = data["timestamp"]
172
  # Make sure runs are indeed in descending time order.
173
- #assert (
174
- #previous_timestamp is None or timestamp < previous_timestamp
175
- #), f"Timestamps are not in descending order: {timestamp} >= {previous_timestamp}"
176
  previous_timestamp = timestamp
177
 
178
  for uid in uids:
@@ -194,7 +201,6 @@ def get_scores(
194
  return result
195
 
196
 
197
-
198
  def get_validator_weights(
199
  metagraph: bt.metagraph,
200
  ) -> Dict[int, Tuple[float, int, Dict[int, float]]]:
@@ -214,8 +220,6 @@ def get_validator_weights(
214
  return ret
215
 
216
 
217
-
218
-
219
  def get_losses_over_time(wandb_runs: List) -> pd.DataFrame:
220
  """Returns a dataframe of the best average model loss over time."""
221
  timestamps = []
@@ -240,7 +244,6 @@ def get_losses_over_time(wandb_runs: List) -> pd.DataFrame:
240
  return pd.DataFrame({"timestamp": timestamps, "best_loss": best_losses})
241
 
242
 
243
-
244
  def next_epoch(subtensor: bt.subtensor, block: int) -> int:
245
  return (
246
  block
@@ -255,7 +258,6 @@ def is_floatable(x) -> bool:
255
  ) or isinstance(x, int)
256
 
257
 
258
-
259
  def format_score(uid: int, scores, key) -> Optional[float]:
260
  if uid in scores:
261
  if key in scores[uid]:
@@ -291,7 +293,7 @@ def get_benchmarks() -> Tuple[pd.DataFrame, datetime.datetime]:
291
  bt.logging.error("No benchmark project set.")
292
  return None, None
293
  runs = get_wandb_runs(project=BENCHMARK_WANDB_PROJECT, filters=None)
294
- for run in runs[::-1]:
295
  artifacts = list(run.logged_artifacts())
296
  if artifacts:
297
  table = artifacts[-1].get("benchmarks")
 
 
1
  import os
2
  import math
3
  import time
 
136
  return result
137
 
138
 
 
 
139
  def get_wandb_runs(project: str, filters: Dict[str, Any]) -> List:
140
+ """Get the latest runs from Wandb, retrying infinitely until we get them.
141
+
142
+ Returns:
143
+ List: List of runs matching the provided filters, newest run (by creation time) first."""
144
  while True:
145
  api = wandb.Api(api_key=WANDB_TOKEN)
146
  runs = list(
147
  api.runs(
148
  project,
149
  filters=filters,
150
+ order="-created_at",
151
  )
152
  )
153
  if len(runs) > 0:
 
161
  uids: List[int],
162
  wandb_runs: List,
163
  ) -> Dict[int, Dict[str, Optional[float]]]:
164
+ """Returns the most recent scores for the provided UIDs.
165
+
166
+ Args:
167
+ uids (List[int]): List of UIDs to get scores for.
168
+ wandb_runs (List): List of validator runs from Wandb. Requires the runs are provided in descending order.
169
+ """
170
  result = {}
171
  previous_timestamp = None
172
  # Iterate through the runs until we've processed all the uids.
 
177
  all_uid_data = data["uid_data"]
178
  timestamp = data["timestamp"]
179
  # Make sure runs are indeed in descending time order.
180
+ assert (
181
+ previous_timestamp is None or timestamp < previous_timestamp
182
+ ), f"Timestamps are not in descending order: {timestamp} >= {previous_timestamp}"
183
  previous_timestamp = timestamp
184
 
185
  for uid in uids:
 
201
  return result
202
 
203
 
 
204
  def get_validator_weights(
205
  metagraph: bt.metagraph,
206
  ) -> Dict[int, Tuple[float, int, Dict[int, float]]]:
 
220
  return ret
221
 
222
 
 
 
223
  def get_losses_over_time(wandb_runs: List) -> pd.DataFrame:
224
  """Returns a dataframe of the best average model loss over time."""
225
  timestamps = []
 
244
  return pd.DataFrame({"timestamp": timestamps, "best_loss": best_losses})
245
 
246
 
 
247
  def next_epoch(subtensor: bt.subtensor, block: int) -> int:
248
  return (
249
  block
 
258
  ) or isinstance(x, int)
259
 
260
 
 
261
  def format_score(uid: int, scores, key) -> Optional[float]:
262
  if uid in scores:
263
  if key in scores[uid]:
 
293
  bt.logging.error("No benchmark project set.")
294
  return None, None
295
  runs = get_wandb_runs(project=BENCHMARK_WANDB_PROJECT, filters=None)
296
+ for run in runs:
297
  artifacts = list(run.logged_artifacts())
298
  if artifacts:
299
  table = artifacts[-1].get("benchmarks")