rusticluftig committed on
Commit 4baa143 • 1 Parent(s): 0d9018c

Working starting point

Files changed (2)
  1. app.py +201 -133
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 import bittensor as bt
-import typing
+from typing import Dict, List, Any, Optional, Tuple
 from bittensor.extrinsics.serving import get_metadata
 from dataclasses import dataclass
 import requests
@@ -9,17 +9,21 @@ import math
 import os
 import datetime
 import time
+import json
+import pandas as pd
 from dotenv import load_dotenv
 from huggingface_hub import HfApi
 from apscheduler.schedulers.background import BackgroundScheduler
 
 load_dotenv()
 
-FONT = """<link href="https://fonts.cdnfonts.com/css/jmh-typewriter" rel="stylesheet">"""
-TITLE = """<h1 align="center" id="space-title" class="typewriter">Subnet 6 Leaderboard</h1>"""
-#IMAGE = """<a href="https://discord.gg/jqVphNsB4H" target="_blank"><img src="https://i.ibb.co/88wyVQ7/nousgirl.png" alt="nousgirl" style="margin: auto; width: 20%; border: 0;" /></a>"""
-HEADER = """<h2 align="center" class="typewriter"><a href="https://github.com/RaoFoundation/pretraining" target="_blank">Subnet 9</a> is a <a href="https://bittensor.com/" target="_blank">Bittensor</a> subnet that rewards miners for producing pretrained Foundation-Models on the <a href="https://huggingface.co/datasets/tiiuae/falcon-refinedweb" target="_blank">Falcon Refined Web dataset</a>. It acts like a continuous benchmark whereby miners are rewarded for attaining the best losses on randomly sampled pages of Falcon. The models with the best head-to-head loss on the evaluation data receive a steady emission of TAO.</h3>"""
-EVALUATION_DETAILS = """<b>Name</b> is the 🤗 Hugging Face model name (click to go to the model card). <b>Rewards / Day</b> are the expected rewards per day for each model. <b>Last Average Loss</b> is the last loss value on the evaluation data for the model as calculated by a validator (lower is better). <b>UID</b> is the Bittensor user id of the submitter. <b>Block</b> is the Bittensor block that the model was submitted in. More stats on <a href="https://taostats.io/subnets/netuid-6/" target="_blank">taostats</a>."""
+FONT = (
+    """<link href="https://fonts.cdnfonts.com/css/jmh-typewriter" rel="stylesheet">"""
+)
+TITLE = """<h1 align="center" id="space-title" class="typewriter">Subnet 9 Leaderboard</h1>"""
+# IMAGE = """<a href="https://discord.gg/jqVphNsB4H" target="_blank"><img src="https://i.ibb.co/88wyVQ7/nousgirl.png" alt="nousgirl" style="margin: auto; width: 20%; border: 0;" /></a>"""
+HEADER = """<h2 align="center" class="typewriter"><a href="https://github.com/RaoFoundation/pretraining" target="_blank">Subnet 9</a> is a <a href="https://bittensor.com/" target="_blank">Bittensor</a> subnet that rewards miners for producing pretrained Foundation-Models on the <a href="https://huggingface.co/datasets/tiiuae/falcon-refinedweb" target="_blank">Falcon Refined Web dataset</a>. It acts like a continuous benchmark whereby miners are rewarded for attaining the best losses on randomly sampled pages of Falcon.<br/>The models with the best head-to-head loss on the evaluation data receive a steady emission of TAO.</h3>"""
+EVALUATION_DETAILS = """<ul><li><b>Name:</b> the 🤗 Hugging Face model name (click to go to the model card)</li><li><b>Rewards / Day:</b> the expected rewards per day based on current ranking.</li><li><b>Last Average Loss:</b> the last loss value on the evaluation data for the model as calculated by a validator (lower is better)</li><li><b>UID:</b> the Bittensor UID of the miner</li><li><b>Block:</b> the Bittensor block that the model was submitted in</li></ul><br/>More stats on <a href="https://taostats.io/subnets/netuid-9/" target="_blank">taostats</a>."""
 EVALUATION_HEADER = """<h3 align="center">Shows the latest internal evaluation statistics as calculated by the Opentensor validator</h3>"""
 VALIDATOR_WANDB_PROJECT = "opentensor-dev/pretraining-subnet"
 H4_TOKEN = os.environ.get("H4_TOKEN", None)
@@ -30,10 +34,9 @@ MAX_AVG_LOSS_POINTS = 1
 RETRIES = 5
 DELAY_SECS = 3
 NETUID = 9
-# TODO: Update this for SN 9.
-SUBNET_START_BLOCK = 2225782
 SECONDS_PER_BLOCK = 12
 
+
 @dataclass
 class ModelData:
     uid: int
@@ -47,7 +50,15 @@ class ModelData:
     emission: float
 
     @classmethod
-    def from_compressed_str(cls, uid: int, hotkey: str, cs: str, block: int, incentive: float, emission: float):
+    def from_compressed_str(
+        cls,
+        uid: int,
+        hotkey: str,
+        cs: str,
+        block: int,
+        incentive: float,
+        emission: float,
+    ):
         """Returns an instance of this class from a compressed string representation"""
         tokens = cs.split(":")
         return ModelData(
@@ -59,9 +70,10 @@ class ModelData:
             hash=tokens[3] if tokens[3] != "None" else None,
             block=block,
             incentive=incentive,
-            emission=emission
+            emission=emission,
         )
 
+
 def run_with_retries(func, *args, **kwargs):
     for i in range(0, RETRIES):
         try:
@@ -72,17 +84,30 @@ def run_with_retries(func, *args, **kwargs):
             time.sleep(DELAY_SECS)
     raise RuntimeError("Should never happen")
 
-def get_subtensor_and_metagraph() -> typing.Tuple[bt.subtensor, bt.metagraph]:
-    def _internal() -> typing.Tuple[bt.subtensor, bt.metagraph]:
-        subtensor: bt.subtensor = bt.subtensor("finney")
-        metagraph: bt.metagraph = bt.metagraph(9, lite=False)
+
+def get_subtensor_and_metagraph() -> Tuple[bt.subtensor, bt.metagraph]:
+    def _internal() -> Tuple[bt.subtensor, bt.metagraph]:
+        subtensor = bt.subtensor("finney")
+        metagraph = bt.metagraph(NETUID, lite=False)
         return subtensor, metagraph
+
     return run_with_retries(_internal)
 
+
 def get_tao_price() -> float:
-    return run_with_retries(lambda: float(requests.get("https://api.kucoin.com/api/v1/market/stats?symbol=TAO-USDT").json()["data"]["last"]))
+    return run_with_retries(
+        lambda: float(
+            requests.get(
+                "https://api.kucoin.com/api/v1/market/stats?symbol=TAO-USDT"
+            ).json()["data"]["last"]
+        )
+    )
+
 
-def get_validator_weights(metagraph: bt.metagraph) -> typing.Dict[int, typing.Tuple[float, int, typing.Dict[int, float]]]:
+def get_validator_weights(
+    metagraph: bt.metagraph,
+) -> Dict[int, Tuple[float, int, Dict[int, float]]]:
+    """Returns a dictionary of validator UIDs to (vtrust, stake, {uid: weight})."""
     ret = {}
     for uid in metagraph.uids.tolist():
         vtrust = metagraph.validator_trust[uid].item()
@@ -96,7 +121,10 @@ def get_validator_weights(metagraph: bt.metagraph) -> typing.Dict[int, typing.Tuple[float, int, typing.Dict[int, float]]]:
             ret[uid][-1][ouid] = weight
     return ret
 
-def get_subnet_data(subtensor: bt.subtensor, metagraph: bt.metagraph) -> typing.List[ModelData]:
+
+def get_subnet_data(
+    subtensor: bt.subtensor, metagraph: bt.metagraph
+) -> List[ModelData]:
     result = []
     for uid in metagraph.uids.tolist():
         hotkey = metagraph.hotkeys[uid]
@@ -109,62 +137,75 @@ def get_subnet_data(subtensor: bt.subtensor, metagraph: bt.metagraph) -> typing.List[ModelData]:
         chain_str = bytes.fromhex(hex_data).decode()
         block = metadata["block"]
         incentive = metagraph.incentive[uid].nan_to_num().item()
-        emission = metagraph.emission[uid].nan_to_num().item() * 20 # convert to daily TAO
+        emission = (
+            metagraph.emission[uid].nan_to_num().item() * 20
+        )  # convert to daily TAO
 
         model_data = None
         try:
-            model_data = ModelData.from_compressed_str(uid, hotkey, chain_str, block, incentive, emission)
+            model_data = ModelData.from_compressed_str(
+                uid, hotkey, chain_str, block, incentive, emission
+            )
         except:
             continue
 
         result.append(model_data)
     return result
 
+
 def is_floatable(x) -> bool:
-    return (isinstance(x, float) and not math.isnan(x) and not math.isinf(x)) or isinstance(x, int)
-
-def get_float_score(key: str, history) -> typing.Tuple[typing.Optional[float], bool]:
-    if key in history:
-        data = list(history[key])
-        if len(data) > 0:
-            if is_floatable(data[-1]):
-                return float(data[-1]), True
-            else:
-                data = [float(x) for x in data if is_floatable(x)]
-                if len(data) > 0:
-                    return float(data[-1]), False
-    return None, False
-
-def get_scores(uids: typing.List[int]) -> typing.Dict[int, typing.Dict[str, typing.Optional[float]]]:
+    return (
+        isinstance(x, float) and not math.isnan(x) and not math.isinf(x)
+    ) or isinstance(x, int)
+
+
+def get_scores(
+    uids: List[int],
+) -> Dict[int, Dict[str, Optional[float]]]:
     api = wandb.Api(api_key=WANDB_TOKEN)
-    runs = list(api.runs(VALIDATOR_WANDB_PROJECT),
-        filters={
-            "type": "validator",
-            "uid": 238
-        })
+    runs = list(
+        api.runs(
+            VALIDATOR_WANDB_PROJECT,
+            filters={"config.type": "validator", "config.uid": 238},
+        )
+    )
 
     result = {}
-    for run in runs:
-        history = run.history()
+    previous_timestamp = None
+    # Iterate through the runs until we've processed all the uids.
+    for i, run in enumerate(runs):
+        if not "original_format_json" in run.summary:
+            continue
+        data = json.loads(run.summary["original_format_json"])
+        all_uid_data = data["uid_data"]
+        timestamp = data["timestamp"]
+
+        # Make sure runs are indeed in descending time order.
+        assert (
+            previous_timestamp is None or timestamp < previous_timestamp
+        ), f"Timestamps are not in descending order: {timestamp} >= {previous_timestamp}"
+        previous_timestamp = timestamp
+
         for uid in uids:
            if uid in result:
                continue
-            avg_loss, avg_loss_fresh = get_float_score(f"uid_data.{uid}", history)
-            win_rate, win_rate_fresh = get_float_score(f"win_rate_data.{uid}", history)
-            win_total, win_total_fresh = get_float_score(f"win_total_data.{uid}", history)
-            weight, weight_fresh = get_float_score(f"weight_data.{uid}", history)
-            result[uid] = {
-                "avg_loss": avg_loss,
-                "win_rate": win_rate,
-                "win_total": win_total,
-                "weight": weight,
-                "fresh": avg_loss_fresh and win_rate_fresh and win_total_fresh
-            }
-            if len(result.keys()) == len(uids):
+            if str(uid) in all_uid_data:
+                uid_data = all_uid_data[str(uid)]
+                # Only the most recent run is fresh.
+                is_fresh = i == 0
+                result[uid] = {
+                    "avg_loss": uid_data.get("average_loss", None),
+                    "win_rate": uid_data.get("win_rate", None),
+                    "win_total": uid_data.get("win_total", None),
+                    "weight": uid_data.get("weight", None),
+                    "fresh": is_fresh,
+                }
+        if len(result) == len(uids):
                break
     return result
 
-def format_score(uid: int, scores, key) -> typing.Optional[float]:
+
+def format_score(uid: int, scores, key) -> Optional[float]:
     if uid in scores:
         if key in scores[uid]:
             point = scores[uid][key]
@@ -172,102 +213,129 @@ def format_score(uid: int, scores, key) -> typing.Optional[float]:
                 return round(scores[uid][key], 4)
     return None
 
-def next_tempo(start_block: int, tempo: int, block: int) -> int:
-    start_num = start_block + tempo
-    intervals = (block - start_num) // tempo
-    nearest_num = start_num + ((intervals + 1) * tempo)
-    return nearest_num
+
+def next_epoch(subtensor: bt.subtensor, block: int) -> int:
+    return subtensor.get_subnet_hyperparameters(
+        NETUID
+    ).tempo - subtensor.blocks_since_epoch(NETUID, block)
+
 
 def get_next_update_div(current_block: int, next_update_block: int) -> str:
     now = datetime.datetime.now()
     blocks_to_go = next_update_block - current_block
-    next_update_time = now + datetime.timedelta(seconds=blocks_to_go * SECONDS_PER_BLOCK)
+    next_update_time = now + datetime.timedelta(
+        seconds=blocks_to_go * SECONDS_PER_BLOCK
+    )
    delta = next_update_time - now
    return f"""<div align="center" style="font-size: larger;">Next reward update: <b>{blocks_to_go}</b> blocks (~{int(delta.total_seconds() // 60)} minutes)</div>"""
 
-subtensor, metagraph = get_subtensor_and_metagraph()
-
-tao_price = get_tao_price()
-
-leaderboard_df = get_subnet_data(subtensor, metagraph)
-leaderboard_df.sort(key=lambda x: x.incentive, reverse=True)
 
-scores = get_scores([x.uid for x in leaderboard_df])
-
-current_block = metagraph.block.item()
-next_update = next_tempo(
-    SUBNET_START_BLOCK,
-    subtensor.get_subnet_hyperparameters(NETUID).tempo,
-    current_block
-)
-
-validator_df = get_validator_weights(metagraph)
-weight_keys = set()
-for uid, stats in validator_df.items():
-    weight_keys.update(stats[-1].keys())
-
-def leaderboard_data(show_stale: bool):
-    value = [
+def leaderboard_data(
+    leaderboard: List[ModelData],
+    scores: Dict[int, Dict[str, Optional[float]]],
+    show_stale: bool,
+) -> List[List[Any]]:
+    """Returns the leaderboard data, based on models data and UID scores."""
+    return [
         [
-            f'[{c.namespace}/{c.name} ({c.commit[0:8]})](https://huggingface.co/{c.namespace}/{c.name}/commit/{c.commit})',
+            f"[{c.namespace}/{c.name} ({c.commit[0:8]})](https://huggingface.co/{c.namespace}/{c.name}/commit/{c.commit})",
            format_score(c.uid, scores, "win_rate"),
            format_score(c.uid, scores, "avg_loss"),
            format_score(c.uid, scores, "weight"),
            c.uid,
-            c.block
-        ] for c in leaderboard_df if scores[c.uid]["fresh"] or show_stale
+            c.block,
+        ]
+        for c in leaderboard
+        if (c.uid in scores and scores[c.uid]["fresh"]) or show_stale
     ]
-    return value
 
-demo = gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}")
-with demo:
-    gr.HTML(FONT)
-    gr.HTML(TITLE)
-    #gr.HTML(IMAGE)
-    gr.HTML(HEADER)
+def restart_space():
+    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
 
-    gr.HTML(value=get_next_update_div(current_block, next_update))
 
-    gr.Label(
-        value={ f"{c.namespace}/{c.name} ({c.commit[0:8]}) · ${round(c.emission * tao_price, 2):,} (τ{round(c.emission, 2):,})": c.incentive for c in leaderboard_df if c.incentive},
-        num_top_classes=10,
-    )
-
-    with gr.Accordion("Evaluation Stats"):
-        gr.HTML(EVALUATION_HEADER)
-        show_stale = gr.Checkbox(label="Show Stale", interactive=True)
-        leaderboard_table = gr.components.Dataframe(
-            value=leaderboard_data(show_stale.value),
-            headers=["Name", "Win Rate", "Average Loss", "Weight", "UID", "Block"],
-            datatype=["markdown", "number", "number", "number", "number", "number"],
-            elem_id="leaderboard-table",
-            interactive=False,
-            visible=True,
-        )
-        gr.HTML(EVALUATION_DETAILS)
-        show_stale.change(leaderboard_data, [show_stale], leaderboard_table)
-
-    with gr.Accordion("Validator Stats"):
-        validator_table = gr.components.Dataframe(
-            value=[
-                [uid, int(validator_df[uid][1]), round(validator_df[uid][0], 4)] + [validator_df[uid][-1].get(c.uid) for c in leaderboard_df if c.incentive]
-                for uid, _ in sorted(
-                    zip(validator_df.keys(), [validator_df[x][1] for x in validator_df.keys()]),
-                    key=lambda x: x[1],
-                    reverse=True
-                )
-            ],
-            headers=["UID", "Stake (τ)", "V-Trust"] + [f"{c.namespace}/{c.name} ({c.commit[0:8]})" for c in leaderboard_df if c.incentive],
-            datatype=["number", "number", "number"] + ["number" for c in leaderboard_df if c.incentive],
-            interactive=False,
-            visible=True,
-        )
+def main():
+    subtensor, metagraph = get_subtensor_and_metagraph()
 
-def restart_space():
-    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
+    tao_price = get_tao_price()
+
+    model_data: List[ModelData] = get_subnet_data(subtensor, metagraph)
+    model_data.sort(key=lambda x: x.incentive, reverse=True)
+
+    scores = get_scores([x.uid for x in model_data])
+
+    current_block = metagraph.block.item()
+    next_epoch_block = next_epoch(subtensor, current_block)
+
+    validator_df = get_validator_weights(metagraph)
+    weight_keys = set()
+    for uid, stats in validator_df.items():
+        weight_keys.update(stats[-1].keys())
 
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=60 * 15) # restart every 15 minutes
-scheduler.start()
+    demo = gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}")
+    with demo:
+        gr.HTML(FONT)
+        gr.HTML(TITLE)
+        # gr.HTML(IMAGE)
+        gr.HTML(HEADER)
 
-demo.launch()
+        gr.HTML(value=get_next_update_div(current_block, next_epoch_block))
+
+        gr.Label(
+            value={
+                f"{c.namespace}/{c.name} ({c.commit[0:8]}) · ${round(c.emission * tao_price, 2):,} (τ{round(c.emission, 2):,})": c.incentive
+                for c in model_data
+                if c.incentive
+            },
+            num_top_classes=10,
+        )
+
+        with gr.Accordion("Evaluation Stats"):
+            gr.HTML(EVALUATION_HEADER)
+            show_stale = gr.Checkbox(label="Show Stale", interactive=True)
+            leaderboard_table = gr.components.Dataframe(
+                value=leaderboard_data(model_data, scores, show_stale.value),
+                headers=["Name", "Win Rate", "Average Loss", "Weight", "UID", "Block"],
+                datatype=["markdown", "number", "number", "number", "number", "number"],
+                elem_id="leaderboard-table",
+                interactive=False,
+                visible=True,
+            )
+            gr.HTML(EVALUATION_DETAILS)
+            show_stale.change(lambda stale: leaderboard_data(model_data, scores, stale), inputs=[show_stale], outputs=leaderboard_table)
+
+        with gr.Accordion("Validator Stats"):
+            gr.components.Dataframe(
+                value=[
+                    [uid, int(validator_df[uid][1]), round(validator_df[uid][0], 4)]
+                    + [validator_df[uid][-1].get(c.uid) for c in model_data if c.incentive]
+                    for uid, _ in sorted(
+                        zip(
+                            validator_df.keys(),
+                            [validator_df[x][1] for x in validator_df.keys()],
+                        ),
+                        key=lambda x: x[1],
+                        reverse=True,
+                    )
+                ],
+                headers=["UID", "Stake (τ)", "V-Trust"]
+                + [
+                    f"{c.namespace}/{c.name} ({c.commit[0:8]})"
+                    for c in model_data
+                    if c.incentive
+                ],
+                datatype=["number", "number", "number"]
+                + ["number" for c in model_data if c.incentive],
+                interactive=False,
+                visible=True,
+            )
+
+
+    scheduler = BackgroundScheduler()
+    scheduler.add_job(
+        restart_space, "interval", seconds=60 * 15
+    )  # restart every 15 minutes
+    scheduler.start()
+
+    demo.launch()
+
+main()
requirements.txt CHANGED
@@ -3,4 +3,5 @@ requests==2.31.0
 wandb==0.16.2
 python-dotenv==1.0.1
 APScheduler==3.10.1
-huggingface-hub>=0.18.0
+huggingface-hub>=0.18.0
+pandas==2.2.0