myshell-test committed on
Commit
2e201ff
1 Parent(s): 8120aa6

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. .gitignore +162 -0
  2. README.md +5 -6
  3. app.py +536 -0
  4. requirement.txt +7 -0
.gitignore ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
161
+
162
+ testing/
README.md CHANGED
@@ -1,12 +1,11 @@
1
  ---
2
- title: Tts Subnet Leaderboard
3
- emoji: 🔥
4
  colorFrom: indigo
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.24.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: MyShell TTS Subnet Leaderboard
3
+ emoji: ⚒️
4
  colorFrom: indigo
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 3.41.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
+ MyShell TTS Subnet
 
app.py ADDED
@@ -0,0 +1,536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import bittensor as bt
3
+ import typing
4
+ from bittensor.extrinsics.serving import get_metadata
5
+ from dataclasses import dataclass
6
+ import requests
7
+ import wandb
8
+ import math
9
+ import os
10
+ import datetime
11
+ import time
12
+ import functools
13
+ import multiprocessing
14
+ from dotenv import load_dotenv
15
+ from huggingface_hub import HfApi
16
+ from apscheduler.schedulers.background import BackgroundScheduler
17
+ from tqdm import tqdm
18
+ import concurrent.futures
19
+ import sys
20
+
21
+ load_dotenv()
22
+
23
+ FONT = (
24
+ """<link href="https://fonts.cdnfonts.com/css/jmh-typewriter" rel="stylesheet">"""
25
+ )
26
+ TITLE = """<h1 align="center" id="space-title" class="typewriter">MyShell TTS Subnet Leaderboard</h1>"""
27
+ IMAGE = """<a href="https://discord.gg/myshell" target="_blank"><img src="https://avatars.githubusercontent.com/u/127754094?s=2000&v=4" alt="MyShell" style="margin: auto; width: 20%; border: 0;" /></a>"""
28
+ HEADER = """<h2 align="center" class="typewriter">MyShell TTS Subnet is a groundbreaking project that leverages the power of decentralized collaboration to advance the state-of-the-art in open-source Text-to-Speech (TTS) technology. By harnessing the Bittensor blockchain and a unique incentive mechanism, we aim to create the most advanced and accessible TTS models. By leveraging MyShell's user base of over one million individuals, we are devoted to pushing cutting-edge technology to every end-user.</h3>"""
29
+ EVALUATION_DETAILS = """<b>Name</b> is the 🤗 Hugging Face model name (click to go to the model card). <b>Rewards / Day</b> are the expected rewards per day for each model. <b>Block</b> is the Bittensor block that the model was submitted in. More stats on <a href="https://taostats.io/subnets/netuid-3/" target="_blank">taostats</a>."""
30
+ EVALUATION_HEADER = """<h3 align="center">Shows the latest internal evaluation statistics as calculated by a validator run by Cortex Foundation ({date}) </h3>"""
31
+ VALIDATOR_WANDB_PROJECT = "myshell_tc/tts_subnet_validator"
32
+ # os.environ.get("VALIDATOR_WANDB_PROJECT")
33
+ H4_TOKEN = os.environ.get("H4_TOKEN", None)
34
+ API = HfApi(token=H4_TOKEN)
35
+ REPO_ID = "myshell-ai/tts-subnet-leaderboard"
36
+ METAGRAPH_RETRIES = 10
37
+ METAGRAPH_DELAY_SECS = 30
38
+ METADATA_TTL = 10
39
+ NETUID = 3
40
+ SUBNET_START_BLOCK = 2635801
41
+ SECONDS_PER_BLOCK = 12
42
+ SUBTENSOR = os.environ.get("SUBTENSOR", "finney")
43
+
44
+
45
+ @dataclass
46
+ class Competition:
47
+ id: str
48
+ name: str
49
+
50
+
51
+ COMPETITIONS = [
52
+ Competition(id="p225", name="vctk-speaker1"),
53
+ ]
54
+ DEFAULT_COMPETITION_ID = "p225"
55
+ last_refresh = None
56
+
57
+
58
+ def run_in_subprocess(func: functools.partial, ttl: int) -> typing.Any:
59
+ """Runs the provided function on a subprocess with 'ttl' seconds to complete.
60
+ Args:
61
+ func (functools.partial): Function to be run.
62
+ ttl (int): How long to try for in seconds.
63
+ Returns:
64
+ Any: The value returned by 'func'
65
+ """
66
+
67
+ def wrapped_func(func: functools.partial, queue: multiprocessing.Queue):
68
+ try:
69
+ result = func()
70
+ queue.put(result)
71
+ except (Exception, BaseException) as e:
72
+ # Catch exceptions here to add them to the queue.
73
+ queue.put(e)
74
+
75
+ # Use "fork" (the default on all POSIX except macOS), because pickling doesn't seem
76
+ # to work on "spawn".
77
+ ctx = multiprocessing.get_context("fork")
78
+ queue = ctx.Queue()
79
+ process = ctx.Process(target=wrapped_func, args=[func, queue])
80
+
81
+ process.start()
82
+
83
+ process.join(timeout=ttl)
84
+
85
+ if process.is_alive():
86
+ process.terminate()
87
+ process.join()
88
+ raise TimeoutError(f"Failed to {func.func.__name__} after {ttl} seconds")
89
+
90
+ # Raises an error if the queue is empty. This is fine. It means our subprocess timed out.
91
+ result = queue.get(block=False)
92
+
93
+ # If we put an exception on the queue then raise instead of returning.
94
+ if isinstance(result, Exception):
95
+ raise result
96
+ if isinstance(result, BaseException):
97
+ raise Exception(f"BaseException raised in subprocess: {str(result)}")
98
+
99
+ return result
100
+
101
+
102
+ def get_subtensor_and_metagraph() -> typing.Tuple[bt.subtensor, bt.metagraph]:
103
+ for i in range(0, METAGRAPH_RETRIES):
104
+ try:
105
+ print("Connecting to subtensor...")
106
+ subtensor: bt.subtensor = bt.subtensor(SUBTENSOR)
107
+ print("Pulling metagraph...")
108
+ metagraph: bt.metagraph = subtensor.metagraph(NETUID, lite=False)
109
+ return subtensor, metagraph
110
+ except:
111
+ if i == METAGRAPH_RETRIES - 1:
112
+ raise
113
+ print(
114
+ f"Error connecting to subtensor or pulling metagraph, retry {i + 1} of {METAGRAPH_RETRIES} in {METAGRAPH_DELAY_SECS} seconds..."
115
+ )
116
+ time.sleep(METAGRAPH_DELAY_SECS)
117
+ raise RuntimeError()
118
+
119
+
120
+ @dataclass
121
+ class ModelData:
122
+ uid: int
123
+ hotkey: str
124
+ namespace: str
125
+ name: str
126
+ commit: str
127
+ hash: str
128
+ block: int
129
+ incentive: float
130
+ emission: float
131
+ competition: str
132
+
133
+ @classmethod
134
+ def from_compressed_str(
135
+ cls,
136
+ uid: int,
137
+ hotkey: str,
138
+ cs: str,
139
+ block: int,
140
+ incentive: float,
141
+ emission: float,
142
+ ):
143
+ """Returns an instance of this class from a compressed string representation"""
144
+ tokens = cs.split(":")
145
+ return ModelData(
146
+ uid=uid,
147
+ hotkey=hotkey,
148
+ namespace=tokens[0],
149
+ name=tokens[1],
150
+ commit=tokens[2] if tokens[2] != "None" else "",
151
+ hash=tokens[3] if tokens[3] != "None" else "",
152
+ competition=tokens[4]
153
+ if len(tokens) > 4 and tokens[4] != "None"
154
+ else DEFAULT_COMPETITION_ID,
155
+ block=block,
156
+ incentive=incentive,
157
+ emission=emission,
158
+ )
159
+
160
+
161
+ def get_tao_price() -> float:
162
+ for i in range(0, METAGRAPH_RETRIES):
163
+ try:
164
+ return float(
165
+ requests.get(
166
+ "https://api.kucoin.com/api/v1/market/stats?symbol=TAO-USDT"
167
+ ).json()["data"]["last"]
168
+ )
169
+ except:
170
+ if i == METAGRAPH_RETRIES - 1:
171
+ raise
172
+ time.sleep(METAGRAPH_DELAY_SECS)
173
+ raise RuntimeError()
174
+
175
+
176
+ def get_validator_weights(
177
+ metagraph: bt.metagraph,
178
+ ) -> typing.Dict[int, typing.Tuple[float, int, typing.Dict[int, float]]]:
179
+ ret = {}
180
+ for uid in metagraph.uids.tolist():
181
+ vtrust = metagraph.validator_trust[uid].item()
182
+ if vtrust > 0:
183
+ ret[uid] = (vtrust, metagraph.S[uid].item(), {})
184
+ for ouid in metagraph.uids.tolist():
185
+ if ouid == uid:
186
+ continue
187
+ weight = round(metagraph.weights[uid][ouid].item(), 4)
188
+ if weight > 0:
189
+ ret[uid][-1][ouid] = weight
190
+ return ret
191
+
192
+
193
+ def get_subnet_data(
194
+ subtensor: bt.subtensor, metagraph: bt.metagraph
195
+ ) -> typing.List[ModelData]:
196
+ global last_refresh
197
+
198
+ # Function to be executed in a thread
199
+ def fetch_data(uid):
200
+ hotkey = metagraph.hotkeys[uid]
201
+ try:
202
+ partial = functools.partial(
203
+ get_metadata, subtensor, metagraph.netuid, hotkey
204
+ )
205
+ metadata = run_in_subprocess(partial, METADATA_TTL)
206
+ except Exception as e:
207
+ return None
208
+
209
+ if not metadata:
210
+ return None
211
+
212
+ commitment = metadata["info"]["fields"][0]
213
+ hex_data = commitment[list(commitment.keys())[0]][2:]
214
+ chain_str = bytes.fromhex(hex_data).decode()
215
+ block = metadata["block"]
216
+ incentive = metagraph.incentive[uid].nan_to_num().item()
217
+ emission = (
218
+ metagraph.emission[uid].nan_to_num().item() * 20
219
+ ) # convert to daily TAO
220
+
221
+ try:
222
+ model_data = ModelData.from_compressed_str(
223
+ uid, hotkey, chain_str, block, incentive, emission
224
+ )
225
+ except Exception as e:
226
+ return None
227
+ return model_data
228
+
229
+ # Use ThreadPoolExecutor to fetch data in parallel
230
+ results = []
231
+ with concurrent.futures.ThreadPoolExecutor() as executor:
232
+ # Prepare the list of futures
233
+ futures = [executor.submit(fetch_data, uid) for uid in metagraph.uids.tolist()]
234
+ for future in tqdm(
235
+ concurrent.futures.as_completed(futures),
236
+ desc="Metadata for hotkeys",
237
+ total=len(futures),
238
+ ):
239
+ result = future.result()
240
+ if result:
241
+ results.append(result)
242
+
243
+ last_refresh = datetime.datetime.now()
244
+ return results
245
+
246
+
247
+ def floatable(x) -> bool:
248
+ return (
249
+ isinstance(x, float) and not math.isnan(x) and not math.isinf(x)
250
+ ) or isinstance(x, int)
251
+
252
+
253
+ def get_float_score(
254
+ key: str, history, competition_id: str
255
+ ) -> typing.Tuple[typing.Optional[float], bool]:
256
+ if key in history and "competition_id" in history:
257
+ data = list(history[key])
258
+ if len(data) > 0:
259
+ competitions = list(history["competition_id"])
260
+ while True:
261
+ if competitions.pop() != competition_id:
262
+ data.pop()
263
+ continue
264
+ if floatable(data[-1]):
265
+ return float(data[-1]), True
266
+ else:
267
+ data = [float(x) for x in data if floatable(x)]
268
+ if len(data) > 0:
269
+ return float(data[-1]), False
270
+ break
271
+ return None, False
272
+
273
+
274
+ def get_sample(
275
+ uid, history, competition_id: str
276
+ ) -> typing.Optional[typing.Tuple[str, str, str]]:
277
+ prompt_key = f"sample_prompt_data.{uid}"
278
+ response_key = f"sample_response_data.{uid}"
279
+ truth_key = f"sample_truth_data.{uid}"
280
+ if (
281
+ prompt_key in history
282
+ and response_key in history
283
+ and truth_key in history
284
+ and "competition_id" in history
285
+ ):
286
+ competitions = list(history["competition_id"])
287
+ prompts = list(history[prompt_key])
288
+ responses = list(history[response_key])
289
+ truths = list(history[truth_key])
290
+ while True:
291
+ prompt = prompts.pop()
292
+ response = responses.pop()
293
+ truth = truths.pop()
294
+ if competitions.pop() != competition_id:
295
+ continue
296
+ if (
297
+ isinstance(prompt, str)
298
+ and isinstance(response, str)
299
+ and isinstance(truth, str)
300
+ ):
301
+ return prompt, response, truth
302
+ break
303
+ return None
304
+
305
+
306
+ def get_scores(
307
+ uids: typing.List[int], competition_id: str
308
+ ) -> typing.Dict[int, typing.Dict[str, typing.Optional[float | str]]]:
309
+ api = wandb.Api()
310
+ runs = list(api.runs(VALIDATOR_WANDB_PROJECT))
311
+
312
+ result = {}
313
+ for run in runs:
314
+ history = run.history()
315
+ for uid in uids:
316
+ if uid in result.keys():
317
+ continue
318
+ win_rate, win_rate_fresh = get_float_score(
319
+ f"win_rate_data.{uid}", history, competition_id
320
+ )
321
+ win_total, win_total_fresh = get_float_score(
322
+ f"win_total_data.{uid}", history, competition_id
323
+ )
324
+ weight, weight_fresh = get_float_score(
325
+ f"weight_data.{uid}", history, competition_id
326
+ )
327
+ sample = get_sample(uid, history, competition_id)
328
+ result[uid] = {
329
+ "win_rate": win_rate,
330
+ "win_total": win_total,
331
+ "weight": weight,
332
+ "sample": sample,
333
+ "fresh": win_rate_fresh and win_total_fresh,
334
+ }
335
+ if len(result.keys()) == len(uids):
336
+ break
337
+ return result
338
+
339
+
340
+ def format_score(uid, scores, key) -> typing.Optional[float]:
341
+ if uid in scores:
342
+ if key in scores[uid]:
343
+ point = scores[uid][key]
344
+ if floatable(point):
345
+ return round(scores[uid][key], 4)
346
+ return None
347
+
348
+
349
+ def next_tempo(start_block, tempo, block):
350
+ start_num = start_block + tempo
351
+ intervals = (block - start_num) // tempo
352
+ nearest_num = start_num + ((intervals + 1) * tempo)
353
+ return nearest_num
354
+
355
+
356
+ subtensor, metagraph = get_subtensor_and_metagraph()
357
+
358
+ tao_price = get_tao_price()
359
+
360
+ leaderboard_df = get_subnet_data(subtensor, metagraph)
361
+ leaderboard_df.sort(key=lambda x: x.incentive, reverse=True)
362
+
363
+ print(leaderboard_df)
364
+
365
+ competition_scores = {
366
+ y.id: get_scores([x.uid for x in leaderboard_df if x.competition == y.id], y.id)
367
+ for y in COMPETITIONS
368
+ }
369
+
370
+ current_block = metagraph.block.item()
371
+ next_update = next_tempo(
372
+ SUBNET_START_BLOCK,
373
+ subtensor.get_subnet_hyperparameters(NETUID).tempo,
374
+ current_block,
375
+ )
376
+ blocks_to_go = next_update - current_block
377
+ current_time = datetime.datetime.now()
378
+ next_update_time = current_time + datetime.timedelta(
379
+ seconds=blocks_to_go * SECONDS_PER_BLOCK
380
+ )
381
+
382
+ validator_df = get_validator_weights(metagraph)
383
+ weight_keys = set()
384
+ for uid, stats in validator_df.items():
385
+ weight_keys.update(stats[-1].keys())
386
+
387
+
388
+ def get_next_update():
389
+ now = datetime.datetime.now()
390
+ delta = next_update_time - now
391
+ return f"""<div align="center" style="font-size: larger;">Next reward update: <b>{blocks_to_go}</b> blocks (~{int(delta.total_seconds() // 60)} minutes)</div>"""
392
+
393
+
394
+ def leaderboard_data(
395
+ show_stale: bool,
396
+ scores: typing.Dict[int, typing.Dict[str, typing.Optional[float | str]]],
397
+ competition_id: str,
398
+ ):
399
+ value = [
400
+ [
401
+ f"[{c.namespace}/{c.name} ({c.commit[0:8]}, UID={c.uid})](https://huggingface.co/{c.namespace}/{c.name}/commit/{c.commit})",
402
+ format_score(c.uid, scores, "win_rate"),
403
+ format_score(c.uid, scores, "weight"),
404
+ c.uid,
405
+ c.block,
406
+ ]
407
+ for c in leaderboard_df
408
+ if c.competition == competition_id and (scores[c.uid]["fresh"] or show_stale)
409
+ ]
410
+ return value
411
+
412
+
413
+ demo = gr.Blocks(css=".typewriter {font-family: 'JMH Typewriter', sans-serif;}")
414
+ with demo:
415
+ gr.HTML(FONT)
416
+ gr.HTML(TITLE)
417
+ gr.HTML(IMAGE)
418
+ gr.HTML(HEADER)
419
+
420
+ gr.HTML(value=get_next_update())
421
+
422
+ with gr.Tabs():
423
+ for competition in COMPETITIONS:
424
+ with gr.Tab(competition.name):
425
+ scores = competition_scores[competition.id]
426
+ print(scores)
427
+
428
+ class_denominator = sum(
429
+ leaderboard_df[i].incentive
430
+ for i in range(0, min(10, len(leaderboard_df)))
431
+ if leaderboard_df[i].incentive
432
+ and leaderboard_df[i].competition == competition.id
433
+ )
434
+
435
+ class_values = {
436
+ f"{leaderboard_df[i].namespace}/{leaderboard_df[i].name} ({leaderboard_df[i].commit[0:8]}, UID={leaderboard_df[i].uid}) · ${round(leaderboard_df[i].emission * tao_price, 2):,} (τ{round(leaderboard_df[i].emission, 2):,})": leaderboard_df[
437
+ i
438
+ ].incentive
439
+ / class_denominator
440
+ for i in range(0, min(10, len(leaderboard_df)))
441
+ if leaderboard_df[i].incentive
442
+ and leaderboard_df[i].competition == competition.id
443
+ }
444
+
445
+ gr.Label(
446
+ value=class_values,
447
+ num_top_classes=10,
448
+ )
449
+
450
+ with gr.Accordion("Evaluation Stats"):
451
+ gr.HTML(
452
+ EVALUATION_HEADER.replace(
453
+ "{date}",
454
+ last_refresh.strftime("refreshed at %H:%M on %Y-%m-%d"),
455
+ )
456
+ )
457
+ with gr.Tabs():
458
+ for entry in leaderboard_df:
459
+ if entry.competition == competition.id:
460
+ sample = scores[entry.uid]["sample"]
461
+ if sample is not None:
462
+ name = f"{entry.namespace}/{entry.name} ({entry.commit[0:8]}, UID={entry.uid})"
463
+ with gr.Tab(name):
464
+ gr.Chatbot([(sample[0], sample[1])])
465
+ # gr.Chatbot([(sample[0], f"*{name}*: {sample[1]}"), (None, f"*GPT-4*: {sample[2]}")])
466
+
467
+ show_stale = gr.Checkbox(label="Show Stale", interactive=True)
468
+ leaderboard_table = gr.components.Dataframe(
469
+ value=leaderboard_data(
470
+ show_stale.value, scores, competition.id
471
+ ),
472
+ headers=[
473
+ "Name",
474
+ "Win Rate",
475
+ "Weight",
476
+ "UID",
477
+ "Block",
478
+ ],
479
+ datatype=[
480
+ "markdown",
481
+ "number",
482
+ "number",
483
+ "number",
484
+ "number",
485
+ ],
486
+ elem_id="leaderboard-table",
487
+ interactive=False,
488
+ visible=True,
489
+ )
490
+ gr.HTML(EVALUATION_DETAILS)
491
+ show_stale.change(
492
+ lambda x: leaderboard_data(x, scores, competition.id),
493
+ [show_stale],
494
+ leaderboard_table,
495
+ )
496
+
497
+ with gr.Accordion("Validator Stats"):
498
+ validator_table = gr.components.Dataframe(
499
+ value=[
500
+ [uid, int(validator_df[uid][1]), round(validator_df[uid][0], 4)]
501
+ + [
502
+ validator_df[uid][-1].get(c.uid)
503
+ for c in leaderboard_df
504
+ if c.incentive
505
+ ]
506
+ for uid, _ in sorted(
507
+ zip(
508
+ validator_df.keys(),
509
+ [validator_df[x][1] for x in validator_df.keys()],
510
+ ),
511
+ key=lambda x: x[1],
512
+ reverse=True,
513
+ )
514
+ ],
515
+ headers=["UID", "Stake (τ)", "V-Trust"]
516
+ + [
517
+ f"{c.namespace}/{c.name} ({c.commit[0:8]}, UID={c.uid})"
518
+ for c in leaderboard_df
519
+ if c.incentive
520
+ ],
521
+ datatype=["number", "number", "number"]
522
+ + ["number" for c in leaderboard_df if c.incentive],
523
+ interactive=False,
524
+ visible=True,
525
+ )
526
+
527
+
528
+ def restart_space():
529
+ API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
530
+
531
+
532
+ scheduler = BackgroundScheduler()
533
+ scheduler.add_job(restart_space, "interval", seconds=60 * 5) # restart every 15 minutes
534
+ scheduler.start()
535
+
536
+ demo.launch()
requirement.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ bittensor==6.9.3
2
+ requests==2.31.0
3
+ wandb==0.16.2
4
+ python-dotenv==1.0.1
5
+ APScheduler==3.10.1
6
+ huggingface-hub>=0.18.0
7
+ tqdm==4.66.2