RusticLuftig committed on
Commit 8d00564
Parents (2): af88b2a d60014f

Merge pull request #4 from macrocosm-os/uid

Files changed (2):
  1. app.py +2 -3
  2. utils.py +1 -3
app.py CHANGED
@@ -17,8 +17,7 @@ FONT = (
 )
 TITLE = """<h1 align="center" id="space-title" class="typewriter">Finetuning Subnet Leaderboard</h1>"""
 HEADER = """<h2 align="center" class="typewriter"><a href="https://github.com/macrocosm-os/finetuning" target="_blank">Finetuning</a> is a <a href="https://bittensor.com/" target="_blank">Bittensor</a> subnet that rewards miners for producing finetuned models in defined competitions. The model with the best head-to-head score in each competition receive a steady emission of TAO.</h3>"""
-# TODO: Update links once subnet is regged.
-EVALUATION_DETAILS = """<ul><li><b>Name:</b> the 🤗 Hugging Face model name (click to go to the model card)</li><li><b>Rewards / Day:</b> the expected rewards per day based on current ranking.</li><li><b>Last Average Loss:</b> the last loss value on the evaluation data for the model as calculated by a validator (lower is better)</li><li><b>UID:</b> the Bittensor UID of the miner</li><li><b>Block:</b> the Bittensor block that the model was submitted in</li></ul><br/>More stats on <a href="https://taostats.io/subnets/netuid-9/" target="_blank">taostats</a>."""
+EVALUATION_DETAILS = """<ul><li><b>Name:</b> the 🤗 Hugging Face model name (click to go to the model card)</li><li><b>Rewards / Day:</b> the expected rewards per day based on current ranking.</li><li><b>Last Average Loss:</b> the last loss value on the evaluation data for the model as calculated by a validator (lower is better)</li><li><b>UID:</b> the Bittensor UID of the miner</li><li><b>Block:</b> the Bittensor block that the model was submitted in</li></ul><br/>More stats on <a href="https://taostats.io/subnets/netuid-37/" target="_blank">taostats</a>."""
 EVALUATION_HEADER = """<h3 align="center">Shows the latest internal evaluation statistics as calculated by the Opentensor validator</h3>"""
 
 HF_REPO_ID = "macrocosm-os/finetuning-leaderboard"
@@ -81,7 +80,7 @@ def main():
     if benchmarks is not None:
         with gr.Accordion("Top Model Benchmarks"):
             gr.components.Dataframe(benchmarks)
-            gr.HTML("""<div>PPL computed using a stride of 512. See <a href='https://github.com/macrocosm-os/pretraining/blob/dev/scripts/run_benchmarks.py'>here</a> for the full code.</div>""")
+            gr.HTML("""<div>PPL computed using a stride of 512. See <a href='https://github.com/macrocosm-os/finetuning/blob/dev/scripts/run_benchmarks.py'>here</a> for the full code.</div>""")
             gr.HTML(f"""<div>Last Updated: {benchmark_timestamp.strftime("%Y-%m-%d %H:%M:%S")} (UTC)</div>""")
 
     with gr.Accordion("Evaluation Stats"):
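For orientation, the strings touched in this file are rendered by a Gradio layout: EVALUATION_DETAILS and the benchmark note go through gr.HTML, and the benchmark table sits inside a gr.Accordion. Below is a minimal sketch of that structure, not the actual app.py; the gr.Blocks wrapper, the placeholder DataFrame, and the hard-coded timestamp are assumptions standing in for the leaderboard's real data loading.

```python
# Minimal sketch (assumptions noted above): module-level HTML strings rendered
# via gr.HTML inside a Blocks layout, with the benchmarks table in an Accordion.
import datetime

import gradio as gr
import pandas as pd

EVALUATION_DETAILS = """<ul><li><b>Name:</b> the Hugging Face model name</li></ul>
More stats on <a href="https://taostats.io/subnets/netuid-37/" target="_blank">taostats</a>."""


def main():
    # Placeholder data; the real app loads benchmark results and a timestamp
    # from the macrocosm-os/finetuning-leaderboard repo.
    benchmarks = pd.DataFrame({"model": ["example/model-7b"], "ppl": [12.3]})
    benchmark_timestamp = datetime.datetime.utcnow()

    with gr.Blocks() as demo:
        gr.HTML(EVALUATION_DETAILS)
        if benchmarks is not None:
            with gr.Accordion("Top Model Benchmarks"):
                gr.components.Dataframe(benchmarks)
                gr.HTML("""<div>PPL computed using a stride of 512.</div>""")
                gr.HTML(
                    f"""<div>Last Updated: {benchmark_timestamp.strftime("%Y-%m-%d %H:%M:%S")} (UTC)</div>"""
                )

    demo.launch()


if __name__ == "__main__":
    main()
```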
utils.py CHANGED
@@ -8,7 +8,6 @@ import time
 import traceback
 from collections import defaultdict
 from dataclasses import dataclass
-from email.policy import default
 from typing import Any, Dict, List, Optional, Tuple
 
 import bittensor as bt
@@ -21,8 +20,7 @@ from wandb.apis.public.history import HistoryScan
 
 import competitions
 
-# TODO: Update once registered
-NETUID = 179
+NETUID = 37
 DELAY_SECS = 3
 RETRIES = 3
 
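The NETUID constant updated here is the subnet identifier a script passes to bittensor when reading the subnet's metagraph, and DELAY_SECS / RETRIES point at a simple retry policy around that call. The sketch below shows how such constants are typically consumed; the get_subnet_metagraph helper and the "finney" network name are illustrative assumptions, not code from utils.py.

```python
# Illustrative sketch of how NETUID, DELAY_SECS and RETRIES are commonly used
# together; the get_subnet_metagraph helper is hypothetical, not from utils.py.
import time

import bittensor as bt

NETUID = 37      # netuid of the finetuning subnet on the Bittensor network
DELAY_SECS = 3   # pause between retries
RETRIES = 3      # number of attempts before giving up


def get_subnet_metagraph() -> "bt.metagraph":
    """Fetch the subnet's metagraph, retrying on transient network errors."""
    for attempt in range(RETRIES):
        try:
            return bt.subtensor("finney").metagraph(NETUID)
        except Exception:
            if attempt == RETRIES - 1:
                raise
            time.sleep(DELAY_SECS)
```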