Rename `HuggingFaceH4` to `open-llm-leaderboard` org in modelcards

#14
by Wauplin (HF staff) - opened
Files changed (2)
  1. app.py +2 -2
  2. functions.py +3 -3
app.py CHANGED
@@ -27,11 +27,11 @@ def refresh(how_much=43200): # default to 12 hour
     refresh(600) # 10 minutes if any error happens

 gradio_title="🧐 Open LLM Leaderboard Results PR Opener"
-gradio_desc= """🎯 This tool's aim is to provide [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) results in the model card.
+gradio_desc= """🎯 This tool's aim is to provide [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) results in the model card.

 ## 💭 What Does This Tool Do:

-- This tool adds the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) result of your model at the end of your model card.
+- This tool adds the [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) result of your model at the end of your model card.

 - This tool also adds evaluation results as your model's metadata to showcase the evaluation results as a widget.

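The description above says the tool both appends the leaderboard results to the end of the model card and writes them into the card's metadata so they render as an evaluation widget. As a minimal sketch of that metadata step, assuming the Hub's standard `model-index` schema and `huggingface_hub.metadata_update` (illustrative only, not this Space's actual code; the repo id, dataset, and metric values are placeholders):

```python
# Illustrative sketch only: not the Space's implementation.
# Assumes the standard Hub `model-index` metadata schema.
from huggingface_hub import metadata_update

repo_id = "some-user/some-model"  # hypothetical target repository

metadata = {
    "model-index": [
        {
            "name": "some-model",
            "results": [
                {
                    "task": {"type": "text-generation", "name": "Text Generation"},
                    "dataset": {"type": "placeholder-benchmark", "name": "Placeholder benchmark"},
                    "metrics": [{"type": "acc", "value": 0.0, "name": "accuracy (placeholder)"}],
                    "source": {
                        "name": "Open LLM Leaderboard",
                        "url": f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo_id}",
                    },
                }
            ],
        }
    ]
}

# create_pr=True opens a pull request on the target repo instead of pushing directly,
# which matches how this "PR Opener" Space proposes changes.
metadata_update(repo_id, metadata, create_pr=True)
```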
functions.py CHANGED
@@ -33,7 +33,7 @@ def get_details_url(repo):


 def get_query_url(repo):
-    return f"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query={repo}"
+    return f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"


 def get_task_summary(results):
@@ -118,7 +118,7 @@ def get_eval_results(repo):
     md_writer.value_matrix = [["Avg.", results['Average ⬆️']]] + [[v["dataset_name"], v["metric_value"]] for v in task_summary.values()]

     text = f"""
-# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
 Detailed results can be found [here]({get_details_url(repo)})

 {md_writer.dumps()}
@@ -130,7 +130,7 @@ def get_edited_yaml_readme(repo, token: str | None):
     card = ModelCard.load(repo, token=token)
     results = search(df, repo)

-    common = {"task_type": 'text-generation', "task_name": 'Text Generation', "source_name": "Open LLM Leaderboard", "source_url": f"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query={repo}"}
+    common = {"task_type": 'text-generation', "task_name": 'Text Generation', "source_name": "Open LLM Leaderboard", "source_url": f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"}

     tasks_results = get_task_summary(results)

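As a quick sanity check of the rename, the updated helper can be exercised on its own; the repo id below is just an example:

```python
# Reproduced from the updated functions.py in this PR.
def get_query_url(repo):
    return f"https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query={repo}"

# Example invocation with an arbitrary repo id:
print(get_query_url("mistralai/Mistral-7B-v0.1"))
# -> https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mistralai/Mistral-7B-v0.1
```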