yuchenlin committed
Commit 7f1ee50 • 1 Parent(s): 62815c6

references

Files changed (2):
  1. app.py +4 -1
  2. constants.py +1 -1
app.py CHANGED
@@ -54,7 +54,10 @@ def make_clickable_model(model_name, model_info):
         link = model_info[model_name]['hf_name']
     else:
         link = f"https://huggingface.co/{model_info[model_name]['hf_name']}"
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_info[model_name]["pretty_name"]}</a>'
+    if model_name.startswith("gpt"):
+        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted; background-color: lightgray;">{model_info[model_name]["pretty_name"]}</a>'
+    else:
+        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_info[model_name]["pretty_name"]}</a>'
 
 
 def build_demo(original_df, TYPES):
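For readability, here is a sketch of `make_clickable_model` as it stands after this commit. The condition guarding the first branch is cut off above the hunk, so the `startswith("http")` check is an assumption, and the `model_info` entries in the usage example are hypothetical; per the commit message, the gray background presumably flags the GPT models used as references on the leaderboard.

```python
# Sketch of make_clickable_model after this commit (assumptions noted inline).
def make_clickable_model(model_name, model_info):
    # Assumption: the real branch condition sits above the hunk; the diff only
    # shows that 'hf_name' is either used verbatim or prefixed with the HF URL.
    if model_info[model_name]['hf_name'].startswith("http"):
        link = model_info[model_name]['hf_name']
    else:
        link = f"https://huggingface.co/{model_info[model_name]['hf_name']}"
    if model_name.startswith("gpt"):
        # New in this commit: gray background to set GPT (reference) models apart.
        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted; background-color: lightgray;">{model_info[model_name]["pretty_name"]}</a>'
    else:
        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_info[model_name]["pretty_name"]}</a>'

# Hypothetical usage with a made-up model_info mapping:
model_info = {
    "gpt-3.5-turbo": {"hf_name": "https://platform.openai.com/docs/models", "pretty_name": "GPT-3.5 Turbo"},
    "Llama-2-7b": {"hf_name": "meta-llama/Llama-2-7b-hf", "pretty_name": "Llama-2-7B"},
}
print(make_clickable_model("gpt-3.5-turbo", model_info))  # gray-highlighted link
print(make_clickable_model("Llama-2-7b", model_info))     # plain dotted-underline link
```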
constants.py CHANGED
@@ -16,7 +16,7 @@ TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body>
 
 INTRODUCTION_TEXT= """
 # URIAL Bench (Evaluating Base LLMs with URIAL on MT-Bench)
-[🛜 Website](https://allenai.github.io/re-align/index.html) | [💻 GitHub](https://github.com/Re-Align/URIAL) | [📖 Paper](https://arxiv.org/abs/2312.01552) | [🐦 Twitter](https://x.com/billyuchenlin/status/1759541978881311125?s=20)
+[🛜 Website](https://allenai.github.io/re-align/index.html) | [💻 GitHub](https://github.com/Re-Align/URIAL) | [📖 Paper](https://arxiv.org/abs/2312.01552) | [🐦 Tweet 1](https://x.com/billyuchenlin/status/1759541978881311125?s=20) | [🐦 Tweet 2](https://x.com/billyuchenlin/status/1762206077566013505?s=20)
 
 > URIAL Bench tests the capacity of base LLMs for alignment without introducing the factors of fine-tuning (learning rate, data, etc.), which are hard to control for fair comparisons.
 Specifically, we use [URIAL](https://github.com/Re-Align/URIAL/tree/main/run_scripts/mt-bench#run-urial-inference) to align a base LLM, and evaluate its performance on MT-Bench.