Spaces:
Running
Running
Update_S-Eval_v0.1.2
Browse files- README.md +38 -6
- app.py +178 -0
- constants.py +73 -0
- file/results.xlsx +0 -0
- requirements.txt +16 -0
- src/auto_leaderboard/model_metadata_type.py +26 -0
- src/utils_display.py +143 -0
README.md
CHANGED
@@ -1,13 +1,45 @@
|
|
1 |
---
|
2 |
-
title: S-Eval
|
3 |
-
emoji:
|
4 |
-
colorFrom:
|
5 |
-
colorTo:
|
6 |
sdk: gradio
|
7 |
sdk_version: 5.3.0
|
8 |
app_file: app.py
|
9 |
-
pinned:
|
10 |
license: cc-by-nc-sa-4.0
|
11 |
---
|
12 |
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: 🏆 S-Eval Leaderboard
|
3 |
+
emoji: 🥇
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
sdk_version: 5.3.0
|
8 |
app_file: app.py
|
9 |
+
pinned: true
|
10 |
license: cc-by-nc-sa-4.0
|
11 |
---
|
12 |
|
13 |
+
# Start the configuration
|
14 |
+
|
15 |
+
Most of the variables to change for a default leaderboard are in `src/env.py` (replace the path for your leaderboard) and `src/about.py` (for tasks).
|
16 |
+
|
17 |
+
Results files should have the following format and be stored as json files:
|
18 |
+
```json
|
19 |
+
{
|
20 |
+
"config": {
|
21 |
+
"model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
|
22 |
+
"model_name": "path of the model on the hub: org/model",
|
23 |
+
"model_sha": "revision on the hub",
|
24 |
+
},
|
25 |
+
"results": {
|
26 |
+
"task_name": {
|
27 |
+
"metric_name": score,
|
28 |
+
},
|
29 |
+
"task_name2": {
|
30 |
+
"metric_name": score,
|
31 |
+
}
|
32 |
+
}
|
33 |
+
}
|
34 |
+
```
|
35 |
+
|
36 |
+
Request files are created automatically by this tool.
|
37 |
+
|
38 |
+
If you encounter a problem on the space, don't hesitate to restart it to remove the created eval-queue, eval-queue-bk, eval-results and eval-results-bk folders.
|
39 |
+
|
40 |
+
# Code logic for more complex edits
|
41 |
+
|
42 |
+
You'll find
|
43 |
+
- the main table's column names and properties in `src/display/utils.py`
|
44 |
+
- the logic to read all results and request files, then convert them in dataframe lines, in `src/leaderboard/read_evals.py`, and `src/populate.py`
|
45 |
+
- the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
|
app.py
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr

import numpy as np
import pandas as pd

from constants import *
from src.auto_leaderboard.model_metadata_type import ModelType

# Public names of this module. The previous list referenced helpers
# (make_clickable_model, make_clickable_user, get_submissions, block) that are
# not defined in this file, so `from app import *` would raise AttributeError.
__all__ = ["build_demo", "build_leaderboard", "upload_file", "read_xlsx_leaderboard"]
+
def upload_file(files):
    """Return the local file-system paths of the uploaded gradio file objects."""
    return [uploaded.name for uploaded in files]
19 |
+
|
20 |
+
def read_xlsx_leaderboard():
    """Load the results workbook as a dict mapping sheet name -> DataFrame."""
    # sheet_name=None asks pandas for every sheet in the workbook at once.
    return pd.read_excel(XLSX_DIR, sheet_name=None)
+
|
25 |
+
def get_specific_df(sheet_name):
    """Return one leaderboard sheet, sorted by the "Overall" score (best first)."""
    sheet = read_xlsx_leaderboard()[sheet_name]
    return sheet.sort_values(by="Overall", ascending=False)
|
29 |
+
def get_link_df(sheet_name):
    """Return one workbook sheet unsorted (used for the Model -> Link mapping)."""
    return read_xlsx_leaderboard()[sheet_name]
+
|
34 |
+
# Build the model-name -> markdown-hyperlink lookup used by wrap_model.
# The "main" sheet is assumed to carry "Model" and "Link" columns — TODO confirm
# against file/results.xlsx.
ref_df = get_link_df("main")

# Dict comprehension; the original loop bound its index to `id`, shadowing the
# builtin of the same name.
ref_dic = {
    str(row["Model"]): f'<a href="{row["Link"]}" target="_blank" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{row["Model"]}</a>'
    for _, row in ref_df.iterrows()
}
+
|
43 |
+
def wrap_model(func):
    """Decorator for the df getters: linkify the "Model" column and round scores.

    The wrapped function must return a DataFrame with a "Model" column; every
    other numeric column is rounded to 2 decimal places for display.
    """

    def wrapper(*args, **kwargs):
        df = func(*args, **kwargs)
        # Fall back to the plain model name when no link entry exists, instead
        # of raising KeyError for models missing from the "main" sheet.
        df["Model"] = df["Model"].apply(lambda x: ref_dic.get(x, x))
        # Round every numeric column ("Model" is a string column, so the
        # extra exclusion of the original is redundant but kept for safety).
        cols_to_round = [
            col
            for col in df.select_dtypes(include=[np.number]).columns
            if col != "Model"
        ]
        df[cols_to_round] = df[cols_to_round].round(2)
        return df

    return wrapper
+
|
59 |
+
|
60 |
+
@wrap_model
def get_base_zh_df():
    """Chinese results on the Base Risk Prompt Set."""
    return get_specific_df("base-zh")


@wrap_model
def get_base_en_df():
    """English results on the Base Risk Prompt Set."""
    return get_specific_df("base-en")


@wrap_model
def get_attack_zh_df():
    """Chinese results on the Attack Prompt Set."""
    return get_specific_df("attack-zh")


@wrap_model
def get_attack_en_df():
    """English results on the Attack Prompt Set."""
    return get_specific_df("attack-en")
|
79 |
+
|
80 |
+
def build_leaderboard(
    TABLE_INTRODUCTION, TAX_COLUMNS, get_chinese_df, get_english_df
):
    """Render one leaderboard tab: intro markdown, language radio, results table.

    Args:
        TABLE_INTRODUCTION: markdown shown above the table.
        TAX_COLUMNS: per-category score column names for this tab.
        get_chinese_df / get_english_df: zero-arg callables returning the
            (already linkified and rounded) DataFrame for each language split.
    """

    gr.Markdown(TABLE_INTRODUCTION, elem_classes="markdown-text")
    data_split_radio = gr.Radio(
        choices=["Chinese", "English"],
        value="Chinese",
        label=SELECT_SET_INTRO,
    )

    # Dataframe component showing the currently selected language split.
    data_component = gr.components.Dataframe(
        value=get_chinese_df,
        headers=OVERALL_INFO + TAX_COLUMNS,
        type="pandas",
        datatype=["markdown"] + ["number"] + ["number"] * len(TAX_COLUMNS),
        interactive=False,
        visible=True,
        wrap=True,
        column_widths=[250] + [100] + [150] * len(TAX_COLUMNS),
    )

    def on_data_split_radio(selected_split):
        # The original used two independent `if`s, leaving `updated_data`
        # unbound (UnboundLocalError) for any unexpected radio value; default
        # to the Chinese split instead.
        if "English" in selected_split:
            updated_data = get_english_df()
        else:
            updated_data = get_chinese_df()
        current_columns = data_component.headers  # current column names
        current_datatype = data_component.datatype  # current column datatypes
        filter_component = gr.components.Dataframe(
            value=updated_data,
            headers=current_columns,
            type="pandas",
            datatype=current_datatype,
            interactive=False,
            visible=True,
            wrap=True,
            column_widths=[250] + [100] + [150] * (len(current_columns) - 2),
        )
        return filter_component

    # Swap the displayed data when the radio selection changes.
    data_split_radio.change(
        fn=on_data_split_radio, inputs=data_split_radio, outputs=data_component
    )
+
|
127 |
+
|
128 |
+
def build_demo():
    """Assemble the full gradio Blocks UI for the leaderboard and launch it."""
    demo = gr.Blocks()

    with demo:
        gr.Markdown(LEADERBOARD_INTRODUCTION)

        with gr.Tabs(elem_classes="tab-buttons") as tabs:
            # Tab 1: safety scores on the base risk prompt set.
            with gr.TabItem(
                "Base Risk Prompt Set Results",
                elem_id="evalcrafter-benchmark-tab-table",
                id=0,
            ):
                build_leaderboard(
                    TABLE_INTRODUCTION_1,
                    risk_topic_1_columns,
                    get_base_zh_df,
                    get_base_en_df,
                )
            # Tab 2: attack success rates on the attack prompt set.
            with gr.TabItem(
                "Attack Prompt Set Results",
                elem_id="evalcrafter-benchmark-tab-table",
                id=1,
            ):
                build_leaderboard(
                    TABLE_INTRODUCTION_2,
                    attack_columns,
                    get_attack_zh_df,
                    get_attack_en_df,
                )
            # Final tab: project description.
            with gr.TabItem("📝 About", elem_id="evalcrafter-benchmark-tab-table", id=3):
                gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")

        with gr.Row():
            with gr.Accordion("📙 Citation", open=True):
                gr.Textbox(
                    value=CITATION_BUTTON_TEXT,
                    label=CITATION_BUTTON_LABEL,
                    lines=10,
                    elem_id="citation-button",
                    show_label=True,
                    show_copy_button=True,
                )

    # block.launch(share=True)
    demo.launch()


if __name__ == "__main__":
    build_demo()
constants.py
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Constants shared by the S-Eval leaderboard app (app.py).

# Columns shown in every table before the per-category scores.
OVERALL_INFO = ["Model", "Overall"]

# First-level risk taxonomy: column names for the Base Risk Prompt Set tab.
risk_topic_1_columns = [
    "Crimes and Illegal Activities",
    "Cybersecurity",
    "Data Privacy",
    "Ethics and Morality",
    "Physical and Mental Health",
    "Hate Speech",
    "Extremism",
    "Inappropriate Suggestions",
]
# Column headers in the results workbook are lower-case.
risk_topic_1_columns = [item.lower() for item in risk_topic_1_columns]

# Instruction-attack methods: column names for the Attack Prompt Set tab.
attack_columns = [
    "Adaptive Attack",
    "Positive Induction",
    "Reverse Induction",
    "Code Injection",
    "Instruction Jailbreak",
    "Goal Hijacking",
    "Instruction Encryption",
    "DeepInception",
    "In-Context Attack",
    "Chain of Utterances",
    "Compositional Instructions",
]
attack_columns = [item.lower() for item in attack_columns]

# Results workbook, one sheet per leaderboard split (was "./file//results.xlsx"
# with a doubled slash; harmless but normalized).
XLSX_DIR = "./file/results.xlsx"

LEADERBOARD_INTRODUCTION = """# 🏆 S-Eval Leaderboard
## 🔔 Updates
📣 [2024/10/25]: We release all 20,000 base risk prompts and 200,000 corresponding attack prompts ([Version-0.1.2](https://github.com/IS2Lab/S-Eval)). We also update [🏆 LeaderBoard v0.1.2](https://huggingface.co/spaces/IS2Lab/S-Eval_v0.1.2) with new evaluation results including GPT-4 and other models.
🎉 S-Eval has about 7,000 total views and about 2,000 total downloads across multiple platforms 🎉.

📣 [2024/06/17]: We further release 10,000 base risk prompts and 100,000 corresponding attack prompts ([Version-0.1.1](https://github.com/IS2Lab/S-Eval)). If you require automatic safety evaluations, please feel free to submit a request via [Issues](https://huggingface.co/spaces/IS2Lab/S-Eval/discussions) or contact us by [Email](mailto:xiaohanyuan@zju.edu.cn).

📣 [2024/05/31]: We release 20,000 corresponding attack prompts.

📣 [2024/05/23]: We publish our [paper](https://arxiv.org/abs/2405.14191) and first release 2,000 base risk prompts. You can download the benchmark from our [project](https://github.com/IS2Lab/S-Eval), the [HuggingFace Dataset](https://huggingface.co/datasets/IS2Lab/S-Eval).

### ❗️ Note
Due to the limited machine resource, please refresh the page if a connection timeout error occurs.

You can get more detailed information from our [Project](https://github.com/IS2Lab/S-Eval) and [Paper](https://arxiv.org/abs/2405.14191).
"""

SELECT_SET_INTRO = (
    "Select whether Chinese or English results should be shown."
)

# Typo fixes: "differnet" -> "different"; trailing period added to the second.
TABLE_INTRODUCTION_1 = """In the table below, we summarize the safety scores (%) of different models on Base Risk Prompt Set."""
TABLE_INTRODUCTION_2 = """In the table below, we summarize the attack success rates (%) of the instruction attacks in Attack Prompt Set on different models."""


# NOTE(review): name intentionally kept as LEADERBORAD_INFO (sic) because
# app.py references it under this spelling.
LEADERBORAD_INFO = """
S-Eval is designed to be a new comprehensive, multi-dimensional and open-ended safety evaluation benchmark. So far, S-Eval has 220,000 evaluation prompts in total (and is still in active expansion), including 20,000 base risk prompts (10,000 in Chinese and 10,000 in English) and 200,000 *corresponding* attack prompts derived from 10 popular adversarial instruction attacks. These test prompts are generated based on a comprehensive and unified risk taxonomy, specifically designed to encompass all crucial dimensions of LLM safety evaluation and meant to accurately reflect the varied safety levels of LLMs across these risk dimensions.
More details on the construction of the test suite including model-based test generation, selection and the expert critique LLM can be found in our [paper](https://arxiv.org/abs/2405.14191).
"""


CITATION_BUTTON_LABEL = "If our work is useful for your own, you can cite us with the following BibTex entry:"

CITATION_BUTTON_TEXT = r"""
@article{yuan2024seval,
  title={S-Eval: Automatic and Adaptive Test Generation for Benchmarking Safety Evaluation of Large Language Models},
  author={Xiaohan Yuan and Jinfeng Li and Dongxia Wang and Yuefeng Chen and Xiaofeng Mao and Longtao Huang and Hui Xue and Wenhai Wang and Kui Ren and Jingyi Wang},
  journal={arXiv preprint arXiv:2405.14191},
  year={2024}
}
"""
file/results.xlsx
ADDED
Binary file (22.5 kB). View file
|
|
requirements.txt
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
APScheduler==3.10.1
|
2 |
+
black==23.11.0
|
3 |
+
click==8.1.3
|
4 |
+
datasets==2.14.5
|
5 |
+
gradio
|
6 |
+
gradio_client
|
7 |
+
huggingface-hub>=0.18.0
|
8 |
+
matplotlib==3.7.1
|
9 |
+
numpy==1.24.2
|
10 |
+
openpyxl==3.1.2
|
11 |
+
pandas==2.0.0
|
12 |
+
plotly==5.14.1
|
13 |
+
python-dateutil==2.8.2
|
14 |
+
requests==2.28.2
|
15 |
+
sentencepiece
|
16 |
+
tqdm==4.65.0
|
src/auto_leaderboard/model_metadata_type.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
from enum import Enum
|
3 |
+
|
4 |
+
|
5 |
+
@dataclass
class ModelInfo:
    """Display metadata for one model category."""

    name: str
    symbol: str  # emoji marker shown next to the category name


# Category name -> emoji marker (kept in sync with ModelType below).
model_type_symbols = {
    "LLM": "🟢",
    "ImageLLM": "🔶",
    "VideoLLM": "⭕",
    "Other": "🟦",
}


class ModelType(Enum):
    """Model categories, each carrying its display name and emoji."""

    PT = ModelInfo(name="LLM", symbol="🟢")
    FT = ModelInfo(name="ImageLLM", symbol="🔶")
    IFT = ModelInfo(name="VideoLLM", symbol="⭕")
    RL = ModelInfo(name="Other", symbol="🟦")

    def to_str(self, separator=" "):
        """Return the category as "<symbol><separator><name>", e.g. "🟢 LLM"."""
        info = self.value
        return f"{info.symbol}{separator}{info.name}"
src/utils_display.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# from dataclasses import dataclass
|
2 |
+
|
3 |
+
|
4 |
+
# These classes are for user facing column names, to avoid having to change them
|
5 |
+
# all around the code when a modif is needed
|
6 |
+
# @dataclass
|
7 |
+
# class ColumnContent:
|
8 |
+
# name: str
|
9 |
+
# type: str
|
10 |
+
# displayed_by_default: bool
|
11 |
+
# hidden: bool = False
|
12 |
+
# never_hidden: bool = False
|
13 |
+
# dummy: bool = False
|
14 |
+
|
15 |
+
|
16 |
+
# def fields(raw_class):
|
17 |
+
# return [
|
18 |
+
# v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"
|
19 |
+
# ]
|
20 |
+
|
21 |
+
|
22 |
+
# @dataclass(frozen=True)
|
23 |
+
# class AutoEvalColumn: # Auto evals column
|
24 |
+
|
25 |
+
# model_type_symbol = ColumnContent("T", "str", True)
|
26 |
+
# model = ColumnContent("Model", "markdown", True, never_hidden=True)
|
27 |
+
# average = ColumnContent("Average ⬆️", "number", True)
|
28 |
+
# arc = ColumnContent("ARC", "number", True)
|
29 |
+
# hellaswag = ColumnContent("HellaSwag", "number", True)
|
30 |
+
# mmlu = ColumnContent("MMLU", "number", True)
|
31 |
+
# truthfulqa = ColumnContent("TruthfulQA", "number", True)
|
32 |
+
# model_type = ColumnContent("Type", "str", False)
|
33 |
+
# precision = ColumnContent("Precision", "str", False, True)
|
34 |
+
# license = ColumnContent("Hub License", "str", False)
|
35 |
+
# params = ColumnContent("#Params (B)", "number", False)
|
36 |
+
# likes = ColumnContent("Hub ❤️", "number", False)
|
37 |
+
# revision = ColumnContent("Model sha", "str", False, False)
|
38 |
+
# dummy = ColumnContent(
|
39 |
+
# "model_name_for_query", "str", True
|
40 |
+
# ) # dummy col to implement search bar (hidden by custom CSS)
|
41 |
+
|
42 |
+
|
43 |
+
# @dataclass(frozen=True)
|
44 |
+
# class EloEvalColumn: # Elo evals column
|
45 |
+
# model = ColumnContent("Model", "markdown", True)
|
46 |
+
# gpt4 = ColumnContent("GPT-4 (all)", "number", True)
|
47 |
+
# human_all = ColumnContent("Human (all)", "number", True)
|
48 |
+
# human_instruct = ColumnContent("Human (instruct)", "number", True)
|
49 |
+
# human_code_instruct = ColumnContent("Human (code-instruct)", "number", True)
|
50 |
+
|
51 |
+
|
52 |
+
# @dataclass(frozen=True)
|
53 |
+
# class EvalQueueColumn: # Queue column
|
54 |
+
# model = ColumnContent("model", "markdown", True)
|
55 |
+
# revision = ColumnContent("revision", "str", True)
|
56 |
+
# private = ColumnContent("private", "bool", True)
|
57 |
+
# precision = ColumnContent("precision", "bool", True)
|
58 |
+
# weight_type = ColumnContent("weight_type", "str", "Original")
|
59 |
+
# status = ColumnContent("status", "str", True)
|
60 |
+
|
61 |
+
|
62 |
+
# LLAMAS = [
|
63 |
+
# "huggingface/llama-7b",
|
64 |
+
# "huggingface/llama-13b",
|
65 |
+
# "huggingface/llama-30b",
|
66 |
+
# "huggingface/llama-65b",
|
67 |
+
# ]
|
68 |
+
|
69 |
+
|
70 |
+
# KOALA_LINK = "https://huggingface.co/TheBloke/koala-13B-HF"
|
71 |
+
# VICUNA_LINK = "https://huggingface.co/lmsys/vicuna-13b-delta-v1.1"
|
72 |
+
# OASST_LINK = "https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
|
73 |
+
# DOLLY_LINK = "https://huggingface.co/databricks/dolly-v2-12b"
|
74 |
+
# MODEL_PAGE = "https://huggingface.co/models"
|
75 |
+
# LLAMA_LINK = "https://ai.facebook.com/blog/large-language-model-llama-meta-ai/"
|
76 |
+
# VICUNA_LINK = "https://huggingface.co/CarperAI/stable-vicuna-13b-delta"
|
77 |
+
# ALPACA_LINK = "https://crfm.stanford.edu/2023/03/13/alpaca.html"
|
78 |
+
|
79 |
+
|
80 |
+
# def model_hyperlink(link, model_name):
|
81 |
+
# return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
|
82 |
+
|
83 |
+
|
84 |
+
# def make_clickable_model(model_name):
|
85 |
+
# link = f"https://huggingface.co/{model_name}"
|
86 |
+
|
87 |
+
# if model_name in LLAMAS:
|
88 |
+
# link = LLAMA_LINK
|
89 |
+
# model_name = model_name.split("/")[1]
|
90 |
+
# elif model_name == "HuggingFaceH4/stable-vicuna-13b-2904":
|
91 |
+
# link = VICUNA_LINK
|
92 |
+
# model_name = "stable-vicuna-13b"
|
93 |
+
# elif model_name == "HuggingFaceH4/llama-7b-ift-alpaca":
|
94 |
+
# link = ALPACA_LINK
|
95 |
+
# model_name = "alpaca-13b"
|
96 |
+
# if model_name == "dolly-12b":
|
97 |
+
# link = DOLLY_LINK
|
98 |
+
# elif model_name == "vicuna-13b":
|
99 |
+
# link = VICUNA_LINK
|
100 |
+
# elif model_name == "koala-13b":
|
101 |
+
# link = KOALA_LINK
|
102 |
+
# elif model_name == "oasst-12b":
|
103 |
+
# link = OASST_LINK
|
104 |
+
# else:
|
105 |
+
# link = MODEL_PAGE
|
106 |
+
|
107 |
+
# return model_hyperlink(link, model_name)
|
108 |
+
|
109 |
+
|
110 |
+
# def styled_error(error):
|
111 |
+
# return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
|
112 |
+
|
113 |
+
|
114 |
+
# def styled_warning(warn):
|
115 |
+
# return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
|
116 |
+
|
117 |
+
|
118 |
+
# def styled_message(message):
|
119 |
+
# return (
|
120 |
+
# f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
|
121 |
+
# )
|
122 |
+
|
123 |
+
# Homepage / model-card links for every model shown on the leaderboard.
Qwen_1_8B_Chat_Link = "https://huggingface.co/Qwen/Qwen-1_8B-Chat"
Qwen_7B_Chat_Link = "https://huggingface.co/Qwen/Qwen-7B-Chat"
Qwen_14B_Chat_Link = "https://huggingface.co/Qwen/Qwen-14B-Chat"
Qwen_72B_Chat_Link = "https://huggingface.co/Qwen/Qwen-72B-Chat"
Gemma_2B_it_Link = "https://huggingface.co/google/gemma-2b-it"
# NOTE(review): double underscore in the name below looks like a typo, but it
# is kept as-is in case other modules reference this exact spelling.
Gemma_7B_it__Link = "https://huggingface.co/google/gemma-7b-it"
ChatGLM3_6B_Link = "https://huggingface.co/THUDM/chatglm3-6b"
Mistral_7B_Instruct_v0_2_Link = "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2"
LLaMA_2_7B_Chat_Link = "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
LLaMA_2_13B_Chat_Link = "https://huggingface.co/meta-llama/Llama-2-13b-chat-hf"
LLaMA_2_70B_Chat_Link = "https://huggingface.co/meta-llama/Llama-2-70b-chat-hf"
LLaMA_3_8B_Instruct_Link = "https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"
LLaMA_3_70B_Instruct_Link = "https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct"
Vicuna_7B_v1_3_Link = "https://huggingface.co/lmsys/vicuna-7b-v1.3"
Vicuna_13B_v1_3_Link = "https://huggingface.co/lmsys/vicuna-13b-v1.3"
Vicuna_33B_v1_3_Link = "https://huggingface.co/lmsys/vicuna-33b-v1.3"
Baichuan2_13B_Chat_Link = "https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat"
Yi_34B_Chat_Link = "https://huggingface.co/01-ai/Yi-34B-Chat"
# Proprietary / API-only models link to their provider documentation instead.
GPT_4_Turbo_Link = "https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4"
ErnieBot_4_0_Link = "https://cloud.baidu.com/doc/WENXINWORKSHOP/s/clntwmv7t"
Gemini_1_0_Pro_Link = "https://ai.google.dev/gemini-api/docs/models/gemini"