import os
import uuid
from pathlib import Path

import streamlit as st
from datasets import get_dataset_config_names
from dotenv import load_dotenv
from huggingface_hub import list_datasets

from utils import get_compatible_models, get_metadata, http_get, http_post
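
# Local development reads secrets from a .env file; on a deployed Space these
# values are provided as environment variables/secrets instead.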
if Path(".env").is_file():
    load_dotenv(".env")
HF_TOKEN = os.getenv("HF_TOKEN")
AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API")
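
# AutoTrain task names mapped to the backend's numeric task IDs. The commented-out
# entries are tasks that AutoTrain does not fully support yet.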
TASK_TO_ID = {
    "binary_classification": 1,
    "multi_class_classification": 2,
    # "multi_label_classification": 3, # Not fully supported in AutoTrain
    "entity_extraction": 4,
    "extractive_question_answering": 5,
    "translation": 6,
    "summarization": 8,
    # "single_column_regression": 10,
}
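
# AutoTrain task names mapped to the corresponding Hub pipeline tags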
AUTOTRAIN_TASK_TO_HUB_TASK = {
    "binary_classification": "text-classification",
    "multi_class_classification": "text-classification",
    # "multi_label_classification": "text-classification", # Not fully supported in AutoTrain
    "entity_extraction": "token-classification",
    "extractive_question_answering": "question-answering",
    "translation": "translation",
    "summarization": "summarization",
    # "single_column_regression": 10,
}
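
# Reverse lookup from Hub task to AutoTrAIN task. Note that the inversion is lossy:
# both classification tasks share the "text-classification" tag, so the reverse map
# keeps whichever entry was inserted last (here, "multi_class_classification").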
HUB_TASK_TO_AUTOTRAIN_TASK = {v: k for k, v in AUTOTRAIN_TASK_TO_HUB_TASK.items()}
###########
### APP ###
###########
st.title("Evaluation as a Service")
st.markdown(
    """
    Welcome to Hugging Face's Evaluation as a Service! This application allows
    you to evaluate any 🤗 Transformers model with a dataset on the Hub. Please
    select the dataset and configuration below. The results of your evaluation
    will be displayed on the public leaderboard
    [here](https://huggingface.co/spaces/autoevaluate/leaderboards).
    """
)
all_datasets = [d.id for d in list_datasets()]
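# Support shareable deep links: a ?dataset=<id> query parameter pre-selects that dataset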
query_params = st.experimental_get_query_params()
default_dataset = all_datasets[0]
if "dataset" in query_params:
if len(query_params["dataset"]) > 0 and query_params["dataset"][0] in all_datasets:
default_dataset = query_params["dataset"][0]
selected_dataset = st.selectbox("Select a dataset", all_datasets, index=all_datasets.index(default_dataset))
st.experimental_set_query_params(**{"dataset": [selected_dataset]})
# TODO: In general this will be a list of multiple configs => need to generalise logic here
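# get_metadata (from utils) is expected to return the dataset's evaluation metadata, or None if it has none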
metadata = get_metadata(selected_dataset)
if metadata is None:
    st.warning("No evaluation metadata found. Please configure the evaluation job below.")
with st.expander("Advanced configuration"):
    ## Select task
    selected_task = st.selectbox("Select a task", list(AUTOTRAIN_TASK_TO_HUB_TASK.values()))
    ## Select config
    configs = get_dataset_config_names(selected_dataset)
    selected_config = st.selectbox("Select a config", configs)
    ## Select splits
    splits_resp = http_get(path="/splits", domain=DATASETS_PREVIEW_API, params={"dataset": selected_dataset})
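    # The preview backend returns the splits of every config, so keep only those of the selected config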
    if splits_resp.status_code == 200:
        split_names = []
        all_splits = splits_resp.json()
        for split in all_splits["splits"]:
            if split["config"] == selected_config:
                split_names.append(split["split"])
        selected_split = st.selectbox("Select a split", split_names)  # , index=split_names.index(eval_split))
    ## Show columns
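    # Fetch a sample of rows just to discover the dataset's column names for the mapping UI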
    rows_resp = http_get(
        path="/rows",
        domain=DATASETS_PREVIEW_API,
        params={"dataset": selected_dataset, "config": selected_config, "split": selected_split},
    ).json()
    columns = rows_resp["columns"]
    col_names = [c["column"]["name"] for c in columns]
    # splits = metadata[0]["splits"]
    # split_names = list(splits.values())
    # eval_split = splits.get("eval_split", split_names[0])
    # selected_split = st.selectbox("Select a split", split_names, index=split_names.index(eval_split))
    # TODO: add a function to handle the mapping task <--> column mapping
    # col_mapping = metadata[0]["col_mapping"]
    # col_names = list(col_mapping.keys())
st.markdown("**Map your data columns**")
col1, col2 = st.columns(2)
# TODO: find a better way to layout these items
# TODO: propagate this information to payload
# TODO: make it task specific
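    # Build the mapping from the dataset's column names to AutoTrain's canonical names,
    # e.g. {"sentence": "text", "label": "target"} for a GLUE-style dataset. The empty
    # st.text("") calls below are spacers that vertically align the labels in col1 with
    # the select boxes in col2.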
    col_mapping = {}
    with col1:
        if selected_task == "text-classification":
            st.markdown("`text` column")
            st.text("")
            st.text("")
            st.text("")
            st.text("")
            st.markdown("`target` column")
        elif selected_task == "question-answering":
            st.markdown("`context` column")
            st.text("")
            st.text("")
            st.text("")
            st.text("")
            st.markdown("`question` column")
    with col2:
        text_col = st.selectbox("This column should contain the text you want to classify", col_names, index=0)
        target_col = st.selectbox(
            "This column should contain the labels you want to assign to the text", col_names, index=1
        )
        col_mapping[text_col] = "text"
        col_mapping[target_col] = "target"
with st.form(key="form"):
    compatible_models = get_compatible_models(selected_task, selected_dataset)
    selected_models = st.multiselect(
        "Select the models you wish to evaluate", compatible_models
    )  # , compatible_models[0])
    submit_button = st.form_submit_button("Make submission")
    if submit_button:
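        # Truncate the UUID to keep project names short. NB: keeping only 3 characters
        # means name collisions across submissions are possible.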
        project_id = str(uuid.uuid4())[:3]
        autotrain_task_name = HUB_TASK_TO_AUTOTRAIN_TASK[selected_task]
        payload = {
            "username": AUTOTRAIN_USERNAME,
            "proj_name": f"my-eval-project-{project_id}",
            "task": TASK_TO_ID[autotrain_task_name],
            "config": {
                "language": "en",
                "max_models": 5,
                "instance": {
                    "provider": "aws",
                    "instance_type": "ml.g4dn.4xlarge",
                    "max_runtime_seconds": 172800,
                    "num_instances": 1,
                    "disk_size_gb": 150,
                },
                "evaluation": {
                    "metrics": [],
                    "models": selected_models,
                },
            },
        }
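        # Create the evaluation project on the AutoTrain backend; the response carries the new project's ID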
        project_json_resp = http_post(
            path="/projects/create", payload=payload, token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
        ).json()
        print(project_json_resp)
        if project_json_resp["created"]:
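            # Register the Hub dataset with the project. The "split" value (4) is an enum
            # defined by the AutoTrain backend and is kept as-is here.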
            payload = {
                "split": 4,
                "col_mapping": col_mapping,
                "load_config": {"max_size_bytes": 0, "shuffle": False},
            }
            data_json_resp = http_post(
                path=f"/projects/{project_json_resp['id']}/data/{selected_dataset}",
                payload=payload,
                token=HF_TOKEN,
                domain=AUTOTRAIN_BACKEND_API,
                params={"type": "dataset", "config_name": selected_config, "split_name": selected_split},
            ).json()
            print(data_json_resp)
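            # A download_status of 1 means the backend fetched the dataset, so we can
            # start processing, which launches the evaluation job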
            if data_json_resp["download_status"] == 1:
                train_json_resp = http_get(
                    path=f"/projects/{project_json_resp['id']}/data/start_process",
                    token=HF_TOKEN,
                    domain=AUTOTRAIN_BACKEND_API,
                ).json()
                print(train_json_resp)
                if train_json_resp["success"]:
                    st.success(f"✅ Successfully submitted evaluation job with project ID {project_id}")
                    st.markdown(
                        """
                        Evaluation takes approximately 1 hour to complete, so grab a ☕ or 🍵 while you wait:

                        * 📊 Click [here](https://huggingface.co/spaces/huggingface/leaderboards) to view the results from your submission
                        """
                    )
                else:
                    st.error("🙈 Oh noes, there was an error submitting your evaluation job!")