Add IMDB
app.py CHANGED
```diff
@@ -26,7 +26,19 @@ TASK_TO_ID = {
 }
 
 # TODO: remove this hardcoded logic and accept any dataset on the Hub
-DATASETS_TO_EVALUATE = ["emotion", "conll2003"]
+DATASETS_TO_EVALUATE = ["emotion", "conll2003", "imdb"]
+
+###########
+### APP ###
+###########
+st.title("Evaluation as a Service")
+st.markdown(
+    """
+    Welcome to Hugging Face's Evaluation as a Service! This application allows
+    you to evaluate any 🤗 Transformers model on the Hub. Please select the
+    dataset and configuration below.
+    """
+)
 
 dataset_name = st.selectbox("Select a dataset", [f"lewtun/autoevaluate__{dset}" for dset in DATASETS_TO_EVALUATE])
 
```
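The new list entry propagates straight into the dataset picker below it. A quick sketch of the options the selectbox now renders, derived from the lines above:

```python
# Adding "imdb" to DATASETS_TO_EVALUATE surfaces a third option in the UI:
DATASETS_TO_EVALUATE = ["emotion", "conll2003", "imdb"]
options = [f"lewtun/autoevaluate__{dset}" for dset in DATASETS_TO_EVALUATE]
print(options)
# ['lewtun/autoevaluate__emotion', 'lewtun/autoevaluate__conll2003', 'lewtun/autoevaluate__imdb']
```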
```diff
@@ -48,12 +60,14 @@ with st.form(key="form"):
     selected_split = st.selectbox("Select a split", split_names, index=split_names.index(eval_split))
 
     col_mapping = metadata[0]["col_mapping"]
-    col_names = list(col_mapping.
+    col_names = list(col_mapping.keys())
 
     # TODO: figure out how to get all dataset column names (i.e. features) without downloading the dataset itself
     st.markdown("**Map your data columns**")
     col1, col2 = st.columns(2)
 
+    # TODO: find a better way to lay out these items
+    # TODO: propagate this information to the payload
     with col1:
         st.markdown("`text` column")
         st.text("")
```
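Context for the `keys()` line: in AutoTrain-style dataset metadata, `col_mapping` maps the dataset's own column names to the columns the task template expects, so its keys are the names the mapping widgets should offer. A minimal sketch, assuming a hypothetical mapping (the actual entries are not part of this commit):

```python
# Hypothetical col_mapping from the dataset card metadata (entries are an
# assumption): dataset column name -> template column name.
col_mapping = {"text": "text", "label": "target"}

# The diff lists the keys, i.e. the dataset's own column names, for the
# "Map your data columns" widgets:
col_names = list(col_mapping.keys())
print(col_names)  # ['text', 'label']
```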
```diff
@@ -69,7 +83,7 @@ with st.form(key="form"):
 
     selected_models = st.multiselect("Select the models you wish to evaluate", compatible_models, compatible_models[0])
 
-    submit_button = st.form_submit_button("Make
+    submit_button = st.form_submit_button("Make submission")
 
     if submit_button:
         for model in selected_models:
```
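The submit button sits inside the `st.form` block named in the hunk header. For reference, a minimal self-contained sketch of that Streamlit pattern (the model names here are placeholders):

```python
import streamlit as st

# Widgets inside st.form don't trigger a rerun until the submit button fires,
# which is why the whole submission flow hangs off `if submit_button:`.
with st.form(key="form"):
    selected_models = st.multiselect("Select the models you wish to evaluate", ["model-a", "model-b"])
    submit_button = st.form_submit_button("Make submission")

if submit_button:
    st.write(f"Submitting {len(selected_models)} model(s)")
```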
```diff
@@ -85,5 +99,15 @@ with st.form(key="form"):
             json_resp = http_post(
                 path="/evaluate/create", payload=payload, token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
             ).json()
-
-
+            if json_resp["status"] == 1:
+                st.success(f"✅ Successfully submitted model {model} for evaluation with job ID {json_resp['id']}")
+                st.markdown(
+                    f"""
+                    Evaluation takes approximately 1 hour to complete, so grab a ☕ or 🍵 while you wait:
+
+                    * 📊 Click [here](https://huggingface.co/spaces/huggingface/leaderboards) to view the results from your submission
+                    * 💾 Click [here](https://huggingface.co/datasets/autoevaluate/eval-staging-{json_resp['id']}) to view the stored predictions on the Hugging Face Hub
+                    """
+                )
+            else:
+                st.error("🙈 Oh noes, there was an error submitting your evaluation!")
```
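The new branch reads exactly two fields of the backend response. A minimal sketch of the shape it assumes — hypothetical values, since the `/evaluate/create` response isn't shown in this commit:

```python
# Hypothetical response from the AutoTrain backend; only the two fields the
# new branch touches (status, id) are assumed here.
json_resp = {"status": 1, "id": 42}

if json_resp["status"] == 1:
    # Predictions for this job would land at:
    # https://huggingface.co/datasets/autoevaluate/eval-staging-42
    print(f"Submitted; job ID {json_resp['id']}")
else:
    print("Submission failed")
```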