import json
import os
import shutil
from datetime import datetime
from pathlib import Path
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import HfApi, Repository
from utils import http_post, validate_json
if Path(".env").is_file():
    load_dotenv(".env")
HF_TOKEN = os.getenv("HF_TOKEN")
AUTONLP_USERNAME = os.getenv("AUTONLP_USERNAME")
HF_AUTONLP_BACKEND_API = os.getenv("HF_AUTONLP_BACKEND_API")
LOCAL_REPO = "submission_repo"
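# These values are expected to be provided as Space secrets (or via a local .env file):
# HF_TOKEN authenticates pushes to the submission dataset repo and the call to the
# evaluation backend, AUTONLP_USERNAME identifies the backend account, and
# HF_AUTONLP_BACKEND_API is the backend's base URL.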
## TODO ##
# 1. Add check that fields are nested under `tasks` field correctly
# 2. Add check that names of tasks and datasets are valid
###########
### APP ###
###########
st.title("GEM Submissions")
st.markdown(
    """
    Welcome to the [GEM benchmark](https://gem-benchmark.com/)! GEM is a benchmark
    environment for Natural Language Generation with a focus on its Evaluation, both
    through human annotations and automated Metrics.

    GEM aims to:

    - measure NLG progress across many NLG tasks and languages.
    - audit data and models and present results via data cards and model robustness reports.
    - develop standards for evaluation of generated text using both automated and human metrics.

    Use this page to submit your system's predictions to the benchmark.
    """
)
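# The form below gathers everything needed for a submission in one place: the
# predictions file, a Hub access token to identify the submitter, and a submit button.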
with st.form(key="form"):
    # Flush local repo
    shutil.rmtree(LOCAL_REPO, ignore_errors=True)
    submission_errors = 0
    uploaded_file = st.file_uploader("Upload submission.json file", type=["json"])

    if uploaded_file:
        if uploaded_file.name != "submission.json":
            st.error("❌ Invalid filename. Please upload a submission.json file.")
            submission_errors += 1
        else:
            data = str(uploaded_file.read(), "utf-8")
            json_data = json.loads(data)
            is_valid, message = validate_json(json_data)
            if is_valid:
                st.success(message)
            else:
                st.error(message)
                submission_errors += 1
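    # `validate_json` comes from this repo's `utils` module and, judging by its use
    # above, returns an (is_valid, message) tuple. A minimal sketch of the expected
    # contract (an assumption, not the actual implementation):
    #
    #     def validate_json(data: dict) -> tuple[bool, str]:
    #         if "submission_name" not in data or "tasks" not in data:
    #             return False, "Missing `submission_name` or `tasks` field"
    #         return True, "Submission file is valid"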
    with st.expander("Submission format"):
        st.markdown(
            """
            Please follow this JSON format for your `submission.json` file:

            ```json
            {
                "submission_name": "An identifying name of your system",
                "param_count": 123,  # The number of parameters your system has.
                "description": "An optional brief description of the system that will be shown on the results page",
                "tasks": {
                    "dataset_identifier": {
                        "values": ["output-0", "output-1", "..."],  # A list of system outputs.
                        "keys": ["gem_id-0", "gem_id-1", "..."]  # A list of GEM IDs.
                    }
                }
            }
            ```

            In this case, `dataset_identifier` is the identifier of the dataset followed by
            an identifier of the set the outputs were created from, such as `_validation` or
            `_test`. For example, the `mlsum_de` test set would have the identifier
            `mlsum_de_test`. The `keys` field is needed to avoid accidental shuffling that
            would impact your metrics: simply list the `gem_id` of each output example in
            the same order as your `values`. Please see the sample submission below:
            """
        )
        with open("sample-submission.json", "r") as f:
            example_submission = json.load(f)
        st.json(example_submission)
    token = st.text_input(
        "Enter 🤗 Hub access token",
        type="password",
        help="You can generate an access token via your 🤗 Hub settings. See the [docs](https://huggingface.co/docs/hub/security#user-access-tokens) for more details.",
    )
    if token:
        try:
            # Verify the token by asking the Hub who it belongs to
            user_info = HfApi().whoami(token)
        except Exception:
            st.error("❌ Invalid access token")
            submission_errors += 1

    submit_button = st.form_submit_button("Make Submission")
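# Everything below runs only once the submit button has been pressed and no
# validation errors were recorded while the form was filled in.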
if submit_button and submission_errors == 0:
    st.write("⏳ Preparing submission for evaluation ...")
    user_name = user_info["name"]
    submission_name = json_data["submission_name"]

    # Create the submission dataset repo under the GEM-submissions org
    dataset_repo_url = f"https://huggingface.co/datasets/GEM-submissions/gem-{user_name}"
    repo = Repository(
        local_dir=LOCAL_REPO,
        clone_from=dataset_repo_url,
        repo_type="dataset",
        private=True,
        use_auth_token=HF_TOKEN,
    )
    # Store submission metadata in the dataset card
    submission_metadata = {"benchmark": "gem", "type": "prediction", "submission_name": submission_name}
    repo.repocard_metadata_save(submission_metadata)

    with open(f"{LOCAL_REPO}/submission.json", "w", encoding="utf-8") as f:
        json.dump(json_data, f)

    # TODO: add informative commit msg
    commit_url = repo.push_to_hub()
    if commit_url is not None:
        commit_sha = commit_url.split("/")[-1]
    else:
        # Nothing new was pushed, so fall back to the current HEAD commit
        commit_sha = repo.git_head_commit_url().split("/")[-1]

    # Build a unique submission ID from the name, commit and timestamp
    submission_time = str(int(datetime.now().timestamp()))
    submission_id = submission_name + "__" + commit_sha + "__" + submission_time

    # Request an evaluation job from the backend
    payload = {
        "username": AUTONLP_USERNAME,
        "dataset": "GEM/references",
        "task": 1,
        "model": "gem",
        "submission_dataset": f"benchmarks/gem-{user_name}",
        "submission_id": submission_id,
        "col_mapping": {},
        "split": "test",
        "config": None,
    }
    json_resp = http_post(
        path="/evaluate/create", payload=payload, token=HF_TOKEN, domain=HF_AUTONLP_BACKEND_API
    ).json()
    if json_resp["status"] == 1:
        st.success(f"✅ Submission {submission_name} was successfully submitted for evaluation!")
    else:
        st.error("🙈 Oh noes, there was an error submitting your submission! Please contact the organisers")
    # Flush local repo
    shutil.rmtree(LOCAL_REPO, ignore_errors=True)
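# To try the app locally (assuming the secrets above are set, the repo's `utils.py` and
# `sample-submission.json` sit alongside, and this file is named app.py), something like
# the following should work:
#
#     pip install streamlit python-dotenv huggingface_hub
#     streamlit run app.py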