ignacioct commited on
Commit
b1f968e
1 Parent(s): 78fb99e

initial push

Browse files
Files changed (4) hide show
  1. README.md +3 -5
  2. app.py +389 -0
  3. dumpy.py +52 -0
  4. requirements.txt +72 -0
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- title: DiBTSpanishDashboard
3
- emoji: 😻
4
  colorFrom: indigo
5
  colorTo: indigo
6
  sdk: gradio
@@ -8,6 +8,4 @@ sdk_version: 4.21.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Spanish - Multilingual Prompt Evaluation Project
3
+ emoji: 🌍
4
  colorFrom: indigo
5
  colorTo: indigo
6
  sdk: gradio
 
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
+ ---
 
 
app.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from apscheduler.schedulers.background import BackgroundScheduler
2
+ import datetime
3
+ import os
4
+ from typing import Dict, Tuple
5
+ from uuid import UUID
6
+
7
+ import altair as alt
8
+ import argilla as rg
9
+ from argilla.feedback import FeedbackDataset
10
+ from argilla.client.feedback.dataset.remote.dataset import RemoteFeedbackDataset
11
+ import gradio as gr
12
+ import pandas as pd
13
+
14
# Spanish translations of the legend and title strings shown in the dashboard.
# The chart/table code below passes these verbatim to Altair and Gradio.
ANNOTATED = 'Anotaciones'
NUMBER_ANNOTATED = 'Anotaciones totales'
PENDING = 'Pendiente'

NUMBER_ANNOTATORS = "Número de anotadores"
NAME = 'Username'
NUMBER_ANNOTATIONS = 'Número de anotaciones'

CATEGORY = 'Categoría'
24
+
25
+
26
+
27
def obtain_source_target_datasets() -> (
    Tuple[
        FeedbackDataset | RemoteFeedbackDataset, FeedbackDataset | RemoteFeedbackDataset
    ]
):
    """
    Fetch the annotation dataset from Argilla and split it into two views.

    Returns:
        A tuple ``(pending, submitted)``: the first element contains only the
        records still awaiting a response, the second only the records that
        already received one. Both are derived from the dataset named by the
        SOURCE_DATASET / SOURCE_WORKSPACE environment variables.
    """
    dataset = rg.FeedbackDataset.from_argilla(
        os.getenv("SOURCE_DATASET"), workspace=os.getenv("SOURCE_WORKSPACE")
    )

    # NOTE(review): an earlier revision loaded the submitted records from a
    # separate RESULTS_DATASET / RESULTS_WORKSPACE dataset instead of
    # filtering the source dataset — confirm the filter-based approach is
    # the intended one.
    pending_view = dataset.filter_by(response_status=["pending"])
    submitted_view = dataset.filter_by(response_status=["submitted"])

    return pending_view, submitted_view
54
+
55
+
56
def get_user_annotations_dictionary(
    dataset: FeedbackDataset | RemoteFeedbackDataset,
) -> Dict[str, int]:
    """
    Count how many responses each user contributed to *dataset*.

    Args:
        dataset: The (remote) feedback dataset whose records are inspected.
    Returns:
        A mapping from Argilla username to that user's number of responses.
    """
    counts: Dict[str, int] = {}
    for record in dataset:
        for response in record.responses:
            user_key = str(response.user_id)
            counts[user_key] = counts.get(user_key, 0) + 1

    # Re-key the mapping from raw user ids to human-readable usernames
    # (one rg.User.from_id lookup per distinct annotator).
    for user_key in list(counts):
        counts[rg.User.from_id(UUID(user_key)).username] = counts.pop(user_key)

    return counts
80
+
81
+
82
def donut_chart_total() -> alt.Chart:
    """
    Build a donut chart showing overall annotation progress.

    A record counts as annotated once it is in the submitted view; the pending
    slice is TARGET_RECORDS (env var) minus the annotated count.

    Returns:
        The combined Altair chart (arc marks plus value labels).
    """
    done = len(target_dataset)
    todo = int(os.getenv("TARGET_RECORDS")) - done

    # NOTE(review): the "colors" column is not bound to any encoding; Altair
    # colors the categories from its default scheme.
    frame = pd.DataFrame(
        {
            "values": [done, todo],
            "category": [ANNOTATED, PENDING],
            "colors": ["#4CAF50", "#757575"],  # Green for Completed, Grey for Remaining
        }
    )

    base = alt.Chart(frame).encode(
        theta=alt.Theta("values:Q", stack=True),
        radius=alt.Radius(
            "values", scale=alt.Scale(type="sqrt", zero=True, rangeMin=20)
        ),
        color=alt.Color("category:N", legend=alt.Legend(title=CATEGORY)),
    )

    arcs = base.mark_arc(innerRadius=20, stroke="#fff")
    labels = base.mark_text(radiusOffset=20).encode(text="values:Q")

    return arcs + labels
119
+
120
+
121
def donut_chart_target() -> alt.Chart:
    """
    Build a donut chart with annotation progress towards the v1 objective.

    A record counts as annotated once it is in the submitted view; the pending
    slice is TARGET_ANNOTATIONS_V1 (env var) minus the annotated count.

    Returns:
        The combined Altair chart (arc marks plus value labels).
    """
    annotated_records = len(target_dataset)
    pending_records = int(os.getenv("TARGET_ANNOTATIONS_V1")) - annotated_records

    # Prepare data for the donut chart
    source = pd.DataFrame(
        {
            "values": [annotated_records, pending_records],
            "category": [ANNOTATED, PENDING],
            "colors": ["#4CAF50", "#757575"],  # Green for Completed, Grey for Remaining
        }
    )

    base = alt.Chart(source).encode(
        theta=alt.Theta("values:Q", stack=True),
        radius=alt.Radius(
            "values", scale=alt.Scale(type="sqrt", zero=True, rangeMin=20)
        ),
        # Fix: use the translated CATEGORY constant for the legend title so
        # this chart matches donut_chart_total instead of showing the
        # untranslated literal "Category".
        color=alt.Color("category:N", legend=alt.Legend(title=CATEGORY)),
    )

    c1 = base.mark_arc(innerRadius=20, stroke="#fff")

    c2 = base.mark_text(radiusOffset=20).encode(text="values:Q")

    return c1 + c2
158
+
159
+
160
def kpi_chart_remaining() -> alt.Chart:
    """
    Build a big-number KPI chart with the count of records left to annotate.

    Returns:
        An Altair text mark rendering the pending-record count.
    """
    remaining = int(os.getenv("TARGET_RECORDS")) - len(target_dataset)
    frame = pd.DataFrame({"Category": [PENDING], "Value": [remaining]})

    # Single large text mark acting as a KPI tile.
    kpi = alt.Chart(frame).mark_text(
        fontSize=100, align="center", baseline="middle", color="steelblue"
    )
    return kpi.encode(text="Value:N").properties(title=PENDING, width=250, height=200)
180
+
181
+
182
def kpi_chart_submitted() -> alt.Chart:
    """
    Build a big-number KPI chart with the total count of annotated records.

    Returns:
        An Altair text mark rendering the annotated-record count.
    """
    submitted_count = len(target_dataset)
    frame = pd.DataFrame({"Category": [NUMBER_ANNOTATED], "Value": [submitted_count]})

    # Single large text mark acting as a KPI tile (orange accent color).
    kpi = alt.Chart(frame).mark_text(
        fontSize=100, align="center", baseline="middle", color="#e68b39"
    )
    return kpi.encode(text="Value:N").properties(
        title=NUMBER_ANNOTATED, width=250, height=200
    )
203
+
204
+
205
def kpi_chart() -> alt.Chart:
    """
    Build a big-number KPI chart with the total count of annotators.

    Returns:
        An Altair text mark rendering the annotator count.
    """
    # One entry per distinct annotator in the global username -> count map.
    annotator_count = len(user_ids_annotations)
    frame = pd.DataFrame({"Category": [NUMBER_ANNOTATORS], "Value": [annotator_count]})

    # Single large text mark acting as a KPI tile.
    kpi = alt.Chart(frame).mark_text(
        fontSize=100, align="center", baseline="middle", color="steelblue"
    )
    return kpi.encode(text="Value:N").properties(
        title=NUMBER_ANNOTATORS, width=250, height=200
    )
230
+
231
+
232
def render_hub_user_link(hub_id):
    """Return an HTML anchor linking *hub_id* to its Hugging Face profile page."""
    profile_url = "https://huggingface.co/" + hub_id
    style = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{profile_url}" style="{style}">{hub_id}</a>'
235
+
236
+
237
def obtain_top_users(user_ids_annotations: Dict[str, int], N: int = 50) -> pd.DataFrame:
    """
    Build the leaderboard of the N users with the most annotations.

    Args:
        user_ids_annotations: Mapping of username -> number of annotations.
        N: Maximum number of rows to return (defaults to 50).

    Returns:
        A dataframe whose username column is rendered as profile links,
        sorted by annotation count in descending order and truncated to N rows.
    """
    leaderboard = pd.DataFrame(
        user_ids_annotations.items(), columns=[NAME, NUMBER_ANNOTATIONS]
    )
    leaderboard[NAME] = leaderboard[NAME].apply(render_hub_user_link)
    return leaderboard.sort_values(by=NUMBER_ANNOTATIONS, ascending=False).head(N)
254
+
255
+
256
def fetch_data() -> None:
    """
    This function fetches the data from the source and target datasets and updates the global variables.
    """

    print(f"Starting to fetch data: {datetime.datetime.now()}")

    # Refresh the module-level state that the chart callbacks read.
    global source_dataset, target_dataset, user_ids_annotations, annotated, remaining, percentage_completed, top_dataframe
    source_dataset, target_dataset = obtain_source_target_datasets()
    user_ids_annotations = get_user_annotations_dictionary(target_dataset)

    # Progress counters against the TARGET_RECORDS goal (env var).
    annotated = len(target_dataset)
    remaining = int(os.getenv("TARGET_RECORDS")) - annotated
    percentage_completed = round(
        (annotated / int(os.getenv("TARGET_RECORDS"))) * 100, 1
    )
    # NOTE(review): `percentage_completed` and `top_dataframe` are declared
    # global but not read anywhere in the visible code — confirm they are
    # still needed.

    # Print the current date and time
    print(f"Data fetched: {datetime.datetime.now()}")
275
+
276
+
277
def get_top(N = 50) -> pd.DataFrame:
    """Return the current top-N annotator leaderboard (see obtain_top_users)."""
    top_users = obtain_top_users(user_ids_annotations, N=N)
    return top_users
279
+
280
+
281
def main() -> None:
    """Wire up the dashboard: Argilla connection, background refresh, Gradio UI."""

    # Set the update interval
    update_interval = 300  # seconds: background re-fetch of datasets
    update_interval_charts = 30  # seconds: UI chart refresh cadence

    # Connect to the space with rg.init()
    rg.init(
        api_url=os.getenv("ARGILLA_API_URL"),
        api_key=os.getenv("ARGILLA_API_KEY"),
    )

    # Initial pull so the module-level globals exist before the UI renders.
    fetch_data()

    # Keep refreshing the data in the background while the app runs.
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        func=fetch_data, trigger="interval", seconds=update_interval, max_instances=1
    )
    scheduler.start()

    # To avoid the orange border for the Gradio elements that are in constant loading
    css = """
    .generating {
    border: none;
    }
    """

    # NOTE(review): the UI text below is Dutch while the Space title/README
    # say Spanish — confirm which locale this dashboard instance targets.
    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            """
            # 🇳🇱🇧🇪 Nederlands - Multilingual Prompt Evaluation Project

            Hugging Face en @argilla crowdsourcen het [Multilingual Prompt Evaluation Project](https://github.com/huggingface/data-is-better-together/tree/main/prompt_translation): een open meertalige benchmark voor de evaluatie van taalmodellen, en dus ook voor het Nederlands.

            ## 500 prompts vertalen
            En zoals altijd: daarvoor is data nodig! Vorige week hebben ze met de community al de beste 500 prompts geselecteerd die de benchmark gaan vormen. In het Engels, uiteraard.
            **Daarom is nu jouw hulp nodig**: als we samen alle 500 prompts vertalen kunnen we Nederlands toegevoegd krijgen aan het leaderboard.

            ## Meedoen
            Meedoen is simpel. Ga naar de [Annotatie-Space](https://dibt-dutch-prompt-translation-for-dutch.hf.space/), log in of maak een Hugging Face account, en je kunt meteen aan de slag.
            Alvast bedankt! Oh, je krijgt ook een steuntje in de rug: GPT4 heeft alvast een vertaalsuggestie voor je klaargezet.
            """
        )

        gr.Markdown(
            f"""
            ## 🚀 Voortgang
            Dit is wat de community tot nu toe heeft bereikt!
            """
        )
        # Progress row: submitted KPI, remaining KPI, and the total donut.
        # Each plot re-renders every `update_interval_charts` seconds.
        with gr.Row():

            kpi_submitted_plot = gr.Plot(label="Plot")
            demo.load(
                kpi_chart_submitted,
                inputs=[],
                outputs=[kpi_submitted_plot],
                every=update_interval_charts,
            )

            kpi_remaining_plot = gr.Plot(label="Plot")
            demo.load(
                kpi_chart_remaining,
                inputs=[],
                outputs=[kpi_remaining_plot],
                every=update_interval_charts,
            )

            donut_total_plot = gr.Plot(label="Plot")
            demo.load(
                donut_chart_total,
                inputs=[],
                outputs=[donut_total_plot],
                every=update_interval_charts,
            )

        gr.Markdown(
            """
            ## 👾 Scoreboard
            Het totaal aantal vertalers en de vertalers met de meeste bijdragen:
            """
        )

        # Scoreboard row: annotator-count KPI and the top-50 leaderboard table.
        with gr.Row():

            kpi_hall_plot = gr.Plot(label="Plot")
            demo.load(
                kpi_chart, inputs=[], outputs=[kpi_hall_plot], every=update_interval_charts
            )

            top_df_plot = gr.Dataframe(
                headers=[NAME, NUMBER_ANNOTATIONS],
                datatype=[
                    "markdown",  # usernames are rendered as HTML links
                    "number",
                ],
                row_count=50,
                col_count=(2, "fixed"),
                interactive=False,
                every=update_interval,
            )
            demo.load(get_top, None, [top_df_plot], every=update_interval_charts)

    # Launch the Gradio interface
    demo.launch()


if __name__ == "__main__":
    main()
dumpy.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import logging
import os

import argilla as rg
from huggingface_hub import HfApi

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# One-shot maintenance script: dump per-user annotation counts from the
# Argilla source dataset into stats.json and upload it to a Hub Space.
if __name__ == "__main__":
    logger.info("*** Initializing Argilla session ***")
    rg.init(
        api_url=os.getenv("ARGILLA_API_URL"),
        api_key=os.getenv("ARGILLA_API_KEY"),
        extra_headers={"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"},
    )

    logger.info("*** Fetching dataset from Argilla ***")
    dataset = rg.FeedbackDataset.from_argilla(
        os.getenv("SOURCE_DATASET"),
        workspace=os.getenv("SOURCE_WORKSPACE"),
    )
    logger.info("*** Filtering records by `response_status` ***")
    # Only records with at least one submitted response are counted.
    dataset = dataset.filter_by(response_status=["submitted"])  # type: ignore

    logger.info("*** Calculating users and annotation count ***")
    # Tally responses per user id across all records.
    output = {}
    for record in dataset.records:
        for response in record.responses:
            if response.user_id not in output:
                output[response.user_id] = 0
            output[response.user_id] += 1

    # Re-key from user ids to usernames (one rg.User.from_id lookup per user).
    for key in list(output.keys()):
        output[rg.User.from_id(key).username] = output.pop(key)

    logger.info("*** Users and annotation count successfully calculated! ***")

    logger.info("*** Dumping Python dict into `stats.json` ***")
    with open("stats.json", "w") as file:
        json.dump(output, file, indent=4)

    logger.info("*** Uploading `stats.json` to Hugging Face Hub ***")
    # NOTE(review): repo_id is hard-coded — confirm it targets the right
    # Space for this dashboard.
    api = HfApi(token=os.getenv("HF_TOKEN"))
    api.upload_file(
        path_or_fileobj="stats.json",
        path_in_repo="stats.json",
        repo_id="DIBT/prompt-collective-dashboard",
        repo_type="space",
    )
    logger.info("*** `stats.json` successfully uploaded to Hugging Face Hub! ***")
requirements.txt ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ altair==5.2.0
3
+ annotated-types==0.6.0
4
+ anyio==4.2.0
5
+ apscheduler==3.10.4
6
+ argilla==1.23.0
7
+ attrs==23.2.0
8
+ backoff==2.2.1
9
+ certifi==2024.2.2
10
+ charset-normalizer==3.3.2
11
+ click==8.1.7
12
+ colorama==0.4.6
13
+ contourpy==1.2.0
14
+ cycler==0.12.1
15
+ Deprecated==1.2.14
16
+ exceptiongroup==1.2.0
17
+ fastapi==0.109.2
18
+ ffmpy==0.3.1
19
+ filelock==3.13.1
20
+ fonttools==4.48.1
21
+ fsspec==2024.2.0
22
+ gradio==4.17.0
23
+ gradio_client==0.9.0
24
+ h11==0.14.0
25
+ httpcore==1.0.2
26
+ httpx==0.26.0
27
+ huggingface-hub==0.20.3
28
+ idna==3.6
29
+ importlib-resources==6.1.1
30
+ Jinja2==3.1.3
31
+ jsonschema==4.21.1
32
+ jsonschema-specifications==2023.12.1
33
+ kiwisolver==1.4.5
34
+ markdown-it-py==3.0.0
35
+ MarkupSafe==2.1.5
36
+ matplotlib==3.8.2
37
+ mdurl==0.1.2
38
+ monotonic==1.6
39
+ numpy==1.23.5
40
+ orjson==3.9.13
41
+ packaging==23.2
42
+ pandas==1.5.3
43
+ pillow==10.2.0
44
+ pydantic==2.6.1
45
+ pydantic_core==2.16.2
46
+ pydub==0.25.1
47
+ Pygments==2.17.2
48
+ pyparsing==3.1.1
49
+ python-dateutil==2.8.2
50
+ python-multipart==0.0.7
51
+ pytz==2024.1
52
+ PyYAML==6.0.1
53
+ referencing==0.33.0
54
+ requests==2.31.0
55
+ rich==13.7.0
56
+ rpds-py==0.17.1
57
+ ruff==0.2.1
58
+ semantic-version==2.10.0
59
+ shellingham==1.5.4
60
+ six==1.16.0
61
+ sniffio==1.3.0
62
+ starlette==0.36.3
63
+ tomlkit==0.12.0
64
+ toolz==0.12.1
65
+ tqdm==4.66.1
66
+ typer==0.9.0
67
+ typing_extensions==4.9.0
68
+ urllib3==2.2.0
69
+ uvicorn==0.27.0.post1
70
+ vega-datasets==0.9.0
71
+ websockets==11.0.3
72
+ wrapt==1.14.1