Spaces:
Runtime error
Runtime error
Commit
β’
239727c
0
Parent(s):
Duplicate from nlphuji/whoops-explorer
Browse files
Co-authored-by: Yonatan <yonatanbitton@users.noreply.huggingface.co>
- .gitattributes +34 -0
- .gitignore +171 -0
- README.md +13 -0
- app.py +85 -0
- app2.py +116 -0
- app_tabs.py +90 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
.idea
|
7 |
+
.idea/
|
8 |
+
*.idea
|
9 |
+
|
10 |
+
inspectionProfiles
|
11 |
+
inspectionProfiles/
|
12 |
+
|
13 |
+
debug_dataset.py
|
14 |
+
app2.py
|
15 |
+
app_tabs.py
|
16 |
+
|
17 |
+
# C extensions
|
18 |
+
*.so
|
19 |
+
|
20 |
+
# Distribution / packaging
|
21 |
+
.Python
|
22 |
+
build/
|
23 |
+
develop-eggs/
|
24 |
+
dist/
|
25 |
+
downloads/
|
26 |
+
eggs/
|
27 |
+
.eggs/
|
28 |
+
lib/
|
29 |
+
lib64/
|
30 |
+
parts/
|
31 |
+
sdist/
|
32 |
+
var/
|
33 |
+
wheels/
|
34 |
+
share/python-wheels/
|
35 |
+
*.egg-info/
|
36 |
+
.installed.cfg
|
37 |
+
*.egg
|
38 |
+
MANIFEST
|
39 |
+
|
40 |
+
# PyInstaller
|
41 |
+
# Usually these files are written by a python script from a template
|
42 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
43 |
+
*.manifest
|
44 |
+
*.spec
|
45 |
+
|
46 |
+
# Installer logs
|
47 |
+
pip-log.txt
|
48 |
+
pip-delete-this-directory.txt
|
49 |
+
|
50 |
+
# Unit test / coverage reports
|
51 |
+
htmlcov/
|
52 |
+
.tox/
|
53 |
+
.nox/
|
54 |
+
.coverage
|
55 |
+
.coverage.*
|
56 |
+
.cache
|
57 |
+
nosetests.xml
|
58 |
+
coverage.xml
|
59 |
+
*.cover
|
60 |
+
*.py,cover
|
61 |
+
.hypothesis/
|
62 |
+
.pytest_cache/
|
63 |
+
cover/
|
64 |
+
|
65 |
+
# Translations
|
66 |
+
*.mo
|
67 |
+
*.pot
|
68 |
+
|
69 |
+
# Django stuff:
|
70 |
+
*.log
|
71 |
+
local_settings.py
|
72 |
+
db.sqlite3
|
73 |
+
db.sqlite3-journal
|
74 |
+
|
75 |
+
# Flask stuff:
|
76 |
+
instance/
|
77 |
+
.webassets-cache
|
78 |
+
|
79 |
+
# Scrapy stuff:
|
80 |
+
.scrapy
|
81 |
+
|
82 |
+
# Sphinx documentation
|
83 |
+
docs/_build/
|
84 |
+
|
85 |
+
# PyBuilder
|
86 |
+
.pybuilder/
|
87 |
+
target/
|
88 |
+
|
89 |
+
# Jupyter Notebook
|
90 |
+
.ipynb_checkpoints
|
91 |
+
|
92 |
+
# IPython
|
93 |
+
profile_default/
|
94 |
+
ipython_config.py
|
95 |
+
|
96 |
+
# pyenv
|
97 |
+
# For a library or package, you might want to ignore these files since the code is
|
98 |
+
# intended to run in multiple environments; otherwise, check them in:
|
99 |
+
# .python-version
|
100 |
+
|
101 |
+
# pipenv
|
102 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
103 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
104 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
105 |
+
# install all needed dependencies.
|
106 |
+
#Pipfile.lock
|
107 |
+
|
108 |
+
# poetry
|
109 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
110 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
111 |
+
# commonly ignored for libraries.
|
112 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
113 |
+
#poetry.lock
|
114 |
+
|
115 |
+
# pdm
|
116 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
117 |
+
#pdm.lock
|
118 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
119 |
+
# in version control.
|
120 |
+
# https://pdm.fming.dev/#use-with-ide
|
121 |
+
.pdm.toml
|
122 |
+
|
123 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
124 |
+
__pypackages__/
|
125 |
+
|
126 |
+
# Celery stuff
|
127 |
+
celerybeat-schedule
|
128 |
+
celerybeat.pid
|
129 |
+
|
130 |
+
# SageMath parsed files
|
131 |
+
*.sage.py
|
132 |
+
|
133 |
+
# Environments
|
134 |
+
.env
|
135 |
+
.venv
|
136 |
+
env/
|
137 |
+
venv/
|
138 |
+
ENV/
|
139 |
+
env.bak/
|
140 |
+
venv.bak/
|
141 |
+
|
142 |
+
# Spyder project settings
|
143 |
+
.spyderproject
|
144 |
+
.spyproject
|
145 |
+
|
146 |
+
# Rope project settings
|
147 |
+
.ropeproject
|
148 |
+
|
149 |
+
# mkdocs documentation
|
150 |
+
/site
|
151 |
+
|
152 |
+
# mypy
|
153 |
+
.mypy_cache/
|
154 |
+
.dmypy.json
|
155 |
+
dmypy.json
|
156 |
+
|
157 |
+
# Pyre type checker
|
158 |
+
.pyre/
|
159 |
+
|
160 |
+
# pytype static type analyzer
|
161 |
+
.pytype/
|
162 |
+
|
163 |
+
# Cython debug symbols
|
164 |
+
cython_debug/
|
165 |
+
|
166 |
+
# PyCharm
|
167 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
168 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
169 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
170 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
171 |
+
#.idea/
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Wmtis Explorer
|
3 |
+
emoji: π₯
|
4 |
+
colorFrom: purple
|
5 |
+
colorTo: blue
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.16.2
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
duplicated_from: nlphuji/whoops-explorer
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import random

import gradio as gr
from datasets import load_dataset

# Load the test split of the WHOOPS! dataset from the Hugging Face Hub
# (downloads on first run, then served from the local cache).
whoops = load_dataset("nlphuji/whoops")['test']
print("Loaded WMTIS, first example:")  # was an f-string with no placeholders
print(whoops[0])
dataset_size = len(whoops)
print(f"all dataset size: {dataset_size}")

# Dataset feature (column) names.
IMAGE = 'image'
IMAGE_DESIGNER = 'image_designer'
DESIGNER_EXPLANATION = 'designer_explanation'
CROWD_CAPTIONS = 'crowd_captions'
CROWD_EXPLANATIONS = 'crowd_explanations'
CROWD_UNDERSPECIFIED_CAPTIONS = 'crowd_underspecified_captions'
SELECTED_CAPTION = 'selected_caption'
COMMONSENSE_CATEGORY = 'commonsense_category'
QA = 'question_answering_pairs'
IMAGE_ID = 'image_id'

# Columns shown directly (left) vs. inside the collapsed accordion (right).
left_side_columns = [IMAGE]
right_side_columns = [x for x in whoops.features.keys() if x not in left_side_columns]
# List-valued columns rendered as numbered lines.
enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
# Decorative emoji appended to each field label in the UI.
emoji_to_label = {IMAGE_DESIGNER: 'π¨, π§βπ¨, π»', DESIGNER_EXPLANATION: 'π‘, π€, π§βπ¨',
                  CROWD_CAPTIONS: 'π₯, π¬, π', CROWD_EXPLANATIONS: 'π₯, π‘, π€', CROWD_UNDERSPECIFIED_CAPTIONS: 'π₯, π¬, π',
                  QA: 'β, π€, π‘', IMAGE_ID: 'π, π, πΎ', COMMONSENSE_CATEGORY: 'π€, π, π‘', SELECTED_CAPTION: 'π, π, π¬'}
# Display size for every image, and cap on textbox height.
target_size = (1024, 1024)
MAX_LINES = 30
def get_instance_values(example):
    """Return the display value for every column of `example`, left columns first."""

    def _display_value(col):
        # List-valued columns become one numbered line per item.
        if col in enumerate_cols:
            return list_to_string(example[col])
        # QA pairs are flattened into "Q: ... A: ..." lines first.
        if col == QA:
            return list_to_string([f"Q: {pair[0]} A: {pair[1]}" for pair in example[col]])
        # Scalar columns pass through unchanged.
        return example[col]

    return [_display_value(col) for col in left_side_columns + right_side_columns]
def list_to_string(lst):
    """Format `lst` as numbered lines: "1. <item>" joined by newlines."""
    return '\n'.join(f"{pos}. {item}" for pos, item in enumerate(lst, start=1))
def plot_image(index):
    """Render one sampled example: the image on top, other fields in an accordion."""
    example = whoops_sample[index]
    instance_values = get_instance_values(example)
    n_left = len(left_side_columns)
    left_values = instance_values[:n_left]
    right_values = instance_values[n_left:]
    assert len(left_side_columns) == len(left_values)  # excluding the image & designer
    for key, value in zip(left_side_columns, left_values):
        if key == IMAGE:
            resized = example["image"].resize(target_size)
            gr.Image(value=resized, label=example['commonsense_category'])
        else:
            field_label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{field_label} {emoji_to_label[key]}")
    with gr.Accordion("Click for details", open=False):
        assert len(right_side_columns) == len(right_values)  # excluding the image & designer
        for key, value in zip(right_side_columns, right_values):
            field_label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{field_label} {emoji_to_label[key]}", max_lines=MAX_LINES)
# Grid geometry: 4 columns x 20 rows of randomly sampled examples.
columns_number = 4
rows_number = 20
# Random sample of columns_number * rows_number examples to display.
whoops_sample = whoops.shuffle().select(range(columns_number * rows_number))
index = 0

with gr.Blocks() as demo:
    gr.Markdown("# WHOOPS! Dataset Explorer")  # was an f-string with no placeholders
    for _row in range(rows_number):
        with gr.Row():
            for _col in range(columns_number):
                with gr.Column():
                    plot_image(index)
                    index += 1
demo.launch()
app2.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# NOTE(review): app2.py duplicates app.py almost verbatim (and is listed in
# this repo's .gitignore); the setup and grid code below is identical to app.py.
import random

import gradio as gr
from datasets import load_dataset

# Test split of the WHOOPS! dataset from the Hugging Face Hub.
whoops = load_dataset("nlphuji/whoops")['test']
print(f"Loaded WMTIS, first example:")
print(whoops[0])
dataset_size = len(whoops)
print(f"all dataset size: {dataset_size}")

# Dataset feature (column) names.
IMAGE = 'image'
IMAGE_DESIGNER = 'image_designer'
DESIGNER_EXPLANATION = 'designer_explanation'
CROWD_CAPTIONS = 'crowd_captions'
CROWD_EXPLANATIONS = 'crowd_explanations'
CROWD_UNDERSPECIFIED_CAPTIONS = 'crowd_underspecified_captions'
SELECTED_CAPTION = 'selected_caption'
COMMONSENSE_CATEGORY = 'commonsense_category'
QA = 'question_answering_pairs'
IMAGE_ID = 'image_id'
# Columns shown directly (left) vs. inside the collapsed accordion (right).
left_side_columns = [IMAGE]
# right_side_columns = [x for x in whoops.features.keys() if x not in left_side_columns and x not in [QA]]
right_side_columns = [x for x in whoops.features.keys() if x not in left_side_columns]
# List-valued columns rendered as numbered lines.
enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
# Decorative emoji appended to each field label in the UI.
emoji_to_label = {IMAGE_DESIGNER: 'π¨, π§βπ¨, π»', DESIGNER_EXPLANATION: 'π‘, π€, π§βπ¨',
                  CROWD_CAPTIONS: 'π₯, π¬, π', CROWD_EXPLANATIONS: 'π₯, π‘, π€', CROWD_UNDERSPECIFIED_CAPTIONS: 'π₯, π¬, π',
                  QA: 'β, π€, π‘', IMAGE_ID: 'π, π, πΎ', COMMONSENSE_CATEGORY: 'π€, π, π‘', SELECTED_CAPTION: 'π, π, π¬'}
# Display size for every image, and cap on textbox height.
target_size = (1024, 1024)
MAX_LINES = 30

def get_instance_values(example):
    # Build the list of display values for one example, ordered
    # left_side_columns then right_side_columns.
    values = []
    for k in left_side_columns + right_side_columns:
        if k in enumerate_cols:
            # List-valued column -> one numbered line per item.
            value = list_to_string(example[k])
        elif k == QA:
            # List of (question, answer) pairs -> numbered "Q: ... A: ..." lines.
            qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
            value = list_to_string(qa_list)
        else:
            value = example[k]
        values.append(value)
    return values


def list_to_string(lst):
    # "1. <item>" lines joined with newlines.
    return '\n'.join(['{}. {}'.format(i + 1, item) for i, item in enumerate(lst)])

def plot_image(index):
    # Render one sampled example: the image first, remaining columns inside
    # a collapsed accordion.
    example = whoops_sample[index]
    instance_values = get_instance_values(example)
    assert len(left_side_columns) == len(
        instance_values[:len(left_side_columns)])  # excluding the image & designer
    for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
        if key == IMAGE:
            img = whoops_sample[index]["image"]
            img_resized = img.resize(target_size)
            gr.Image(value=img_resized, label=whoops_sample[index]['commonsense_category'])
        else:
            label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
    with gr.Accordion("Click for details", open=False):
        assert len(right_side_columns) == len(
            instance_values[len(left_side_columns):])  # excluding the image & designer
        for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
            label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}", max_lines=MAX_LINES)


# Grid geometry: 4 columns x 20 rows of randomly sampled examples.
columns_number = 4
# rows_number = int(dataset_size / columns_number)
# rows_number = 25
rows_number = 20
whoops_sample = whoops.shuffle().select(range(0, columns_number * rows_number))
index = 0
# Page size for the dataframe-pagination handlers below.
LINES_NUMBER = 20
def display_df():
    """Return the first LINES_NUMBER rows of the global dataframe `df`."""
    # NOTE(review): `df` is never assigned anywhere in this file, so calling
    # this handler raises NameError at runtime — likely related to the
    # Space's "Runtime error" status. Confirm where `df` was meant to come from.
    df_images = df.head(LINES_NUMBER)
    return df_images
def display_next(dataframe, end):
    """Advance the pagination window by LINES_NUMBER rows, wrapping and
    reshuffling once the end of the global dataframe is reached.

    `dataframe` is the currently displayed page; `end` is the previous
    window end (falsy on the first call, hence the `or` fallback).
    Returns the next page and its new end offset.
    """
    start = int(end or len(dataframe))
    end = int(start) + int(LINES_NUMBER)
    # NOTE(review): `df` is never defined in this file -> NameError at runtime.
    global df
    if end >= len(df) - 1:
        # Wrap around: restart from the top on a freshly shuffled dataframe.
        start = 0
        end = LINES_NUMBER
        df = df.sample(frac=1)
        print(f"Shuffle")
    # print(f"end: {end},start: {start}")
    df_images = df.iloc[start:end]
    assert len(df_images) == LINES_NUMBER
    return df_images, end
with gr.Blocks() as demo:
    # Static image grid plus dataframe-pagination controls.
    gr.Markdown(f"# WHOOPS! Dataset Explorer")

    with gr.Row():
        # Hidden counter holding the current pagination offset.
        num_end = gr.Number(visible=False)
        b1 = gr.Button("Get Initial dataframe")
        b2 = gr.Button("Next Rows")

    # LINES_NUMBER / columns_number rows of columns_number images each.
    for row_num in range(0, int(LINES_NUMBER / columns_number)):
        with gr.Row():
            for col_num in range(0, columns_number):
                with gr.Column():
                    plot_image(index)
                    index += 1

    # NOTE(review): `out_dataframe` is never created anywhere in this file,
    # so the two wiring calls below raise NameError on startup — the most
    # likely cause of this Space's "Runtime error". A gr.DataFrame component
    # was presumably meant to be defined above.
    b1.click(fn=display_df, outputs=out_dataframe, api_name="initial_dataframe")
    b2.click(fn=display_next, inputs=[out_dataframe, num_end], outputs=[out_dataframe, num_end],
             api_name="next_rows")

demo.launch()
app_tabs.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import random

import gradio as gr
from datasets import load_dataset

# Load the test split of the WHOOPS! dataset from the Hugging Face Hub.
wmtis = load_dataset("nlphuji/whoops")['test']
print("Loaded WMTIS, first example:")  # was an f-string with no placeholders
print(wmtis[0])
dataset_size = len(wmtis)
print(f"dataset_size: {dataset_size}")

# Dataset feature (column) names.
IMAGE = 'image'
IMAGE_DESIGNER = 'image_designer'
DESIGNER_EXPLANATION = 'designer_explanation'
CROWD_CAPTIONS = 'crowd_captions'
CROWD_EXPLANATIONS = 'crowd_explanations'
CROWD_UNDERSPECIFIED_CAPTIONS = 'crowd_underspecified_captions'
SELECTED_CAPTION = 'selected_caption'
COMMONSENSE_CATEGORY = 'commonsense_category'
QA = 'question_answering_pairs'
IMAGE_ID = 'image_id'

# Columns shown directly (left) vs. inside the collapsed accordion (right);
# unlike app.py, this variant excludes the QA column from the right side.
left_side_columns = [IMAGE]
right_side_columns = [x for x in wmtis.features.keys() if x not in left_side_columns and x not in [QA]]
# List-valued columns rendered as numbered lines.
enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
# Decorative emoji appended to each field label in the UI.
emoji_to_label = {IMAGE_DESIGNER: 'π¨, π§βπ¨, π»', DESIGNER_EXPLANATION: 'π‘, π€, π§βπ¨',
                  CROWD_CAPTIONS: 'π₯, π¬, π', CROWD_EXPLANATIONS: 'π₯, π‘, π€', CROWD_UNDERSPECIFIED_CAPTIONS: 'π₯, π¬, π',
                  QA: 'β, π€, π‘', IMAGE_ID: 'π, π, πΎ', COMMONSENSE_CATEGORY: 'π€, π, π‘', SELECTED_CAPTION: 'π, π, π¬'}
# Display size for every image.
target_size = (1024, 1024)
def func(index):
    """Fetch example `index` from the dataset and return its display values."""
    return get_instance_values(wmtis[index])
def get_instance_values(example):
    """Return the display value for every column of `example`, left columns first."""

    def _display_value(col):
        # List-valued columns become one numbered line per item.
        if col in enumerate_cols:
            return list_to_string(example[col])
        # QA pairs are flattened into "Q: ... A: ..." lines first.
        if col == QA:
            return list_to_string([f"Q: {pair[0]} A: {pair[1]}" for pair in example[col]])
        # Scalar columns pass through unchanged.
        return example[col]

    return [_display_value(col) for col in left_side_columns + right_side_columns]
def list_to_string(lst):
    """Format `lst` as numbered lines: "1. <item>" joined by newlines."""
    return '\n'.join(f"{pos}. {item}" for pos, item in enumerate(lst, start=1))
def create_image_accordion_block(index):
    """Show the image for `index`, with all textual fields in an accordion."""
    example = wmtis[index]
    instance_values = get_instance_values(example)
    n_left = len(left_side_columns)
    left_values = instance_values[:n_left]
    right_values = instance_values[n_left:]
    assert len(left_side_columns) == len(left_values)  # excluding the image & designer
    for key, value in zip(left_side_columns, left_values):
        if key == IMAGE:
            resized = example["image"].resize(target_size)
            gr.Image(value=resized, label=f"Image {emoji_to_label[key]}")
        else:
            field_label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{field_label} {emoji_to_label[key]}")
    with gr.Accordion("Open for More!", open=False):
        assert len(right_side_columns) == len(right_values)  # excluding the image & designer
        for key, value in zip(right_side_columns, right_values):
            field_label = key.capitalize().replace("_", " ")
            gr.Textbox(value=value, label=f"{field_label} {emoji_to_label[key]}")
# Grid geometry: each tab shows rows_number x columns_number random examples.
columns_number = 2
rows_number = 2
tabs_number = 27

with gr.Blocks() as demo:
    gr.Markdown("# Whoops! images by category")  # was an f-string with no placeholders
    for tab_num in range(tabs_number):  # fixed typo: was `tub_num`
        print(f"create tab:{tab_num}")
        with gr.Tab(f"Tab {tab_num}"):
            for _row in range(rows_number):
                with gr.Row():
                    for _col in range(columns_number):
                        with gr.Column():
                            # Pick a uniformly random example for each cell.
                            index = random.randrange(dataset_size)
                            create_image_accordion_block(index)
demo.launch()
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
datasets
|
2 |
+
gradio==3.16.2
|