yonatanbitton committed
Commit d7c4521 • 1 Parent(s): f573c7e

first commit

.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,16 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="3">
+             <item index="0" class="java.lang.String" itemvalue="matplotlib" />
+             <item index="1" class="java.lang.String" itemvalue="CLIP" />
+             <item index="2" class="java.lang.String" itemvalue="transformers" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (venv) (3)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/whoops-explorer-full.iml" filepath="$PROJECT_DIR$/.idea/whoops-explorer-full.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="" vcs="Git" />
+   </component>
+ </project>
.idea/whoops-explorer-full.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="jdk" jdkName="Python 3.9 (venv) (3)" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Whoops Explorer Full
- emoji: 📊
- colorFrom: green
- colorTo: yellow
+ title: WHOOPS! Explorer
+ emoji: 🔥
+ colorFrom: purple
+ colorTo: blue
  sdk: gradio
- sdk_version: 3.23.0
+ sdk_version: 3.21.0
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,111 @@
+ import math
+
+ from datasets import load_dataset
+ import gradio as gr
+
+ whoops = load_dataset("nlphuji/whoops")['test']
+ # print(f"Loaded WHOOPS!, first example:")
+ # print(whoops[0])
+ dataset_size = len(whoops)
+
+ # Column names in the WHOOPS! dataset.
+ IMAGE = 'image'
+ IMAGE_DESIGNER = 'image_designer'
+ DESIGNER_EXPLANATION = 'designer_explanation'
+ CROWD_CAPTIONS = 'crowd_captions'
+ CROWD_EXPLANATIONS = 'crowd_explanations'
+ CROWD_UNDERSPECIFIED_CAPTIONS = 'crowd_underspecified_captions'
+ QA = 'question_answering_pairs'
+ IMAGE_ID = 'image_id'
+ SELECTED_CAPTION = 'selected_caption'
+ COMMONSENSE_CATEGORY = 'commonsense_category'
+ left_side_columns = [IMAGE]
+ right_side_columns = [x for x in whoops.features.keys() if x not in left_side_columns]
+ # Columns rendered as numbered lists.
+ enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
+ emoji_to_label = {IMAGE_DESIGNER: '🎨, 🧑‍🎨, 💻', DESIGNER_EXPLANATION: '💡, 🤔, 🧑‍🎨',
+                   CROWD_CAPTIONS: '👥, 💬, 📝', CROWD_EXPLANATIONS: '👥, 💡, 🤔', CROWD_UNDERSPECIFIED_CAPTIONS: '👥, 💬, 👎',
+                   QA: '❓, 🤔, 💡', IMAGE_ID: '🔍, 📄, 💾', COMMONSENSE_CATEGORY: '🤔, 📚, 💡', SELECTED_CAPTION: '📝, 👌, 💬'}
+ # batch_size = 16
+ batch_size = 8
+ target_size = (1024, 1024)
+
+
+ def func(index):
+     # Return one value per output component for page `index`.
+     start_index = index * batch_size
+     end_index = start_index + batch_size
+     all_examples = [whoops[i] for i in range(start_index, end_index)]
+     values_lst = []
+     for example in all_examples:
+         values_lst += get_instance_values(example)
+     return values_lst
+
+
+ def get_instance_values(example):
+     values = []
+     for k in left_side_columns + right_side_columns:
+         if k == IMAGE:
+             value = example["image"].resize(target_size)
+         elif k in enumerate_cols:
+             value = list_to_string(example[k])
+         elif k == QA:
+             qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
+             value = list_to_string(qa_list)
+         else:
+             value = example[k]
+         values.append(value)
+     return values
+
+
+ def list_to_string(lst):
+     return '\n'.join('{}. {}'.format(i + 1, item) for i, item in enumerate(lst))
+
+
+ demo = gr.Blocks()
+
+
+ def get_col():
+     # Builds the components for the module-level `example` set in the page loop below.
+     instance_values = get_instance_values(example)
+     with gr.Column():
+         inputs_left = []
+         assert len(left_side_columns) == len(instance_values[:len(left_side_columns)])  # excluding the image & designer
+         for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
+             if key == IMAGE:
+                 img_resized = example["image"].resize(target_size)
+                 input_k = gr.Image(value=img_resized, label=example['commonsense_category'])
+             else:
+                 label = key.capitalize().replace("_", " ")
+                 input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
+             inputs_left.append(input_k)
+         with gr.Accordion("Click for details", open=False):
+             text_inputs_right = []
+             assert len(right_side_columns) == len(instance_values[len(left_side_columns):])  # excluding the image & designer
+             for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
+                 label = key.capitalize().replace("_", " ")
+                 text_input_k = gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
+                 text_inputs_right.append(text_input_k)
+     return inputs_left, text_inputs_right
+
+
+ with demo:
+     gr.Markdown("# Slide to iterate WHOOPS!")
+
+     with gr.Column():
+         # Full pages only, so every page has exactly batch_size examples.
+         num_batches = dataset_size // batch_size
+         slider = gr.Slider(minimum=0, maximum=num_batches - 1, step=1, label=f'Page (out of {num_batches})')
+     with gr.Row():
+         # Render page 0 up front; slider.change swaps in the values for other pages.
+         start_index = 0
+         end_index = start_index + batch_size
+         all_examples = [whoops[i] for i in range(start_index, end_index)]
+         all_inputs_left_right = []
+         for example in all_examples:
+             inputs_left, text_inputs_right = get_col()
+             all_inputs_left_right += inputs_left + text_inputs_right
+
+     slider.change(func, inputs=[slider], outputs=all_inputs_left_right)
+
+ demo.launch()
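
Side note on the pagination contract in app.py: `slider.change(func, ...)` requires `func` to return exactly one value per component in `all_inputs_left_right`, in the order the components were created, so every page must be a fixed-size slice of the dataset. A minimal sketch of that index math, separate from the commit (`page_slice` is an illustrative helper, not code from this repo):

    batch_size = 8

    def page_slice(page_index, dataset_size):
        # Page k covers dataset indices [k * batch_size, (k + 1) * batch_size),
        # clamped so a final partial page cannot run past the dataset.
        start = page_index * batch_size
        end = min(start + batch_size, dataset_size)
        return range(start, end)

    # Page 2 of a 30-example dataset covers indices 16..23.
    assert list(page_slice(2, 30)) == list(range(16, 24))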
app_grid.py ADDED
@@ -0,0 +1,88 @@
+ import random
+
+ import gradio as gr
+ from datasets import load_dataset
+
+ whoops = load_dataset("nlphuji/whoops")['test']
+ whoops = whoops.shuffle()
+
+ print("Loaded WHOOPS!, first example:")
+ print(whoops[0])
+ dataset_size = len(whoops)
+ print(f"dataset_size: {dataset_size}")
+
+ # Column names in the WHOOPS! dataset.
+ IMAGE = 'image'
+ IMAGE_DESIGNER = 'image_designer'
+ DESIGNER_EXPLANATION = 'designer_explanation'
+ CROWD_CAPTIONS = 'crowd_captions'
+ CROWD_EXPLANATIONS = 'crowd_explanations'
+ CROWD_UNDERSPECIFIED_CAPTIONS = 'crowd_underspecified_captions'
+ SELECTED_CAPTION = 'selected_caption'
+ COMMONSENSE_CATEGORY = 'commonsense_category'
+ QA = 'question_answering_pairs'
+ IMAGE_ID = 'image_id'
+ left_side_columns = [IMAGE]
+ right_side_columns = [x for x in whoops.features.keys() if x not in left_side_columns and x not in [QA]]
+ enumerate_cols = [CROWD_CAPTIONS, CROWD_EXPLANATIONS, CROWD_UNDERSPECIFIED_CAPTIONS]
+ emoji_to_label = {IMAGE_DESIGNER: '🎨, 🧑‍🎨, 💻', DESIGNER_EXPLANATION: '💡, 🤔, 🧑‍🎨',
+                   CROWD_CAPTIONS: '👥, 💬, 📝', CROWD_EXPLANATIONS: '👥, 💡, 🤔', CROWD_UNDERSPECIFIED_CAPTIONS: '👥, 💬, 👎',
+                   QA: '❓, 🤔, 💡', IMAGE_ID: '🔍, 📄, 💾', COMMONSENSE_CATEGORY: '🤔, 📚, 💡', SELECTED_CAPTION: '📝, 👌, 💬'}
+ target_size = (1024, 1024)
+
+ columns_number = 3
+ rows_number = 10
+
+
+ def func(index):
+     example = whoops[index]
+     values = get_instance_values(example)
+     return values
+
+
+ def get_instance_values(example):
+     values = []
+     for k in left_side_columns + right_side_columns:
+         if k in enumerate_cols:
+             value = list_to_string(example[k])
+         elif k == QA:
+             qa_list = [f"Q: {x[0]} A: {x[1]}" for x in example[k]]
+             value = list_to_string(qa_list)
+         else:
+             value = example[k]
+         values.append(value)
+     return values
+
+
+ def list_to_string(lst):
+     return '\n'.join('{}. {}'.format(i + 1, item) for i, item in enumerate(lst))
+
+
+ def create_image_accordion_block(index):
+     example = whoops[index]
+     instance_values = get_instance_values(example)
+     assert len(left_side_columns) == len(instance_values[:len(left_side_columns)])  # excluding the image & designer
+     for key, value in zip(left_side_columns, instance_values[:len(left_side_columns)]):
+         if key == IMAGE:
+             img_resized = example["image"].resize(target_size)
+             gr.Image(value=img_resized, label=example['commonsense_category'])
+         else:
+             label = key.capitalize().replace("_", " ")
+             gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
+     with gr.Accordion("Click for details", open=False):
+         assert len(right_side_columns) == len(instance_values[len(left_side_columns):])  # excluding the image & designer
+         for key, value in zip(right_side_columns, instance_values[len(left_side_columns):]):
+             label = key.capitalize().replace("_", " ")
+             gr.Textbox(value=value, label=f"{label} {emoji_to_label[key]}")
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Slide to iterate WHOOPS!")
+     # Lay out a rows_number x columns_number grid of randomly chosen examples.
+     for row_num in range(rows_number):
+         with gr.Row():
+             for col_num in range(columns_number):
+                 with gr.Column():
+                     index = random.randrange(dataset_size)
+                     create_image_accordion_block(index)
+ demo.launch()
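
Side note on the sampling in app_grid.py: each of the `rows_number * columns_number` (here 10 × 3 = 30) grid cells is drawn independently, so the same image can appear more than once in one grid. If distinct images were wanted, `random.sample` is the usual variant; a sketch under that assumption (with a placeholder dataset size), not what the commit does:

    import random

    rows_number, columns_number = 10, 3
    dataset_size = 500  # hypothetical; the real value comes from len(whoops)
    # One draw of rows * cols distinct indices (requires dataset_size >= 30).
    indices = random.sample(range(dataset_size), rows_number * columns_number)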
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ datasets
+ gradio==3.21.0
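
For a quick sanity check of the dataset both apps consume, a minimal snippet outside the UI, assuming the field names match the column constants used in app.py (this snippet is not part of the commit):

    from datasets import load_dataset

    whoops = load_dataset("nlphuji/whoops")["test"]
    example = whoops[0]
    print(len(whoops))                             # dataset size
    print(example["selected_caption"])             # a field displayed by both apps
    print(example["question_answering_pairs"][0])  # one [question, answer] pair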