jinggogogo commited on
Commit
d11e010
·
1 Parent(s): 7fcb379
Files changed (6) hide show
  1. .gitignore +6 -0
  2. README.md +4 -4
  3. app.py +674 -0
  4. config.yaml +8 -0
  5. detail.md +17 -0
  6. start.md +4 -0
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ survey-videos
2
+ method_info.json
3
+ cmd.sh
4
+ eval
5
+ sync_dataset_image.py
6
+ survey-images
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Image Quality Study
3
- emoji: 🐠
4
- colorFrom: red
5
- colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 5.5.0
8
  app_file: app.py
 
1
  ---
2
+ title: Editing Quality Study Mp4
3
+ emoji: 💻
4
+ colorFrom: pink
5
+ colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.5.0
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,674 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from argparse import ArgumentParser
2
+ import os, threading, time, yaml
3
+ from dataclasses import dataclass
4
+ from datetime import datetime, timezone
5
+ from pprint import pprint
6
+ import numpy as np
7
+ import gradio as gr
8
+ from huggingface_hub import HfApi
9
+
10
+
11
@dataclass
class VideoCfg:
    """Constructor keyword arguments for a Gradio image component.

    Despite the name (kept from an earlier video/mp4 version of this
    survey), instances are unpacked as ``gr.Image(**cfg.__dict__)`` in
    ``SurveyEngine._setup_video``, so the field names must match the
    Gradio component's constructor parameters exactly.
    """

    value: str  # path of the media file to display
    label: str  # caption shown with the component
    visible: bool  # initial visibility of the component
    show_share_button: bool = False  # hide Gradio's share button
    show_download_button: bool = False  # hide the download button
    show_label: bool = True  # render the label text
    width: int | str | None = None  # optional fixed display width
20
+
21
+
22
def time_now():
    """Return the current UTC time as a filename-safe timestamp string.

    Format: ``YYYY-MM-DD_HH-MM-SS``. The original used ``%s`` (a
    non-portable glibc extension for epoch seconds, invalid on Windows)
    where the zero-padded second directive ``%S`` was clearly intended.
    """
    return datetime.now(timezone.utc).strftime("%Y-%m-%d_%H-%M-%S")
24
+
25
+
26
+ class SurveyEngine:
27
    def __init__(self, args):
        """Set up data download, response/feedback files, and background sync.

        Args:
            args: parsed CLI args merged with config.yaml entries; must
                provide ``task``, ``method_filter``, ``ask_user_id``,
                ``id_platform``, ``no_sync`` and the sync periods.
        """
        self.args = args
        self.api = HfApi()

        # check user id and platform
        self._generate_platform_completion_message()

        # pull videos from hf dataset to hf space
        self.repo_id = "jinggogogo/survey-images"
        self.local_dir = "./survey-images"
        self.api.snapshot_download(self.repo_id, local_dir=self.local_dir, repo_type="dataset")

        # create a file to record the user study
        # NOTE(review): assumes args.method_filter has at least two entries — confirm.
        response_file_name = f"{self.args.task}_{self.args.method_filter[0]}_{self.args.method_filter[1]}_{time_now()}.csv"
        self.response_file_path_local = f"{self.local_dir}/response/{response_file_name}"
        self.response_file_path_remote = f"response/{response_file_name}"
        os.makedirs(f"{self.local_dir}/response", exist_ok=True)
        if self.args.ask_user_id:
            csv_header = "timestamp,path1,path2,selection,user_id,id_platform\n"
        else:
            csv_header = "timestamp,path1,path2,selection\n"
        self._update_local_file(csv_header, self.response_file_path_local)

        # create a file to record optional feedback
        feedback_file_name = f"{time_now()}.txt"
        self.feedback_file_path_local = f"{self.local_dir}/optional_feedback/{feedback_file_name}"
        self.feedback_file_path_remote = f"optional_feedback/{feedback_file_name}"
        os.makedirs(f"{self.local_dir}/optional_feedback", exist_ok=True)
        self._update_local_file("", self.feedback_file_path_local)

        # video_paths has shape (N_prompts, N_methods)
        self.video_paths, self.N_prompts, self.N_methods = self._get_all_video_paths()

        self.theme = gr.themes.Base(
            text_size=gr.themes.sizes.text_lg,
            spacing_size=gr.themes.sizes.spacing_sm,
            radius_size=gr.themes.sizes.radius_md,
        )

        # background upload/squash threads unless disabled for local debugging
        if not args.no_sync:
            self._start_periodic_sync()
67
+
68
+ def _generate_platform_completion_message(self):
69
+ if self.args.ask_user_id:
70
+ # get link from env
71
+ platform_completion_link = os.getenv("PLATFORM_COMPLETION_LINK")
72
+ if platform_completion_link is None:
73
+ raise ValueError("Please provide the platform completion link.")
74
+
75
+ # generate completion message
76
+ self.platform_completion_message = f"## Your {self.args.id_platform} completion link is [here]({platform_completion_link})."
77
+ else:
78
+ self.platform_completion_message = ""
79
+
80
+ def _start_periodic_sync(self):
81
+ def _upload_periodically(path_local, path_remote):
82
+ while True:
83
+ print(time_now())
84
+ print(f"Uploading {path_local}.")
85
+ try:
86
+ self._update_remote_file(path_local, path_remote)
87
+ except Exception as e:
88
+ print(e)
89
+ time.sleep(args.period_upload)
90
+
91
+ def _squash_commits_periodically():
92
+ while True:
93
+ print(time_now())
94
+ print("Squashing commits.")
95
+ try:
96
+ self.api.super_squash_history("jinggogogo/survey-images", repo_type="dataset")
97
+ except Exception as e:
98
+ print(e)
99
+ time.sleep(args.period_squash)
100
+
101
+ thread_upload_response = threading.Thread(
102
+ target=_upload_periodically,
103
+ args=(self.response_file_path_local, self.response_file_path_remote),
104
+ )
105
+ thread_upload_response.daemon = True
106
+ thread_upload_response.start()
107
+
108
+ thread_upload_feedback = threading.Thread(
109
+ target=_upload_periodically,
110
+ args=(self.feedback_file_path_local, self.feedback_file_path_remote),
111
+ )
112
+ thread_upload_feedback.daemon = True
113
+ thread_upload_feedback.start()
114
+
115
+ thread_squash_commits = threading.Thread(target=_squash_commits_periodically)
116
+ thread_squash_commits.daemon = True
117
+ thread_squash_commits.start()
118
+
119
    def _get_all_video_paths(self):
        """Scan the downloaded dataset and build the (prompt, method) path grid.

        Returns:
            video_paths: np.ndarray of file paths, shape (N_prompts, N_methods).
            N_prompts: number of image-set names found for the first method.
            N_methods: number of methods after filtering.

        Assumes every method directory contains the same file names as
        the first one — TODO confirm against the dataset layout.
        """
        video_dir = f"{self.local_dir}/images"

        method_list = sorted(os.listdir(video_dir))

        # filter methods
        if len(self.args.method_filter) > 0:
            method_filter = np.array(self.args.method_filter)
            # np.intersect1d also sorts, keeping method order deterministic
            method_list = np.intersect1d(method_list, method_filter)

        video_name_list = sorted(os.listdir(f"{video_dir}/{method_list[0]}"))

        N_prompts = len(video_name_list)
        N_methods = len(method_list)

        video_paths = []  # (N_prompts, N_methods)
        for video_name in video_name_list:
            paths = [os.path.join(video_dir, method, video_name) for method in method_list]
            video_paths.append(paths)
        video_paths = np.array(video_paths)
        return video_paths, N_prompts, N_methods
140
+
141
    def _sample_video_pair(self, videos_left):
        """
        Draw two prompt rows at random and shuffle each into an A/B pair.

        videos_left: (N_prompts, N_methods)

        Precondition: len(videos_left) >= 2 — ``np.random.choice`` with
        ``replace=False`` raises otherwise, so callers must check first.

        Returns:
            video_pair1, video_pair2: the two shuffled method-path rows.
            radio_select_to_path_lut: maps each radio-button label to the
                file path it stands for (or the literal "Similar").
            videos_left: the input with the two sampled rows removed.
        """
        # random choose two prompts
        N_videos_left = len(videos_left)
        prompt_ids = np.random.choice(N_videos_left, 2, replace=False)

        # shuffle within each pair so method identity isn't positional
        video_pair1 = videos_left[prompt_ids[0]]
        video_pair1 = np.random.permutation(video_pair1)

        video_pair2 = videos_left[prompt_ids[1]]
        video_pair2 = np.random.permutation(video_pair2)

        # update videos_left
        # print(f"N_video_left before: {len(videos_left)}")
        videos_left = np.delete(videos_left, prompt_ids, axis=0)
        # print(f"N_video_left after: {len(videos_left)}")

        radio_select_to_path_lut = {
            "Image Set 1 👍": str(video_pair1[0]),
            "Image Set 2 👍": str(video_pair1[1]),
            "Image Set 3 👍": str(video_pair2[0]),
            "Image Set 4 👍": str(video_pair2[1]),
            "Similar 🤔": "Similar",
        }
        print("---------------")
        print(time_now())
        pprint(radio_select_to_path_lut)
        return video_pair1, video_pair2, radio_select_to_path_lut, videos_left
171
+
172
+ def _update_local_file(self, message, file_path_local):
173
+ with open(file_path_local, "a") as f:
174
+ f.write(message)
175
+
176
    def _update_remote_file(self, file_path_local, file_path_remote):
        """Upload a local file to the survey dataset repo on the HF Hub.

        Called both on demand and by the periodic sync threads; any
        network errors are handled (printed) by the callers.
        """
        self.api.upload_file(
            path_or_fileobj=file_path_local,
            path_in_repo=file_path_remote,
            repo_id=self.repo_id,
            repo_type="dataset",
        )
183
+
184
    def _setup_video(self, path_a, path_b, label_a, label_b, visible):
        """Create a side-by-side pair of gr.Image components.

        Despite the "video" naming (kept from an earlier mp4 version of
        this survey), the components shown are images; VideoCfg simply
        bundles the shared constructor keyword arguments.
        """
        cfg_a = VideoCfg(value=path_a, label=label_a, visible=visible)
        cfg_b = VideoCfg(value=path_b, label=label_b, visible=visible)
        video_a = gr.Image(**cfg_a.__dict__)
        video_b = gr.Image(**cfg_b.__dict__)
        return video_a, video_b
190
+
191
    def _load_callback(self, videos_left):
        """Page-load handler: sample the session's first two image-set pairs.

        Returns updated session state (the two pairs, the label→path
        lookup table, the shrunken pool) followed by gr.update objects
        that load the four images and hide the "ran out" message.
        The output order must match the ``demo.load`` outputs list.
        """
        (
            video_pair1,
            video_pair2,
            radio_select_to_path_lut,
            videos_left,
        ) = self._sample_video_pair(videos_left)
        update_video1 = gr.update(value=video_pair1[0])
        update_video2 = gr.update(value=video_pair1[1])
        update_video3 = gr.update(value=video_pair2[0])
        update_video4 = gr.update(value=video_pair2[1])
        update_md_run_out_videos = gr.update(visible=False)
        return (
            video_pair1,
            video_pair2,
            radio_select_to_path_lut,
            videos_left,
            update_video1,
            update_video2,
            update_video3,
            update_video4,
            update_md_run_out_videos,
        )
214
+
215
+ def _click_select_radio(self):
216
+ update1 = gr.update(visible=True)
217
+ return update1
218
+
219
    def _click_button_confirm1(self, radio_select, radio_select_to_path_lut, user_id):
        """Record the first pair's choice and reveal the second pair.

        Appends a CSV row (with user id/platform columns when
        ``ask_user_id`` is on) and returns gr.update objects matching
        the ``button_confirm1.click`` outputs list.
        """
        # update response file with the actual file path
        selected_path = radio_select_to_path_lut[radio_select]
        path1 = radio_select_to_path_lut["Image Set 1 👍"]
        path2 = radio_select_to_path_lut["Image Set 2 👍"]
        if self.args.ask_user_id:
            id_platform = self.args.id_platform
            message = f"{time_now()},{path1},{path2},{selected_path},{user_id},{id_platform}\n"
        else:
            message = f"{time_now()},{path1},{path2},{selected_path}\n"
        self._update_local_file(message, self.response_file_path_local)

        confirm_message = f"""
Your selection was:  
<span style="font-size:20px; color:orange "> {radio_select} </span> \n\n
"""
        # display confirm message
        update_md_confirm1 = gr.update(visible=True, value=confirm_message)

        # hide the radio and button for video 1-2
        update_button_confirm1 = gr.update(visible=False)
        update_ratio_select1 = gr.update(visible=False)

        # show video 3-4 and radio
        update_md_pair_34 = gr.update(visible=True)
        update_video_3 = gr.update(visible=True)
        update_video_4 = gr.update(visible=True)
        update_radio_select2 = gr.update(visible=True)
        return (
            update_md_confirm1,
            update_button_confirm1,
            update_ratio_select1,
            update_md_pair_34,
            update_video_3,
            update_video_4,
            update_radio_select2,
        )
256
+
257
    def _click_button_confirm2(self, radio_select, radio_select_to_path_lut, user_id):
        """Record the second pair's choice and show the study-done screen.

        Appends a CSV row like ``_click_button_confirm1`` and returns
        gr.update objects matching the ``button_confirm2.click`` outputs
        list: the done message, hidden pair-2 controls, the "New One"
        button, and the optional-feedback widgets.
        """
        # update response file with the actual file path
        selected_path = radio_select_to_path_lut[radio_select]
        path1 = radio_select_to_path_lut["Image Set 3 👍"]
        path2 = radio_select_to_path_lut["Image Set 4 👍"]
        if self.args.ask_user_id:
            id_platform = self.args.id_platform
            message = f"{time_now()},{path1},{path2},{selected_path},{user_id},{id_platform}\n"
        else:
            message = f"{time_now()},{path1},{path2},{selected_path}\n"
        self._update_local_file(message, self.response_file_path_local)

        # NOTE(review): the detail link below points at a different Space
        # (zirui-wang/video_quality_study) than this repo — confirm intended.
        confirm_message = f"""
Your selection was:&nbsp;&nbsp;
<span style="font-size:20px; color:orange "> {radio_select} </span> \n\n
## Study Done!
{self.platform_completion_message}
Click the button below 🎲 if you'd like to evaluate another set. \n\n
You can exit this study by closing this page.
For more details about this study, click
[here](https://huggingface.co/spaces/zirui-wang/video_quality_study/blob/main/detail.md).
"""

        # display confirm message
        update_md_confirm2 = gr.update(visible=True, value=confirm_message)

        # hide the radio and button for video 3-4
        update_button_confirm2 = gr.update(visible=False)
        update_radio_select2 = gr.update(visible=False)

        # show button_new
        update_button_new = gr.update(visible=True)

        # show textbox and button for optional feedback
        update_textbox_optional = gr.update(visible=True)
        update_button_submit_optional = gr.update(visible=True)
        return (
            update_md_confirm2,
            update_button_confirm2,
            update_radio_select2,
            update_button_new,
            update_textbox_optional,
            update_button_submit_optional,
        )
301
+
302
+ def _click_button_new(self, videos_left):
303
+ if len(videos_left) == 0:
304
+ return [None] * 3 + [[]] + [gr.update(visible=False)] * 15 + [gr.update(visible=True)]
305
+
306
+ print("---------------")
307
+ print(f"N_video_left before: {len(videos_left)}")
308
+ (
309
+ video_pair1,
310
+ video_pair2,
311
+ radio_select_to_path_lut,
312
+ videos_left,
313
+ ) = self._sample_video_pair(videos_left)
314
+ update_video1 = gr.update(value=video_pair1[0])
315
+ update_video2 = gr.update(value=video_pair1[1])
316
+ update_radio_select1 = gr.update(visible=True, value=None)
317
+ update_button_confirm1 = gr.update(visible=False)
318
+ update_md_confirm1 = gr.update(visible=False)
319
+
320
+ update_md_pair_34 = gr.update(visible=False)
321
+ update_video3 = gr.update(value=video_pair2[0], visible=False)
322
+ update_video4 = gr.update(value=video_pair2[1], visible=False)
323
+ update_radio_select2 = gr.update(visible=False, value=None)
324
+ update_button_confirm2 = gr.update(visible=False)
325
+ update_md_confirm2 = gr.update(visible=False)
326
+
327
+ update_button_new = gr.update(visible=False)
328
+
329
+ update_textbox_optional = gr.update(visible=False, value=None)
330
+ update_button_submit_optional = gr.update(visible=False)
331
+ update_md_optional_feedback = gr.update(visible=False)
332
+ update_md_run_out_videos = gr.update(visible=False)
333
+ return (
334
+ video_pair1,
335
+ video_pair2,
336
+ radio_select_to_path_lut,
337
+ videos_left,
338
+ update_video1,
339
+ update_video2,
340
+ update_radio_select1,
341
+ update_button_confirm1,
342
+ update_md_confirm1,
343
+ update_md_pair_34,
344
+ update_video3,
345
+ update_video4,
346
+ update_radio_select2,
347
+ update_button_confirm2,
348
+ update_md_confirm2,
349
+ update_button_new,
350
+ update_textbox_optional,
351
+ update_button_submit_optional,
352
+ update_md_optional_feedback,
353
+ update_md_run_out_videos,
354
+ )
355
+
356
+ def _click_button_optional_feedback(self, textbox_optional_feedback):
357
+ if textbox_optional_feedback == "":
358
+ return gr.skip(), gr.skip()
359
+ message = f"{time_now()}\n{textbox_optional_feedback}\n\n"
360
+ self._update_local_file(message, self.feedback_file_path_local)
361
+
362
+ update_md_optional_feedback = gr.update(visible=True)
363
+ update_button_submit_optional = gr.update(visible=False)
364
+ return update_md_optional_feedback, update_button_submit_optional
365
+
366
    def _click_button_submit_user_id(self, textbox_user_id):
        """Sanitize and store the platform user id, then reveal pair 1.

        Commas, newlines and spaces are replaced with underscores so the
        id can be embedded safely in the CSV response rows. Returns the
        id plus gr.update objects matching ``button_submit_user_id.click``'s
        outputs list; skips everything if the textbox was empty.
        """
        user_id = str(textbox_user_id).replace(",", "_").replace("\n", "_").replace(" ", "_")
        if user_id == "":
            return [gr.skip()] * 8
        # lock the textbox and hide the submit button after a valid entry
        update_textbox_user_id = gr.update(interactive=False)
        update_button_submit_user_id = gr.update(visible=False)
        update_md_user_id = gr.update(visible=True)
        # reveal the first image-set pair
        update_md_pair_12 = gr.update(visible=True)
        update_video1 = gr.update(visible=True)
        update_video2 = gr.update(visible=True)
        update_radio_select1 = gr.update(visible=True)
        return (
            user_id,
            update_textbox_user_id,
            update_button_submit_user_id,
            update_md_user_id,
            update_md_pair_12,
            update_video1,
            update_video2,
            update_radio_select1,
        )
387
+
388
    def main(self):
        """Build the Gradio Blocks UI, wire all callbacks, and launch the app.

        Layout (top to bottom): intro markdown, optional debug button,
        optional user-id row, image-set pair 1, image-set pair 2
        (hidden until pair 1 is confirmed), the "New One" button, and the
        optional-feedback row. Each *.click outputs list must stay in
        sync with the tuple order returned by its handler.
        """
        # read in md file
        with open("start.md", "r") as f:
            md_start = f.read()

        with gr.Blocks(theme=self.theme, title="Image Quality User Study") as demo:

            # set up session states (per-browser-session, expire after 900 s)
            # random pop videos from this list to get video pairs
            videos_left = gr.State(value=self.video_paths, time_to_live=900)
            video_pair1 = gr.State(value=["path1", "path2"], time_to_live=900)
            video_pair2 = gr.State(value=["path2", "path4"], time_to_live=900)
            radio_select_to_path_lut = gr.State(value={}, time_to_live=900)  # hold a dict
            user_id = gr.State(value="", time_to_live=900)

            # set up layout
            with gr.Column():
                gr.Markdown(md_start)

            # a debug button (prints current session state to the server log)
            if self.args.debug:

                def _click_button_debug(
                    video_pair1,
                    video_pair2,
                    radio_select_to_path_lut,
                    videos_left,
                    user_id,
                ):
                    print(f"video_pair1: {video_pair1}")
                    print(f"video_pair2: {video_pair2}")
                    print(f"radio_select_to_path_lut: {radio_select_to_path_lut}")
                    print(f"N videos_left: {len(videos_left)}")
                    print(f"user_id: {user_id}")

                button_debug = gr.Button("debug", variant="primary", scale=1)
                button_debug.click(
                    _click_button_debug,
                    inputs=[
                        video_pair1,
                        video_pair2,
                        radio_select_to_path_lut,
                        videos_left,
                        user_id,
                    ],
                )

            # ---------------- optional user id ----------------
            if self.args.ask_user_id:
                with gr.Row():
                    textbox_user_id = gr.Textbox(
                        label=f"Please enter your {self.args.id_platform} ID: ",
                        placeholder="Type here...",
                        scale=4,
                        lines=1,
                        interactive=True,
                    )
                    button_submit_user_id = gr.Button("Submit", variant="primary", scale=1)
                md_user_id = gr.Markdown(
                    "Thank you for providing your ID! 🙏", visible=False
                )

            # ---------------- video 1-2 ----------------
            md_pair_12 = gr.Markdown("## Image Set Pair 1", visible=False)
            with gr.Row():
                video1, video2 = self._setup_video(
                    path_a=video_pair1.value[0],
                    path_b=video_pair1.value[1],
                    label_a="Image Set 1",
                    label_b="Image Set 2",
                    visible=False,
                )
            with gr.Row():
                radio_select1 = gr.Radio(
                    choices=["Image Set 1 👍", "Image Set 2 👍", "Similar 🤔"],
                    label="Your Preference:",
                    scale=2,
                    visible=False,
                )

                button_confirm1 = gr.Button(
                    value="Confirm",
                    variant="primary",
                    scale=1,
                    visible=False,
                )

            # NOTE(review): ``inputs=button_confirm1`` on a Markdown looks
            # unusual — confirm it is intended and not a leftover.
            md_confirm1 = gr.Markdown(visible=False, inputs=button_confirm1)

            # ---------------- video 3-4 ----------------
            md_pair_34 = gr.Markdown("## Image Set Pair 2", visible=False)
            with gr.Row():
                video3, video4 = self._setup_video(
                    path_a=video_pair2.value[0],
                    path_b=video_pair2.value[1],
                    label_a="Image Set 3",
                    label_b="Image Set 4",
                    visible=False,
                )
            with gr.Row():
                radio_select2 = gr.Radio(
                    choices=["Image Set 3 👍", "Image Set 4 👍", "Similar 🤔"],
                    label="Your Preference:",
                    scale=2,
                    visible=False,
                )

                button_confirm2 = gr.Button(
                    value="Confirm",
                    variant="primary",
                    scale=1,
                    visible=False,
                )

            md_confirm2 = gr.Markdown(visible=False, inputs=button_confirm2)

            # ---------------- new button ----------------
            button_new = gr.Button(
                value="New One 🎲",
                variant="primary",
                visible=False,
            )
            md_run_out_videos = gr.Markdown(
                "You've evaluated all video pairs. Thank you for your participation! 🙏",
                visible=False,
            )

            # ---------------- optional feedback ----------------
            with gr.Row():
                textbox_optional_feedback = gr.Textbox(
                    label="Optional Comments:",
                    placeholder="Type here...",
                    lines=1,
                    scale=4,
                    interactive=True,
                    visible=False,
                )
                button_submit_optional_feedback = gr.Button(
                    value="Submit Comments",
                    variant="secondary",
                    scale=1,
                    visible=False,
                )

            md_optional_feedback = gr.Markdown(
                "Thank you for providing additional comments! 🙏",
                visible=False,
            )

            # set up callbacks
            if self.args.ask_user_id:
                button_submit_user_id.click(
                    self._click_button_submit_user_id,
                    trigger_mode="once",
                    inputs=textbox_user_id,
                    outputs=[
                        user_id,
                        textbox_user_id,
                        button_submit_user_id,
                        md_user_id,
                        md_pair_12,
                        video1,
                        video2,
                        radio_select1,
                    ],
                )

            radio_select1.select(
                self._click_select_radio,
                trigger_mode="once",
                outputs=button_confirm1,
            )

            button_confirm1.click(
                self._click_button_confirm1,
                trigger_mode="once",
                inputs=[radio_select1, radio_select_to_path_lut, user_id],
                outputs=[
                    md_confirm1,
                    button_confirm1,
                    radio_select1,
                    md_pair_34,
                    video3,
                    video4,
                    radio_select2,
                ],
            )

            radio_select2.select(
                self._click_select_radio,
                trigger_mode="once",
                outputs=button_confirm2,
            )

            button_confirm2.click(
                self._click_button_confirm2,
                trigger_mode="once",
                inputs=[radio_select2, radio_select_to_path_lut, user_id],
                outputs=[
                    md_confirm2,
                    button_confirm2,
                    radio_select2,
                    button_new,
                    textbox_optional_feedback,
                    button_submit_optional_feedback,
                ],
            )

            button_new.click(
                self._click_button_new,
                trigger_mode="once",
                inputs=videos_left,
                outputs=[
                    video_pair1,
                    video_pair2,
                    radio_select_to_path_lut,
                    videos_left,
                    video1,
                    video2,
                    radio_select1,
                    button_confirm1,
                    md_confirm1,
                    md_pair_34,
                    video3,
                    video4,
                    radio_select2,
                    button_confirm2,
                    md_confirm2,
                    button_new,
                    textbox_optional_feedback,
                    button_submit_optional_feedback,
                    md_optional_feedback,
                    md_run_out_videos,
                ],
            )

            button_submit_optional_feedback.click(
                self._click_button_optional_feedback,
                trigger_mode="once",
                inputs=textbox_optional_feedback,
                outputs=[md_optional_feedback, button_submit_optional_feedback],
            )

            # sample the first two pairs when a session's page loads
            demo.load(
                self._load_callback,
                inputs=videos_left,
                outputs=[
                    video_pair1,
                    video_pair2,
                    radio_select_to_path_lut,
                    videos_left,
                    video1,
                    video2,
                    video3,
                    video4,
                    md_run_out_videos,
                ],
            )
        demo.launch(share=self.args.share, show_api=False)
647
+
648
+
649
def parse_args():
    """Parse CLI flags, then overlay every key from the YAML config file.

    HF Spaces cannot pass command-line arguments, so most settings live
    in config.yaml; its keys are copied onto the argparse namespace
    (overwriting any same-named CLI attribute).
    """
    parser = ArgumentParser()

    # use this config as HF space does not take args
    parser.add_argument("--config", type=str, default="config.yaml")

    # these args are useful for local debugging
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--no_sync", action="store_true")
    args = parser.parse_args()

    with open(args.config, "r") as f:
        config = yaml.safe_load(f)

    for key, value in config.items():
        setattr(args, key, value)
    pprint(vars(args))
    return args
668
+
669
+
670
if __name__ == "__main__":
    # Entry point: load CLI flags + YAML config, then build and launch the
    # survey UI (blocks until the Gradio server stops).
    args = parse_args()

    survey_engine = SurveyEngine(args)
    survey_engine.main()
config.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ method_filter:
2
+ - method_B
3
+ - method_C
4
+ period_upload: 300 # Interval in seconds between response-file uploads (300 s = 5 min)
5
+ period_squash: 43200 # Interval in seconds between commit-history squashes (43200 s = 12 h)
6
+ ask_user_id: True
7
+ id_platform: Prolific
8
+ task: Editing
detail.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # An Editing Quality User Study for 3D Editing Models
2
+ Are you interested in the latest developments in 3D editing models? We invite you to participate in our user study, where you can watch a few sets of images and provide your ratings. By participating, you will have the chance to experience the latest 3D editing models firsthand. The study should take as little as 1 minute to complete.
3
+
4
+ ## What Are We Evaluating?
5
+ This study aims to assess the quality of rendered images from 3D models edited by two different methods. We are focusing on key aspects such as spatial consistency and multi-view coherence. While these models produce impressive results, they can also generate unexpected artifacts, such as strange morphing effects, abrupt changes in subjects, or unusual occlusions and geometries. We have created 25 renderings for this study; each rendering is generated using identical editing text prompts across the two models, and we may expand the study to include more renderings and additional models in the future.
6
+
7
+ | ![GIF 1](example/good.gif) | ![GIF 2](example/bad.gif) |
8
+ |:---------------------------------------------------------------------------:|:---------------------------------------------------------------------------:|
9
+ | 🙂 Pretty Good | 🤔 Subject changed and weird books. |
10
+
11
+ ## Your Contribution Matters
12
+ The results of this study, along with the edited models and prompts used, will be made publicly available as part of a dataset. This dataset will contribute to a research report aimed at advancing the understanding of 3D editing. Your feedback is crucial in helping us identify and understand these phenomena.
13
+
14
+ ## How to Participate
15
+ You can join the study using any device—whether it is a phone, tablet, laptop, or desktop computer. For the best experience, we recommend using a larger screen to observe potential artifacts more clearly.
16
+
17
+ Your participation is invaluable to us. Thank you for contributing to the advancement of 3D prompt-driven editing technology.
start.md ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # 🧐 3D Editing Quality Study
2
+ 🙏 Thank you for participating in our study! This study should take ~1 minute.
3
+
4
+ 💼 Task: You'll watch 2 pairs of image sets rendered from edited 3D models. For each pair, select the image set that <mark>has better visual quality, more multi-view coherence, and with less noise.</mark>