Werli committed
Commit bac3a59 · verified · Parent: b0b0867

Upload multi_comfy.py

Files changed (1)
  1. modules/multi_comfy.py +384 -0
modules/multi_comfy.py ADDED
@@ -0,0 +1,384 @@
import gradio as gr
import json
from PIL import Image
import os
from collections import defaultdict

css = """
#custom-gallery{--row-height:180px;display:grid;grid-auto-rows:min-content;gap:10px}#custom-gallery .thumbnail-item{height:var(--row-height);width:100%;position:relative;overflow:hidden;border-radius:8px;box-shadow:0 2px 5px rgb(0 0 0 / .1);transition:transform 0.2s ease,box-shadow 0.2s ease}#custom-gallery .thumbnail-item:hover{transform:translateY(-3px);box-shadow:0 4px 12px rgb(0 0 0 / .15)}#custom-gallery .thumbnail-item img{width:auto;height:100%;max-width:100%;max-height:var(--row-height);object-fit:contain;margin:0 auto;display:block}#custom-gallery .thumbnail-item img.portrait{max-width:100%}#custom-gallery .thumbnail-item img.landscape{max-height:100%}.gallery-container{max-height:500px;overflow-y:auto;padding-right:0;--size-80:500px}.thumbnails{display:flex;position:absolute;bottom:0;width:120px;overflow-x:scroll;padding-top:320px;padding-bottom:280px;padding-left:4px;flex-wrap:wrap}
"""

EMPTY_RESULT = ("Not Available",) * 15

# ---------- EXTRACTION FUNCTIONS ----------
def read_metadata(file_path):
    """Read raw metadata from an image file."""
    try:
        with Image.open(file_path) as img:
            return img.info
    except Exception as e:
        return {"error": f"Error reading file: {str(e)}"}

def extract_workflow_data(file_path):
    """Locate and parse the ComfyUI workflow JSON embedded in the image metadata."""
    metadata = read_metadata(file_path)
    if "error" in metadata:
        return {"error": metadata["error"]}

    if 'prompt' in metadata:
        try:
            return json.loads(metadata['prompt'])
        except json.JSONDecodeError:
            pass

    # Fall back to any metadata value that looks like a JSON object
    for key, value in metadata.items():
        if isinstance(value, str) and value.strip().startswith('{'):
            try:
                return json.loads(value)
            except json.JSONDecodeError:
                continue
    return {"error": "No workflow data found"}

def extract_ksampler_params(workflow_data):
    """Pull sampling parameters from the first KSampler node found."""
    seed = steps = cfg = sampler = scheduler = denoise = "Not found"
    if not isinstance(workflow_data, dict):
        return seed, steps, cfg, sampler, scheduler, denoise
    for node in workflow_data.values():
        if isinstance(node, dict) and node.get("class_type", "") in ["KSampler", "KSampler (Efficient)"]:
            inputs = node.get("inputs", {})
            seed = inputs.get("seed", "Not found")
            steps = inputs.get("steps", "Not found")
            cfg = inputs.get("cfg", "Not found")
            sampler = inputs.get("sampler_name", "Not found")
            scheduler = inputs.get("scheduler", "Not found")
            denoise = inputs.get("denoise", "Not found")
            break
    return str(seed), str(steps), str(cfg), str(sampler), str(scheduler), str(denoise)

def extract_prompts(workflow_data):
    """Extract positive and negative prompts from known text node types."""
    positive = negative = "Not found"
    if not isinstance(workflow_data, dict):
        return positive, negative
    for node in workflow_data.values():
        if isinstance(node, dict):
            class_type = node.get("class_type", "")
            inputs = node.get("inputs", {})
            title = node.get("_meta", {}).get("title", "") if node.get("_meta") else ""

            if "Text to Conditioning" in class_type:
                if "POSITIVE" in title:
                    positive = inputs.get("text", "Not found")
                elif "NEGATIVE" in title:
                    negative = inputs.get("text", "Not found")
            if "ShowText|pysssss" in class_type:
                if "text_1" in inputs:
                    positive = inputs["text_1"]
                if "text_2" in inputs:
                    negative = inputs["text_2"]
            if "DPRandomGenerator" in class_type:
                if "POSITIVE" in title:
                    positive = inputs.get("text", "Not found")
                elif "NEGATIVE" in title:
                    negative = inputs.get("text", "Not found")
    return str(positive), str(negative)

def extract_loras(workflow_data):
    """Collect LoRA names/strengths from LoraLoader nodes and any input strings containing 'lora:'."""
    loras = []
    if not isinstance(workflow_data, dict):
        return "None found"
    for node in workflow_data.values():
        if isinstance(node, dict):
            inputs = node.get("inputs", {})
            if "LoraLoader" in node.get("class_type", ""):
                name = inputs.get("lora_name", "Unknown")
                strength = inputs.get("strength_model", "Unknown")
                loras.append(f"{name} (Strength: {strength})")
            for val in inputs.values():
                if isinstance(val, str) and "lora:" in val.lower():
                    loras.append(val)
    return "\n".join(loras) if loras else "None found"

def extract_model_info(workflow_data):
    """List base model checkpoints and merge-recipe model paths referenced by the workflow."""
    models = []
    if not isinstance(workflow_data, dict):
        return "Not found"
    for node in workflow_data.values():
        if isinstance(node, dict):
            inputs = node.get("inputs", {})
            class_type = node.get("class_type", "")
            if "CheckpointLoader" in class_type:
                models.append(inputs.get("ckpt_name", "Unknown"))
            if "Model Mecha Recipe" in class_type:
                models.append(inputs.get("model_path", "Unknown"))
    return "\n".join(models) if models else "Not found"

def extract_image_info_from_file(image_path):
    """Extract actual image dimensions from the image file itself."""
    try:
        with Image.open(image_path) as img:
            width, height = img.size
            return str(width), str(height)
    except Exception:
        return "Not found", "Not found"

def extract_batch_size(workflow_data):
    """Extract the batch size from the EmptyLatentImage node, if present."""
    batch_size = "Not found"
    if not isinstance(workflow_data, dict):
        return batch_size
    for node in workflow_data.values():
        if isinstance(node, dict) and node.get("class_type", "") == "EmptyLatentImage":
            inputs = node.get("inputs", {})
            batch_size = inputs.get("batch_size", "Not found")
            break
    return str(batch_size)

def extract_nodes_info(workflow_data):
    """Summarize the total node count and the number of nodes per class type."""
    if not isinstance(workflow_data, dict):
        return "Not found"
    total_nodes = len(workflow_data)
    node_types = defaultdict(int)
    for node in workflow_data.values():
        if isinstance(node, dict):
            node_types[node.get("class_type", "Unknown")] += 1
    summary = f"Total Nodes: {total_nodes}\n"
    for t, c in sorted(node_types.items()):
        summary += f"{t}: {c}\n"
    return summary.strip()

def extract_workflow_as_json(workflow_data):
    """Return the full workflow as pretty-printed JSON."""
    if isinstance(workflow_data, dict):
        return json.dumps(workflow_data, ensure_ascii=False, indent=2)
    return "{}"
# ---------- EXTRACTION FUNCTIONS ----------
#
# ---------- IMAGE PROCESSING ----------
def process_single_image(image_path):
    """Extract all workflow info from a single image path."""
    if not image_path:
        return EMPTY_RESULT

    workflow_data = extract_workflow_data(image_path)

    if isinstance(workflow_data, dict) and "error" not in workflow_data:
        seed, steps, cfg, sampler, scheduler, denoise = extract_ksampler_params(workflow_data)
        positive, negative = extract_prompts(workflow_data)
        loras = extract_loras(workflow_data)
        models = extract_model_info(workflow_data)

        # Use the actual image dimensions instead of the workflow dimensions
        width, height = extract_image_info_from_file(image_path)
        batch = extract_batch_size(workflow_data)

        nodes = extract_nodes_info(workflow_data)
        full_json = extract_workflow_as_json(workflow_data)
    else:
        error = str(workflow_data.get("error", "Unknown error"))
        seed = steps = cfg = sampler = scheduler = denoise = positive = negative = loras = models = width = height = batch = nodes = full_json = error

    return seed, steps, cfg, sampler, scheduler, denoise, \
        positive, negative, loras, models, width, height, batch, nodes, full_json

def append_gallery(gallery: list, image: str):
    """Add a single image to the gallery."""
    if gallery is None:
        gallery = []
    if not image:
        return gallery, None
    gallery.append(image)
    return gallery, None

def extend_gallery(gallery, images):
    """Extend the gallery with newly uploaded images, avoiding duplicates."""
    if gallery is None:
        gallery = []

    if not images:
        return gallery

    # Normalize input - Gradio may pass various formats
    incoming_paths = []
    if isinstance(images, str):  # Single image path
        incoming_paths.append(images)
    elif isinstance(images, list):
        for img in images:
            # Handle cases where elements could be tuples from the Gallery
            if isinstance(img, (tuple, list)):
                incoming_paths.append(str(img[0]))
            else:
                incoming_paths.append(str(img))

    unique_incoming = list(set(incoming_paths))  # Avoid duplicates

    seen_paths = {item[0] if isinstance(item, (list, tuple)) else item for item in gallery}
    new_entries = [path for path in unique_incoming if path not in seen_paths]

    # Create entries matching the expected gallery style
    formatted_new = [(path, '') for path in new_entries]

    updated_gallery = gallery + formatted_new

    return updated_gallery

def process_gallery(gallery, results_state):
    """Process all images and populate metadata in the session state."""
    if not gallery or len(gallery) == 0:
        # Clear results if nothing is left
        results_state.clear()
        return EMPTY_RESULT + (results_state,)

    updated_state = {}
    first_image_result = EMPTY_RESULT
    try:
        for item in gallery:
            path = item if isinstance(item, str) else item[0]

            if path not in results_state:
                res = process_single_image(path)
                results_state[path] = res
                updated_state[path] = res

                if first_image_result == EMPTY_RESULT:
                    first_image_result = res
            else:
                # Already cached
                res = results_state[path]
                updated_state[path] = res

                if first_image_result == EMPTY_RESULT:
                    first_image_result = res

        results_state.update(updated_state)
        return first_image_result + (results_state,)
    except Exception as e:
        print("[ERROR]", str(e))
        return EMPTY_RESULT + (results_state,)

def get_selection_from_gallery(gallery, results_state, evt: gr.SelectData):
    """Fetch the cached result for the image selected in the gallery."""
    if evt is None or evt.value is None:
        # No selection: fall back to the first image
        if gallery and len(gallery) > 0:
            img_path = str(gallery[0][0] if isinstance(gallery[0], (list, tuple)) else gallery[0])
            if img_path in results_state:
                return list(results_state[img_path])
    else:
        # Handle the selection event
        try:
            selected_value = evt.value
            img_path = None

            if isinstance(selected_value, dict) and 'image' in selected_value:
                img_path = selected_value['image']['path']
            elif isinstance(selected_value, (list, tuple)):
                img_path = selected_value[0]
            else:
                img_path = str(selected_value)

            if img_path in results_state:
                return list(results_state[img_path])
        except Exception as e:
            print(f"Selection error: {e}")

    # Return empty values if no matching image was found
    return list(EMPTY_RESULT)
# ---------- IMAGE PROCESSING ----------
#
def create_multi_comfy():
    with gr.Blocks(css=css, fill_width=True) as demo:
        gr.Markdown("# 🛠️ ComfyUI Workflow Information Extractor")
        gr.Markdown("Upload multiple ComfyUI-generated images to extract prompts, parameters, models, and full workflows.")
        with gr.Row():
            with gr.Column(scale=2):
                upload_button = gr.UploadButton(
                    "📁 Upload Multiple Images",
                    file_types=["image"],
                    file_count="multiple",
                    size='lg'
                )
                gallery = gr.Gallery(
                    columns=3,
                    show_share_button=False,
                    interactive=True,
                    height='auto',
                    label='Grid of images',
                    preview=False,
                    elem_id='custom-gallery'
                )
            with gr.Column(scale=3):
                with gr.Tabs():
                    with gr.Tab("Sampling Parameters"):
                        with gr.Row():
                            with gr.Column():
                                seed_out = gr.Textbox(label="Seed", interactive=False, show_copy_button=True)
                                steps_out = gr.Textbox(label="Steps", interactive=False, show_copy_button=True)
                                cfg_out = gr.Textbox(label="CFG Scale", interactive=False)
                            with gr.Column():
                                sampler_out = gr.Textbox(label="Sampler", interactive=False)
                                scheduler_out = gr.Textbox(label="Scheduler", interactive=False)
                                denoise_out = gr.Textbox(label="Denoise", interactive=False)

                    with gr.Tab("Prompts"):
                        pos_prompt = gr.Textbox(label="Positive Prompt", lines=4, interactive=False, show_copy_button=True)
                        neg_prompt = gr.Textbox(label="Negative Prompt", lines=4, interactive=False, show_copy_button=True)

                    with gr.Tab("Models & LoRAs"):
                        with gr.Row():
                            lora_out = gr.Textbox(label="LoRAs", lines=5, interactive=False, show_copy_button=True)
                            model_out = gr.Textbox(label="Base Models", lines=5, interactive=False, show_copy_button=True)

                    with gr.Tab("Image Info"):
                        with gr.Row():
                            with gr.Column():
                                width_out = gr.Textbox(label="Width", interactive=False)
                                height_out = gr.Textbox(label="Height", interactive=False)
                                batch_out = gr.Textbox(label="Batch Size", interactive=False)
                            with gr.Column():
                                nodes_out = gr.Textbox(label="Node Counts", lines=15, interactive=True, show_copy_button=True)

                    with gr.Tab("Full Workflow"):
                        json_out = gr.Textbox(label="Workflow JSON", lines=20, interactive=True, show_copy_button=True)

        # State to store extraction results per image path
        results_state = gr.State({})

        # Event connections
        upload_event = upload_button.upload(
            fn=extend_gallery,
            inputs=[gallery, upload_button],
            outputs=gallery,
            queue=False
        )

        upload_event.then(
            fn=process_gallery,
            inputs=[gallery, results_state],
            outputs=[
                seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
                pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
                batch_out, nodes_out, json_out, results_state
            ]
        )
        gallery.change(
            fn=process_gallery,
            inputs=[gallery, results_state],
            outputs=[
                seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
                pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
                batch_out, nodes_out, json_out, results_state
            ],
            queue=True
        )

        gallery.select(
            get_selection_from_gallery,
            inputs=[gallery, results_state],
            outputs=[
                seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
                pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
                batch_out, nodes_out, json_out
            ]
        )

        gr.Markdown("---\n💡 **Note:** This tool is still under development.")

    return demo
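
To try the extractor locally, here is a minimal usage sketch. It assumes the repository root is on the Python path so that `modules` is importable as a package; the `app.py` entry point below is hypothetical and not part of this commit:

# app.py (hypothetical entry point): launch the Gradio Blocks app returned by create_multi_comfy()
from modules.multi_comfy import create_multi_comfy

if __name__ == "__main__":
    demo = create_multi_comfy()
    demo.launch()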