Spaces: ginipick / Running on Zero

ginipick committed · Commit 59c7e0e · verified · 1 Parent(s): 1091c32

Update app.py

Files changed (1): app.py (+86, -27)
app.py CHANGED
@@ -19,7 +19,7 @@ hf_hub_download(
     repo_id="black-forest-labs/FLUX.1-Redux-dev",
     filename="flux1-redux-dev.safetensors",
     local_dir="models/style_models",
-    token=HF_TOKEN  # pass the token
+    token=HF_TOKEN
 )
 hf_hub_download(
     repo_id="black-forest-labs/FLUX.1-Depth-dev",
@@ -27,23 +27,43 @@ hf_hub_download(
     local_dir="models/diffusion_models",
     token=HF_TOKEN
 )
-# Add token=HF_TOKEN to the remaining hf_hub_download calls as well
-hf_hub_download(repo_id="Comfy-Org/sigclip_vision_384", filename="sigclip_vision_patch14_384.safetensors", local_dir="models/clip_vision", token=HF_TOKEN)
-hf_hub_download(repo_id="Kijai/DepthAnythingV2-safetensors", filename="depth_anything_v2_vitl_fp32.safetensors", local_dir="models/depthanything", token=HF_TOKEN)
-hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev", filename="ae.safetensors", local_dir="models/vae/FLUX1", token=HF_TOKEN)
-hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="clip_l.safetensors", local_dir="models/text_encoders", token=HF_TOKEN)
-t5_path = hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp16.safetensors", local_dir="models/text_encoders/t5", token=HF_TOKEN)
-
-
+hf_hub_download(
+    repo_id="Comfy-Org/sigclip_vision_384",
+    filename="sigclip_vision_patch14_384.safetensors",
+    local_dir="models/clip_vision",
+    token=HF_TOKEN
+)
+hf_hub_download(
+    repo_id="Kijai/DepthAnythingV2-safetensors",
+    filename="depth_anything_v2_vitl_fp32.safetensors",
+    local_dir="models/depthanything",
+    token=HF_TOKEN
+)
+hf_hub_download(
+    repo_id="black-forest-labs/FLUX.1-dev",
+    filename="ae.safetensors",
+    local_dir="models/vae/FLUX1",
+    token=HF_TOKEN
+)
+hf_hub_download(
+    repo_id="comfyanonymous/flux_text_encoders",
+    filename="clip_l.safetensors",
+    local_dir="models/text_encoders",
+    token=HF_TOKEN
+)
+t5_path = hf_hub_download(
+    repo_id="comfyanonymous/flux_text_encoders",
+    filename="t5xxl_fp16.safetensors",
+    local_dir="models/text_encoders/t5",
+    token=HF_TOKEN
+)
 
-# Import all the necessary functions from the original script
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     try:
         return obj[index]
     except KeyError:
         return obj["result"][index]
 
-# Add all the necessary setup functions from the original script
 def find_path(name: str, path: str = None) -> str:
     if path is None:
         path = os.getcwd()
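The retained get_value_at_index helper exists because ComfyUI node methods return either tuple-like outputs or dicts with a "result" key; indexing a dict with an int raises KeyError, which triggers the fallback. A quick self-contained illustration (values are hypothetical):

    # Tuple-style node output: plain indexing works.
    assert get_value_at_index(("image_tensor",), 0) == "image_tensor"

    # Dict-style node output: obj[0] raises KeyError (0 is not a key),
    # so the helper falls back to obj["result"][0].
    assert get_value_at_index({"result": ("image_tensor",)}, 0) == "image_tensor"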
@@ -77,6 +97,8 @@ def add_extra_model_paths() -> None:
 add_comfyui_directory_to_sys_path()
 add_extra_model_paths()
 
+
+
 def import_custom_nodes() -> None:
     import asyncio
     import execution
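For context, find_path (kept above) locates a directory by name, and add_comfyui_directory_to_sys_path makes the ComfyUI checkout importable so that modules like execution and nodes resolve. A minimal sketch of that pattern; only the function names come from the diff, the body is an assumption:

    import os
    import sys

    def add_comfyui_directory_to_sys_path() -> None:
        # Assumption: reuse the find_path helper shown above to locate a
        # directory literally named "ComfyUI", then make it importable.
        comfyui_path = find_path("ComfyUI")
        if comfyui_path is not None and os.path.isdir(comfyui_path):
            sys.path.append(comfyui_path)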
@@ -109,8 +131,6 @@ from nodes import (
 import_custom_nodes()
 
 # Global variables for preloaded models and constants
-#with torch.inference_mode():
-# Initialize constants
 intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
 CONST_1024 = intconstant.get_value(value=1024)
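The deleted lines were a commented-out torch.inference_mode() wrapper. For reference, the working form of that wrapper, had the commit enabled it instead of removing it, would be (a sketch, not what app.py does):

    import torch

    # inference_mode() disables autograd tracking for everything inside the
    # block, trimming memory use when nodes are only run for inference.
    with torch.inference_mode():
        intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
        CONST_1024 = intconstant.get_value(value=1024)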
 
@@ -154,6 +174,8 @@ downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS["DownloadAndLoadDepthA
154
  DEPTH_MODEL = downloadandloaddepthanythingv2model.loadmodel(
155
  model="depth_anything_v2_vitl_fp32.safetensors"
156
  )
 
 
157
  cliptextencode = CLIPTextEncode()
158
  loadimage = LoadImage()
159
  vaeencode = VAEEncode()
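These nodes are instantiated once at module import so their weights are shared across Gradio requests. Inside generate_image, the filepath inputs from Gradio presumably flow through loadimage; a hedged sketch of that step (variable names are hypothetical; load_image is the method of ComfyUI's built-in LoadImage node):

    # structure_image arrives as a filepath string because the gr.Image
    # components below are declared with type="filepath".
    loaded = loadimage.load_image(image=structure_image)
    structure_tensor = get_value_at_index(loaded, 0)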
@@ -297,7 +319,7 @@ def generate_image(prompt, structure_image, style_image, depth_strength=15, styl
     )
 
     # Save image
-    prefix = cr_text.text_multiline(text="Flux_BFL_Depth_Redux")
+    prefix = cr_text.text_multiline(text="Virtual_TryOn")
 
     saved = saveimage.save_images(
         filename_prefix=get_value_at_index(prefix, 0),
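text_multiline returns a tuple whose first element is the string, which is why the prefix is unwrapped with get_value_at_index before being handed to save_images. The hunk cuts off before the images argument; a sketch of the completed call (decoded is hypothetical, standing in for the VAE-decoded batch):

    prefix = cr_text.text_multiline(text="Virtual_TryOn")
    saved = saveimage.save_images(
        filename_prefix=get_value_at_index(prefix, 0),
        images=get_value_at_index(decoded, 0),  # hypothetical upstream output
    )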
@@ -307,29 +329,50 @@ def generate_image(prompt, structure_image, style_image, depth_strength=15, styl
     return saved_path
 
 # Create Gradio interface
-
 examples = [
-    ["", "mona.png", "receita-tacos.webp", 15, 0.6],
-    ["a woman looking at a house catching fire on the background", "disaster_girl.png", "abaporu.jpg", 15, 0.15],
-    ["istanbul aerial, dramatic photography", "natasha.png", "istambul.jpg", 15, 0.5],
+    ["person wearing fashionable clothing", "person.jpg", "fashion1.jpg", 15, 0.6],
+    ["person wearing elegant dress", "model1.jpg", "dress1.jpg", 15, 0.5],
+    ["person wearing casual outfit", "person2.jpg", "outfit1.jpg", 15, 0.5],
 ]
 
-output_image = gr.Image(label="Generated Image")
+output_image = gr.Image(label="Virtual Try-On Result")
 
 with gr.Blocks() as app:
-    gr.Markdown("# FLUX Style Shaping")
-    gr.Markdown("Flux[dev] Redux + Flux[dev] Depth ComfyUI workflow by [Nathan Shipley](https://x.com/CitizenPlain) running directly on Gradio. [workflow](https://gist.github.com/nathanshipley/7a9ac1901adde76feebe58d558026f68) - [how to convert your any comfy workflow to gradio (soon)](#)")
+    gr.Markdown("# AI Fashion Virtual Try-On")
+    gr.Markdown("Upload your photo and try on different clothing items virtually using AI. The system will generate an image of you wearing the selected clothing while maintaining your pose and appearance.")
+
     with gr.Row():
         with gr.Column():
-            prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
+            prompt_input = gr.Textbox(
+                label="Style Description",
+                placeholder="Describe the desired style (e.g., 'person wearing elegant dress')"
+            )
             with gr.Row():
                 with gr.Group():
-                    structure_image = gr.Image(label="Structure Image", type="filepath")
-                    depth_strength = gr.Slider(minimum=0, maximum=50, value=15, label="Depth Strength")
+                    structure_image = gr.Image(
+                        label="Your Photo",
+                        type="filepath",
+                        info="Upload a full-body photo of yourself"
+                    )
+                    depth_strength = gr.Slider(
+                        minimum=0,
+                        maximum=50,
+                        value=15,
+                        label="Fitting Strength"
+                    )
                 with gr.Group():
-                    style_image = gr.Image(label="Style Image", type="filepath")
-                    style_strength = gr.Slider(minimum=0, maximum=1, value=0.5, label="Style Strength")
-                    generate_btn = gr.Button("Generate")
+                    style_image = gr.Image(
+                        label="Clothing Item",
+                        type="filepath",
+                        info="Upload the clothing item you want to try on"
+                    )
+                    style_strength = gr.Slider(
+                        minimum=0,
+                        maximum=1,
+                        value=0.5,
+                        label="Style Transfer Strength"
+                    )
+                    generate_btn = gr.Button("Generate Try-On")
 
             gr.Examples(
                 examples=examples,
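The gr.Examples call is truncated by the hunk boundary. A plausible completion, consistent with the components defined above (the exact argument list in app.py may differ):

    gr.Examples(
        examples=examples,
        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
        outputs=[output_image],
        fn=generate_image,
        cache_examples=False,
    )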
@@ -342,6 +385,22 @@ with gr.Blocks() as app:
 
         with gr.Column():
             output_image.render()
+
+    gr.Markdown("""
+    ### How to Use:
+    1. Upload your full-body photo
+    2. Upload the clothing item you want to try on
+    3. Adjust the fitting and style strength if needed
+    4. Add a description of the desired style (optional)
+    5. Click 'Generate Try-On' to see the result
+
+    ### Tips:
+    - Use clear, well-lit photos
+    - Full-body photos work best
+    - Clothing items should be on a clean background
+    - Adjust the fitting strength for better results
+    """)
+
     generate_btn.click(
         fn=generate_image,
         inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
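The click wiring is likewise cut off after inputs; the natural completion routes the result to the output component (outputs is an assumption, inferred from output_image above):

    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
        outputs=[output_image],
    )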
 