Culda committed
Commit c744305 · Parent: d82784d
Files changed (4)
  1. .gitignore +1 -0
  2. README.md +28 -0
  3. app.py +50 -0
  4. requirements.txt +6 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ venv
README.md CHANGED
@@ -10,3 +10,31 @@ pinned: false
  ---
 
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Flux Inpaint AI Model
+
+ This Space demonstrates the Flux inpaint AI model, which uses a ControlNet for image inpainting.
+
+ ## How to use
+
+ 1. Upload an input image
+ 2. Upload a mask image (white areas will be inpainted)
+ 3. Enter a prompt describing the desired output
+ 4. Adjust the sliders for fine-tuning (optional)
+ 5. Click "Submit" to generate the inpainted image
+
+ ## Model Details
+
+ This Space uses the following models:
+ - Base model: black-forest-labs/FLUX.1-dev
+ - ControlNet model: YishaoAI/flux-dev-controlnet-canny-kid-clothes
+
+ The inpainting process uses a Canny edge detector for additional control.
+
+ ## Parameters
+
+ - Strength: How strongly the masked region is repainted; higher values stray further from the original (0-1)
+ - Number of Inference Steps: More steps can improve quality at the cost of slower generation
+ - Guidance Scale: Controls how closely the image follows the prompt
+ - ControlNet Conditioning Scale: Adjusts the influence of the ControlNet model
+
+ Enjoy experimenting with the Flux Inpaint AI Model!
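
The steps and parameters above also map onto a programmatic call. A minimal sketch using `gradio_client`, where the Space id `culda/flux-inpaint` and the two file names are hypothetical placeholders, not part of this commit:

```python
# Hypothetical client-side call to this Space; Space id and file names
# are assumed placeholders.
from gradio_client import Client, handle_file

client = Client("culda/flux-inpaint")  # assumed Space id
result = client.predict(
    handle_file("input.png"),  # input image
    handle_file("mask.png"),   # mask image (white = inpaint)
    "a red floral dress",      # prompt
    0.95,                      # strength
    50,                        # number of inference steps
    5,                         # guidance scale
    0.5,                       # ControlNet conditioning scale
    api_name="/predict",       # default endpoint for a gr.Interface
)
print(result)  # local path to the generated output image
```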
app.py ADDED
@@ -0,0 +1,50 @@
+ import torch
+ import gradio as gr
+ from diffusers import FluxControlNetInpaintPipeline, FluxControlNetModel
+ from controlnet_aux import CannyDetector
+
+ base_model = 'black-forest-labs/FLUX.1-dev'
+ controlnet_model = 'YishaoAI/flux-dev-controlnet-canny-kid-clothes'
+
+ # FLUX weights are published in bfloat16; float16 tends to overflow with this model.
+ controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
+ pipe = FluxControlNetInpaintPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
+ # Offloading manages device placement itself; an explicit pipe.to("cuda") would conflict with it.
+ pipe.enable_model_cpu_offload()
+
+ canny = CannyDetector()
+
+ def inpaint(image, mask, prompt, strength, num_inference_steps, guidance_scale, controlnet_conditioning_scale):
+     # Build a Canny edge map from the input image to condition the ControlNet.
+     canny_image = canny(image)
+
+     image_res = pipe(
+         prompt,
+         image=image,
+         control_image=canny_image,
+         controlnet_conditioning_scale=controlnet_conditioning_scale,
+         mask_image=mask,
+         strength=strength,
+         num_inference_steps=num_inference_steps,
+         guidance_scale=guidance_scale,
+     ).images[0]
+
+     return image_res
+
+ iface = gr.Interface(
+     fn=inpaint,
+     inputs=[
+         gr.Image(type="pil", label="Input Image"),
+         gr.Image(type="pil", label="Mask Image"),
+         gr.Textbox(label="Prompt"),
+         gr.Slider(0, 1, value=0.95, label="Strength"),
+         gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps"),
+         gr.Slider(0, 20, value=5, label="Guidance Scale"),
+         gr.Slider(0, 1, value=0.5, label="ControlNet Conditioning Scale"),
+     ],
+     outputs=gr.Image(type="pil", label="Output Image"),
+     title="Flux Inpaint AI Model",
+     description="Upload an image and a mask, then provide a prompt to generate an inpainted image.",
+ )
+
+ iface.launch()
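
One design note: `enable_model_cpu_offload()` keeps only the currently running model component on the GPU, trading some speed for a much smaller VRAM footprint, and it handles device placement on its own. For a quick check outside the Gradio UI, the `inpaint()` function can also be called directly; a minimal sketch, with `input.png` and `mask.png` as hypothetical example files:

```python
# Hypothetical smoke test for inpaint(); the file names are placeholders.
from PIL import Image

image = Image.open("input.png").convert("RGB")
mask = Image.open("mask.png").convert("L")   # white areas get repainted

result = inpaint(
    image,
    mask,
    prompt="a blue denim jacket",
    strength=0.95,
    num_inference_steps=50,
    guidance_scale=5.0,
    controlnet_conditioning_scale=0.5,
)
result.save("output.png")
```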
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ torch
+ diffusers
+ transformers
+ accelerate
+ controlnet_aux
+ gradio
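
To run the Space locally, the standard workflow should apply: `pip install -r requirements.txt`, then `python app.py`, on a machine with enough GPU memory for the offloaded FLUX.1-dev weights. If model loading fails on the T5 tokenizer, adding `sentencepiece` to this list is a common fix (an assumption here, not something this commit pins down).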