tokenizer
- app.py +31 -9
- requirements.txt +2 -0
app.py
CHANGED
@@ -3,20 +3,41 @@ import gradio as gr
 from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
 from diffusers.models.controlnet_flux import FluxControlNetModel
 from controlnet_aux import CannyDetector
+from transformers import T5Tokenizer, T5TokenizerFast
 
-base_model =
-controlnet_model =
+base_model = "black-forest-labs/FLUX.1-schnell"
+controlnet_model = "YishaoAI/flux-dev-controlnet-canny-kid-clothes"
 
-
-
+# Try to load the fast tokenizer, fall back to slow if necessary
+try:
+    tokenizer = T5TokenizerFast.from_pretrained(base_model)
+except ValueError:
+    print("Fast tokenizer not available, falling back to slow tokenizer")
+    tokenizer = T5Tokenizer.from_pretrained(base_model)
+
+controlnet = FluxControlNetModel.from_pretrained(
+    controlnet_model, torch_dtype=torch.float16
+)
+pipe = FluxPipeline.from_pretrained(
+    base_model, controlnet=controlnet, torch_dtype=torch.float16, tokenizer=tokenizer
+)
 pipe.enable_model_cpu_offload()
 pipe.to("cuda")
 
 canny = CannyDetector()
 
-
+
+def inpaint(
+    image,
+    mask,
+    prompt,
+    strength,
+    num_inference_steps,
+    guidance_scale,
+    controlnet_conditioning_scale,
+):
     canny_image = canny(image)
-
+
     image_res = pipe(
         prompt,
         image=image,
@@ -27,9 +48,10 @@ def inpaint(image, mask, prompt, strength, num_inference_steps, guidance_scale,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
     ).images[0]
-
+
     return image_res
 
+
 iface = gr.Interface(
     fn=inpaint,
     inputs=[
@@ -39,11 +61,11 @@ iface = gr.Interface(
         gr.Slider(0, 1, value=0.95, label="Strength"),
         gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps"),
         gr.Slider(0, 20, value=5, label="Guidance Scale"),
-        gr.Slider(0, 1, value=0.5, label="ControlNet Conditioning Scale")
+        gr.Slider(0, 1, value=0.5, label="ControlNet Conditioning Scale"),
     ],
     outputs=gr.Image(type="pil", label="Output Image"),
     title="Flux Inpaint AI Model",
-    description="Upload an image and a mask, then provide a prompt to generate an inpainted image."
+    description="Upload an image and a mask, then provide a prompt to generate an inpainted image.",
 )
 
 iface.launch()
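The core of this commit is the new tokenizer block in app.py: it prefers the Rust-backed T5TokenizerFast and falls back to the slow, sentencepiece-based T5Tokenizer when the fast one cannot be built, then passes the result to FluxPipeline. Below is a minimal standalone sketch of that fallback for trying it outside the Space; the subfolder argument and the test prompt are assumptions on my part (published FLUX checkpoints keep the T5 tokenizer under tokenizer_2) and are not part of the commit.

# Sketch only: exercise the fast -> slow tokenizer fallback outside the app.
# Assumes tokenizers and sentencepiece are installed (the two new
# requirements.txt entries below) and that the T5 tokenizer lives in the
# "tokenizer_2" subfolder of the base repo.
from transformers import T5Tokenizer, T5TokenizerFast

base_model = "black-forest-labs/FLUX.1-schnell"

try:
    tokenizer = T5TokenizerFast.from_pretrained(base_model, subfolder="tokenizer_2")
except ValueError:
    # Mirrors app.py: fall back to the slow, sentencepiece-based tokenizer.
    tokenizer = T5Tokenizer.from_pretrained(base_model, subfolder="tokenizer_2")

# Report which class was actually loaded and confirm it can encode a prompt.
ids = tokenizer("a child in a yellow raincoat").input_ids
print(type(tokenizer).__name__, len(ids))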
requirements.txt
CHANGED
@@ -4,3 +4,5 @@ transformers
 accelerate
 controlnet_aux
 gradio
+sentencepiece
+tokenizers
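sentencepiece backs the slow T5Tokenizer branch of the try/except in app.py, while tokenizers backs T5TokenizerFast, so both paths of the fallback have a working backend in the Space. As an optional extra, a tiny startup check along the following lines could surface a missing backend in the Space logs; the helper is illustrative and not part of the commit.

# Sketch only: report which tokenizer backends are importable at startup.
import importlib.util

def report_tokenizer_backends() -> None:
    for package in ("tokenizers", "sentencepiece"):
        found = importlib.util.find_spec(package) is not None
        print(f"{package}: {'available' if found else 'MISSING'}")

report_tokenizer_backends()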