fffiloni committed
Commit
61e8157
1 Parent(s): ca411cc

Create app.py

Files changed (1)
  1. app.py +63 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ import gradio as gr
+ import os
+ hf_token = os.environ.get("HF_TOKEN")
+ import torch
+ from diffusers import StableDiffusion3Pipeline
+ from diffusers.models.controlnet_sd3 import ControlNetSD3Model
+ from diffusers.utils import load_image
+ from diffusers.utils.torch_utils import randn_tensor
+
+ from pipeline_stable_diffusion_3_controlnet import StableDiffusion3CommonPipeline
+
+ # load the SD3 base model together with the InstantX Canny ControlNet
+ base_model = 'stabilityai/stable-diffusion-3-medium-diffusers'
+ pipe = StableDiffusion3CommonPipeline.from_pretrained(
+     base_model,
+     controlnet_list=['InstantX/SD3-Controlnet-Canny'],
+     token=hf_token
+ )
+ pipe.to('cuda:0', torch.float16)
+
+ def infer(image_in, prompt):
+     # fall back to the example prompt when the textbox is left empty
+     prompt = prompt or 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image'
+     n_prompt = 'NSFW, nude, naked, porn, ugly'
+     # controlnet config: the Canny ControlNet is conditioned on an edge-map control image
+     controlnet_conditioning = [
+         dict(
+             control_index=0,
+             control_image=load_image(image_in),
+             control_weight=0.7,
+             control_pooled_projections='zeros'
+         )
+     ]
+     # run inference
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=n_prompt,
+         controlnet_conditioning=controlnet_conditioning,
+         num_inference_steps=28,
+         guidance_scale=7.0,
+         height=1024,
+         width=1024,
+     ).images[0]
+
+     return image
+
+
+ with gr.Blocks() as demo:
+     with gr.Column():
+         gr.Markdown("""
+         # SD3 ControlNet
+         """)
+         image_in = gr.Image(label="Image reference", sources=["upload"], type="filepath")
+         prompt = gr.Textbox(label="Prompt")
+         submit_btn = gr.Button("Submit")
+         result = gr.Image(label="Result")
+
+         submit_btn.click(
+             fn=infer,
+             inputs=[image_in, prompt],
+             outputs=[result],
+             show_api=False
+         )
+ demo.queue().launch()
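
Note: InstantX/SD3-Controlnet-Canny is conditioned on Canny edge maps rather than raw photos, so the uploaded reference would normally be converted to edges before being passed as `control_image`. A minimal sketch of that preprocessing step, assuming `opencv-python` is available in the Space (the `make_canny` helper and its 100/200 thresholds are illustrative, not part of this commit):

```python
import cv2
import numpy as np
from PIL import Image

def make_canny(image_path: str, low: int = 100, high: int = 200) -> Image.Image:
    # Gradio passes a filepath here because the input component uses type="filepath"
    bgr = cv2.imread(image_path)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, low, high)
    # replicate the single edge channel to three channels for the ControlNet
    return Image.fromarray(np.stack([edges] * 3, axis=-1))
```

With such a helper, `infer` could pass `control_image=make_canny(image_in)` instead of `load_image(image_in)`.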