amos1088 committed on
Commit 8d2ed6a · 1 Parent(s): 92fa744

test gradio

Files changed (1)
app.py +32 -46
app.py CHANGED
@@ -1,58 +1,44 @@
 import gradio as gr
 import torch
-from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
-from diffusers.utils import export_to_gif
+from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 
-# Set device to CPU
-device = torch.device("cpu")
+# Load Stable Diffusion model and ControlNet reference-only model
+model_id = "runwayml/stable-diffusion-v1-5"
+controlnet_id = "lllyasviel/control_v11p_sd15_inpaint"  # Use an appropriate ControlNet variant
 
-# Load the motion adapter on CPU
-adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float32).to(device)
-model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffPipeline.from_pretrained(
-    model_id, motion_adapter=adapter, torch_dtype=torch.float32
-).to(device)
-
-scheduler = DDIMScheduler.from_pretrained(
+# Initialize the pipeline on CPU
+controlnet = ControlNetModel.from_pretrained(controlnet_id, torch_dtype=torch.float32)
+pipeline = StableDiffusionControlNetPipeline.from_pretrained(
     model_id,
-    subfolder="scheduler",
-    clip_sample=False,
-    timestep_spacing="linspace",
-    beta_schedule="linear",
-    steps_offset=1,
-)
-pipe.scheduler = scheduler
-
-pipe.enable_vae_slicing()
+    controlnet=controlnet,
+    torch_dtype=torch.float32
+).to("cpu")
 
-# Define the animation function
-def generate_animation(prompt, negative_prompt, num_frames, guidance_scale, num_inference_steps):
-    output = pipe(
+# Define the Gradio interface function
+def generate_image(prompt, reference_image):
+    # Process reference image
+    reference_image = reference_image.resize((512, 512))
+    # Generate image with reference-only style transfer
+    generated_image = pipeline(
         prompt=prompt,
-        negative_prompt=negative_prompt,
-        num_frames=num_frames,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        generator=torch.Generator("cpu").manual_seed(42),
-    )
-    frames = output.frames[0]
-    gif_path = "animation.gif"
-    export_to_gif(frames, gif_path)
-    return gif_path
+        image=reference_image,
+        controlnet_conditioning_scale=1.0,
+        guidance_scale=7.5,
+        num_inference_steps=50
+    ).images[0]
+    return generated_image
 
-# Gradio Interface
-iface = gr.Interface(
-    fn=generate_animation,
+# Set up Gradio interface
+interface = gr.Interface(
+    fn=generate_image,
     inputs=[
-        gr.Textbox(value="masterpiece, best quality, highly detailed...", label="Prompt"),
-        gr.Textbox(value="bad quality, worse quality", label="Negative Prompt"),
-        gr.Slider(1, 24, value=16, label="Number of Frames"),
-        gr.Slider(1.0, 10.0, value=7.5, step=0.1, label="Guidance Scale"),
-        gr.Slider(1, 50, value=25, label="Inference Steps"),
+        gr.Textbox(label="Prompt"),
+        gr.Image(type="pil", label="Reference Image (Style)")
     ],
-    outputs=gr.Image(label="Generated Animation"),
-    title="Animated Stable Diffusion",
-    description="Generate animations based on your prompt using Stable Diffusion.",
+    outputs="image",
+    title="Image Generation with Reference-Only Style Transfer",
+    description="Generate an image based on a text prompt and style reference image using Stable Diffusion 1.5 with ControlNet (reference-only mode)."
 )
 
-iface.launch()
+# Launch the Gradio interface
+interface.launch()
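
Note: the description advertises "reference-only mode", but lllyasviel/control_v11p_sd15_inpaint is an ordinary inpainting ControlNet, which expects a masked-image conditioning input rather than a style reference. diffusers implements true reference-only conditioning as a community pipeline, not a ControlNet checkpoint. Below is a minimal sketch of that route, assuming the installed diffusers version can load the "stable_diffusion_reference" community example; the ref_image, reference_attn, and reference_adain parameters come from that example, not from this commit's code.

import torch
from PIL import Image
from diffusers import StableDiffusionPipeline

# Sketch only: load the community "reference-only" pipeline (assumption:
# the installed diffusers version ships this community example).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="stable_diffusion_reference",
    torch_dtype=torch.float32,
).to("cpu")

# The reference image steers generation through shared self-attention
# (reference_attn) and matched AdaIN feature statistics (reference_adain).
reference = Image.open("style.png").convert("RGB").resize((512, 512))
image = pipe(
    prompt="a watercolor painting of a lighthouse",
    ref_image=reference,
    reference_attn=True,
    reference_adain=True,
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("output.png")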