radhika-minion02 committed on
Commit
a46020e
1 Parent(s): d41134c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -0
app.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import random
4
+ import gradio as gr
5
+ from diffusers import StableDiffusionPipeline, DDIMScheduler
6
# --- Pipeline setup --------------------------------------------------------
# Path to the fine-tuned Stable Diffusion weights (local "model" directory).
model_path = "model"

# Load the fine-tuned pipeline. float32 keeps the weights usable on CPU;
# fp16 would be lighter on GPU but slightly changes outputs.
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float32)

# Swap the default scheduler for DDIM (deterministic, fewer-step sampling).
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Move the pipeline to the GPU when one is available. The original code
# implicitly assumed CUDA (it seeded a CUDA generator) but never moved the
# model, so inference would fail or silently run entirely on CPU.
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# xformers memory-efficient attention is an optional extra; a missing or
# incompatible install should not crash app startup.
try:
    pipe.enable_xformers_memory_efficient_attention()
except Exception as err:  # e.g. ModuleNotFoundError, unsupported GPU
    print(f"xformers unavailable, continuing without it: {err}")
18
+
19
# Generate an image from the given text prompt
def generate_response(prompt):
    """Run the fine-tuned Stable Diffusion pipeline on *prompt* and return
    one generated PIL image.

    A fresh random seed is drawn per call, so repeated calls with the same
    prompt produce different images.
    """
    # Steer the sampler away from common failure modes.
    # (Fixed typo: "desfigured" -> "disfigured".)
    negative_prompt = (
        "bad anatomy, ugly, deformed, disfigured, distorted face, "
        "poorly drawn, blurry, low quality, low definition, lowres, "
        "out of frame, out of image, cropped, cut off, signature, watermark"
    )

    # Sampling configuration. Only one image is returned to the caller; the
    # original requested five per prompt and discarded four (5x the compute).
    num_samples = 1
    guidance_scale = 7.5
    num_inference_steps = 30
    height = 512
    width = 512

    # Per-call random seed. Create the generator on the same device the
    # pipeline lives on, so CPU-only deployments work too (the original
    # hard-coded 'cuda' and crashed without a GPU).
    seed = random.randint(0, 2147483647)
    generator = torch.Generator(device=pipe.device).manual_seed(seed)

    # inference_mode disables autograd bookkeeping for speed/memory.
    with torch.inference_mode():
        imgs = pipe(
            prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_images_per_prompt=num_samples,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images

    # Return the first (and only) generated image.
    return imgs[0]
48
+
49
# Wire the generator into a minimal web UI: one text box in, one image out.
gradio_ui = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="image",
)
gradio_ui.launch()