awacke1 committed on
Commit
70f0c27
1 Parent(s): 8783961

Create backup.app.py

Files changed (1)
  1. backup.app.py +193 -0
backup.app.py ADDED
@@ -0,0 +1,193 @@
+ import gradio as gr
+ from diffusers import DiffusionPipeline, LCMScheduler, AutoencoderTiny
+ import torch
+ import os
+ import datetime
+ import time
+ from PIL import Image
+ import numpy as np
+ import re
+ import base64
+ from io import BytesIO
+ import pytz
+ import psutil
+
+ # Intel Extension for PyTorch is optional; it only matters on Intel XPU hardware.
+ try:
+     import intel_extension_for_pytorch as ipex
+ except ImportError:
+     pass
+
+ SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
+ TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+ # Check if MPS is available (macOS only, Apple M1/M2/M3 chips).
+ mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+ xpu_available = hasattr(torch, "xpu") and torch.xpu.is_available()
+ device = torch.device(
+     "cuda" if torch.cuda.is_available() else "xpu" if xpu_available else "cpu"
+ )
+ torch_device = device
+ torch_dtype = torch.float16
+
+ print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
+ print(f"TORCH_COMPILE: {TORCH_COMPILE}")
+ print(f"device: {device}")
+
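+ # Apple Silicon fallback: keep the pipeline itself on CPU in float32,
+ # presumably because float16 support on MPS is incomplete.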
+ if mps_available:
+     device = torch.device("mps")
+     torch_device = "cpu"
+     torch_dtype = torch.float32
+
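+ # SAFETY_CHECKER=True keeps the model's built-in NSFW filter; any other value disables it.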
+ if SAFETY_CHECKER == "True":
+     pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7")
+ else:
+     pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7", safety_checker=None)
+
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+ pipe.to(device=torch_device, dtype=torch_dtype).to(device)
+ pipe.unet.to(memory_format=torch.channels_last)
+ pipe.set_progress_bar_config(disable=True)
+
+ # Enable attention slicing on machines with less than 64 GB of RAM to reduce peak memory.
+ if psutil.virtual_memory().total < 64 * 1024**3:
+     pipe.enable_attention_slicing()
+
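+ # Optional torch.compile of the UNet and VAE; the warmup call below triggers compilation up front.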
+ if TORCH_COMPILE:
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
+     pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
+
+ # Load and fuse the LCM LoRA so few-step sampling works.
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
+ pipe.fuse_lora()
+
+ def safe_filename(text):
+     """Generate a safe filename from a string."""
+     safe_text = re.sub(r'\W+', '_', text)
+     timestamp = datetime.datetime.now().strftime("%Y%m%d")
+     return f"{safe_text}_{timestamp}.png"
+
+ def encode_image(image):
+     """Encode a PIL image as a base64 PNG string."""
+     buffered = BytesIO()
+     image.save(buffered, format="PNG")  # was commented out, which encoded an empty buffer
+     return base64.b64encode(buffered.getvalue()).decode()
+
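+ # predict() runs one LCM generation pass; it is wired to the button and to live input changes below.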
+ def predict(prompt, guidance, steps, seed=1231231):
+     generator = torch.manual_seed(seed)
+     last_time = time.time()
+     results = pipe(
+         prompt=prompt,
+         generator=generator,
+         num_inference_steps=steps,
+         guidance_scale=guidance,
+         width=512,
+         height=512,
+         # original_inference_steps=params.lcm_steps,
+         output_type="pil",
+     )
+     print(f"Pipe took {time.time() - last_time} seconds")
+     nsfw_content_detected = (
+         results.nsfw_content_detected[0]
+         if "nsfw_content_detected" in results
+         else False
+     )
+     if nsfw_content_detected:
+         # Note: components created inside a handler are not rendered; this only flags the result.
+         nsfw = gr.Button("🕹️NSFW🎨", scale=1)
+
+     # Build a filesystem-safe file name from the date (US Central time) and the prompt.
+     central = pytz.timezone('US/Central')
+     safe_date_time = datetime.datetime.now(central).strftime("%Y%m%d")
+     replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+     safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+     filename = f"{safe_date_time}_{safe_prompt}.png"
+
+     # Save the image and build a base64 download link.
+     if len(results.images) > 0:
+         image_path = os.path.join("", filename)  # current working directory
+         results.images[0].save(image_path)
+         print(f"Image saved as {image_path}")
+         encoded_image = encode_image(results.images[0])
+         html_link = f'<a href="data:image/png;base64,{encoded_image}" download="{filename}">Download Image</a>'
+         gr.Markdown(html_link)  # not rendered from inside a handler; kept for reference
+
+     return results.images[0] if len(results.images) > 0 else None
+
+ css = """
+ #container {
+     margin: 0 auto;
+     max-width: 40rem;
+ }
+ #intro {
+     max-width: 100%;
+     text-align: center;
+     margin: 0 auto;
+ }
+ """
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="container"):
+         gr.Markdown(
+             """## 🕹️ Stable Diffusion 1.5 - Real Time 🎨 Image Generation Using 🌐 Latent Consistency LoRAs""",
+             elem_id="intro",
+         )
+         with gr.Row():
+             prompt = gr.Textbox(
+                 placeholder="Insert your prompt here:", scale=5, container=False
+             )
+             generate_bt = gr.Button("Generate", scale=1)
+
+         image = gr.Image(type="filepath")
+         with gr.Accordion("Advanced options", open=False):
+             guidance = gr.Slider(
+                 label="Guidance", minimum=0.0, maximum=5, value=0.3, step=0.001
+             )
+             steps = gr.Slider(label="Steps", value=4, minimum=2, maximum=10, step=1)
+             seed = gr.Slider(
+                 randomize=True, minimum=0, maximum=12013012031030, label="Seed", step=1
+             )
+         with gr.Accordion("Run with diffusers"):
+             gr.Markdown(
+                 """## Running LCM-LoRAs with `diffusers`
+ ```bash
+ pip install diffusers==0.23.0
+ ```
+
+ ```py
+ from diffusers import DiffusionPipeline, LCMScheduler
+ pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7").to("cuda")
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")  # yes, it's a normal LoRA
+ results = pipe(
+     prompt="ImageEditor",
+     num_inference_steps=4,
+     guidance_scale=0.0,
+ )
+ results.images[0]
+ ```
+ """
+             )
+
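+     # Wire predict() to the button and to every control so the image refreshes in real time.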
+     inputs = [prompt, guidance, steps, seed]
+     generate_bt.click(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+     prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+     guidance.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+     steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+     seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+
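+ # queue() serializes incoming requests on the GPU; launch() starts the web server.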
+ demo.queue()
+ demo.launch()