AbdulBasit2007 committed
Commit 3c8d82e (1 parent: ce6b3b9)

Update app.py

Files changed (1): app.py (+204, -97)
app.py CHANGED
@@ -1,145 +1,252 @@

Old version of app.py (removed lines are marked with -):

- !pip install torch torchvision torchaudio gradio diffusers optimum numpy
-
-
import gradio as gr
import numpy as np
- import random
- #import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
import torch

- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo" #Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32

- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024

- #@spaces.GPU #[uncomment to use ZeroGPU]
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
-
    generator = torch.Generator().manual_seed(seed)

    image = pipe(
-         prompt = prompt,
-         negative_prompt = negative_prompt,
-         guidance_scale = guidance_scale,
-         num_inference_steps = num_inference_steps,
-         width = width,
-         height = height,
-         generator = generator
-     ).images[0]
-
    return image, seed

examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
]

- css="""
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
}
- """

with gr.Blocks(css=css) as demo:
-
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(f"""
-         # Text-to-Image Gradio Template
-         """)
-
-         with gr.Row():
-
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0)
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-
-             negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
-                 visible=False,
            )
-
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
-             )
-
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
-                     value=1024, #Replace with defaults that work for your model
                )
-
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
-                     value=1024, #Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0, #Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2, #Replace with defaults that work for your model
                )
-
-         gr.Examples(
-             examples = examples,
-             inputs = [prompt]
-         )
    gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn = infer,
-         inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-         outputs = [result, seed]
    )

- demo.queue().launch()
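The first removed line, `!pip install ...`, is notebook syntax and would be a syntax error in a plain app.py; on a Hugging Face Space such dependencies normally live in a requirements.txt next to app.py. A minimal sketch listing only the packages named on that removed line (unpinned; actual pins and any extra packages the new code needs are not specified in this commit):

    # requirements.txt (sketch; package list taken from the removed pip command)
    torch
    torchvision
    torchaudio
    gradio
    diffusers
    optimum
    numpy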
 
 
New version of app.py (added lines are marked with +):

+ from __future__ import annotations
+ import os
+ import random
import gradio as gr
+ import spaces
import numpy as np
+ import uuid
+ from optimum.quanto import freeze, qfloat8, quantize
+ from diffusers import PixArtAlphaPipeline, LCMScheduler
import torch
+ from typing import Tuple


+ DESCRIPTION = """ # Instant Image
+ ### Super fast text to Image Generator.
+ ### <span style='color: red;'>You may change the steps from 4 to 8, if you didn't get satisfied results.
+ ### First Image processing takes time then images generate faster.
+ """
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

MAX_SEED = np.iinfo(np.int32).max
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4192"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+
+ style_list = [
+     {
+         "name": "(No style)",
+         "prompt": "{prompt}",
+         "negative_prompt": "",
+     },
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+     },
+     {
+         "name": "Realistic",
+         "prompt": "Photorealistic {prompt} . Ulta-realistic, professional, 4k, highly detailed",
+         "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly, disfigured",
+     },
+     {
+         "name": "Anime",
+         "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
+         "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
+     },
+     {
+         "name": "Digital Art",
+         "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
+         "negative_prompt": "photo, photorealistic, realism, ugly",
+     },
+     {
+         "name": "Pixel art",
+         "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
+         "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
+     },
+     {
+         "name": "Fantasy art",
+         "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
+         "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
+     },
+     {
+         "name": "3D Model",
+         "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
+         "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+     },
+ ]
+
+
+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+ STYLE_NAMES = list(styles.keys())
+ DEFAULT_STYLE_NAME = "(No style)"
+ NUM_IMAGES_PER_PROMPT = 1
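Each preset in `style_list` is a pair of templates: the positive template carries a `{prompt}` placeholder that is later filled by plain string replacement, and the preset negative prompt is concatenated with whatever the user supplies (see `apply_style` further down). A condensed, self-contained sketch of that mechanism, with the preset text shortened:

    # Sketch of the {prompt} template substitution used by style_list / apply_style.
    # The preset strings are abbreviated here; see the full entries above.
    styles_sketch = {
        "Cinematic": (
            "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed",
            "anime, cartoon, graphic, text, painting",
        ),
    }

    def apply_style_sketch(name: str, positive: str, negative: str = ""):
        template, preset_negative = styles_sketch[name]
        return template.replace("{prompt}", positive), preset_negative + negative

    print(apply_style_sketch("Cinematic", "a lighthouse at dawn"))
    # -> ('cinematic still a lighthouse at dawn . emotional, harmonious, vignette, highly detailed',
    #     'anime, cartoon, graphic, text, painting')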

+ pipe = PixArtAlphaPipeline.from_pretrained(
+     "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
+     torch_dtype=torch.float16,
+     use_safetensors=True,
+ ).to("cuda:0")

+ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+     if not negative:
+         negative = ""
+     return p.replace("{prompt}", positive), n + negative
+
+ if USE_TORCH_COMPILE:
+     pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
+     print("Model Compiled!")
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
+     return seed
+
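Note that `freeze`, `qfloat8`, and `quantize` (from `optimum.quanto`), as well as `uuid` and `LCMScheduler`, are imported above but never used, so the pipeline runs in plain float16. If float8 weight quantization were actually intended, the usual optimum-quanto pattern quantizes the transformer after loading; a sketch only, not part of this commit (its interaction with `torch.compile` is untested here):

    # Hypothetical use of the otherwise-unused optimum.quanto imports.
    # Assumes `pipe` is the PixArtAlphaPipeline created above.
    from optimum.quanto import freeze, qfloat8, quantize

    quantize(pipe.transformer, weights=qfloat8)  # swap Linear weights to float8
    freeze(pipe.transformer)                     # fix the quantized weights in place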
+ @spaces.GPU(duration=30)
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     style: str = DEFAULT_STYLE_NAME,
+     use_negative_prompt: bool = False,
+     seed: int = 0,
+     width: int = 1024,
+     height: int = 1024,
+     inference_steps: int = 12,
+     randomize_seed: bool = False,
+     use_resolution_binning: bool = True,
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)

+     if not use_negative_prompt:
+         negative_prompt = None  # type: ignore
+     prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
+
    image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         width=width,
+         height=height,
+         guidance_scale=0,
+         num_inference_steps=inference_steps,
+         generator=generator,
+         use_resolution_binning=use_resolution_binning,
+     ).images[0]
    return image, seed
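`generate` can also be called directly for a quick smoke test outside the Gradio UI. A sketch, assuming the file above is importable as `app`, its dependencies are installed, and a CUDA device is available:

    # Smoke-test generate() without launching the UI (sketch; `app` is an assumed module name).
    from app import generate

    image, used_seed = generate(
        prompt="an astronaut sitting in a diner, eating fries, cinematic, analog film",
        style="Cinematic",
        inference_steps=8,
        randomize_seed=True,
    )
    image.save("sample.png")
    print("seed:", used_seed)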

+
examples = [
+     "A Monkey with a happy face in the Sahara desert.",
+     "Eiffel Tower was Made up of ICE.",
+     "photo of 8k ultra realistic harbour, nreal engine 5, port, boats, sunset, beautiful light, full of colour, cinematic lighting, battered, trending on artstation, 4k, hyperrealistic, focused, extreme details",
+     "Color photo of a corgi made of transparent glass, standing on the riverside in Yosemite National Park.",
+     "A close-up photo of a woman. She wore a blue coat with a gray dress underneath and has blue eyes.",
+     "A litter of golden retriever puppies playing in the snow. Their heads pop out of the snow, covered in.",
+     "an astronaut sitting in a diner, eating fries, cinematic, analog film",
]

+ css = '''
+ .gradio-container{max-width: 560px !important}
+ h1{text-align:center}
+ footer {
+     visibility: hidden
}
+ '''

with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Row(equal_height=False):
+         with gr.Group():
+             with gr.Row():
+                 prompt = gr.Text(
+                     label="Prompt",
+                     show_label=False,
+                     max_lines=1,
+                     placeholder="Enter your prompt",
+                     container=False,
+                 )
+                 run_button = gr.Button("Run", scale=0)
+             result = gr.Image(label="Result")
+
+             with gr.Accordion("Advanced options", open=False):
+                 with gr.Group():
+                     with gr.Row():
+                         use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=True)
+                         negative_prompt = gr.Text(
                            label="Negative prompt",
                            max_lines=1,
                            placeholder="Enter a negative prompt",
+                             visible=True,
+                         )
+                     style_selection = gr.Radio(
+                         show_label=True,
+                         container=True,
+                         interactive=True,
+                         choices=STYLE_NAMES,
+                         value=DEFAULT_STYLE_NAME,
+                         label="Image Style",
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
+                     )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                     with gr.Row(visible=True):
                        width = gr.Slider(
                            label="Width",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
+                             value=1024,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=256,
                            maximum=MAX_IMAGE_SIZE,
                            step=32,
+                             value=1024,
                        )
+                     with gr.Row():
+                         inference_steps = gr.Slider(
+                             label="Steps",
+                             minimum=4,
+                             maximum=20,
+                             step=1,
+                             value=8,
+                         )
+
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         outputs=[result, seed],
+         fn=generate,
+         cache_examples=CACHE_EXAMPLES,
+     )
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
    gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             prompt,
+             negative_prompt,
+             style_selection,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             inference_steps,
+             randomize_seed,
+         ],
+         outputs=[result, seed],
+         api_name="run",
    )

+ if __name__ == "__main__":
+     demo.queue(max_size=200).launch()
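Because the main event handler is registered with `api_name="run"`, the running Space also exposes a `/run` endpoint. A hedged sketch of calling it with `gradio_client`; the Space id is a placeholder, and the positional arguments follow the `inputs` list wired to `gr.on` above:

    # Sketch: calling the /run API of the deployed Space with gradio_client.
    # "user/instant-image" is a placeholder id, not taken from this commit.
    from gradio_client import Client

    client = Client("user/instant-image")
    result = client.predict(
        "Color photo of a corgi made of transparent glass",  # prompt
        "",            # negative_prompt
        "(No style)",  # style_selection
        False,         # use_negative_prompt
        0,             # seed
        1024,          # width
        1024,          # height
        8,             # inference_steps
        True,          # randomize_seed
        api_name="/run",
    )
    print(result)  # typically (path to the generated image, seed used)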