prithivMLmods committed on
Commit
8e1c54a
·
verified ·
1 Parent(s): 2d98ba3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -24
app.py CHANGED
@@ -11,6 +11,7 @@ from PIL import Image
11
  import spaces
12
  import torch
13
  from diffusers import DiffusionPipeline
 
14
 
15
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
16
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
@@ -25,6 +26,36 @@ def check_text(prompt, negative=""):
25
  return True
26
  return False
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  DESCRIPTION = """"""
29
  if not torch.cuda.is_available():
30
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
@@ -42,7 +73,6 @@ NUM_IMAGES_PER_PROMPT = 1
42
  if torch.cuda.is_available():
43
  pipe = DiffusionPipeline.from_pretrained(
44
  "SG161222/RealVisXL_V4.0",
45
- ##"SG161222/RealVisXL_V1.02_Turbo",
46
  torch_dtype=torch.float16,
47
  use_safetensors=True,
48
  add_watermarker=False,
@@ -80,13 +110,12 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
80
  seed = random.randint(0, MAX_SEED)
81
  return seed
82
 
83
-
84
-
85
  @spaces.GPU(enable_queue=True)
86
  def generate(
87
  prompt: str,
88
  negative_prompt: str = "",
89
  use_negative_prompt: bool = False,
 
90
  seed: int = 0,
91
  width: int = 1024,
92
  height: int = 1024,
@@ -95,7 +124,10 @@ def generate(
95
  use_resolution_binning: bool = True,
96
  progress=gr.Progress(track_tqdm=True),
97
  ):
98
- pipe.to(device)
 
 
 
99
  seed = int(randomize_seed_fn(seed, randomize_seed))
100
  generator = torch.Generator().manual_seed(seed)
101
 
@@ -104,32 +136,30 @@ def generate(
104
  negative_prompt += default_negative
105
 
106
  options = {
107
- "prompt":prompt,
108
- "negative_prompt":negative_prompt,
109
- "width":width,
110
- "height":height,
111
- "guidance_scale":guidance_scale,
112
- "num_inference_steps":25,
113
- "generator":generator,
114
- "num_images_per_prompt":NUM_IMAGES_PER_PROMPT,
115
- "use_resolution_binning":use_resolution_binning,
116
- "output_type":"pil",
117
-
118
  }
119
 
120
- images = pipe(**options).images+pipe2(**options).images
121
 
122
  image_paths = [save_image(img) for img in images]
123
  return image_paths, seed
124
 
125
 
126
  examples = [
127
- "neon holography crystal cat",
128
- "a cat eating a piece of cheese",
129
- "an astronaut riding a horse in space",
130
- "a cartoon of a boy playing with a tiger",
131
- "a cute robot artist painting on an easel, concept art",
132
- #"a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
133
  ]
134
 
135
  css = '''
@@ -156,11 +186,12 @@ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
156
  result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
157
  with gr.Accordion("Advanced options", open=False):
158
  with gr.Row():
159
- use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
160
  negative_prompt = gr.Text(
161
  label="Negative prompt",
162
  max_lines=1,
163
  placeholder="Enter a negative prompt",
 
164
  visible=True,
165
  )
166
  seed = gr.Slider(
@@ -171,6 +202,7 @@ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
171
  value=0,
172
  )
173
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
174
  with gr.Row(visible=True):
175
  width = gr.Slider(
176
  label="Width",
@@ -221,6 +253,7 @@ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
221
  prompt,
222
  negative_prompt,
223
  use_negative_prompt,
 
224
  seed,
225
  width,
226
  height,
@@ -232,4 +265,4 @@ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
232
  )
233
 
234
  if __name__ == "__main__":
235
- demo.queue(max_size=20).launch()
 
11
  import spaces
12
  import torch
13
  from diffusers import DiffusionPipeline
14
+ from typing import Tuple
15
 
16
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
17
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
 
26
  return True
27
  return False
28
 
29
+ style_list = [
30
+ {
31
+ "name": "3840 x 2160",
32
+ "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
33
+ "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
34
+ },
35
+ {
36
+ "name": "2560 x 1440",
37
+ "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
38
+ "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
39
+ },
40
+
41
+ {
42
+ "name": "(No style)",
43
+ "prompt": "{prompt}",
44
+ "negative_prompt": "",
45
+ },
46
+
47
+ ]
48
+
49
+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
50
+ STYLE_NAMES = list(styles.keys())
51
+ DEFAULT_STYLE_NAME = "3840 x 2160"
52
+
53
+ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
54
+ p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
55
+ if not negative:
56
+ negative = ""
57
+ return p.replace("{prompt}", positive), n + negative
58
+
59
  DESCRIPTION = """"""
60
  if not torch.cuda.is_available():
61
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
 
73
  if torch.cuda.is_available():
74
  pipe = DiffusionPipeline.from_pretrained(
75
  "SG161222/RealVisXL_V4.0",
 
76
  torch_dtype=torch.float16,
77
  use_safetensors=True,
78
  add_watermarker=False,
 
110
  seed = random.randint(0, MAX_SEED)
111
  return seed
112
 
 
 
113
  @spaces.GPU(enable_queue=True)
114
  def generate(
115
  prompt: str,
116
  negative_prompt: str = "",
117
  use_negative_prompt: bool = False,
118
+ style: str = DEFAULT_STYLE_NAME,
119
  seed: int = 0,
120
  width: int = 1024,
121
  height: int = 1024,
 
124
  use_resolution_binning: bool = True,
125
  progress=gr.Progress(track_tqdm=True),
126
  ):
127
+ if check_text(prompt, negative_prompt):
128
+ raise ValueError("Prompt contains restricted words.")
129
+
130
+ prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
131
  seed = int(randomize_seed_fn(seed, randomize_seed))
132
  generator = torch.Generator().manual_seed(seed)
133
 
 
136
  negative_prompt += default_negative
137
 
138
  options = {
139
+ "prompt": prompt,
140
+ "negative_prompt": negative_prompt,
141
+ "width": width,
142
+ "height": height,
143
+ "guidance_scale": guidance_scale,
144
+ "num_inference_steps": 25,
145
+ "generator": generator,
146
+ "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
147
+ "use_resolution_binning": use_resolution_binning,
148
+ "output_type": "pil",
 
149
  }
150
 
151
+ images = pipe(**options).images + pipe2(**options).images
152
 
153
  image_paths = [save_image(img) for img in images]
154
  return image_paths, seed
155
 
156
 
157
  examples = [
158
+ "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
159
+ "Silhouette of Hamburger standing in front of a, dark blue sky, a little saturated orange in the background sunset, night time, dark background, dark black hair, cinematic photography, cinematic lighting, dark theme, shattered camera lens, digital photography, 70mm, f2.8, lens aberration, grain, boke, double exposure, shaterred, color negative ",
160
+ "A silver grey cat wearing sunglasses and a tuxedo, as an agent in the movie The Matrix, in the style of a real photo --ar 43:31 --v 6.0 --style raw",
161
+ "A professional product photography of a (coffee cup, perfume bottle, hoodie) in front of a (vintage, modern) background --v 6.0 --style raw"
162
+
 
163
  ]
164
 
165
  css = '''
 
186
  result = gr.Gallery(label="Result", columns=NUM_IMAGES_PER_PROMPT, show_label=False)
187
  with gr.Accordion("Advanced options", open=False):
188
  with gr.Row():
189
+ use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
190
  negative_prompt = gr.Text(
191
  label="Negative prompt",
192
  max_lines=1,
193
  placeholder="Enter a negative prompt",
194
+ value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW",
195
  visible=True,
196
  )
197
  seed = gr.Slider(
 
202
  value=0,
203
  )
204
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
205
+ style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
206
  with gr.Row(visible=True):
207
  width = gr.Slider(
208
  label="Width",
 
253
  prompt,
254
  negative_prompt,
255
  use_negative_prompt,
256
+ style,
257
  seed,
258
  width,
259
  height,
 
265
  )
266
 
267
  if __name__ == "__main__":
268
+ demo.queue(max_size=20).launch()