prithivMLmods committed
Commit c6dec8d
1 Parent(s): 26549d1

Update app.py

Files changed (1):
  1. app.py +45 -28
app.py CHANGED
@@ -1,3 +1,13 @@
+#!/usr/bin/env python
+# patch 2.0 ()
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# ...
 import os
 import random
 import uuid
@@ -9,36 +19,43 @@ import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
+# Load the HTML content
+#html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
+#html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
+#html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
+
+#html_file_url = "https://prithivhamster.vercel.app/"
+#html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
+
+DESCRIPTIONx = """## STABLE HAMSTER 🐹
+
+"""
+
 css = '''
-.gradio-container{max-width: 888px !important}
+.gradio-container{max-width: 560px !important}
 h1{text-align:center}
 footer {
     visibility: hidden
 }
-.submit-btn {
-    background-color: #6263c7 !important;
-    color: white !important;
-}
-.submit-btn:hover {
-    background-color: #6063ff !important;
-}
 '''
 
 examples = [
-
-    "A tiny astronaut hatching from an egg on the moon, 4k, planet theme",
-    "An anime-style illustration of a delicious, golden-brown wiener schnitzel on a plate, served with fresh lemon slices, parsley --style raw5",
     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
-    "Food photography of a milk shake with flying strawberrys against a pink background, professionally studio shot with cinematic lighting. The image is in the style of a professional studio shot --ar 85:128 --v 6.0 --style raw"
+    "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
+    "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+    "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"
+
 ]
 
-MODEL_ID = os.getenv("MODEL_VAL_PATH")
+
+MODEL_ID = os.getenv("MODEL_VAL_PATH")  # Use an SDXL model repo id as the MODEL_VAL_PATH value
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
+BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # Allow generating multiple images at once
 
+# Load the model once, outside of the generate function
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     MODEL_ID,
@@ -48,9 +65,11 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 ).to(device)
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
+# Optional torch.compile speedup
 if USE_TORCH_COMPILE:
     pipe.compile()
 
+# Optional CPU offload to reduce GPU memory use
 if ENABLE_CPU_OFFLOAD:
     pipe.enable_model_cpu_offload()
 
@@ -84,6 +103,7 @@ def generate(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device=device).manual_seed(seed)
 
+    # Assemble the pipeline options
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -98,6 +118,7 @@ def generate(
     if use_resolution_binning:
         options["use_resolution_binning"] = True
 
+    # Generate the images, in batches if num_images > BATCH_SIZE
     images = []
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
@@ -110,8 +131,9 @@ def generate(
     return image_paths, seed
 
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-    with gr.Row():
-        with gr.Column(scale=1):
+    gr.Markdown(DESCRIPTIONx)
+    with gr.Group():
+        with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
@@ -119,13 +141,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-            run_button = gr.Button(
-                "Generate as ( 1024 x 1024 )🤗",
-                scale=0,
-                elem_classes="submit-btn"
-            )
-
-    with gr.Accordion("Advanced options", open=True):
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Gallery(label="Result", columns=1, show_label=False)
+    with gr.Accordion("Advanced options", open=False, visible=False):
         num_images = gr.Slider(
             label="Number of Images",
             minimum=1,
@@ -172,7 +190,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             minimum=0.1,
             maximum=6,
             step=0.1,
-            value=2.0,
+            value=3.0,
         )
         num_inference_steps = gr.Slider(
             label="Number of inference steps",
@@ -182,20 +200,19 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             value=23,
         )
 
-    with gr.Column(scale=2):
-        result = gr.Gallery(label="Result", columns=1, show_label=False)
-
     gr.Examples(
         examples=examples,
         inputs=prompt,
        cache_examples=False
    )
+
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )
+
    gr.on(
        triggers=[
            prompt.submit,
@@ -218,6 +235,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         outputs=[result, seed],
         api_name="run",
     )
-
+
 if __name__ == "__main__":
     demo.queue(max_size=40).launch()
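
Review note: the diff shows the new batching loop in generate() only in fragments (the options.copy() call and the range step of BATCH_SIZE). Below is a minimal sketch of what the complete step presumably looks like, following the stock Hugging Face Spaces SDXL demo this app resembles; the slicing details and the save_image helper are assumptions, not code from this commit:

    def generate_in_batches(pipe, options, num_images, batch_size, save_image):
        # Run the pipeline in chunks of at most batch_size images.
        images = []
        for i in range(0, num_images, batch_size):
            batch_options = options.copy()
            # Slice the per-image prompt lists down to the current batch.
            batch_options["prompt"] = options["prompt"][i:i + batch_size]
            if options.get("negative_prompt") is not None:
                batch_options["negative_prompt"] = options["negative_prompt"][i:i + batch_size]
            images.extend(pipe(**batch_options).images)
        # save_image: assumed app helper that writes a PIL image to a uuid-named file.
        return [save_image(img) for img in images]

This keeps peak VRAM bounded by BATCH_SIZE while still honoring a larger "Number of Images" slider value.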
 
 
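Review note: generate() calls randomize_seed_fn(seed, randomize_seed), which this diff does not touch. The conventional Spaces implementation is sketched below as an assumption about the rest of the file, not as part of this commit:

    import random

    MAX_SEED = 2**32 - 1  # assumed bound; many Spaces use np.iinfo(np.int32).max

    def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
        # Swap in a random seed when the "Randomize seed" box is checked.
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        return seed

Returning the (possibly replaced) seed lets the UI echo back which seed actually produced the gallery, which is why generate() also returns it.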