pallavijaini committed on
Commit
e445523
1 Parent(s): 133c131

Removed frontend and access tokens

Browse files
Files changed (1) hide show
  1. app.py +33 -43
app.py CHANGED
@@ -15,8 +15,8 @@ from PIL import Image
15
  from huggingface_hub import login
16
 
17
 
18
- myip_spr = "34.16.28.52"
19
- myport = "8089"
20
 
21
  SPR = f"http://{myip_spr}:{myport}"
22
 
@@ -26,13 +26,14 @@ print(os.system("hostname -i"))
26
  print(SPR)
27
 
28
 
29
- prompt_examples_list = [
 
30
  ['A cascading waterfall tumbles down moss-covered rocks, surrounded by a lush and vibrant forest.'],
31
  ['In a serene garden, delicate cherry blossoms fall like pink snowflakes.'],
32
  ['A breathtaking mountain range towers above a picturesque valley, with a winding river reflecting the surrounding beauty.'],
33
  ['A serene beach scene with turquoise waters, palm trees swaying in the breeze, and a radiant sunset painting the sky in hues of orange and pink.'],
34
  ['After the rain, sunlight breaks through the clouds, illuminating the verdant fields.']
35
- ]
36
 
37
  def update_language(value):
38
  if value == "zh-CN":
@@ -46,27 +47,24 @@ def url_requests(url, data):
46
  location = json.loads(resp.text)["ip"]
47
 
48
  img_byte = base64.b64decode(img_str)
49
- img_io = BytesIO(img_byte)
50
- img = Image.open(img_io)
51
 
52
  return img, location
53
 
54
- def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5, hidden=""):
55
-
56
- if hidden != os.environ["front_token"]:
57
- return None
58
-
59
  print('=*'*20)
60
  print(type(source_img))
61
  print("prompt: ", prompt)
 
62
  buffered = BytesIO()
63
  source_img.save(buffered, format="JPEG")
64
  img_b64 = base64.b64encode(buffered.getvalue())
65
 
66
  data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
67
- "guidance_scale": guidance_scale, "seed": seed, "strength": strength,
68
- "token": os.environ["access_token"]}
69
-
70
  start_time = time.time()
71
  img, location = url_requests(url, data)
72
  print("*="*20)
@@ -75,29 +73,19 @@ def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42,
75
 
76
  return img
77
 
78
- def toggle_content():
79
- if toggle_content.collapsed:
80
- toggle_content.collapsed = False
81
- return "Content expanded"
82
- else:
83
- toggle_content.collapsed = True
84
- return "Content collapsed"
85
-
86
  def txt2img_example_input(value):
87
  print('6/12/2023', value)
88
  return value
89
 
90
- def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5, hidden=""):
91
-
92
- if hidden != os.environ["front_token"]:
93
- return None
94
 
95
  print("prompt: ", prompt)
96
  print("steps: ", steps)
97
  print("url: ", url)
98
- data = {"prompt": prompt,
99
- "steps": steps, "guidance_scale": guidance_scale, "seed": seed,
100
- "token": os.environ["access_token"]}
 
101
  start_time = time.time()
102
  img, location = url_requests(url, data)
103
 
@@ -107,36 +95,37 @@ def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5, hidden=
107
 
108
  return img
109
 
110
- title = """
111
- # Stable Diffusion Inference Acceleration
 
112
  """
113
 
114
  subtitle = """
115
- # 4th Gen Intel Xeon Scalable Processor
116
  """
117
 
118
  md = """
119
  Have fun and try your own prompts and see a up to 9x performance acceleration on the new 4th Gen Intel Xeon using <a href=\"https://github.com/intel/intel-extension-for-transformers\">**Intel Extension for Transformers**</a>. You may also want to try creating your own Stable Diffusion with few-shot fine-tuning. Please refer to our <a href=\"https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13\">blog</a> and <a href=\"https://github.com/intel/neural-compressor/tree/master/examples/pytorch/diffusion_model/diffusers/textual_inversion\">code</a> available in <a href=\"https://github.com/intel/neural-compressor\">**Intel Neural Compressor**</a> and <a href=\"https://github.com/huggingface/diffusers\">**Hugging Face Diffusers**</a>.
120
  """
121
 
122
- legal = """
123
  Performance varies by use, configuration and other factors. Learn more at www.Intel.com/PerformanceIndex. Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure.
124
- © Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others.
125
  """
126
 
127
  details = """
128
- - 4th Gen Intel Xeon Scalable Processor Inference. Test by Intel on 10/06/2023. Ubuntu 22.04.1 LTS, Intel Extension for Transformers(1.1.dev154+g448cc17e), Transformers 4.28.1, Diffusers 0.12.1, oneDNN v2.7.4.
129
  """
130
 
131
  css = '''
132
  .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
133
  .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
134
  #component-4, #component-3, #component-10{min-height: 0}
135
- .duplicate-button img{margin: 0}
136
  #img_1, #img_2, #img_3, #img_4{height:15rem}
137
  #mdStyle{font-size: 0.7rem}
138
  #titleCenter {text-align:center}
139
- '''
140
 
141
  random_seed = random.randint(0, 2147483647)
142
 
@@ -148,13 +137,14 @@ with gr.Blocks(css=css) as demo:
148
  gr.Markdown(md)
149
 
150
  with gr.Tab("Text-to-Image"):
 
151
  with gr.Row() as text_to_image:
 
152
  with gr.Column():
153
  prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars')
154
  inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
155
- seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
156
  guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
157
- hidden = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False)
158
  txt2img_button = gr.Button("Generate Image", variant="primary")
159
  url_SPR_txt = gr.Textbox(label='url_SPR_txt', value=SPR, visible=False)
160
 
@@ -172,6 +162,7 @@ with gr.Blocks(css=css) as demo:
172
  )
173
 
174
  with gr.Tab("Image-to-Image text-guided generation"):
 
175
  with gr.Row() as image_to_image:
176
  with gr.Column():
177
  source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
@@ -179,8 +170,7 @@ with gr.Blocks(css=css) as demo:
179
  inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
180
  seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
181
  guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
182
- strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
183
- hidden_2 = gr.Textbox(label='hidden', value=os.environ["front_token"], visible=False)
184
  img2img_button = gr.Button("Generate Image", variant="primary")
185
  url_SPR = gr.Textbox(label='url_SPR', value=SPR, visible=False)
186
 
@@ -195,8 +185,8 @@ with gr.Blocks(css=css) as demo:
195
  gr.Markdown(legal, elem_id='mdStyle')
196
 
197
 
198
- txt2img_button.click(fn=txt2img_generate, inputs=[url_SPR_txt, prompt, inference_steps, seed, guidance_scale, hidden], outputs=result_image_1, queue=False)
199
- img2img_button.click(fn=img2img_generate, inputs=[url_SPR, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2, hidden_2], outputs=result_image_3, queue=False)
200
 
201
  dt = gr.Textbox(label="Current language", visible=False)
202
  dt.change(update_language, inputs=dt, outputs=[Eng, zh])
 
15
  from huggingface_hub import login
16
 
17
 
18
+ myip_spr = os.environ["myip_spr"]
19
+ myport = os.environ["myport"]
20
 
21
  SPR = f"http://{myip_spr}:{myport}"
22
 
 
26
  print(SPR)
27
 
28
 
29
+ prompt_examples_list =
30
+ [
31
  ['A cascading waterfall tumbles down moss-covered rocks, surrounded by a lush and vibrant forest.'],
32
  ['In a serene garden, delicate cherry blossoms fall like pink snowflakes.'],
33
  ['A breathtaking mountain range towers above a picturesque valley, with a winding river reflecting the surrounding beauty.'],
34
  ['A serene beach scene with turquoise waters, palm trees swaying in the breeze, and a radiant sunset painting the sky in hues of orange and pink.'],
35
  ['After the rain, sunlight breaks through the clouds, illuminating the verdant fields.']
36
+ ]
37
 
38
  def update_language(value):
39
  if value == "zh-CN":
 
47
  location = json.loads(resp.text)["ip"]
48
 
49
  img_byte = base64.b64decode(img_str)
50
+ img_io = BytesIO(img_byte)
51
+ img = Image.open(img_io)
52
 
53
  return img, location
54
 
55
+ def img2img_generate(url, source_img, prompt, steps=25, strength=0.75, seed=42, guidance_scale=7.5):
56
+
 
 
 
57
  print('=*'*20)
58
  print(type(source_img))
59
  print("prompt: ", prompt)
60
+
61
  buffered = BytesIO()
62
  source_img.save(buffered, format="JPEG")
63
  img_b64 = base64.b64encode(buffered.getvalue())
64
 
65
  data = {"source_img": img_b64.decode(), "prompt": prompt, "steps": steps,
66
+ "guidance_scale": guidance_scale, "seed": seed, "strength": strength}
67
+
 
68
  start_time = time.time()
69
  img, location = url_requests(url, data)
70
  print("*="*20)
 
73
 
74
  return img
75
 
 
 
 
 
 
 
 
 
76
  def txt2img_example_input(value):
77
  print('6/12/2023', value)
78
  return value
79
 
80
+ def txt2img_generate(url, prompt, steps=25, seed=42, guidance_scale=7.5):
 
 
 
81
 
82
  print("prompt: ", prompt)
83
  print("steps: ", steps)
84
  print("url: ", url)
85
+
86
+ data = {"prompt": prompt, "steps": steps,
87
+ "guidance_scale": guidance_scale, "seed": seed}
88
+
89
  start_time = time.time()
90
  img, location = url_requests(url, data)
91
 
 
95
 
96
  return img
97
 
98
+
99
+ title = """
100
+ # Stable Diffusion Inference Acceleration Comparison
101
  """
102
 
103
  subtitle = """
104
+ # between 4th Gen and 3rd Gen Intel Xeon Scalable Processor
105
  """
106
 
107
  md = """
108
  Have fun and try your own prompts and see a up to 9x performance acceleration on the new 4th Gen Intel Xeon using <a href=\"https://github.com/intel/intel-extension-for-transformers\">**Intel Extension for Transformers**</a>. You may also want to try creating your own Stable Diffusion with few-shot fine-tuning. Please refer to our <a href=\"https://medium.com/intel-analytics-software/personalized-stable-diffusion-with-few-shot-fine-tuning-on-a-single-cpu-f01a3316b13\">blog</a> and <a href=\"https://github.com/intel/neural-compressor/tree/master/examples/pytorch/diffusion_model/diffusers/textual_inversion\">code</a> available in <a href=\"https://github.com/intel/neural-compressor\">**Intel Neural Compressor**</a> and <a href=\"https://github.com/huggingface/diffusers\">**Hugging Face Diffusers**</a>.
109
  """
110
 
111
+ legal = """
112
  Performance varies by use, configuration and other factors. Learn more at www.Intel.com/PerformanceIndex. Performance results are based on testing as of dates shown in configurations and may not reflect all publicly available updates. See backup for configuration details. No product or component can be absolutely secure.
113
+ © Intel Corporation. Intel, the Intel logo, and other Intel marks are trademarks of Intel Corporation or its subsidiaries. Other names and brands may be claimed as the property of others.
114
  """
115
 
116
  details = """
117
+ - 4th Gen Intel Xeon Scalable Processor Inference. Test by Intel on 10/06/2023. Ubuntu 22.04.1 LTS, Intel Extension for Transformers(1.1.dev154+g448cc17e), Transformers 4.28.1, Diffusers 0.12.1, oneDNN v2.7.4.
118
  """
119
 
120
  css = '''
121
  .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
122
  .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
123
  #component-4, #component-3, #component-10{min-height: 0}
124
+ .duplicate-button img{margin: 0}
125
  #img_1, #img_2, #img_3, #img_4{height:15rem}
126
  #mdStyle{font-size: 0.7rem}
127
  #titleCenter {text-align:center}
128
+ '''
129
 
130
  random_seed = random.randint(0, 2147483647)
131
 
 
137
  gr.Markdown(md)
138
 
139
  with gr.Tab("Text-to-Image"):
140
+
141
  with gr.Row() as text_to_image:
142
+
143
  with gr.Column():
144
  prompt = gr.inputs.Textbox(label='Prompt', default='a photo of an astronaut riding a horse on mars')
145
  inference_steps = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
146
+ seed = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
147
  guidance_scale = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
 
148
  txt2img_button = gr.Button("Generate Image", variant="primary")
149
  url_SPR_txt = gr.Textbox(label='url_SPR_txt', value=SPR, visible=False)
150
 
 
162
  )
163
 
164
  with gr.Tab("Image-to-Image text-guided generation"):
165
+
166
  with gr.Row() as image_to_image:
167
  with gr.Column():
168
  source_img = gr.Image(source="upload", type="pil", value="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg")
 
170
  inference_steps_2 = gr.inputs.Slider(1, 100, label='Inference Steps - increase the steps for better quality (e.g., avoiding black image) ', default=20, step=1)
171
  seed_2 = gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1)
172
  guidance_scale_2 = gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=7.5, step=0.1)
173
+ strength = gr.inputs.Slider(0.0, 1.0, label='Strength - adding more noise to it the larger the strength', default=0.75, step=0.01)
 
174
  img2img_button = gr.Button("Generate Image", variant="primary")
175
  url_SPR = gr.Textbox(label='url_SPR', value=SPR, visible=False)
176
 
 
185
  gr.Markdown(legal, elem_id='mdStyle')
186
 
187
 
188
+ txt2img_button.click(fn=txt2img_generate, inputs=[url_SPR_txt, prompt, inference_steps, seed, guidance_scale], outputs=result_image_1, queue=False)
189
+ img2img_button.click(fn=img2img_generate, inputs=[url_SPR, source_img, prompt_2, inference_steps_2, strength, seed_2, guidance_scale_2], outputs=result_image_3, queue=False)
190
 
191
  dt = gr.Textbox(label="Current language", visible=False)
192
  dt.change(update_language, inputs=dt, outputs=[Eng, zh])