mpatel57 committed on
Commit 8104fb0
1 Parent(s): 862799d

resolution options

Files changed (1):
app.py: +44, -5
app.py CHANGED
@@ -72,9 +72,9 @@ class Ours:
             "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
         ).to(device)
 
-    def inference(self, text, negative_text, steps, guidance_scale):
+    def inference(self, text, negative_text, steps, guidance_scale, width, height):
         gen_images = []
-        for i in range(4):
+        for i in range(2):
             image_emb, negative_image_emb = self.pipe_prior(
                 text, negative_prompt=negative_text
             ).to_tuple()
@@ -83,6 +83,8 @@ class Ours:
                 negative_image_embeds=negative_image_emb,
                 num_inference_steps=steps,
                 guidance_scale=guidance_scale,
+                width=width,
+                height=height,
             ).images
             gen_images.append(image[0])
         return gen_images
@@ -91,8 +93,11 @@ class Ours:
 selected_model = Ours(device=__DEVICE__)
 
 @spaces.GPU
-def get_images(text, negative_text, steps, guidance_scale):
-    images = selected_model.inference(text, negative_text, steps, guidance_scale)
+def get_images(text, negative_text, steps, guidance_scale, width, height, fixed_res):
+    if fixed_res != "manual":
+        print(f"Using {fixed_res} resolution")
+        width, height = fixed_res.split("x")
+    images = selected_model.inference(text, negative_text, steps, guidance_scale, width=int(width), height=int(height))
     new_images = []
     for img in images:
         new_images.append(img)
@@ -134,12 +139,37 @@ with gr.Blocks() as demo:
             label="Guidance Scale", minimum=0, maximum=10, value=7.5, step=0.1
         )
 
+        with gr.Row():
+            with gr.Group():
+                width_inp = gr.Textbox(
+                    label="Please provide the width",
+                    value="512",
+                    max_lines=1,
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+                height_inp = gr.Textbox(
+                    label="Please provide the height",
+                    max_lines=1,
+                    value="512",
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+
+            fixed_res = gr.Dropdown(
+                ["manual", "512x512", "1024x1024", "1920x1080", "1280x720"], value="manual", label="Prefined Resolution", info="Either select one or manually define one!"
+            )
+
         with gr.Row():
             btn = gr.Button(value="Generate Image")
 
         gallery = gr.Gallery(
             label="Generated images", show_label=False, elem_id="gallery"
-        , columns=[4], rows=[1], object_fit="contain", height="auto")
+        , columns=[2], rows=[1], object_fit="contain", height="auto")
 
         btn.click(
             get_images,
@@ -148,6 +178,9 @@ with gr.Blocks() as demo:
                 negative_text,
                 steps,
                 guidance_scale,
+                width_inp,
+                height_inp,
+                fixed_res[0],
             ],
             outputs=gallery,
         )
@@ -158,6 +191,9 @@ with gr.Blocks() as demo:
                 negative_text,
                 steps,
                 guidance_scale,
+                width_inp,
+                height_inp,
+                fixed_res[0],
             ],
             outputs=gallery,
         )
@@ -168,6 +204,9 @@ with gr.Blocks() as demo:
                 negative_text,
                 steps,
                 guidance_scale,
+                width_inp,
+                height_inp,
+                fixed_res[0],
             ],
             outputs=gallery,
         )
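
For context, the resolution handling this commit adds to get_images reduces to a small rule: a preset picked from the dropdown (e.g. "1920x1080") overrides whatever was typed into the width and height textboxes, while "manual" keeps the typed values, and either way the result is forwarded as integer width/height keyword arguments to the decoder pipeline call. The sketch below is a minimal, standalone restatement of that rule, not code from the commit; resolve_resolution is a hypothetical helper name used only for illustration (app.py does this inline inside get_images).

# Standalone sketch of the resolution-selection rule introduced in this commit.
# resolve_resolution is a hypothetical name, not defined in app.py.
def resolve_resolution(fixed_res, width, height):
    """Return the (width, height) integers forwarded to the decoder pipeline."""
    if fixed_res != "manual":
        # Presets are encoded as "WIDTHxHEIGHT", e.g. "1280x720".
        width, height = fixed_res.split("x")
    return int(width), int(height)

# Example usage:
#   resolve_resolution("manual", "512", "512")     -> (512, 512)
#   resolve_resolution("1920x1080", "512", "512")  -> (1920, 1080)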