Blealtan committed on
Commit
f1b6ca9
1 Parent(s): eead4ea

Add option for keeping dimensions

Browse files
Files changed (1) hide show
  1. app.py +11 -8
app.py CHANGED
@@ -242,12 +242,12 @@ def prepare_model(model_prefix):
242
  return encoder, decoder
243
 
244
 
245
- def encode(model_prefix, img):
246
  encoder, _ = prepare_model(model_prefix)
247
- img_transform = transforms.Compose([
248
- transforms.PILToTensor(),
249
- transforms.ConvertImageDtype(torch.float)
250
- ])
251
 
252
  with torch.no_grad():
253
  img = img_transform(img.convert("RGB")).unsqueeze(0).to(device)
@@ -276,7 +276,9 @@ def decode(model_prefix, z_str):
276
 
277
 
278
  st.title("Clip Guided Binary Autoencoder")
279
- st.write("Model is from [@BlinkDL](https://huggingface.co/BlinkDL/clip-guided-binary-autoencoder)")
 
 
280
  model_prefix = st.selectbox('The model to use',
281
  ('out-v7c_d8_256-224-13bit-OB32x0.5-745',
282
  'out-v7d_d16_512-224-13bit-OB32x0.5-2487',
@@ -286,13 +288,14 @@ encoder_tab, decoder_tab = st.tabs(["Encode", "Decode"])
286
 
287
  with encoder_tab:
288
  col_in, col_out = st.columns(2)
 
289
  uploaded_file = col_in.file_uploader('Choose an Image')
290
  if uploaded_file is not None:
291
  image = Image.open(uploaded_file)
292
  col_in.image(image, 'Input Image')
293
- z_str = encode(model_prefix, image)
294
  col_out.write("Encoded to:")
295
- col_out.code(z_str,language=None)
296
  col_out.image(decode(model_prefix, z_str), 'Output Image preview')
297
 
298
  with decoder_tab:
 
242
  return encoder, decoder
243
 
244
 
245
+ def encode(model_prefix, img, keep_dims):
246
  encoder, _ = prepare_model(model_prefix)
247
+ img_transform = transforms.Compose(
248
+ [transforms.PILToTensor(),
249
+ transforms.ConvertImageDtype(torch.float)] +
250
+ ([transforms.Resize((224, 224))] if not keep_dims else []))
251
 
252
  with torch.no_grad():
253
  img = img_transform(img.convert("RGB")).unsqueeze(0).to(device)
 
276
 
277
 
278
  st.title("Clip Guided Binary Autoencoder")
279
+ st.write(
280
+ "Model is from [@BlinkDL](https://huggingface.co/BlinkDL/clip-guided-binary-autoencoder)"
281
+ )
282
  model_prefix = st.selectbox('The model to use',
283
  ('out-v7c_d8_256-224-13bit-OB32x0.5-745',
284
  'out-v7d_d16_512-224-13bit-OB32x0.5-2487',
 
288
 
289
  with encoder_tab:
290
  col_in, col_out = st.columns(2)
291
+ keep_dims = col_in.checkbox('Keep the size of original input image', True)
292
  uploaded_file = col_in.file_uploader('Choose an Image')
293
  if uploaded_file is not None:
294
  image = Image.open(uploaded_file)
295
  col_in.image(image, 'Input Image')
296
+ z_str = encode(model_prefix, image, keep_dims)
297
  col_out.write("Encoded to:")
298
+ col_out.code(z_str, language=None)
299
  col_out.image(decode(model_prefix, z_str), 'Output Image preview')
300
 
301
  with decoder_tab: