Ahsen Khaliq committed on
Commit
3c93d7b
β€’
1 Parent(s): ec77de0

target images

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -198,7 +198,7 @@ device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
198
  print('Using device:', device)
199
  model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
200
  perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
201
- def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight):
202
  all_frames = []
203
  size=[width, height]
204
  texts = text
@@ -207,7 +207,10 @@ def inference(text, seed, step_size, max_iterations, width, height, init_image,
207
  init_image = init_image.name
208
  else:
209
  init_image = ""
210
- target_images = ""
 
 
 
211
  max_iterations = max_iterations
212
  model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
213
  "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "coco":"COCO-Stuff", "faceshq":"FacesHQ", "sflckr":"S-FLCKR"}
@@ -369,6 +372,7 @@ gr.Interface(
369
  gr.inputs.Slider(minimum=200, maximum=500, default=256, label='height', step=1),
370
  gr.inputs.Image(type="file", label="Initial Image", optional=True),
371
  gr.inputs.Slider(minimum=0.0, maximum=15.0, default=0.0, label='Initial Weight', step=1.0),
 
372
  ],
373
  [gr.outputs.Image(type="numpy", label="Output Image"),gr.outputs.Image(type="file", label="Output GIF")],
374
  title=title,
 
198
  print('Using device:', device)
199
  model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
200
  perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
201
+ def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight, target_images):
202
  all_frames = []
203
  size=[width, height]
204
  texts = text
 
207
  init_image = init_image.name
208
  else:
209
  init_image = ""
210
+ if target_images:
211
+ target_images = target_images.name
212
+ else:
213
+ target_images = ""
214
  max_iterations = max_iterations
215
  model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
216
  "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "coco":"COCO-Stuff", "faceshq":"FacesHQ", "sflckr":"S-FLCKR"}
 
372
  gr.inputs.Slider(minimum=200, maximum=500, default=256, label='height', step=1),
373
  gr.inputs.Image(type="file", label="Initial Image", optional=True),
374
  gr.inputs.Slider(minimum=0.0, maximum=15.0, default=0.0, label='Initial Weight', step=1.0),
375
+ gr.inputs.Image(type="file", label="Target Image", optional=True)
376
  ],
377
  [gr.outputs.Image(type="numpy", label="Output Image"),gr.outputs.Image(type="file", label="Output GIF")],
378
  title=title,