Ahsen Khaliq committed
Commit 2041e1f
• 1 Parent(s): 1de2d14

Update app.py

Files changed (1):
  app.py +8 -7
app.py CHANGED
@@ -357,7 +357,7 @@ def inference(text):
         losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
         tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
         out = synth(z)
-        TF.to_pil_image(out[0].cpu()).save('progress.png')
+        #TF.to_pil_image(out[0].cpu()).save('progress.png')
         #display.display(display.Image('progress.png'))
 
     def ascend_txt():
@@ -374,13 +374,13 @@ def inference(text):
             result.append(prompt(iii))
         img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
         img = np.transpose(img, (1, 2, 0))
-        imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))
+        #imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))
 
-        return result
+        return result, np.array(img)
 
     def train(i):
         opt.zero_grad()
-        lossAll = ascend_txt()
+        lossAll, image = ascend_txt()
         if i % args.display_freq == 0:
             checkin(i, lossAll)
 
@@ -389,19 +389,20 @@ def inference(text):
         opt.step()
         with torch.no_grad():
             z.copy_(z.maximum(z_min).minimum(z_max))
+        return image
 
     i = 0
     try:
         with tqdm() as pbar:
             while True:
-                train(i)
+                image = train(i)
                 if i == max_iterations:
                     break
                 i += 1
                 pbar.update()
     except KeyboardInterrupt:
         pass
-    return "./steps/300.png"
+    return image
 
 title = "VQGAN + CLIP"
 description = "Gradio demo for VQGAN + CLIP. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
@@ -410,7 +411,7 @@ article = "<p style='text-align: center'>Originally made by Katherine Crowson (h
 gr.Interface(
     inference,
     gr.inputs.Textbox(label="Input"),
-    gr.outputs.Image(type="file", label="Output"),
+    gr.outputs.Image(type="numpy", label="Output"),
     title=title,
     description=description,
     article=article,
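
Taken together, the change stops writing per-step PNGs to disk and instead threads the last synthesized frame back through ascend_txt, train, and inference as a numpy array, switching the Gradio output from a file path to type="numpy". A minimal, self-contained sketch of that pattern follows (not the Space's full code; the random tensor is a stand-in for the VQGAN decode synth(z)):

import numpy as np
import torch
import gradio as gr

def inference(text):
    # Stand-in for synth(z): a 1x3xHxW float tensor in [0, 1].
    out = torch.rand(1, 3, 256, 256)
    # Same conversion the diff keeps: scale to [0, 255], clamp, move to
    # CPU, cast to uint8, then put channels last (CHW -> HWC) for display.
    img = out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8)
    img = np.transpose(img, (1, 2, 0))
    return img  # gr.outputs.Image(type="numpy") renders an HxWx3 uint8 array

gr.Interface(
    inference,
    gr.inputs.Textbox(label="Input"),
    gr.outputs.Image(type="numpy", label="Output"),
).launch()

Returning the array directly also sidesteps the old hard-coded "./steps/300.png" return value, which pointed at a file that only existed if the loop had actually reached step 300.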