nielsr (HF staff) committed on
Commit 1d9e1c4
1 Parent(s): 40b1ad3

Update examples

Files changed (1)
  1. app.py +8 -3
app.py CHANGED
@@ -15,7 +15,10 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
 # load image examples
-urls = ['https://assetsnffrgf-a.akamaihd.net/assets/m/502013285/univ/art/502013285_univ_sqr_xl.jpg']
+urls = ['https://avatars.githubusercontent.com/u/326577?v=4',
+        'https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Football_%28soccer_ball%29.svg/1200px-Football_%28soccer_ball%29.svg.png',
+        'https://ichef.bbci.co.uk/news/976/cpsprodpb/12A9B/production/_111434467_gettyimages-1143489763.jpg',
+        ]
 for idx, url in enumerate(urls):
     image = Image.open(requests.get(url, stream=True).raw)
     image.save(f"image_{idx}.png")
@@ -42,7 +45,9 @@ def process_image(image):
     samples_img = [np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples]
 
     # stack images horizontally
-    result = np.hstack(samples_img)
+    row1 = np.hstack(samples_img[:4])
+    row2 = np.hstack(samples_img[4:])
+    result = np.vstack([row1, row2])
 
     # return as PIL Image
     completion = Image.fromarray(result)
@@ -52,7 +57,7 @@ def process_image(image):
 title = "Interactive demo: ImageGPT"
 description = "Demo for OpenAI's ImageGPT: Generative Pretraining from Pixels. To use it, simply upload an image or use the example image below and click 'submit'. Results will show up in a few seconds."
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.10282'>ImageGPT: Generative Pretraining from Pixels</a> | <a href='https://openai.com/blog/image-gpt/'>Official blog</a></p>"
-examples =[["image_0.png"]]
+examples =[f"image_{idx}.png" for idx in range(len(urls))]
 
 iface = gr.Interface(fn=process_image,
                      inputs=gr.inputs.Image(type="pil"),
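
The first and third hunks work together: each URL is downloaded once at startup and cached as image_{idx}.png, and the Gradio examples list is then derived from len(urls), so the example gallery stays in sync with whatever URLs are listed. A condensed, stand-alone sketch of that flow (the URLs are taken from the diff; the model and the Interface construction are omitted):

import requests
from PIL import Image

# example image URLs, as in the diff
urls = ['https://avatars.githubusercontent.com/u/326577?v=4',
        'https://ichef.bbci.co.uk/news/976/cpsprodpb/12A9B/production/_111434467_gettyimages-1143489763.jpg',
        ]

# download each example once at startup and cache it on disk
for idx, url in enumerate(urls):
    image = Image.open(requests.get(url, stream=True).raw)
    image.save(f"image_{idx}.png")

# one Gradio example entry per cached file, kept in sync with `urls`
examples = [f"image_{idx}.png" for idx in range(len(urls))]
print(examples)   # ['image_0.png', 'image_1.png']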
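
The second hunk changes how the sampled completions are laid out: instead of one long horizontal strip, they are arranged as a grid of two rows of four, which the [:4] / [4:] split suggests assumes eight samples. A minimal, self-contained sketch of just that layout step, with dummy arrays standing in for the decoded samples (n_px = 32 and the sample count of eight are assumptions, not taken from the diff):

import numpy as np

n_px = 32          # assumed clustered-pixel resolution used by the demo
num_samples = 8    # assumed; the [:4] / [4:] split implies two rows of four

# stand-ins for the decoded completions, each an (n_px, n_px, 3) uint8 image
samples_img = [np.random.randint(0, 256, (n_px, n_px, 3), dtype=np.uint8)
               for _ in range(num_samples)]

# two rows of four images each, stacked into a single grid
row1 = np.hstack(samples_img[:4])   # shape (n_px, 4 * n_px, 3)
row2 = np.hstack(samples_img[4:])   # shape (n_px, 4 * n_px, 3)
result = np.vstack([row1, row2])    # shape (2 * n_px, 4 * n_px, 3)

print(result.shape)                 # (64, 128, 3) with the assumed n_px = 32

This is why the returned completion now renders as a compact grid rather than a single very wide image.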