Ahsen Khaliq committed
Commit · eea2605
Parent(s): 1de7080
Update app.py
app.py CHANGED
@@ -1,3 +1,6 @@
+import os
+os.system('pip install gradio --upgrade')
+os.system('pip freeze')
 import torch
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')
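
This hunk upgrades Gradio inside the running container before the script ever imports it, then dumps the environment with pip freeze so the installed versions show up in the logs. A minimal sketch of the same "upgrade, then import" pattern using subprocess rather than os.system (subprocess is an assumed alternative here, not what the commit uses):

    import subprocess
    import sys

    # Upgrade gradio in this interpreter's environment before importing it,
    # so the freshly installed version is the one that gets loaded.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "gradio"])

    # Log the resulting package set, mirroring the `pip freeze` call above.
    print(subprocess.check_output([sys.executable, "-m", "pip", "freeze"], text=True))

    import gradio as gr  # import only after the upgrade completes

subprocess.check_call raises on a failed install instead of silently continuing, which os.system's ignored return value would allow.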
@@ -28,7 +31,6 @@ import nvidia_smi
 nvidia_smi.nvmlInit()
 handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
 # card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate
-torch.hub.download_url_to_file('https://i.imgur.com/WEHmKef.jpg', 'gpu.jpg')
 
 torch.hub.download_url_to_file('https://images.pexels.com/photos/158028/bellingrath-gardens-alabama-landscape-scenic-158028.jpeg', 'garden.jpeg')
 torch.hub.download_url_to_file('https://images.pexels.com/photos/68767/divers-underwater-ocean-swim-68767.jpeg', 'coralreef.jpeg')
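
The only change here drops the 'gpu.jpg' download: that image was the placeholder returned by the hand-rolled request throttle removed in the next hunk, so it is no longer needed. The surrounding context also notes that card id 0 is hardcoded even though NVML can enumerate every card; a sketch of that iteration, assuming the nvidia_smi module exposes the standard NVML count and memory-info calls:

    import nvidia_smi

    nvidia_smi.nvmlInit()
    # Iterate over every visible device instead of hardcoding index 0.
    for i in range(nvidia_smi.nvmlDeviceGetCount()):
        handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)
        info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
        print(f"GPU {i}: {info.used} / {info.total} bytes in use")
    nvidia_smi.nvmlShutdown()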
@@ -343,30 +345,17 @@ def inference(text, seed, step_size, max_iterations, width, height, init_image,
     except KeyboardInterrupt:
         pass
     return image
-inferences_running = 0
 def load_image( infilename ) :
     img = Image.open( infilename )
     img.load()
     data = np.asarray( img, dtype="int32" )
     return data
-
-    global inferences_running
-    current = inferences_running
-    if current >= 3:
-        print(f"Rejected inference when we already had {current} running")
-        return load_image("./gpu.jpg")
-    print(f"Inference starting when we already had {current} running")
-    inferences_running += 1
-    try:
-        return inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight)
-    finally:
-        print("Inference finished")
-        inferences_running -= 1
+
 title = "VQGAN + CLIP"
 description = "Gradio demo for VQGAN + CLIP. To use it, simply add your text, or click one of the examples to load them. Read more at the links below. Please click submit only once. Results will show up in under a minute."
 article = "<p style='text-align: center'>Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). The original BigGAN+CLIP method was by https://twitter.com/advadnoun. Added some explanations and modifications by Eleiber#8347, pooling trick by Crimeacs#8222 (https://twitter.com/EarthML1) and the GUI was made with the help of Abulafia#3734. | <a href='https://colab.research.google.com/drive/1ZAus_gn2RhTZWzOWUpPERNC0Q8OhZRTZ'>Colab</a> | <a href='https://github.com/CompVis/taming-transformers'>Taming Transformers Github Repo</a> | <a href='https://github.com/openai/CLIP'>CLIP Github Repo</a> | Special thanks to BoneAmputee (https://twitter.com/BoneAmputee) for suggestions and advice</p>"
 gr.Interface(
-
+    inference,
 [gr.inputs.Textbox(label="Text Input"),
 gr.inputs.Number(default=42, label="seed"),
 gr.inputs.Slider(minimum=0.1, maximum=0.9, default=0.17, label='step size'),
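
The removed block is the remnant of a hand-rolled concurrency limiter: a global inferences_running counter that rejected any request arriving while three inferences were already in flight and returned the gpu.jpg placeholder instead. This commit deletes those leftovers and wires inference into gr.Interface directly, delegating scheduling to Gradio's queue (enabled in the next hunk). For comparison, an explicit cap could be implemented thread-safely with a semaphore; this is an illustrative sketch, not part of the commit:

    import threading

    MAX_CONCURRENT = 3
    slots = threading.BoundedSemaphore(MAX_CONCURRENT)

    def throttled(fn, *args, **kwargs):
        # Non-blocking acquire: reject rather than wait when all slots are busy.
        if not slots.acquire(blocking=False):
            raise RuntimeError(f"rejected: {MAX_CONCURRENT} inferences already running")
        try:
            return fn(*args, **kwargs)
        finally:
            slots.release()

Unlike the bare counter, the semaphore stays correct when requests arrive on multiple threads, but Gradio's queue gives the same protection without turning users away.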
@@ -384,5 +373,6 @@ gr.Interface(
 ['a garden by james gurney',42,0.16, 100, 256, 256, 'garden.jpeg', 0.0 ],
 ['coral reef city artstationHQ',1000,0.6, 110, 200, 200, 'coralreef.jpeg', 0.0],
 ['a cabin in the mountains unreal engine',98,0.3, 120, 280, 280, 'cabin.jpeg', 0.0]
-]
+],
+enable_queue=True
 ).launch(debug=True)
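
enable_queue=True tells Gradio to queue incoming requests and feed them to the prediction function in turn rather than running them all at once, which is what makes the manual throttle above safe to delete. A minimal sketch of the resulting call shape, assuming the same legacy Gradio 2.x API the script uses (current Gradio configures this with Interface(...).queue() instead):

    import gradio as gr

    def echo(text):
        return text

    gr.Interface(
        echo,
        gr.inputs.Textbox(label="Text Input"),
        "text",
        enable_queue=True,
    ).launch(debug=True)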