move to gpu
- app.py +6 -2
- requirements.txt +3 -1
app.py CHANGED
@@ -1,15 +1,19 @@
 import gradio as gr
 import open_clip
+import torch
 
+device = torch.device("cuda")
 
 model, _, transform = open_clip.create_model_and_transforms(
     "coca_ViT-L-14",
     pretrained="laion2B-s13B-b90k-mscoco-2014.pt"
 )
 
+model.to(device)
+
 def output_generate(image):
-    im = transform(image).unsqueeze(0)
-    generated = model.generate(im)
+    im = transform(image).unsqueeze(0).to(device)
+    generated = model.generate(im, seq_len=20)
     return open_clip.decode(generated[0]).split("<end_of_text>")[0].replace("<start_of_text>", "")
 
 iface = gr.Interface(fn=output_generate, inputs=gr.Image(type="pil"), outputs="text")
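For reference, the new app.py hard-codes torch.device("cuda"), so it only starts on a GPU instance. A minimal device-agnostic sketch (not part of this commit; the CUDA/CPU fallback is an assumption added here) would pick the device at startup and otherwise mirror the committed file:

import gradio as gr
import open_clip
import torch

# Use CUDA when a GPU is present, otherwise fall back to CPU.
# The committed app.py assumes CUDA is always available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-L-14",
    pretrained="laion2B-s13B-b90k-mscoco-2014.pt"
)
model = model.to(device)

def output_generate(image):
    # Preprocess the PIL image and move the batch tensor to the model's device.
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        generated = model.generate(im, seq_len=20)
    # Strip the start/end tokens from the decoded caption.
    return open_clip.decode(generated[0]).split("<end_of_text>")[0].replace("<start_of_text>", "")

iface = gr.Interface(fn=output_generate, inputs=gr.Image(type="pil"), outputs="text")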
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 git+https://github.com/mlfoundations/open_clip
-transformers
+transformers
+--extra-index-url https://download.pytorch.org/whl/cu113
+torch
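The two added lines pull in torch explicitly and point pip at the PyTorch wheel index built against CUDA 11.3 (download.pytorch.org/whl/cu113), so a CUDA 11.3-compatible torch build is resolved when the Space's dependencies are installed.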