RoboApocalypse
committed on
Merge branch 'main' of https://huggingface.co/spaces/gronkomatic/openclip-embed
app.py
CHANGED
@@ -2,11 +2,13 @@ from typing import Union
 import gradio as gr
 from numpy import empty
 import open_clip
+# import spaces
 import torch
 import PIL.Image as Image
 
 # Set device to GPU if available
-device = torch.device(
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+print(f"PyTorch Device {device}")
 
 # Load the OpenCLIP model and the necessary preprocessors
 # openclip_model = 'laion/CLIP-ViT-B-32-laion2B-s34B-b79K'
@@ -19,6 +21,7 @@ model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(
 
 
 # Define function to generate text embeddings
+# @spaces.GPU
 def generate_text_embedding(text_data: Union[str, tuple[str]]) -> list[str]:
     """
     Generate embeddings for text data using the OpenCLIP model.