Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -6,7 +6,8 @@ from transformers import pipeline
|
|
6 |
|
7 |
asr = pipeline("automatic-speech-recognition")
|
8 |
latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
|
9 |
-
zero =
|
|
|
10 |
#tts = gr.Interface.load("spaces/osanseviero/tortoisse-tts")
|
11 |
|
12 |
def text2image_latent(text, steps, width, height, images, diversity):
|
@@ -39,8 +40,20 @@ def speech_to_text(mic=None, file=None):
|
|
39 |
return transcription
|
40 |
|
41 |
def zero_shot(image, text_input):
|
42 |
-
|
43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
with gr.Blocks() as demo:
|
46 |
gr.Markdown( """
|
|
|
6 |
|
7 |
asr = pipeline("automatic-speech-recognition")
|
8 |
latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
|
9 |
+
zero = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
|
10 |
+
#zero = gr.Interface.load("spaces/Datatrooper/zero-shot-image-classification")
|
11 |
#tts = gr.Interface.load("spaces/osanseviero/tortoisse-tts")
|
12 |
|
13 |
def text2image_latent(text, steps, width, height, images, diversity):
|
|
|
40 |
return transcription
|
41 |
|
42 |
def zero_shot(image, text_input):
    """Zero-shot classify an image against comma-separated candidate labels.

    Args:
        image: Input image as a numpy array; converted to an RGB PIL image.
        text_input: Comma-separated candidate labels, e.g. "cat,dog,car".

    Returns:
        dict mapping each candidate label to its classification score.
    """
    # The CLIP pipeline expects a PIL image, not a raw numpy array.
    PIL_image = Image.fromarray(np.uint8(image)).convert('RGB')
    # Bug fix: the original body read an undefined name `labels_text`;
    # this function's parameter is `text_input`.
    labels = text_input.split(",")
    # Bug fix: the original called `pipe`, which is undefined; the
    # zero-shot-image-classification pipeline is bound to `zero` at module level.
    res = zero(images=PIL_image,
               candidate_labels=labels,
               hypothesis_template="This is a photo of a {}")
    return {dic["label"]: dic["score"] for dic in res}
49 |
+
|
50 |
+
def shot(image, labels_text):
    """Zero-shot classify an image against comma-separated candidate labels.

    Args:
        image: Input image as a numpy array; converted to an RGB PIL image.
        labels_text: Comma-separated candidate labels, e.g. "cat,dog,car".

    Returns:
        dict mapping each candidate label to its classification score.
    """
    # The CLIP pipeline expects a PIL image, not a raw numpy array.
    PIL_image = Image.fromarray(np.uint8(image)).convert('RGB')
    labels = labels_text.split(",")
    # Bug fix: the original called `pipe`, which is not defined anywhere in
    # this file; the zero-shot-image-classification pipeline is bound to
    # `zero` at module level.
    res = zero(images=PIL_image,
               candidate_labels=labels,
               hypothesis_template="This is a photo of a {}")
    return {dic["label"]: dic["score"] for dic in res}
|
57 |
|
58 |
with gr.Blocks() as demo:
|
59 |
gr.Markdown( """
|