Spaces:
Runtime error
autonomous019 committed on
Commit • 41c00ad
1 Parent(s): 98ec703
writing output to logs
app.py CHANGED
@@ -5,6 +5,7 @@ import requests
 import matplotlib.pyplot as plt
 import gradio as gr
 from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
+from transformers import AutoTokenizer
 import torch
 
 
@@ -25,8 +26,27 @@ model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/
 
 image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
 
+'''
+repo_name = "ydshieh/vit-gpt2-coco-en"
+
+feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
+tokenizer = AutoTokenizer.from_pretrained(repo_name)
+model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)
+pixel_values = feature_extractor2(image, return_tensors="pt").pixel_values
+
+# autoregressively generate text (using beam search or other decoding strategy)
+generated_ids = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)
+# decode into text
+preds = tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)
+preds = [pred.strip() for pred in preds]
+print(preds)
+'''
+
+
 def classify_image(image):
     results = image_pipe(image)
+
+    print("RESULTS")
     print(results)
     # convert to format Gradio expects
     output = {}
@@ -34,6 +54,8 @@ def classify_image(image):
         predicted_label = prediction['label']
         score = prediction['score']
         output[predicted_label] = score
+    print("OUTPUT")
+    print(output)
     return output
 
 
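
Note: the block this commit adds between triple quotes references ViTFeatureExtractor and VisionEncoderDecoderModel, which are never imported here (only AutoTokenizer is added). Because it sits inside a string literal it never executes; the Space still runs only the Perceiver classification pipeline plus the new print calls. For reference, a minimal standalone sketch of what that captioning snippet would look like if uncommented, assuming a PIL image as input and the ydshieh/vit-gpt2-coco-en checkpoint named in the diff (the image path is a placeholder):

from PIL import Image
from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel

repo_name = "ydshieh/vit-gpt2-coco-en"

# load the image-captioning checkpoint referenced in the commented-out block
feature_extractor2 = ViTFeatureExtractor.from_pretrained(repo_name)
tokenizer = AutoTokenizer.from_pretrained(repo_name)
model2 = VisionEncoderDecoderModel.from_pretrained(repo_name)

image = Image.open("example.jpg")  # placeholder; the Space would pass the Gradio input image
pixel_values = feature_extractor2(image, return_tensors="pt").pixel_values

# autoregressively generate a caption with beam search
generated = model2.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True)

# with return_dict_in_generate=True, index 0 is the `sequences` tensor of token ids
preds = tokenizer.batch_decode(generated[0], skip_special_tokens=True)
preds = [pred.strip() for pred in preds]
print(preds)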