Spaces: Runtime error
autonomous019 committed
Commit d7d1270 · 1 parent: 3decb3e
more edits
app.py CHANGED

@@ -16,14 +16,10 @@ model = ViTForImageClassification(config)
 #print(config)
 
 feature_extractor = ViTFeatureExtractor()
-
 # or, to load one that corresponds to a checkpoint on the hub:
 #feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
 
-
-image = "cats.jpg"
-
-
+#the following gets called by classify_image()
 feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
 model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
 
@@ -39,12 +35,14 @@ def classify_image(image):
 output[predicted_label] = score
 return output
 
+
 image = gr.inputs.Image(type="pil")
+image_piped = image_pipe(image)
 label = gr.outputs.Label(num_top_classes=5)
 examples = [["cats.jpg"], ["dog.jpg"]]
-title = "
-description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image
-article = "<p style='text-align: center'
+title = "Generate a Story from an Image"
+description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds." + image_piped
+article = "<p style='text-align: center'></p>"
 
 gr.Interface(fn=classify_image, inputs=image, outputs=label, title=title, description=description, examples="", enable_queue=True).launch(debug=True)
 
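The body of classify_image() lies mostly outside the diff context above; only output[predicted_label] = score, return output, and the two from_pretrained() calls are visible. Below is a minimal sketch of what such a function typically looks like with the documented transformers API for deepmind/vision-perceiver-conv; the top-5 loop and the variable names are illustrative assumptions, not the Space's actual code.

import torch
from transformers import PerceiverFeatureExtractor, PerceiverForImageClassificationConvProcessing

feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")

# Sketch of classify_image(); names and the top-5 loop are assumptions,
# only "output[predicted_label] = score" and "return output" appear in the diff.
def classify_image(image):
    # preprocess the PIL image; the conv-processing Perceiver takes the pixel values as `inputs`
    encoding = feature_extractor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs=encoding.pixel_values).logits
    probs = torch.nn.functional.softmax(logits, dim=-1)[0]
    # build the {label: score} dict that gr.outputs.Label(num_top_classes=5) expects
    output = {}
    for score, idx in zip(*probs.topk(5)):
        predicted_label = model.config.id2label[idx.item()]
        output[predicted_label] = float(score)
    return output

Returning a label-to-score dictionary is what the gr.outputs.Label(num_top_classes=5) output declared in the diff expects, and gr.Interface(fn=classify_image, inputs=image, outputs=label, ...) passes the uploaded PIL image straight into the function.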