import gradio as gr
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
import torch

# Download example images used by the demo
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
torch.hub.download_url_to_file('https://storage.googleapis.com/perceiver_io/dalmation.jpg', 'dog.jpg')

# Load the Perceiver IO feature extractor and image-classification model,
# then wrap them in a ready-to-use pipeline
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")

image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)

def classify_image(image):
    results = image_pipe(image)
    # The pipeline returns a list of {'label': ..., 'score': ...} dicts;
    # convert it to the {label: score} mapping that gr.outputs.Label expects
    output = {}
    for prediction in results:
        predicted_label = prediction['label']
        score = prediction['score']
        output[predicted_label] = score
    return output

# Gradio interface components (legacy gr.inputs / gr.outputs API)
image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [["cats.jpg"], ["dog.jpg"]]
title = "Interactive demo: Perceiver for image classification"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image or use the example images below and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2107.14795'>Perceiver IO: A General Architecture for Structured Inputs & Outputs</a> | <a href='https://deepmind.com/blog/article/building-architectures-that-can-handle-the-worlds-data/'>Official blog</a></p>"

gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
    title=title,
    description=description,
    examples=examples,
    enable_queue=True,
).launch(debug=True)
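
# Quick sanity check outside the Gradio UI (illustrative sketch; assumes the
# example image downloaded above is present in the working directory):
#
#     from PIL import Image
#     print(classify_image(Image.open("cats.jpg")))
#
# Running this file directly (e.g. `python app.py`) starts the demo; by default
# Gradio serves the interface at http://127.0.0.1:7860.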