SakuraD committed
Commit c3aa290
1 Parent(s): fc13876
Files changed (1)
  1. app.py +2 -26
app.py CHANGED
@@ -48,15 +48,6 @@ def inference(img):
     prediction = model(image)
     prediction = F.softmax(prediction, dim=1).flatten()
 
-    # pred_classes = prediction.topk(k=5).indices
-    # pred_class_names = [imagenet_id_to_classname[str(i.item())] for i in pred_classes[0]]
-    # pred_class_probs = [prediction[0][i.item()].item() * 100 for i in pred_classes[0]]
-    # res = "Top 5 predicted labels:\n"
-    # for name, prob in zip(pred_class_names, pred_class_probs):
-    #     res += f"[{prob:2.2f}%]\t{name}\n"
-
-    # return res
-
     return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}
 
 def set_example_image(example: list) -> dict:
@@ -76,7 +67,7 @@ with demo:
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                input_image = gr.Image(label='Input Image', type='numpy')
+                input_image = gr.Image(label='Input Image', type='pil')
             with gr.Row():
                 submit_button = gr.Button('Submit')
         with gr.Column():
@@ -93,19 +84,4 @@ with demo:
     submit_button.click(fn=inference, inputs=input_image, outputs=label)
     example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
 
-    demo.launch(enable_queue=True)
-
-
-
-# inputs = gr.inputs.Image(type='pil')
-# label = gr.outputs.Label(num_top_classes=5)
-
-# title = "UniFormer-S"
-# description = "Gradio demo for UniFormer: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
-# article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>"
-
-# gr.Interface(
-#     inference, inputs, outputs=label,
-#     title=title, description=description, article=article,
-#     examples=[['library.jpeg'], ['cat.png'], ['dog.png'], ['panda.png']]
-# ).launch(enable_queue=True, cache_examples=True)
+demo.launch(enable_queue=True)
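For context on the input change: with type='pil', Gradio hands the uploaded image to the callback as a PIL.Image.Image instead of a NumPy array, so it can be fed straight into a torchvision-style transform. Below is a minimal sketch of how inference fits together after this commit, assuming standard ImageNet preprocessing; the actual transform, model, and imagenet_id_to_classname are defined elsewhere in app.py and are not shown in this diff.

import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

# Assumed ImageNet-style preprocessing; the real pipeline lives elsewhere in app.py.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def inference(img: Image.Image):
    # gr.Image(type='pil') delivers the upload as a PIL image.
    image = transform(img).unsqueeze(0)    # batch of one: (1, 3, 224, 224)
    with torch.no_grad():
        prediction = model(image)          # model is built earlier in app.py
    prediction = F.softmax(prediction, dim=1).flatten()
    # Map every ImageNet class id to its probability; the output component picks the top entries.
    return {imagenet_id_to_classname[str(i)]: float(prediction[i]) for i in range(1000)}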
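The dictionary that inference returns (class name mapped to confidence) is the format gr.Label consumes, and the component only renders its top entries. The label output wired into submit_button.click is defined outside the hunks above; a hypothetical definition in that style, for illustration only:

# Hypothetical output component; the actual gr.Label arguments used in app.py
# are not visible in this diff.
label = gr.Label(label='Prediction', num_top_classes=5)

# Wiring as shown above: clicking Submit runs inference on the uploaded
# image and displays the highest-confidence classes.
submit_button.click(fn=inference, inputs=input_image, outputs=label)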