MaulikMadhavi committed
Commit f04fba1
1 Parent(s): aa1c132

add app.py

Files changed (1)
  1. app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ import gradio as gr
+ import torchvision
+ from torchvision import transforms
+ import torch
+ import requests
+
+ # Image-classification demo using an ImageNet-pretrained ResNet-18
+ model = torchvision.models.resnet18(pretrained=True)
+
+ trans_seq = transforms.Compose([
+     transforms.Resize((224, 224)),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+ ])
+ model.eval()
+
+ # Download human-readable labels for ImageNet.
+ response = requests.get("https://git.io/JJkYN")
+ labels = response.text.split("\n")
+
+
+ def predict(image):
+     """
+     Predict the confidence of each ImageNet label for the given image.
+
+     Args:
+         image (PIL.Image.Image): The input image.
+
+     Returns:
+         dict: A dictionary mapping label names to their confidences.
+     """
+     image = trans_seq(image)
+     image = image.unsqueeze(0)  # add a batch dimension
+     with torch.no_grad():
+         prediction = torch.nn.functional.softmax(model(image)[0], dim=0)
+     confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+     return confidences
+
+ # Example images from the ImageNet sample-images repository
+ examples = [
+     "https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n01484850_great_white_shark.JPEG",
+     "https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n01443537_goldfish.JPEG",
+     "https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n01632777_axolotl.JPEG",
+     "https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n01534433_junco.JPEG",
+     "https://github.com/EliSchwartz/imagenet-sample-images/raw/master/n01753488_horned_viper.JPEG",
+ ]
+ with gr.Blocks(theme="soft") as demo:
+     input_img = gr.Image(label="Input Image", type="pil")
+     output = gr.Label(num_top_classes=3)
+     exam = gr.Examples(examples=examples, examples_per_page=10, inputs=[input_img], outputs=[output])
+     input_img.change(predict, inputs=[input_img], outputs=[output])
+
+ demo.launch()