Selma committed
Commit
7c3ad8f
1 Parent(s): 1e7360a

Update app.py

Files changed (1)
  1. app.py +44 -3
app.py CHANGED
@@ -1,7 +1,48 @@
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")

  iface.launch()
 
  import gradio as gr
+ import torch
+ from torchvision import transforms as T
+ import torchvision.models as models
+ import requests
+ from huggingface_hub import hf_hub_url

+ def classify(image):
+
+     ## preprocessing
+     # we need a transform step to normalise the pictures
+     transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+     # load the pretrained model
+     resnet34 = models.resnet34(pretrained=True)
+
+     # normalise the image
+     image_transformed = transform(image)
+     # reshape into a batch of one
+     batch_image_transformed = torch.unsqueeze(image_transformed, 0)
+     # evaluation mode
+     resnet34.eval()
+     # get the predictions
+     output = resnet34(batch_image_transformed)
+
+     ## labeling
+     # load the file containing the 1,000 labels for the ImageNet dataset classes
+     url = hf_hub_url(repo_id="Selma/pytorch-resnet34", filename="imagenet_classes.txt")
+     response = requests.get(url)
+     # write to a label file
+     open("labels.txt", "wb").write(response.content)
+     # extract the labels from the file
+     with open("labels.txt", "r") as f:
+         labels = [line.strip() for line in f.readlines()]
+
+     ## predict the class
+     # find the index (tensor) corresponding to the maximum score in the output tensor;
+     # torch.max returns both the value and its index
+     _, index = torch.max(output, 1)
+
+     # express the score as a percentage with torch.nn.functional.softmax,
+     # which normalises the output to the range [0, 1], then multiply by 100
+     percentage = torch.nn.functional.softmax(output, dim=1)[0] * 100
+
+     return "The image depicts: " + labels[index[0]] + " with a score of " + str(percentage[index[0]].item()) + "%"
+
+ iface = gr.Interface(fn=classify, inputs="image", outputs="text")
  iface.launch()
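
The new classify() function can also be exercised outside the Gradio UI, which is handy for debugging the Space locally. A minimal sketch, assuming a local test picture named test.jpg (hypothetical) and network access to fetch the ResNet-34 weights and the label file; the torchvision transforms above operate on a PIL image:

from PIL import Image

# run a local picture through the same pipeline the Space uses
img = Image.open("test.jpg").convert("RGB")
print(classify(img))

Depending on the Gradio version, the "image" input may hand classify() a NumPy array rather than a PIL image; in that case the input component can be configured with type="pil", or the array converted with Image.fromarray(image) at the top of the function.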