czkaiweb commited on
Commit
c86592c
1 Parent(s): eddcfb6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -0
app.py CHANGED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from PIL import Image
3
+ from torchvision import datasets, models, transforms
4
+ import gradio as gr
5
+ import os
6
+ import torch.nn as nn
7
+
8
+
9
# Fetch the class-name list. NOTE: the raw.githubusercontent.com URL is
# required — the original github.com/.../blob/... URL returns a GitHub HTML
# page instead of the text file, so the categories parsed at inference time
# would be HTML markup rather than artist names.
os.system("wget https://raw.githubusercontent.com/liuxiaoyuyuyu/vanGogh-and-Other-Artist/main/artist_classes.txt")

# Build a MobileNetV2 backbone and swap its final classifier layer so it
# predicts 6 artist classes instead of the 1000 ImageNet classes.
model = models.mobilenet_v2()
num_ftrs = model.classifier[1].in_features
model.classifier[1] = nn.Linear(num_ftrs, 6)

# map_location='cpu' lets a CUDA-saved checkpoint load on GPU-less hosts
# (e.g. a free Hugging Face Space); the model is moved to CUDA later when
# a GPU is available.
model.load_state_dict(
    torch.load('model_weights_mobilenet_v2_valp1trainp2.pth', map_location='cpu')
)

# Switch to inference mode: disables dropout and freezes BatchNorm running
# statistics so predictions are deterministic.
model.eval()
24
# Standard ImageNet preprocessing expected by MobileNetV2. Built once at
# import time instead of on every call.
_PREPROCESS = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


def inference(input_image):
    """Classify a PIL image into the 6 artist classes.

    Args:
        input_image: a PIL.Image, as supplied by the Gradio Image input.

    Returns:
        dict mapping each category name (read from artist_classes.txt) to
        its softmax probability. k=6 equals the number of classes, so the
        full probability distribution is returned.
    """
    input_tensor = _PREPROCESS(input_image)
    # The model expects a leading batch dimension.
    input_batch = input_tensor.unsqueeze(0)

    # Use the GPU when available; moving the (module-level) model is a
    # cheap no-op after the first call.
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        model.to('cuda')

    # eval() disables dropout / freezes BatchNorm so results are
    # deterministic; no_grad() skips autograd bookkeeping during the
    # forward pass.
    model.eval()
    with torch.no_grad():
        output = model(input_batch)

    # The model emits unnormalized logits; softmax converts them to
    # probabilities.
    probabilities = torch.nn.functional.softmax(output[0], dim=0)

    # Read the category names, one per line.
    with open("artist_classes.txt", "r") as f:
        categories = [s.strip() for s in f.readlines()]

    top_prob, top_catid = torch.topk(probabilities, 6)
    return {
        categories[top_catid[i]]: top_prob[i].item()
        for i in range(top_prob.size(0))
    }
53
+
54
# Gradio wiring (legacy pre-3.x component API, matching the rest of the
# file): a PIL image in, a confidence-label widget out.
inputs = gr.inputs.Image(type='pil')
outputs = gr.outputs.Label(type="confidences", num_top_classes=5)

# Static page text shown around the demo.
title = "MOBILENET V2"
description = "Gradio demo for MOBILENET V2, Efficient networks optimized for speed and memory, with residual blocks. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1801.04381'>MobileNetV2: Inverted Residuals and Linear Bottlenecks</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py'>Github Repo</a></p>"

# Assemble and serve the interface.
demo = gr.Interface(
    fn=inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    article=article,
    analytics_enabled=False,
)
demo.launch()