Geonmo committed on
Commit 88b7229
Parent: 9a5e553

initial commit

Files changed (3)
  1. app.py +108 -0
  2. requirements.txt +6 -0
  3. sac+logos+ava1-l14-linearMSE.pth +3 -0
app.py ADDED
@@ -0,0 +1,108 @@
+ import os
+ import numpy as np
+ import torch
+ import torch.nn.functional as F  # needed for F.mse_loss in the Lightning steps below
+ import pytorch_lightning as pl
+ import torch.nn as nn
+ import clip
+ from PIL import Image, ImageFile
+ import gradio as gr
+
+ # if you changed the MLP architecture during training, change it also here:
+ class MLP(pl.LightningModule):
+     def __init__(self, input_size, xcol='emb', ycol='avg_rating'):
+         super().__init__()
+         self.input_size = input_size
+         self.xcol = xcol
+         self.ycol = ycol
+         self.layers = nn.Sequential(
+             nn.Linear(self.input_size, 1024),
+             #nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(1024, 128),
+             #nn.ReLU(),
+             nn.Dropout(0.2),
+             nn.Linear(128, 64),
+             #nn.ReLU(),
+             nn.Dropout(0.1),
+
+             nn.Linear(64, 16),
+             #nn.ReLU(),
+
+             nn.Linear(16, 1)
+         )
+
+     def forward(self, x):
+         return self.layers(x)
+
+     def training_step(self, batch, batch_idx):
+         x = batch[self.xcol]
+         y = batch[self.ycol].reshape(-1, 1)
+         x_hat = self.layers(x)
+         loss = F.mse_loss(x_hat, y)
+         return loss
+
+     def validation_step(self, batch, batch_idx):
+         x = batch[self.xcol]
+         y = batch[self.ycol].reshape(-1, 1)
+         x_hat = self.layers(x)
+         loss = F.mse_loss(x_hat, y)
+         return loss
+
+     def configure_optimizers(self):
+         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
+         return optimizer
+
+ def normalized(a, axis=-1, order=2):
+     import numpy as np  # pylint: disable=import-outside-toplevel
+
+     l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
+     l2[l2 == 0] = 1
+     return a / np.expand_dims(l2, axis)
+
+ def load_models():
+     device = "cuda" if torch.cuda.is_available() else "cpu"  # fall back to CPU when no GPU is present
+     model = MLP(768)  # CLIP ViT-L/14 image embeddings are 768-dimensional
+     s = torch.load("sac+logos+ava1-l14-linearMSE.pth", map_location=device)
+
+     model.load_state_dict(s)
+     model.to(device)
+     model.eval()
+
+     model2, preprocess = clip.load("ViT-L/14", device=device)
+
+     model_dict = {}
+     model_dict['classifier'] = model
+     model_dict['clip_model'] = model2
+     model_dict['clip_preprocess'] = preprocess
+     model_dict['device'] = device
+
+     return model_dict
+
+ def predict(image):
+     image_input = model_dict['clip_preprocess'](image).unsqueeze(0).to(model_dict['device'])
+     with torch.no_grad():
+         image_features = model_dict['clip_model'].encode_image(image_input)
+     im_emb_arr = normalized(image_features.detach().cpu().numpy())
+     prediction = model_dict['classifier'](torch.from_numpy(im_emb_arr).to(model_dict['device']).float())
+     score = prediction.item()
+
+     return {'aesthetic score': score}
+
+ if __name__ == '__main__':
+     print('\tinit models')
+
+     global model_dict
+
+     model_dict = load_models()
+
+     inputs = [gr.inputs.Image(type='pil', label='Image')]
+
+     outputs = gr.outputs.JSON()
+
+     title = 'image aesthetic predictor'
+
+     gr.Interface(predict,
+                  inputs,
+                  outputs,
+                  title=title,
+                  ).launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ ftfy
+ regex
+ git+https://github.com/openai/CLIP.git
+ gradio
+ torch
+ pytorch-lightning
sac+logos+ava1-l14-linearMSE.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21dd590f3ccdc646f0d53120778b296013b096a035a2718c9cb0d511bff0f1e0
+ size 3714759
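
For a quick local check of what this commit adds, the sketch below drives the predictor without the Gradio UI. It is hypothetical: it assumes the packages in requirements.txt are installed, the Git LFS checkpoint sac+logos+ava1-l14-linearMSE.pth has been pulled next to app.py, and `test.jpg` is any local image (the name is only illustrative).

```python
# Hypothetical smoke test for the app added in this commit.
# Assumes app.py is importable from the current directory and
# "test.jpg" is an arbitrary local image (illustrative name).
from PIL import Image

import app  # the app.py module introduced by this commit

app.model_dict = app.load_models()            # CLIP ViT-L/14 + the MLP aesthetic head
result = app.predict(Image.open("test.jpg"))  # returns {'aesthetic score': <float>}
print(result)
```

Running `python app.py` instead starts the Gradio interface defined in the `__main__` block.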