hcs committed on
Commit
475ac6e
1 Parent(s): 8d3b05d

Add application file

Files changed (5)
  1. 1_upload.sh +0 -0
  2. Nets.py +141 -0
  3. app.py +2 -1
  4. core.py +58 -0
  5. requirements.txt +0 -0
1_upload.sh ADDED
File without changes
Nets.py ADDED
@@ -0,0 +1,141 @@
+import torch
+import torch.nn as nn
+import math
+from collections import OrderedDict
+
+################## AlexNet ##################
+def bn_relu(inplanes):
+    return nn.Sequential(nn.BatchNorm2d(inplanes), nn.ReLU(inplace=True))
+
+def bn_relu_pool(inplanes, kernel_size=3, stride=2):
+    return nn.Sequential(nn.BatchNorm2d(inplanes), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=kernel_size, stride=stride))
+
+class AlexNet(nn.Module):
+    def __init__(self, num_classes=1):
+        super(AlexNet, self).__init__()
+        self.conv1 = nn.Conv2d(3, 96, kernel_size=11, stride=4, bias=False)
+        self.relu_pool1 = bn_relu_pool(inplanes=96)
+        self.conv2 = nn.Conv2d(96, 192, kernel_size=5, padding=2, groups=2, bias=False)
+        self.relu_pool2 = bn_relu_pool(inplanes=192)
+        self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1, groups=2, bias=False)
+        self.relu3 = bn_relu(inplanes=384)
+        self.conv4 = nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=2, bias=False)
+        self.relu4 = bn_relu(inplanes=384)
+        self.conv5 = nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=2, bias=False)
+        self.relu_pool5 = bn_relu_pool(inplanes=256)
+        # classifier
+        self.conv6 = nn.Conv2d(256, 256, kernel_size=5, groups=2, bias=False)
+        self.relu6 = bn_relu(inplanes=256)
+        self.conv7 = nn.Conv2d(256, num_classes, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.relu_pool1(x)
+        x = self.conv2(x)
+        x = self.relu_pool2(x)
+        x = self.conv3(x)
+        x = self.relu3(x)
+        x = self.conv4(x)
+        x = self.relu4(x)
+        x = self.conv5(x)
+        x = self.relu_pool5(x)
+        x = self.conv6(x)
+        x = self.relu6(x)
+        x = self.conv7(x)
+        x = x.view(x.size(0), -1)
+        return x
+
+
+
+################## ResNet ##################
+def conv3x3(in_planes, out_planes, stride=1):
+    # 3x3 convolution with padding
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+
+class BasicBlock(nn.Module):
+    expansion = 1
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(BasicBlock, self).__init__()
+        m = OrderedDict()
+        m['conv1'] = conv3x3(inplanes, planes, stride)
+        m['bn1'] = nn.BatchNorm2d(planes)
+        m['relu1'] = nn.ReLU(inplace=True)
+        m['conv2'] = conv3x3(planes, planes)
+        m['bn2'] = nn.BatchNorm2d(planes)
+        self.group1 = nn.Sequential(m)
+        self.relu = nn.Sequential(nn.ReLU(inplace=True))
+        self.downsample = downsample
+
+    def forward(self, x):
+        if self.downsample is not None:
+            residual = self.downsample(x)
+        else:
+            residual = x
+        out = self.group1(x) + residual
+        out = self.relu(out)
+
+        return out
+
+class ResNet(nn.Module):
+    def __init__(self, block, layers, num_classes=1000):
+        self.inplanes = 64
+        super(ResNet, self).__init__()
+
+        m = OrderedDict()
+        m['conv1'] = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+        m['bn1'] = nn.BatchNorm2d(64)
+        m['relu1'] = nn.ReLU(inplace=True)
+        m['maxpool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.group1 = nn.Sequential(m)
+
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+
+        self.avgpool = nn.Sequential(nn.AvgPool2d(7))
+        self.group2 = nn.Sequential(
+            OrderedDict([
+                ('fullyconnected', nn.Linear(512 * block.expansion, num_classes))
+            ])
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2. / n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+            elif isinstance(m, nn.Linear):
+                torch.nn.init.xavier_uniform_(m.weight.data)
+                torch.nn.init.constant_(m.bias.data, 0)
+
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.group1(x)
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        x = self.avgpool(x)
+        x = x.view(x.size(0), -1)
+        x = self.group2(x)
+
+        return x
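
For reference, a minimal shape sanity check for the two networks above (not part of the commit); the 224x224 input size is an assumption matching the CenterCrop(224) transform used in core.py below:

import torch
import Nets

alexnet = Nets.AlexNet(num_classes=1).eval()
resnet18 = Nets.ResNet(block=Nets.BasicBlock, layers=[2, 2, 2, 2], num_classes=1).eval()

x = torch.randn(2, 3, 224, 224)  # two dummy RGB images
with torch.no_grad():
    print(alexnet(x).shape)   # torch.Size([2, 1]) - one scalar score per image
    print(resnet18(x).shape)  # torch.Size([2, 1])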
app.py CHANGED
@@ -1,5 +1,6 @@
 import streamlit as st
 from PIL import Image
+import core
 
 
 # Load the model and other required libraries
@@ -17,7 +18,7 @@ def main():
 
     # Use the model to score facial attractiveness
     # score = model.predict(image)
-    score = 100
+    score = core.fun(uploaded_image)
 
     # Display the attractiveness score in the UI
     st.write("颜值打分:", score)
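
Only the changed hunks of app.py are shown above. As a rough sketch (an assumption, not the committed file), the surrounding main() plausibly wires the pieces together like this, with uploaded_image coming from a Streamlit file uploader:

import streamlit as st
from PIL import Image
import core


def main():
    # hypothetical uploader; the hunks above only show that `uploaded_image`
    # is passed to core.fun and that the score is written with st.write
    uploaded_image = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    if uploaded_image is not None:
        image = Image.open(uploaded_image)
        st.image(image)

        uploaded_image.seek(0)            # rewind the buffer before core.fun re-opens it
        score = core.fun(uploaded_image)  # score the uploaded face image

        st.write("颜值打分:", score)


if __name__ == "__main__":
    main()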
core.py ADDED
@@ -0,0 +1,58 @@
+import torchvision.transforms as transforms
+from PIL import Image
+import Nets
+
+
+def load_model(pretrained_dict, new):
+    model_dict = new.state_dict()
+    # 1. filter out unnecessary keys
+    pretrained_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if k in model_dict}
+    # 2. overwrite entries in the existing state dict
+    model_dict.update(pretrained_dict)
+    new.load_state_dict(model_dict)
+
+
+import torch
+
+# check GPU availability
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+else:
+    device = torch.device("cpu")
+    print("use cpu")
+
+# model_ckpt_path = "./models/resnet18.pth"
+model_ckpt_path = "https://huggingface.co/M4869/beauty_prediction_fpb5k/blob/main/resnet18.pth"
+
+net = Nets.ResNet(block=Nets.BasicBlock, layers=[2, 2, 2, 2], num_classes=1).to(device)
+load_model(torch.load(model_ckpt_path, map_location=device, encoding='latin1'), net)  # map_location keeps loading working on CPU-only machines
+net.eval()
+
+transform = transforms.Compose([
+    transforms.Resize(256),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+
+def fun(img_path):
+    img = Image.open(img_path).convert('RGB')
+    img = transform(img)
+    with torch.no_grad():
+        img = img.unsqueeze(0).to(device)
+        output = net(img).squeeze(1).cpu().numpy()[0]
+    return output
+
+# def main():
+#     for i in range(6, 7):
+#         img = Image.open("./data2/%d.jpg" % i).convert('RGB')
+#         img = transform(img)
+#
+#         with torch.no_grad():
+#             img = img.unsqueeze(0).to(device)
+#             output = net(img).squeeze(1).cpu().numpy()[0]
+#             print(i, output * 20)
+
+
+# if __name__ == '__main__':
+#     main()
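
A small usage sketch for core.fun (not part of the commit); the image path is illustrative, and note that torch.load expects a local file path or file object, so resnet18.pth would have to be available locally rather than read directly from the blob URL above:

import core

score = core.fun("./data2/6.jpg")       # hypothetical test image
print("raw model output:", score)
print("scaled score:", score * 20)      # same scaling as the commented-out main() above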
requirements.txt ADDED
File without changes