Upload CaffeLoader.py

CaffeLoader.py  ADDED  (+254 −0)
import torch
import torch.nn as nn


# The classes below are thin containers that mirror the layouts of the
# converted caffemodels so that their state dicts load; loadCaffemodel()
# keeps only the .features portion afterwards.
class VGG(nn.Module):
    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )


class VGG_SOD(nn.Module):
    def __init__(self, features, num_classes=100):
        super(VGG_SOD, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),  # was hard-coded to 100; num_classes defaults to 100
        )


class VGG_FCN32S(nn.Module):
    def __init__(self, features, num_classes=1000):
        super(VGG_FCN32S, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, (7, 7)),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Conv2d(4096, 4096, (1, 1)),
            nn.ReLU(True),
            nn.Dropout(0.5),
        )


class VGG_PRUNED(nn.Module):
    def __init__(self, features, num_classes=1000):
        super(VGG_PRUNED, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(0.5),
        )


# Network In Network (NIN) architecture.
class NIN(nn.Module):
    def __init__(self, pooling):
        super(NIN, self).__init__()
        if pooling == 'max':
            pool2d = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        elif pooling == 'avg':
            pool2d = nn.AvgPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        else:
            raise ValueError("Unrecognized pooling parameter")

        self.features = nn.Sequential(
            nn.Conv2d(3, 96, (11, 11), (4, 4)),
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, (1, 1)),
            nn.ReLU(inplace=True),
            pool2d,
            nn.Conv2d(96, 256, (5, 5), (1, 1), (2, 2)),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, (1, 1)),
            nn.ReLU(inplace=True),
            pool2d,
            nn.Conv2d(256, 384, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, (1, 1)),
            nn.ReLU(inplace=True),
            pool2d,
            nn.Dropout(0.5),
            nn.Conv2d(384, 1024, (3, 3), (1, 1), (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, (1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1000, (1, 1)),
            nn.ReLU(inplace=True),
            nn.AvgPool2d((6, 6), (1, 1), (0, 0), ceil_mode=True),
            nn.Softmax(dim=1),  # dim=1 (channels) matches the implicit default and avoids the deprecation warning
        )


# Splits a sequential network into chunks and places each chunk on its own
# device; forward() moves the activations between devices.
class ModelParallel(nn.Module):
    def __init__(self, net, device_ids, device_splits):
        super(ModelParallel, self).__init__()
        self.device_list = self.name_devices(device_ids.split(','))
        self.chunks = self.chunks_to_devices(self.split_net(net, device_splits.split(',')))

    def name_devices(self, input_list):
        # 'c' (or 'C') selects the CPU; anything else is treated as a CUDA device index.
        device_list = []
        for i, device in enumerate(input_list):
            if str(device).lower() != 'c':
                device_list.append("cuda:" + str(device))
            else:
                device_list.append("cpu")
        return device_list

    def split_net(self, net, device_splits):
        # Cut the network into nn.Sequential chunks after the requested layer indices.
        chunks, cur_chunk = [], nn.Sequential()
        for i, l in enumerate(net):
            cur_chunk.add_module(str(i), l)
            if device_splits and str(i) in device_splits:
                del device_splits[0]
                chunks.append(cur_chunk)
                cur_chunk = nn.Sequential()
        chunks.append(cur_chunk)
        return chunks

    def chunks_to_devices(self, chunks):
        for i, chunk in enumerate(chunks):
            chunk.to(self.device_list[i])
        return chunks

    def c(self, input, i):
        # Cast the tensor to match the device of chunk i (CPU <-> CUDA).
        if input.type() == 'torch.FloatTensor' and 'cuda' in self.device_list[i]:
            input = input.type('torch.cuda.FloatTensor')
        elif input.type() == 'torch.cuda.FloatTensor' and 'cpu' in self.device_list[i]:
            input = input.type('torch.FloatTensor')
        return input

    def forward(self, input):
        for i, chunk in enumerate(self.chunks):
            if i < len(self.chunks) - 1:
                input = self.c(chunk(self.c(input, i).to(self.device_list[i])), i + 1).to(self.device_list[i + 1])
            else:
                input = chunk(input)
        return input


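# Usage sketch for the ModelParallel class above (illustrative, not in the
# original file): given any nn.Sequential `net`,
#     ModelParallel(net, device_ids='0,1', device_splits='10')
# places layers 0-10 on cuda:0 and the remaining layers on cuda:1, with
# forward() moving the activations between the two devices. The device ids
# and split index here are placeholders.
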
def buildSequential(channel_list, pooling):
    # Expand a VGG channel specification: 'P' adds a pooling layer, an integer
    # adds a 3x3 conv with that many output channels followed by ReLU.
    layers = []
    in_channels = 3
    if pooling == 'max':
        pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
    elif pooling == 'avg':
        pool2d = nn.AvgPool2d(kernel_size=2, stride=2)
    else:
        raise ValueError("Unrecognized pooling parameter")
    for c in channel_list:
        if c == 'P':
            layers += [pool2d]
        else:
            conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = c
    return nn.Sequential(*layers)


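# Illustrative example (not in the original file): with pooling='max', a short
# hypothetical spec such as [64, 64, 'P'] expands to
#     Conv2d(3, 64, 3, padding=1) -> ReLU -> Conv2d(64, 64, 3, padding=1) -> ReLU -> MaxPool2d(2, 2)
# which is how the channel lists below become the VGG feature stacks.
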
channel_list = {
    'VGG-16p': [24, 22, 'P', 41, 51, 'P', 108, 89, 111, 'P', 184, 276, 228, 'P', 512, 512, 512, 'P'],
    'VGG-16': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 'P', 512, 512, 512, 'P', 512, 512, 512, 'P'],
    'VGG-19': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 256, 'P', 512, 512, 512, 512, 'P', 512, 512, 512, 512, 'P'],
}

nin_dict = {
    'C': ['conv1', 'cccp1', 'cccp2', 'conv2', 'cccp3', 'cccp4', 'conv3', 'cccp5', 'cccp6', 'conv4-1024', 'cccp7-1024', 'cccp8-1024'],
    'R': ['relu0', 'relu1', 'relu2', 'relu3', 'relu5', 'relu6', 'relu7', 'relu8', 'relu9', 'relu10', 'relu11', 'relu12'],
    'P': ['pool1', 'pool2', 'pool3', 'pool4'],
    'D': ['drop'],
}
vgg16_dict = {
    'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3'],
    'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu4_1', 'relu4_2', 'relu4_3', 'relu5_1', 'relu5_2', 'relu5_3'],
    'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
}
vgg19_dict = {
    'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'],
    'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu3_4', 'relu4_1', 'relu4_2', 'relu4_3', 'relu4_4', 'relu5_1', 'relu5_2', 'relu5_3', 'relu5_4'],
    'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
}


def modelSelector(model_file, pooling):
    # Pick the architecture (and its Caffe layer-name table) from the file name.
    vgg_list = ["fcn32s", "pruning", "sod", "vgg"]
    if any(name in model_file for name in vgg_list):
        if "pruning" in model_file:
            print("VGG-16 Architecture Detected")
            print("Using The Channel Pruning Model")
            cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
        elif "fcn32s" in model_file:
            print("VGG-16 Architecture Detected")
            print("Using the fcn32s-heavy-pascal Model")
            cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
        elif "sod" in model_file:
            print("VGG-16 Architecture Detected")
            print("Using The SOD Finetune Model")
            cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
        elif "19" in model_file:
            print("VGG-19 Architecture Detected")
            cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
        elif "16" in model_file:
            print("VGG-16 Architecture Detected")
            cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
        else:
            raise ValueError("VGG architecture not recognized.")
    elif "nin" in model_file:
        print("NIN Architecture Detected")
        cnn, layerList = NIN(pooling), nin_dict
    else:
        raise ValueError("Model architecture not recognized.")
    return cnn, layerList


# Print like Torch7/loadcaffe
def print_loadcaffe(cnn, layerList):
    c = 0
    for l in list(cnn):
        if "Conv2d" in str(l):
            in_c, out_c, ks = str(l.in_channels), str(l.out_channels), str(l.kernel_size)
            # e.g. "conv1_1: 64 3 3 3" for the first VGG conv layer
            print(layerList['C'][c] + ": " + (out_c + " " + in_c + " " + ks).replace(")", '').replace("(", '').replace(",", ''))
            c += 1
        if c == len(layerList['C']):
            break


# Load the model, and configure pooling layer type
def loadCaffemodel(model_file, pooling, use_gpu, disable_check):
    cnn, layerList = modelSelector(str(model_file).lower(), pooling)

    cnn.load_state_dict(torch.load(model_file), strict=(not disable_check))
    print("Successfully loaded " + str(model_file))

    # Maybe convert the model to CUDA now, to avoid later issues;
    # the .cuda() call is skipped only when the first listed device is the CPU ('c').
    if "c" not in str(use_gpu).lower() or "c" not in str(use_gpu[0]).lower():
        cnn = cnn.cuda()
    cnn = cnn.features

    print_loadcaffe(cnn, layerList)

    return cnn, layerList
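
For reference, a minimal usage sketch of the two entry points defined above; it is not part of CaffeLoader.py. The weight path, device ids, and split index are placeholders, and it assumes a converted VGG-19 .pth file is available locally (any file name containing "vgg" and "19" selects the VGG-19 layout).

from CaffeLoader import loadCaffemodel, ModelParallel

# Load the converted caffemodel weights (placeholder path) and keep the feature layers.
cnn, layerList = loadCaffemodel("models/vgg19_example.pth", pooling='max',
                                use_gpu='0', disable_check=False)

# Optionally spread the returned feature stack over two GPUs,
# cutting after layer index 10 (devices and index are illustrative).
net = ModelParallel(cnn, device_ids='0,1', device_splits='10')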