Pengjin committed on
Commit
8dd8c43
1 Parent(s): 6f8f7d3

Create app.py

Files changed (1)
  1. app.py +255 -0
app.py ADDED
@@ -0,0 +1,255 @@
+ import os
+ import io
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ import paddle
+ from paddle.nn import functional as F
+ import random
+ from paddle.io import Dataset
+ from visualdl import LogWriter
+ from paddle.vision.transforms import transforms as T
+ import warnings
+ import cv2 as cv
+ import re
+ warnings.filterwarnings("ignore")
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+ class SeparableConv2D(paddle.nn.Layer):
+     def __init__(self,
+                  in_channels,
+                  out_channels,
+                  kernel_size,
+                  stride=1,
+                  padding=0,
+                  dilation=1,
+                  groups=None,
+                  weight_attr=None,
+                  bias_attr=None,
+                  data_format="NCHW"):
+         super(SeparableConv2D, self).__init__()
+
+         self._padding = padding
+         self._stride = stride
+         self._dilation = dilation
+         self._in_channels = in_channels
+         self._data_format = data_format
+
+         # Depthwise convolution parameters (no bias).
+         filter_shape = [in_channels, 1] + self.convert_to_list(kernel_size, 2, 'kernel_size')
+         self.weight_conv = self.create_parameter(shape=filter_shape, attr=weight_attr)
+
+         # Pointwise convolution parameters.
+         filter_shape = [out_channels, in_channels] + self.convert_to_list(1, 2, 'kernel_size')
+         self.weight_pointwise = self.create_parameter(shape=filter_shape, attr=weight_attr)
+         self.bias_pointwise = self.create_parameter(shape=[out_channels],
+                                                     attr=bias_attr,
+                                                     is_bias=True)
+
+     def convert_to_list(self, value, n, name, dtype=int):
+         if isinstance(value, dtype):
+             return [value, ] * n
+         else:
+             try:
+                 value_list = list(value)
+             except TypeError:
+                 raise ValueError("The " + name +
+                                  "'s type must be list or tuple. Received: " + str(value))
+             if len(value_list) != n:
+                 raise ValueError("The " + name + "'s length must be " + str(n) +
+                                  ". Received: " + str(value))
+             for single_value in value_list:
+                 try:
+                     dtype(single_value)
+                 except (ValueError, TypeError):
+                     raise ValueError(
+                         "The " + name + "'s type must be a list or tuple of " + str(n) +
+                         " " + str(dtype) + ". Received: " + str(value) +
+                         " including element " + str(single_value) + " of type " +
+                         str(type(single_value)))
+             return value_list
+
+     def forward(self, inputs):
+         # Depthwise convolution: one filter per input channel (groups=in_channels).
+         conv_out = F.conv2d(inputs,
+                             self.weight_conv,
+                             padding=self._padding,
+                             stride=self._stride,
+                             dilation=self._dilation,
+                             groups=self._in_channels,
+                             data_format=self._data_format)
+
+         # Pointwise 1x1 convolution mixes the channels.
+         out = F.conv2d(conv_out,
+                        self.weight_pointwise,
+                        bias=self.bias_pointwise,
+                        padding=0,
+                        stride=1,
+                        dilation=1,
+                        groups=1,
+                        data_format=self._data_format)
+
+         return out
+ class Encoder(paddle.nn.Layer):
+     def __init__(self, in_channels, out_channels):
+         super(Encoder, self).__init__()
+
+         self.relus = paddle.nn.LayerList(
+             [paddle.nn.ReLU() for i in range(2)])
+         self.separable_conv_01 = SeparableConv2D(in_channels,
+                                                  out_channels,
+                                                  kernel_size=3,
+                                                  padding='same')
+         self.bns = paddle.nn.LayerList(
+             [paddle.nn.BatchNorm2D(out_channels) for i in range(2)])
+
+         self.separable_conv_02 = SeparableConv2D(out_channels,
+                                                  out_channels,
+                                                  kernel_size=3,
+                                                  padding='same')
+         self.pool = paddle.nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
+         self.residual_conv = paddle.nn.Conv2D(in_channels,
+                                               out_channels,
+                                               kernel_size=1,
+                                               stride=2,
+                                               padding='same')
+
+     def forward(self, inputs):
+         previous_block_activation = inputs
+
+         y = self.relus[0](inputs)
+         y = self.separable_conv_01(y)
+         y = self.bns[0](y)
+         y = self.relus[1](y)
+         y = self.separable_conv_02(y)
+         y = self.bns[1](y)
+         y = self.pool(y)
+
+         # Project the block input to the downsampled shape and add it as a residual.
+         residual = self.residual_conv(previous_block_activation)
+         y = paddle.add(y, residual)
+
+         return y
+ class Decoder(paddle.nn.Layer):
+     def __init__(self, in_channels, out_channels):
+         super(Decoder, self).__init__()
+
+         self.relus = paddle.nn.LayerList(
+             [paddle.nn.ReLU() for i in range(2)])
+         self.conv_transpose_01 = paddle.nn.Conv2DTranspose(in_channels,
+                                                            out_channels,
+                                                            kernel_size=3,
+                                                            padding=1)
+         self.conv_transpose_02 = paddle.nn.Conv2DTranspose(out_channels,
+                                                            out_channels,
+                                                            kernel_size=3,
+                                                            padding=1)
+         self.bns = paddle.nn.LayerList(
+             [paddle.nn.BatchNorm2D(out_channels) for i in range(2)]
+         )
+         self.upsamples = paddle.nn.LayerList(
+             [paddle.nn.Upsample(scale_factor=2.0) for i in range(2)]
+         )
+         self.residual_conv = paddle.nn.Conv2D(in_channels,
+                                               out_channels,
+                                               kernel_size=1,
+                                               padding='same')
+
+     def forward(self, inputs):
+         previous_block_activation = inputs
+
+         y = self.relus[0](inputs)
+         y = self.conv_transpose_01(y)
+         y = self.bns[0](y)
+         y = self.relus[1](y)
+         y = self.conv_transpose_02(y)
+         y = self.bns[1](y)
+         y = self.upsamples[0](y)
+
+         # Upsample the block input and project it so it can be added as a residual.
+         residual = self.upsamples[1](previous_block_activation)
+         residual = self.residual_conv(residual)
+
+         y = paddle.add(y, residual)
+
+         return y
+ class PetNet(paddle.nn.Layer):
+     def __init__(self, num_classes):
+         super(PetNet, self).__init__()
+
+         self.conv_1 = paddle.nn.Conv2D(3, 32,
+                                        kernel_size=3,
+                                        stride=2,
+                                        padding='same')
+         self.bn = paddle.nn.BatchNorm2D(32)
+         self.relu = paddle.nn.ReLU()
+
+         in_channels = 32
+         self.encoders = []
+         self.encoder_list = [64, 128, 256]
+         self.decoder_list = [256, 128, 64, 32]
+
+         for out_channels in self.encoder_list:
+             block = self.add_sublayer('encoder_{}'.format(out_channels),
+                                       Encoder(in_channels, out_channels))
+             self.encoders.append(block)
+             in_channels = out_channels
+
+         self.decoders = []
+
+         for out_channels in self.decoder_list:
+             block = self.add_sublayer('decoder_{}'.format(out_channels),
+                                       Decoder(in_channels, out_channels))
+             self.decoders.append(block)
+             in_channels = out_channels
+
+         self.output_conv = paddle.nn.Conv2D(in_channels,
+                                             num_classes,
+                                             kernel_size=3,
+                                             padding='same')
+
+     def forward(self, inputs):
+         y = self.conv_1(inputs)
+         y = self.bn(y)
+         y = self.relu(y)
+
+         for encoder in self.encoders:
+             y = encoder(y)
+
+         for decoder in self.decoders:
+             y = decoder(y)
+
+         y = self.output_conv(y)
+         return y
+ IMAGE_SIZE = (512, 512)
+ num_classes = 2
+ network = PetNet(num_classes)
+ model = paddle.Model(network)
+
+ optimizer = paddle.optimizer.RMSProp(learning_rate=0.001, parameters=network.parameters())
+ layer_state_dict = paddle.load("mymodel.pdparams")
+ opt_state_dict = paddle.load("optimizer.pdopt")
+
+ network.set_state_dict(layer_state_dict)
+ optimizer.set_state_dict(opt_state_dict)
+
+ def FinalImage(mask, image):
+     # Blend a Gaussian-blurred version of the mask over the original image.
+     # mask: single-channel map with values in [0, 255]
+     # image: must be a 512x512 three-channel colour image
+
+     th = cv.threshold(mask, 140, 255, cv.THRESH_BINARY)[1]
+     blur = cv.GaussianBlur(th, (33, 33), 15)
+     heatmap_img = cv.applyColorMap(blur, cv.COLORMAP_OCEAN)
+     Blendermap = cv.addWeighted(heatmap_img, 0.5, image, 1, 0)
+     return Blendermap
+
+ import gradio as gr
+ def Showsegmentation(image):
+     # Resize to the network's input size, scale to [-1, 1] and convert to NCHW float32.
+     image = cv.resize(image, IMAGE_SIZE)
+     x = ((image - 127.5) / 127.5).astype('float32').transpose(2, 0, 1)
+     mask = paddle.argmax(network(paddle.to_tensor([x]))[0], axis=0).numpy()
+     mask = mask.astype('uint8') * 255
+     immask = cv.resize(mask, (512, 512))
+     image = cv.resize(image, (512, 512))
+     blendmask = FinalImage(immask, image)
+     return blendmask
+
+ gr.Interface(fn=Showsegmentation, inputs="image", outputs="image").launch(share=True)