AK391 committed on
Commit
2782137
1 Parent(s): a3a294d
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. __init_paths.py +30 -0
  2. align_faces.py +266 -0
  3. data_loader/dataset_face.py +103 -0
  4. data_loader/degradations.py +765 -0
  5. distributed.py +126 -0
  6. face_colorization.py +48 -0
  7. face_detect/data/FDDB/img_list.txt +2845 -0
  8. face_detect/data/__init__.py +3 -0
  9. face_detect/data/config.py +42 -0
  10. face_detect/data/data_augment.py +237 -0
  11. face_detect/data/wider_face.py +101 -0
  12. face_detect/facemodels/__init__.py +0 -0
  13. face_detect/facemodels/net.py +137 -0
  14. face_detect/facemodels/retinaface.py +127 -0
  15. face_detect/layers/__init__.py +2 -0
  16. face_detect/layers/functions/prior_box.py +34 -0
  17. face_detect/layers/modules/__init__.py +3 -0
  18. face_detect/layers/modules/multibox_loss.py +125 -0
  19. face_detect/retinaface_detection.py +192 -0
  20. face_detect/utils/__init__.py +0 -0
  21. face_detect/utils/box_utils.py +330 -0
  22. face_detect/utils/nms/__init__.py +0 -0
  23. face_detect/utils/nms/py_cpu_nms.py +38 -0
  24. face_detect/utils/timer.py +40 -0
  25. face_enhancement.py +145 -0
  26. face_inpainting.py +101 -0
  27. face_model/face_gan.py +57 -0
  28. face_model/gpen_model.py +747 -0
  29. face_model/op/__init__.py +2 -0
  30. face_model/op/fused_act.py +96 -0
  31. face_model/op/fused_bias_act.cpp +21 -0
  32. face_model/op/fused_bias_act_kernel.cu +99 -0
  33. face_model/op/upfirdn2d.cpp +23 -0
  34. face_model/op/upfirdn2d.py +194 -0
  35. face_model/op/upfirdn2d_kernel.cu +272 -0
  36. face_parse/blocks.py +127 -0
  37. face_parse/face_parsing.py +78 -0
  38. face_parse/mask.png +0 -0
  39. face_parse/parse_model.py +77 -0
  40. face_parse/test.png +0 -0
  41. loss/helpers.py +119 -0
  42. loss/id_loss.py +50 -0
  43. loss/model_irse.py +85 -0
  44. lpips/__init__.py +178 -0
  45. lpips/lpips.py +219 -0
  46. lpips/pretrained_networks.py +180 -0
  47. lpips/trainer.py +280 -0
  48. lpips/weights/v0.0/alex.pth +3 -0
  49. lpips/weights/v0.0/squeeze.pth +3 -0
  50. lpips/weights/v0.0/vgg.pth +3 -0
__init_paths.py ADDED
@@ -0,0 +1,30 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os.path as osp
+ import sys
+
+ def add_path(path):
+     if path not in sys.path:
+         sys.path.insert(0, path)
+
+ this_dir = osp.dirname(__file__)
+
+ path = osp.join(this_dir, 'face_detect')
+ add_path(path)
+
+ path = osp.join(this_dir, 'face_parse')
+ add_path(path)
+
+ path = osp.join(this_dir, 'face_model')
+ add_path(path)
+
+ path = osp.join(this_dir, 'sr_model')
+ add_path(path)
+
+ path = osp.join(this_dir, 'loss')
+ add_path(path)
+
+ path = osp.join(this_dir, 'data_loader')
+ add_path(path)
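
[Editor's note] A minimal usage sketch, not part of the commit: importing `__init_paths` once at program start is what lets the scripts in this commit import modules such as `face_gan` or `degradations` without package prefixes. The entry-point script name is hypothetical.

```python
# hypothetical entry point sitting next to __init_paths.py
import __init_paths  # noqa: F401 -- imported for its side effect: extends sys.path

# After the import, modules under face_model/, face_detect/, data_loader/, etc.
# resolve as top-level imports, e.g.:
# from face_gan import FaceGAN
# import degradations
```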
align_faces.py ADDED
@@ -0,0 +1,266 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Mon Apr 24 15:43:29 2017
+ @author: zhaoy
+ """
+ """
+ @Modified by yangxy (yangtao9009@gmail.com)
+ """
+ import cv2
+ import numpy as np
+ from skimage import transform as trans
+
+ # reference facial points, a list of coordinates (x, y)
+ REFERENCE_FACIAL_POINTS = [
+     [30.29459953, 51.69630051],
+     [65.53179932, 51.50139999],
+     [48.02519989, 71.73660278],
+     [33.54930115, 92.3655014],
+     [62.72990036, 92.20410156]
+ ]
+
+ DEFAULT_CROP_SIZE = (96, 112)
+
+
+ def _umeyama(src, dst, estimate_scale=True, scale=1.0):
+     """Estimate N-D similarity transformation with or without scaling.
+
+     Parameters
+     ----------
+     src : (M, N) array
+         Source coordinates.
+     dst : (M, N) array
+         Destination coordinates.
+     estimate_scale : bool
+         Whether to estimate scaling factor.
+
+     Returns
+     -------
+     T : (N + 1, N + 1)
+         The homogeneous similarity transformation matrix. The matrix contains
+         NaN values only if the problem is not well-conditioned.
+
+     References
+     ----------
+     .. [1] "Least-squares estimation of transformation parameters between two
+            point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
+     """
+     num = src.shape[0]
+     dim = src.shape[1]
+
+     # Compute mean of src and dst.
+     src_mean = src.mean(axis=0)
+     dst_mean = dst.mean(axis=0)
+
+     # Subtract mean from src and dst.
+     src_demean = src - src_mean
+     dst_demean = dst - dst_mean
+
+     # Eq. (38).
+     A = dst_demean.T @ src_demean / num
+
+     # Eq. (39).
+     d = np.ones((dim,), dtype=np.double)
+     if np.linalg.det(A) < 0:
+         d[dim - 1] = -1
+
+     T = np.eye(dim + 1, dtype=np.double)
+
+     U, S, V = np.linalg.svd(A)
+
+     # Eq. (40) and (43).
+     rank = np.linalg.matrix_rank(A)
+     if rank == 0:
+         return np.nan * T, scale
+     elif rank == dim - 1:
+         if np.linalg.det(U) * np.linalg.det(V) > 0:
+             T[:dim, :dim] = U @ V
+         else:
+             s = d[dim - 1]
+             d[dim - 1] = -1
+             T[:dim, :dim] = U @ np.diag(d) @ V
+             d[dim - 1] = s
+     else:
+         T[:dim, :dim] = U @ np.diag(d) @ V
+
+     if estimate_scale:
+         # Eq. (41) and (42).
+         scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)
+
+     T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
+     T[:dim, :dim] *= scale
+
+     return T, scale
+
+
+ class FaceWarpException(Exception):
+     def __str__(self):
+         return 'In File {}:{}'.format(
+             __file__, super().__str__())
+
+
+ def get_reference_facial_points(output_size=None,
+                                 inner_padding_factor=0.0,
+                                 outer_padding=(0, 0),
+                                 default_square=False):
+     tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
+     tmp_crop_size = np.array(DEFAULT_CROP_SIZE)
+
+     # 0) make the inner region a square
+     if default_square:
+         size_diff = max(tmp_crop_size) - tmp_crop_size
+         tmp_5pts += size_diff / 2
+         tmp_crop_size += size_diff
+
+     if (output_size and
+             output_size[0] == tmp_crop_size[0] and
+             output_size[1] == tmp_crop_size[1]):
+         print('output_size == DEFAULT_CROP_SIZE {}: return default reference points'.format(tmp_crop_size))
+         return tmp_5pts
+
+     if (inner_padding_factor == 0 and
+             outer_padding == (0, 0)):
+         if output_size is None:
+             print('No paddings to do: return default reference points')
+             return tmp_5pts
+         else:
+             raise FaceWarpException(
+                 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))
+
+     # check output size
+     if not (0 <= inner_padding_factor <= 1.0):
+         raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')
+
+     if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0)
+             and output_size is None):
+         output_size = (tmp_crop_size * (1 + inner_padding_factor * 2)).astype(np.int32)
+         output_size += np.array(outer_padding)
+         print('    deduced from paddings, output_size = ', output_size)
+
+     if not (outer_padding[0] < output_size[0]
+             and outer_padding[1] < output_size[1]):
+         raise FaceWarpException('Not (outer_padding[0] < output_size[0] '
+                                 'and outer_padding[1] < output_size[1])')
+
+     # 1) pad the inner region according to inner_padding_factor
+     if inner_padding_factor > 0:
+         size_diff = tmp_crop_size * inner_padding_factor * 2
+         tmp_5pts += size_diff / 2
+         tmp_crop_size += np.round(size_diff).astype(np.int32)
+
+     # 2) resize the padded inner region
+     size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
+
+     if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
+         raise FaceWarpException('Must have (output_size - outer_padding) '
+                                 '= some_scale * (crop_size * (1.0 + inner_padding_factor))')
+
+     scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
+     tmp_5pts = tmp_5pts * scale_factor
+     tmp_crop_size = size_bf_outer_pad
+
+     # 3) add outer_padding to make output_size
+     reference_5point = tmp_5pts + np.array(outer_padding)
+     tmp_crop_size = output_size
+
+     return reference_5point
+
+
+ def get_affine_transform_matrix(src_pts, dst_pts):
+     tfm = np.float32([[1, 0, 0], [0, 1, 0]])
+     n_pts = src_pts.shape[0]
+     ones = np.ones((n_pts, 1), src_pts.dtype)
+     src_pts_ = np.hstack([src_pts, ones])
+     dst_pts_ = np.hstack([dst_pts, ones])
+
+     A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)
+
+     if rank == 3:
+         tfm = np.float32([
+             [A[0, 0], A[1, 0], A[2, 0]],
+             [A[0, 1], A[1, 1], A[2, 1]]
+         ])
+     elif rank == 2:
+         tfm = np.float32([
+             [A[0, 0], A[1, 0], 0],
+             [A[0, 1], A[1, 1], 0]
+         ])
+
+     return tfm
+
+
+ def warp_and_crop_face(src_img,
+                        facial_pts,
+                        reference_pts=None,
+                        crop_size=(96, 112),
+                        align_type='similarity'):  # similarity, cv2_affine or affine
+     if reference_pts is None:
+         if crop_size[0] == 96 and crop_size[1] == 112:
+             reference_pts = REFERENCE_FACIAL_POINTS
+         else:
+             default_square = False
+             inner_padding_factor = 0
+             outer_padding = (0, 0)
+             output_size = crop_size
+
+             reference_pts = get_reference_facial_points(output_size,
+                                                         inner_padding_factor,
+                                                         outer_padding,
+                                                         default_square)
+
+     ref_pts = np.float32(reference_pts)
+     ref_pts_shp = ref_pts.shape
+     if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
+         raise FaceWarpException(
+             'reference_pts.shape must be (K,2) or (2,K) and K>2')
+
+     if ref_pts_shp[0] == 2:
+         ref_pts = ref_pts.T
+
+     src_pts = np.float32(facial_pts)
+     src_pts_shp = src_pts.shape
+     if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
+         raise FaceWarpException(
+             'facial_pts.shape must be (K,2) or (2,K) and K>2')
+
+     if src_pts_shp[0] == 2:
+         src_pts = src_pts.T
+
+     if src_pts.shape != ref_pts.shape:
+         raise FaceWarpException(
+             'facial_pts and reference_pts must have the same shape')
+
+     if align_type == 'cv2_affine':
+         tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
+         tfm_inv = cv2.getAffineTransform(ref_pts[0:3], src_pts[0:3])
+     elif align_type == 'affine':
+         tfm = get_affine_transform_matrix(src_pts, ref_pts)
+         tfm_inv = get_affine_transform_matrix(ref_pts, src_pts)
+     else:
+         params, scale = _umeyama(src_pts, ref_pts)
+         tfm = params[:2, :]
+
+         params, _ = _umeyama(ref_pts, src_pts, False, scale=1.0/scale)
+         tfm_inv = params[:2, :]
+
+     face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]), flags=3)
+
+     return face_img, tfm_inv
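
[Editor's note] A usage sketch, not part of the commit: the default branch of `warp_and_crop_face` estimates a similarity transform via `_umeyama` and returns both the aligned crop and the inverse transform for pasting a restored face back. The landmark coordinates and image path below are illustrative placeholders, not real detections.

```python
import cv2
import numpy as np
from align_faces import warp_and_crop_face, get_reference_facial_points

# Hypothetical 5-point landmarks (two eyes, nose tip, two mouth corners)
# as a detector would produce them; values here are made up.
facial5points = np.array([[182.0, 243.0], [312.0, 240.0],
                          [248.0, 320.0], [198.0, 390.0],
                          [305.0, 388.0]], dtype=np.float32)

img = cv2.imread('examples/imgs/sample.jpg')  # assumed input path

# Reference points for a padded square 256x256 crop.
reference = get_reference_facial_points(
    output_size=(256, 256), inner_padding_factor=0.25,
    outer_padding=(0, 0), default_square=True)

# Similarity alignment (the default branch, via _umeyama).
face, tfm_inv = warp_and_crop_face(
    img, facial5points, reference_pts=reference, crop_size=(256, 256))
# tfm_inv can later paste the processed crop back with cv2.warpAffine.
```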
data_loader/dataset_face.py ADDED
@@ -0,0 +1,103 @@
+ import numpy as np
+ import cv2
+ import os
+ import glob
+ import math
+ import random
+ import torch
+ import torch.nn.functional as F
+ from torch.utils.data import Dataset
+
+ import degradations
+
+
+ class GFPGAN_degradation(object):
+     def __init__(self):
+         self.kernel_list = ['iso', 'aniso']
+         self.kernel_prob = [0.5, 0.5]
+         self.blur_kernel_size = 41
+         self.blur_sigma = [0.1, 10]
+         self.downsample_range = [0.8, 8]
+         self.noise_range = [0, 20]
+         self.jpeg_range = [60, 100]
+         self.gray_prob = 0.2
+         self.color_jitter_prob = 0.0
+         self.color_jitter_pt_prob = 0.0
+         self.shift = 20/255.
+
+     def degrade_process(self, img_gt):
+         if random.random() > 0.5:
+             img_gt = cv2.flip(img_gt, 1)
+
+         h, w = img_gt.shape[:2]
+
+         # random color jitter
+         if np.random.uniform() < self.color_jitter_prob:
+             jitter_val = np.random.uniform(-self.shift, self.shift, 3).astype(np.float32)
+             img_gt = img_gt + jitter_val
+             img_gt = np.clip(img_gt, 0, 1)
+
+         # random grayscale
+         if np.random.uniform() < self.gray_prob:
+             img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
+             img_gt = np.tile(img_gt[:, :, None], [1, 1, 3])
+
+         # ------------------------ generate lq image ------------------------ #
+         # blur
+         kernel = degradations.random_mixed_kernels(
+             self.kernel_list,
+             self.kernel_prob,
+             self.blur_kernel_size,
+             self.blur_sigma,
+             self.blur_sigma, [-math.pi, math.pi],
+             noise_range=None)
+         img_lq = cv2.filter2D(img_gt, -1, kernel)
+         # downsample
+         scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1])
+         img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
+
+         # noise
+         if self.noise_range is not None:
+             img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range)
+         # jpeg compression
+         if self.jpeg_range is not None:
+             img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range)
+
+         # round and clip
+         img_lq = np.clip((img_lq * 255.0).round(), 0, 255) / 255.
+
+         # resize back to original size
+         img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR)
+
+         return img_gt, img_lq
+
+
+ class FaceDataset(Dataset):
+     def __init__(self, path, resolution=512):
+         self.resolution = resolution
+
+         self.HQ_imgs = glob.glob(os.path.join(path, '*.*'))
+         self.length = len(self.HQ_imgs)
+
+         self.degrader = GFPGAN_degradation()
+
+     def __len__(self):
+         return self.length
+
+     def __getitem__(self, index):
+         img_gt = cv2.imread(self.HQ_imgs[index], cv2.IMREAD_COLOR)
+         img_gt = cv2.resize(img_gt, (self.resolution, self.resolution), interpolation=cv2.INTER_AREA)
+
+         # BFR degradation
+         # We adopt the degradation of GFPGAN for simplicity, which, however,
+         # differs from the implementation described in our paper.
+         # Data degradation plays a key role in BFR. Please replace it with your own methods.
+         img_gt = img_gt.astype(np.float32)/255.
+         img_gt, img_lq = self.degrader.degrade_process(img_gt)
+
+         img_gt = (torch.from_numpy(img_gt) - 0.5) / 0.5
+         img_lq = (torch.from_numpy(img_lq) - 0.5) / 0.5
+
+         img_gt = img_gt.permute(2, 0, 1).flip(0)  # BGR->RGB
+         img_lq = img_lq.permute(2, 0, 1).flip(0)  # BGR->RGB
+
+         return img_lq, img_gt
+
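
[Editor's note] A minimal sketch of wiring `FaceDataset` into training, not part of the commit; the image directory is a placeholder, and `import degradations` assumes `data_loader/` is already on `sys.path` (e.g. via `__init_paths`).

```python
import __init_paths  # noqa: F401 -- puts data_loader/ on sys.path so `import degradations` resolves
from torch.utils.data import DataLoader
from dataset_face import FaceDataset

# 'path/to/hq_faces' is a placeholder for any folder of aligned HQ face images.
dataset = FaceDataset('path/to/hq_faces', resolution=512)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

for img_lq, img_gt in loader:
    # Both tensors have shape (B, 3, 512, 512), RGB, roughly in [-1, 1];
    # img_lq is the synthetically degraded input, img_gt the target.
    break
```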
data_loader/degradations.py ADDED
@@ -0,0 +1,765 @@
+ import cv2
+ import math
+ import numpy as np
+ import random
+ import torch
+ from scipy import special
+ from scipy.stats import multivariate_normal
+ from torchvision.transforms.functional_tensor import rgb_to_grayscale
+
+ # -------------------------------------------------------------------- #
+ # --------------------------- blur kernels --------------------------- #
+ # -------------------------------------------------------------------- #
+
+
+ # --------------------------- util functions --------------------------- #
+ def sigma_matrix2(sig_x, sig_y, theta):
+     """Calculate the rotated sigma matrix (two dimensional matrix).
+
+     Args:
+         sig_x (float):
+         sig_y (float):
+         theta (float): Radian measurement.
+
+     Returns:
+         ndarray: Rotated sigma matrix.
+     """
+     d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
+     u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
+     return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
+
+
+ def mesh_grid(kernel_size):
+     """Generate the mesh grid, centering at zero.
+
+     Args:
+         kernel_size (int):
+
+     Returns:
+         xy (ndarray): with the shape (kernel_size, kernel_size, 2)
+         xx (ndarray): with the shape (kernel_size, kernel_size)
+         yy (ndarray): with the shape (kernel_size, kernel_size)
+     """
+     ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
+     xx, yy = np.meshgrid(ax, ax)
+     xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)),
+                     yy.reshape(kernel_size * kernel_size, 1))).reshape(kernel_size, kernel_size, 2)
+     return xy, xx, yy
+
+
+ def pdf2(sigma_matrix, grid):
+     """Calculate PDF of the bivariate Gaussian distribution.
+
+     Args:
+         sigma_matrix (ndarray): with the shape (2, 2)
+         grid (ndarray): generated by :func:`mesh_grid`,
+             with the shape (K, K, 2), K is the kernel size.
+
+     Returns:
+         kernel (ndarray): un-normalized kernel.
+     """
+     inverse_sigma = np.linalg.inv(sigma_matrix)
+     kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
+     return kernel
+
+
+ def cdf2(d_matrix, grid):
+     """Calculate the CDF of the standard bivariate Gaussian distribution.
+     Used in skewed Gaussian distribution.
+
+     Args:
+         d_matrix (ndarray): skew matrix.
+         grid (ndarray): generated by :func:`mesh_grid`,
+             with the shape (K, K, 2), K is the kernel size.
+
+     Returns:
+         cdf (ndarray): skewed cdf.
+     """
+     rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
+     grid = np.dot(grid, d_matrix)
+     cdf = rv.cdf(grid)
+     return cdf
+
+
+ def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
+     """Generate a bivariate isotropic or anisotropic Gaussian kernel.
+
+     In the isotropic mode, only `sig_x` is used; `sig_y` and `theta` are ignored.
+
+     Args:
+         kernel_size (int):
+         sig_x (float):
+         sig_y (float):
+         theta (float): Radian measurement.
+         grid (ndarray, optional): generated by :func:`mesh_grid`,
+             with the shape (K, K, 2), K is the kernel size. Default: None
+         isotropic (bool):
+
+     Returns:
+         kernel (ndarray): normalized kernel.
+     """
+     if grid is None:
+         grid, _, _ = mesh_grid(kernel_size)
+     if isotropic:
+         sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+     else:
+         sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+     kernel = pdf2(sigma_matrix, grid)
+     kernel = kernel / np.sum(kernel)
+     return kernel
+
+
+ def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
+     """Generate a bivariate generalized Gaussian kernel.
+     Described in `Parameter Estimation For Multivariate Generalized
+     Gaussian Distributions`_ by Pascal et al. (2013).
+
+     In the isotropic mode, only `sig_x` is used; `sig_y` and `theta` are ignored.
+
+     Args:
+         kernel_size (int):
+         sig_x (float):
+         sig_y (float):
+         theta (float): Radian measurement.
+         beta (float): shape parameter, beta = 1 is the normal distribution.
+         grid (ndarray, optional): generated by :func:`mesh_grid`,
+             with the shape (K, K, 2), K is the kernel size. Default: None
+
+     Returns:
+         kernel (ndarray): normalized kernel.
+
+     .. _Parameter Estimation For Multivariate Generalized Gaussian
+        Distributions: https://arxiv.org/abs/1302.6498
+     """
+     if grid is None:
+         grid, _, _ = mesh_grid(kernel_size)
+     if isotropic:
+         sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+     else:
+         sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+     inverse_sigma = np.linalg.inv(sigma_matrix)
+     kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
+     kernel = kernel / np.sum(kernel)
+     return kernel
+
+
+ def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
+     """Generate a plateau-like anisotropic kernel.
+     1 / (1 + x^(beta))
+
+     Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
+
+     In the isotropic mode, only `sig_x` is used; `sig_y` and `theta` are ignored.
+
+     Args:
+         kernel_size (int):
+         sig_x (float):
+         sig_y (float):
+         theta (float): Radian measurement.
+         beta (float): shape parameter, beta = 1 is the normal distribution.
+         grid (ndarray, optional): generated by :func:`mesh_grid`,
+             with the shape (K, K, 2), K is the kernel size. Default: None
+
+     Returns:
+         kernel (ndarray): normalized kernel.
+     """
+     if grid is None:
+         grid, _, _ = mesh_grid(kernel_size)
+     if isotropic:
+         sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
+     else:
+         sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
+     inverse_sigma = np.linalg.inv(sigma_matrix)
+     kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
+     kernel = kernel / np.sum(kernel)
+     return kernel
+
+
+ def random_bivariate_Gaussian(kernel_size,
+                               sigma_x_range,
+                               sigma_y_range,
+                               rotation_range,
+                               noise_range=None,
+                               isotropic=True):
+     """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
+
+     In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
+     `rotation_range` are ignored.
+
+     Args:
+         kernel_size (int):
+         sigma_x_range (tuple): [0.6, 5]
+         sigma_y_range (tuple): [0.6, 5]
+         rotation_range (tuple): [-math.pi, math.pi]
+         noise_range (tuple, optional): multiplicative kernel noise,
+             [0.75, 1.25]. Default: None
+
+     Returns:
+         kernel (ndarray):
+     """
+     assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+     assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+     sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+     if isotropic is False:
+         assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+         assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+         sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+         rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+     else:
+         sigma_y = sigma_x
+         rotation = 0
+
+     kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
+
+     # add multiplicative noise
+     if noise_range is not None:
+         assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+         noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+         kernel = kernel * noise
+     kernel = kernel / np.sum(kernel)
+     return kernel
+
+
+ def random_bivariate_generalized_Gaussian(kernel_size,
+                                           sigma_x_range,
+                                           sigma_y_range,
+                                           rotation_range,
+                                           beta_range,
+                                           noise_range=None,
+                                           isotropic=True):
+     """Randomly generate bivariate generalized Gaussian kernels.
+
+     In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
+     `rotation_range` are ignored.
+
+     Args:
+         kernel_size (int):
+         sigma_x_range (tuple): [0.6, 5]
+         sigma_y_range (tuple): [0.6, 5]
+         rotation_range (tuple): [-math.pi, math.pi]
+         beta_range (tuple): [0.5, 8]
+         noise_range (tuple, optional): multiplicative kernel noise,
+             [0.75, 1.25]. Default: None
+
+     Returns:
+         kernel (ndarray):
+     """
+     assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+     assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+     sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+     if isotropic is False:
+         assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+         assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+         sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+         rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+     else:
+         sigma_y = sigma_x
+         rotation = 0
+
+     # assume beta_range[0] < 1 < beta_range[1]
+     if np.random.uniform() < 0.5:
+         beta = np.random.uniform(beta_range[0], 1)
+     else:
+         beta = np.random.uniform(1, beta_range[1])
+
+     kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
+
+     # add multiplicative noise
+     if noise_range is not None:
+         assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+         noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+         kernel = kernel * noise
+     kernel = kernel / np.sum(kernel)
+     return kernel
+
+
+ def random_bivariate_plateau(kernel_size,
+                              sigma_x_range,
+                              sigma_y_range,
+                              rotation_range,
+                              beta_range,
+                              noise_range=None,
+                              isotropic=True):
+     """Randomly generate bivariate plateau kernels.
+
+     In the isotropic mode, only `sigma_x_range` is used; `sigma_y_range` and
+     `rotation_range` are ignored.
+
+     Args:
+         kernel_size (int):
+         sigma_x_range (tuple): [0.6, 5]
+         sigma_y_range (tuple): [0.6, 5]
+         rotation_range (tuple): [-math.pi/2, math.pi/2]
+         beta_range (tuple): [1, 4]
+         noise_range (tuple, optional): multiplicative kernel noise,
+             [0.75, 1.25]. Default: None
+
+     Returns:
+         kernel (ndarray):
+     """
+     assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+     assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
+     sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
+     if isotropic is False:
+         assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
+         assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
+         sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
+         rotation = np.random.uniform(rotation_range[0], rotation_range[1])
+     else:
+         sigma_y = sigma_x
+         rotation = 0
+
+     # TODO: this may be not proper
+     if np.random.uniform() < 0.5:
+         beta = np.random.uniform(beta_range[0], 1)
+     else:
+         beta = np.random.uniform(1, beta_range[1])
+
+     kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
+     # add multiplicative noise
+     if noise_range is not None:
+         assert noise_range[0] < noise_range[1], 'Wrong noise range.'
+         noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
+         kernel = kernel * noise
+     kernel = kernel / np.sum(kernel)
+
+     return kernel
+
+
+ def random_mixed_kernels(kernel_list,
+                          kernel_prob,
+                          kernel_size=21,
+                          sigma_x_range=(0.6, 5),
+                          sigma_y_range=(0.6, 5),
+                          rotation_range=(-math.pi, math.pi),
+                          betag_range=(0.5, 8),
+                          betap_range=(0.5, 8),
+                          noise_range=None):
+     """Randomly generate mixed kernels.
+
+     Args:
+         kernel_list (tuple): a list of kernel type names,
+             supports ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',
+             'plateau_aniso']
+         kernel_prob (tuple): corresponding kernel probability for each
+             kernel type
+         kernel_size (int):
+         sigma_x_range (tuple): [0.6, 5]
+         sigma_y_range (tuple): [0.6, 5]
+         rotation_range (tuple): [-math.pi, math.pi]
+         betag_range / betap_range (tuple): [0.5, 8]
+         noise_range (tuple, optional): multiplicative kernel noise,
+             [0.75, 1.25]. Default: None
+
+     Returns:
+         kernel (ndarray):
+     """
+     kernel_type = random.choices(kernel_list, kernel_prob)[0]
+     if kernel_type == 'iso':
+         kernel = random_bivariate_Gaussian(
+             kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
+     elif kernel_type == 'aniso':
+         kernel = random_bivariate_Gaussian(
+             kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
+     elif kernel_type == 'generalized_iso':
+         kernel = random_bivariate_generalized_Gaussian(
+             kernel_size,
+             sigma_x_range,
+             sigma_y_range,
+             rotation_range,
+             betag_range,
+             noise_range=noise_range,
+             isotropic=True)
+     elif kernel_type == 'generalized_aniso':
+         kernel = random_bivariate_generalized_Gaussian(
+             kernel_size,
+             sigma_x_range,
+             sigma_y_range,
+             rotation_range,
+             betag_range,
+             noise_range=noise_range,
+             isotropic=False)
+     elif kernel_type == 'plateau_iso':
+         kernel = random_bivariate_plateau(
+             kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
+     elif kernel_type == 'plateau_aniso':
+         kernel = random_bivariate_plateau(
+             kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
+     return kernel
+
+
+ np.seterr(divide='ignore', invalid='ignore')
+
+
+ def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
+     """2D sinc filter, ref: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
+
+     Args:
+         cutoff (float): cutoff frequency in radians (pi is max)
+         kernel_size (int): horizontal and vertical size, must be odd.
+         pad_to (int): pad kernel size to desired size, must be odd or zero.
+     """
+     assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
+     kernel = np.fromfunction(
+         lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
+             (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
+                 (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
+     kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
+     kernel = kernel / np.sum(kernel)
+     if pad_to > kernel_size:
+         pad_size = (pad_to - kernel_size) // 2
+         kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
+     return kernel
+
+
+ # ------------------------------------------------------------- #
+ # --------------------------- noise --------------------------- #
+ # ------------------------------------------------------------- #
+
+ # ----------------------- Gaussian Noise ----------------------- #
+
+
+ def generate_gaussian_noise(img, sigma=10, gray_noise=False):
+     """Generate Gaussian noise.
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         sigma (float): Noise scale (measured in range 255). Default: 10.
+
+     Returns:
+         (Numpy array): Generated noise, shape (h, w, c), float32.
+     """
+     if gray_noise:
+         noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
+         noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
+     else:
+         noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
+     return noise
+
+
+ def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
+     """Add Gaussian noise.
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         sigma (float): Noise scale (measured in range 255). Default: 10.
+
+     Returns:
+         (Numpy array): Returned noisy image, shape (h, w, c), range [0, 1],
+             float32.
+     """
+     noise = generate_gaussian_noise(img, sigma, gray_noise)
+     out = img + noise
+     if clip and rounds:
+         out = np.clip((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = np.clip(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
+     """Generate Gaussian noise (PyTorch version).
+
+     Args:
+         img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
+         sigma (float | Tensor): Noise scale. Default: 10.
+
+     Returns:
+         (Tensor): Generated noise, shape (b, c, h, w), float32.
+     """
+     b, _, h, w = img.size()
+     if not isinstance(sigma, (float, int)):
+         sigma = sigma.view(img.size(0), 1, 1, 1)
+     if isinstance(gray_noise, (float, int)):
+         cal_gray_noise = gray_noise > 0
+     else:
+         gray_noise = gray_noise.view(b, 1, 1, 1)
+         cal_gray_noise = torch.sum(gray_noise) > 0
+
+     if cal_gray_noise:
+         noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
+         noise_gray = noise_gray.view(b, 1, h, w)
+
+     # always calculate color noise
+     noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
+
+     if cal_gray_noise:
+         noise = noise * (1 - gray_noise) + noise_gray * gray_noise
+     return noise
+
+
+ def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
+     """Add Gaussian noise (PyTorch version).
+
+     Args:
+         img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
+         sigma (float | Tensor): Noise scale. Default: 10.
+
+     Returns:
+         (Tensor): Returned noisy image, shape (b, c, h, w), range [0, 1],
+             float32.
+     """
+     noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
+     out = img + noise
+     if clip and rounds:
+         out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = torch.clamp(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ # ----------------------- Random Gaussian Noise ----------------------- #
+ def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
+     sigma = np.random.uniform(sigma_range[0], sigma_range[1])
+     if np.random.uniform() < gray_prob:
+         gray_noise = True
+     else:
+         gray_noise = False
+     return generate_gaussian_noise(img, sigma, gray_noise)
+
+
+ def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+     noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
+     out = img + noise
+     if clip and rounds:
+         out = np.clip((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = np.clip(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
+     sigma = torch.rand(
+         img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
+     gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
+     gray_noise = (gray_noise < gray_prob).float()
+     return generate_gaussian_noise_pt(img, sigma, gray_noise)
+
+
+ def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+     noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
+     out = img + noise
+     if clip and rounds:
+         out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = torch.clamp(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ # ----------------------- Poisson (Shot) Noise ----------------------- #
+
+
+ def generate_poisson_noise(img, scale=1.0, gray_noise=False):
+     """Generate poisson noise.
+
+     Ref: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         scale (float): Noise scale. Default: 1.0.
+         gray_noise (bool): Whether to generate gray noise. Default: False.
+
+     Returns:
+         (Numpy array): Generated noise, shape (h, w, c), float32.
+     """
+     if gray_noise:
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     # round and clip image for counting vals correctly
+     img = np.clip((img * 255.0).round(), 0, 255) / 255.
+     vals = len(np.unique(img))
+     vals = 2**np.ceil(np.log2(vals))
+     out = np.float32(np.random.poisson(img * vals) / float(vals))
+     noise = out - img
+     if gray_noise:
+         noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
+     return noise * scale
+
+
+ def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
+     """Add poisson noise.
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         scale (float): Noise scale. Default: 1.0.
+         gray_noise (bool): Whether to generate gray noise. Default: False.
+
+     Returns:
+         (Numpy array): Returned noisy image, shape (h, w, c), range [0, 1],
+             float32.
+     """
+     noise = generate_poisson_noise(img, scale, gray_noise)
+     out = img + noise
+     if clip and rounds:
+         out = np.clip((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = np.clip(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
+     """Generate a batch of poisson noise (PyTorch version).
+
+     Args:
+         img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
+         scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
+             Default: 1.0.
+         gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
+             0 for False, 1 for True. Default: 0.
+
+     Returns:
+         (Tensor): Generated noise, shape (b, c, h, w), float32.
+     """
+     b, _, h, w = img.size()
+     if isinstance(gray_noise, (float, int)):
+         cal_gray_noise = gray_noise > 0
+     else:
+         gray_noise = gray_noise.view(b, 1, 1, 1)
+         cal_gray_noise = torch.sum(gray_noise) > 0
+     if cal_gray_noise:
+         img_gray = rgb_to_grayscale(img, num_output_channels=1)
+         # round and clip image for counting vals correctly
+         img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
+         # use for-loop to get the unique values for each sample
+         vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
+         vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
+         vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
+         out = torch.poisson(img_gray * vals) / vals
+         noise_gray = out - img_gray
+         noise_gray = noise_gray.expand(b, 3, h, w)
+
+     # always calculate color noise
+     # round and clip image for counting vals correctly
+     img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
+     # use for-loop to get the unique values for each sample
+     vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
+     vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
+     vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
+     out = torch.poisson(img * vals) / vals
+     noise = out - img
+     if cal_gray_noise:
+         noise = noise * (1 - gray_noise) + noise_gray * gray_noise
+     if not isinstance(scale, (float, int)):
+         scale = scale.view(b, 1, 1, 1)
+     return noise * scale
+
+
+ def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
+     """Add poisson noise to a batch of images (PyTorch version).
+
+     Args:
+         img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
+         scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
+             Default: 1.0.
+         gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
+             0 for False, 1 for True. Default: 0.
+
+     Returns:
+         (Tensor): Returned noisy image, shape (b, c, h, w), range [0, 1],
+             float32.
+     """
+     noise = generate_poisson_noise_pt(img, scale, gray_noise)
+     out = img + noise
+     if clip and rounds:
+         out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = torch.clamp(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ # ----------------------- Random Poisson (Shot) Noise ----------------------- #
+
+
+ def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
+     scale = np.random.uniform(scale_range[0], scale_range[1])
+     if np.random.uniform() < gray_prob:
+         gray_noise = True
+     else:
+         gray_noise = False
+     return generate_poisson_noise(img, scale, gray_noise)
+
+
+ def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+     noise = random_generate_poisson_noise(img, scale_range, gray_prob)
+     out = img + noise
+     if clip and rounds:
+         out = np.clip((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = np.clip(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
+     scale = torch.rand(
+         img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
+     gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
+     gray_noise = (gray_noise < gray_prob).float()
+     return generate_poisson_noise_pt(img, scale, gray_noise)
+
+
+ def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
+     noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
+     out = img + noise
+     if clip and rounds:
+         out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
+     elif clip:
+         out = torch.clamp(out, 0, 1)
+     elif rounds:
+         out = (out * 255.0).round() / 255.
+     return out
+
+
+ # ------------------------------------------------------------------------ #
+ # --------------------------- JPEG compression --------------------------- #
+ # ------------------------------------------------------------------------ #
+
+
+ def add_jpg_compression(img, quality=90):
+     """Add JPG compression artifacts.
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         quality (float): JPG compression quality. 0 for lowest quality, 100 for
+             best quality. Default: 90.
+
+     Returns:
+         (Numpy array): Returned image after JPG, shape (h, w, c), range [0, 1],
+             float32.
+     """
+     img = np.clip(img, 0, 1)
+     encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+     _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
+     img = np.float32(cv2.imdecode(encimg, 1)) / 255.
+     return img
+
+
+ def random_add_jpg_compression(img, quality_range=(90, 100)):
+     """Randomly add JPG compression artifacts.
+
+     Args:
+         img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
+         quality_range (tuple[float] | list[float]): JPG compression quality
+             range. 0 for lowest quality, 100 for best quality.
+             Default: (90, 100).
+
+     Returns:
+         (Numpy array): Returned image after JPG, shape (h, w, c), range [0, 1],
+             float32.
+     """
+     quality = int(np.random.uniform(quality_range[0], quality_range[1]))
+     return add_jpg_compression(img, quality)
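
[Editor's note] A sketch of the blur -> noise -> JPEG pipeline these helpers implement, not part of the commit; the dummy image stands in for a real [0, 1] float photo, and the parameter values mirror the defaults used by `GFPGAN_degradation` in dataset_face.py.

```python
import math
import cv2
import numpy as np
import degradations  # assumes data_loader/ is on sys.path (see __init_paths.py)

img = np.random.rand(128, 128, 3).astype(np.float32)  # stand-in for a [0, 1] image

# 1) random isotropic/anisotropic Gaussian blur kernel, applied with cv2.filter2D
kernel = degradations.random_mixed_kernels(
    ['iso', 'aniso'], [0.5, 0.5], kernel_size=41,
    sigma_x_range=(0.1, 10), sigma_y_range=(0.1, 10),
    rotation_range=(-math.pi, math.pi), noise_range=None)
blurred = cv2.filter2D(img, -1, kernel)

# 2) additive Gaussian noise with sigma drawn from (0, 20) on the 255 scale
noisy = degradations.random_add_gaussian_noise(blurred, sigma_range=(0, 20))

# 3) JPEG round-trip at a random quality in (60, 100)
degraded = degradations.random_add_jpg_compression(noisy, quality_range=(60, 100))
```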
distributed.py ADDED
@@ -0,0 +1,126 @@
+ import math
+ import pickle
+
+ import torch
+ from torch import distributed as dist
+ from torch.utils.data.sampler import Sampler
+
+
+ def get_rank():
+     if not dist.is_available():
+         return 0
+
+     if not dist.is_initialized():
+         return 0
+
+     return dist.get_rank()
+
+
+ def synchronize():
+     if not dist.is_available():
+         return
+
+     if not dist.is_initialized():
+         return
+
+     world_size = dist.get_world_size()
+
+     if world_size == 1:
+         return
+
+     dist.barrier()
+
+
+ def get_world_size():
+     if not dist.is_available():
+         return 1
+
+     if not dist.is_initialized():
+         return 1
+
+     return dist.get_world_size()
+
+
+ def reduce_sum(tensor):
+     if not dist.is_available():
+         return tensor
+
+     if not dist.is_initialized():
+         return tensor
+
+     tensor = tensor.clone()
+     dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+
+     return tensor
+
+
+ def gather_grad(params):
+     world_size = get_world_size()
+
+     if world_size == 1:
+         return
+
+     for param in params:
+         if param.grad is not None:
+             dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
+             param.grad.data.div_(world_size)
+
+
+ def all_gather(data):
+     world_size = get_world_size()
+
+     if world_size == 1:
+         return [data]
+
+     buffer = pickle.dumps(data)
+     storage = torch.ByteStorage.from_buffer(buffer)
+     tensor = torch.ByteTensor(storage).to('cuda')
+
+     local_size = torch.IntTensor([tensor.numel()]).to('cuda')
+     size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
+     dist.all_gather(size_list, local_size)
+     size_list = [int(size.item()) for size in size_list]
+     max_size = max(size_list)
+
+     tensor_list = []
+     for _ in size_list:
+         tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
+
+     if local_size != max_size:
+         padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
+         tensor = torch.cat((tensor, padding), 0)
+
+     dist.all_gather(tensor_list, tensor)
+
+     data_list = []
+
+     for size, tensor in zip(size_list, tensor_list):
+         buffer = tensor.cpu().numpy().tobytes()[:size]
+         data_list.append(pickle.loads(buffer))
+
+     return data_list
+
+
+ def reduce_loss_dict(loss_dict):
+     world_size = get_world_size()
+
+     if world_size < 2:
+         return loss_dict
+
+     with torch.no_grad():
+         keys = []
+         losses = []
+
+         for k in sorted(loss_dict.keys()):
+             keys.append(k)
+             losses.append(loss_dict[k])
+
+         losses = torch.stack(losses, 0)
+         dist.reduce(losses, dst=0)
+
+         if dist.get_rank() == 0:
+             losses /= world_size
+
+         reduced_losses = {k: v for k, v in zip(keys, losses)}
+
+     return reduced_losses
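
[Editor's note] A minimal sketch of how `reduce_loss_dict` is typically used inside a training step, not part of the commit; the loss values are illustrative. In a multi-GPU run the tensors should live on the process's device, and the function is a no-op when the world size is 1.

```python
import torch
from distributed import reduce_loss_dict, get_rank

# Hypothetical per-step losses (ordinarily the outputs of real loss modules).
loss_dict = {'d': torch.tensor(0.7), 'g': torch.tensor(1.3)}

# Sums each entry onto rank 0 and divides by the world size there,
# so only rank 0 sees the cross-GPU averages for logging.
reduced = reduce_loss_dict(loss_dict)
if get_rank() == 0:
    print({k: float(v) for k, v in reduced.items()})
```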
face_colorization.py ADDED
@@ -0,0 +1,48 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os
+ import cv2
+ import glob
+ import time
+ import numpy as np
+ from PIL import Image
+ import __init_paths
+ from face_model.face_gan import FaceGAN
+
+ class FaceColorization(object):
+     def __init__(self, base_dir='./', size=1024, model=None, channel_multiplier=2):
+         self.facegan = FaceGAN(base_dir, size, model, channel_multiplier)
+
+     # make sure the face image is well aligned; please refer to face_enhancement.py
+     def process(self, gray):
+         # colorize the face
+         out = self.facegan.process(gray)
+
+         return out
+
+
+ if __name__ == '__main__':
+     model = {'name': 'GPEN-Colorization-1024', 'size': 1024}
+
+     indir = 'examples/grays'
+     outdir = 'examples/outs-colorization'
+     os.makedirs(outdir, exist_ok=True)
+
+     facecolorizer = FaceColorization(size=model['size'], model=model['name'], channel_multiplier=2)
+
+     files = sorted(glob.glob(os.path.join(indir, '*.*g')))
+     for n, file in enumerate(files[:]):
+         filename = os.path.basename(file)
+
+         grayf = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
+         grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR)  # channel: 1->3
+
+         colorf = facecolorizer.process(grayf)
+
+         grayf = cv2.resize(grayf, colorf.shape[:2])
+         cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1]) + '.jpg'), np.hstack((grayf, colorf)))
+
+         if n % 10 == 0:
+             print(n, file)
+
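[Editor's note] A single-image usage sketch, not part of the commit: the class wraps `FaceGAN`, so the pretrained 'GPEN-Colorization-1024' weights are assumed to be available under the default `base_dir`, and the input/output paths are placeholders.

```python
import cv2
from face_colorization import FaceColorization

colorizer = FaceColorization(size=1024, model='GPEN-Colorization-1024')

gray = cv2.imread('examples/grays/input.jpg', cv2.IMREAD_GRAYSCALE)  # assumed path
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)  # the model expects 3 channels

color = colorizer.process(gray)
cv2.imwrite('out.jpg', color)
```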
face_detect/data/FDDB/img_list.txt ADDED
@@ -0,0 +1,2845 @@
+ 2002/08/11/big/img_591
+ 2002/08/26/big/img_265
+ 2002/07/19/big/img_423
+ 2002/08/24/big/img_490
+ 2002/08/31/big/img_17676
+ 2002/07/31/big/img_228
+ 2002/07/24/big/img_402
+ 2002/08/04/big/img_769
+ 2002/07/19/big/img_581
+ 2002/08/13/big/img_723
+ 2002/08/12/big/img_821
+ 2003/01/17/big/img_610
+ 2002/08/13/big/img_1116
+ 2002/08/28/big/img_19238
+ 2002/08/21/big/img_660
+ 2002/08/14/big/img_607
+ 2002/08/05/big/img_3708
+ 2002/08/19/big/img_511
+ 2002/08/07/big/img_1316
+ 2002/07/25/big/img_1047
+ 2002/07/23/big/img_474
+ 2002/07/27/big/img_970
+ 2002/09/02/big/img_15752
+ 2002/09/01/big/img_16378
+ 2002/09/01/big/img_16189
+ 2002/08/26/big/img_276
+ 2002/07/24/big/img_518
+ 2002/08/14/big/img_1027
+ 2002/08/24/big/img_733
+ 2002/08/15/big/img_249
+ 2003/01/15/big/img_1371
+ 2002/08/07/big/img_1348
+ 2003/01/01/big/img_331
+ 2002/08/23/big/img_536
+ 2002/07/30/big/img_224
+ 2002/08/10/big/img_763
+ 2002/08/21/big/img_293
+ 2002/08/15/big/img_1211
+ 2002/08/15/big/img_1194
+ 2003/01/15/big/img_390
+ 2002/08/06/big/img_2893
+ 2002/08/17/big/img_691
+ 2002/08/07/big/img_1695
+ 2002/08/16/big/img_829
+ 2002/07/25/big/img_201
+ 2002/08/23/big/img_36
+ 2003/01/15/big/img_763
+ 2003/01/15/big/img_637
+ 2002/08/22/big/img_592
+ 2002/07/25/big/img_817
+ 2003/01/15/big/img_1219
+ 2002/08/05/big/img_3508
+ 2002/08/15/big/img_1108
+ 2002/07/19/big/img_488
+ 2003/01/16/big/img_704
+ 2003/01/13/big/img_1087
+ 2002/08/10/big/img_670
+ 2002/07/24/big/img_104
+ 2002/08/27/big/img_19823
+ 2002/09/01/big/img_16229
+ 2003/01/13/big/img_846
+ 2002/08/04/big/img_412
+ 2002/07/22/big/img_554
+ 2002/08/12/big/img_331
+ 2002/08/02/big/img_533
+ 2002/08/12/big/img_259
+ 2002/08/18/big/img_328
+ 2003/01/14/big/img_630
+ 2002/08/05/big/img_3541
+ 2002/08/06/big/img_2390
+ 2002/08/20/big/img_150
+ 2002/08/02/big/img_1231
+ 2002/08/16/big/img_710
+ 2002/08/19/big/img_591
+ 2002/07/22/big/img_725
+ 2002/07/24/big/img_820
+ 2003/01/13/big/img_568
+ 2002/08/22/big/img_853
+ 2002/08/09/big/img_648
+ 2002/08/23/big/img_528
+ 2003/01/14/big/img_888
+ 2002/08/30/big/img_18201
+ 2002/08/13/big/img_965
+ 2003/01/14/big/img_660
+ 2002/07/19/big/img_517
+ 2003/01/14/big/img_406
+ 2002/08/30/big/img_18433
+ 2002/08/07/big/img_1630
+ 2002/08/06/big/img_2717
+ 2002/08/21/big/img_470
+ 2002/07/23/big/img_633
+ 2002/08/20/big/img_915
+ 2002/08/16/big/img_893
+ 2002/07/29/big/img_644
+ 2002/08/15/big/img_529
+ 2002/08/16/big/img_668
+ 2002/08/07/big/img_1871
+ 2002/07/25/big/img_192
+ 2002/07/31/big/img_961
+ 2002/08/19/big/img_738
+ 2002/07/31/big/img_382
+ 2002/08/19/big/img_298
+ 2003/01/17/big/img_608
+ 2002/08/21/big/img_514
+ 2002/07/23/big/img_183
+ 2003/01/17/big/img_536
+ 2002/07/24/big/img_478
+ 2002/08/06/big/img_2997
+ 2002/09/02/big/img_15380
+ 2002/08/07/big/img_1153
+ 2002/07/31/big/img_967
+ 2002/07/31/big/img_711
+ 2002/08/26/big/img_664
+ 2003/01/01/big/img_326
+ 2002/08/24/big/img_775
+ 2002/08/08/big/img_961
+ 2002/08/16/big/img_77
+ 2002/08/12/big/img_296
+ 2002/07/22/big/img_905
+ 2003/01/13/big/img_284
+ 2002/08/13/big/img_887
+ 2002/08/24/big/img_849
+ 2002/07/30/big/img_345
+ 2002/08/18/big/img_419
+ 2002/08/01/big/img_1347
+ 2002/08/05/big/img_3670
+ 2002/07/21/big/img_479
+ 2002/08/08/big/img_913
+ 2002/09/02/big/img_15828
+ 2002/08/30/big/img_18194
+ 2002/08/08/big/img_471
+ 2002/08/22/big/img_734
+ 2002/08/09/big/img_586
+ 2002/08/09/big/img_454
+ 2002/07/29/big/img_47
136
+ 2002/07/19/big/img_381
137
+ 2002/07/29/big/img_733
138
+ 2002/08/20/big/img_327
139
+ 2002/07/21/big/img_96
140
+ 2002/08/06/big/img_2680
141
+ 2002/07/25/big/img_919
142
+ 2002/07/21/big/img_158
143
+ 2002/07/22/big/img_801
144
+ 2002/07/22/big/img_567
145
+ 2002/07/24/big/img_804
146
+ 2002/07/24/big/img_690
147
+ 2003/01/15/big/img_576
148
+ 2002/08/14/big/img_335
149
+ 2003/01/13/big/img_390
150
+ 2002/08/11/big/img_258
151
+ 2002/07/23/big/img_917
152
+ 2002/08/15/big/img_525
153
+ 2003/01/15/big/img_505
154
+ 2002/07/30/big/img_886
155
+ 2003/01/16/big/img_640
156
+ 2003/01/14/big/img_642
157
+ 2003/01/17/big/img_844
158
+ 2002/08/04/big/img_571
159
+ 2002/08/29/big/img_18702
160
+ 2003/01/15/big/img_240
161
+ 2002/07/29/big/img_553
162
+ 2002/08/10/big/img_354
163
+ 2002/08/18/big/img_17
164
+ 2003/01/15/big/img_782
165
+ 2002/07/27/big/img_382
166
+ 2002/08/14/big/img_970
167
+ 2003/01/16/big/img_70
168
+ 2003/01/16/big/img_625
169
+ 2002/08/18/big/img_341
170
+ 2002/08/26/big/img_188
171
+ 2002/08/09/big/img_405
172
+ 2002/08/02/big/img_37
173
+ 2002/08/13/big/img_748
174
+ 2002/07/22/big/img_399
175
+ 2002/07/25/big/img_844
176
+ 2002/08/12/big/img_340
177
+ 2003/01/13/big/img_815
178
+ 2002/08/26/big/img_5
179
+ 2002/08/10/big/img_158
180
+ 2002/08/18/big/img_95
181
+ 2002/07/29/big/img_1297
182
+ 2003/01/13/big/img_508
183
+ 2002/09/01/big/img_16680
184
+ 2003/01/16/big/img_338
185
+ 2002/08/13/big/img_517
186
+ 2002/07/22/big/img_626
187
+ 2002/08/06/big/img_3024
188
+ 2002/07/26/big/img_499
189
+ 2003/01/13/big/img_387
190
+ 2002/08/31/big/img_18025
191
+ 2002/08/13/big/img_520
192
+ 2003/01/16/big/img_576
193
+ 2002/07/26/big/img_121
194
+ 2002/08/25/big/img_703
195
+ 2002/08/26/big/img_615
196
+ 2002/08/17/big/img_434
197
+ 2002/08/02/big/img_677
198
+ 2002/08/18/big/img_276
199
+ 2002/08/05/big/img_3672
200
+ 2002/07/26/big/img_700
201
+ 2002/07/31/big/img_277
202
+ 2003/01/14/big/img_220
203
+ 2002/08/23/big/img_232
204
+ 2002/08/31/big/img_17422
205
+ 2002/07/22/big/img_508
206
+ 2002/08/13/big/img_681
207
+ 2003/01/15/big/img_638
208
+ 2002/08/30/big/img_18408
209
+ 2003/01/14/big/img_533
210
+ 2003/01/17/big/img_12
211
+ 2002/08/28/big/img_19388
212
+ 2002/08/08/big/img_133
213
+ 2002/07/26/big/img_885
214
+ 2002/08/19/big/img_387
215
+ 2002/08/27/big/img_19976
216
+ 2002/08/26/big/img_118
217
+ 2002/08/28/big/img_19146
218
+ 2002/08/05/big/img_3259
219
+ 2002/08/15/big/img_536
220
+ 2002/07/22/big/img_279
221
+ 2002/07/22/big/img_9
222
+ 2002/08/13/big/img_301
223
+ 2002/08/15/big/img_974
224
+ 2002/08/06/big/img_2355
225
+ 2002/08/01/big/img_1526
226
+ 2002/08/03/big/img_417
227
+ 2002/08/04/big/img_407
228
+ 2002/08/15/big/img_1029
229
+ 2002/07/29/big/img_700
230
+ 2002/08/01/big/img_1463
231
+ 2002/08/31/big/img_17365
232
+ 2002/07/28/big/img_223
233
+ 2002/07/19/big/img_827
234
+ 2002/07/27/big/img_531
235
+ 2002/07/19/big/img_845
236
+ 2002/08/20/big/img_382
237
+ 2002/07/31/big/img_268
238
+ 2002/08/27/big/img_19705
239
+ 2002/08/02/big/img_830
240
+ 2002/08/23/big/img_250
241
+ 2002/07/20/big/img_777
242
+ 2002/08/21/big/img_879
243
+ 2002/08/26/big/img_20146
244
+ 2002/08/23/big/img_789
245
+ 2002/08/06/big/img_2683
246
+ 2002/08/25/big/img_576
247
+ 2002/08/09/big/img_498
248
+ 2002/08/08/big/img_384
249
+ 2002/08/26/big/img_592
250
+ 2002/07/29/big/img_1470
251
+ 2002/08/21/big/img_452
252
+ 2002/08/30/big/img_18395
253
+ 2002/08/15/big/img_215
254
+ 2002/07/21/big/img_643
255
+ 2002/07/22/big/img_209
256
+ 2003/01/17/big/img_346
257
+ 2002/08/25/big/img_658
258
+ 2002/08/21/big/img_221
259
+ 2002/08/14/big/img_60
260
+ 2003/01/17/big/img_885
261
+ 2003/01/16/big/img_482
262
+ 2002/08/19/big/img_593
263
+ 2002/08/08/big/img_233
264
+ 2002/07/30/big/img_458
265
+ 2002/07/23/big/img_384
266
+ 2003/01/15/big/img_670
267
+ 2003/01/15/big/img_267
268
+ 2002/08/26/big/img_540
269
+ 2002/07/29/big/img_552
270
+ 2002/07/30/big/img_997
271
+ 2003/01/17/big/img_377
272
+ 2002/08/21/big/img_265
273
+ 2002/08/09/big/img_561
274
+ 2002/07/31/big/img_945
275
+ 2002/09/02/big/img_15252
276
+ 2002/08/11/big/img_276
277
+ 2002/07/22/big/img_491
278
+ 2002/07/26/big/img_517
279
+ 2002/08/14/big/img_726
280
+ 2002/08/08/big/img_46
281
+ 2002/08/28/big/img_19458
282
+ 2002/08/06/big/img_2935
283
+ 2002/07/29/big/img_1392
284
+ 2002/08/13/big/img_776
285
+ 2002/08/24/big/img_616
286
+ 2002/08/14/big/img_1065
287
+ 2002/07/29/big/img_889
288
+ 2002/08/18/big/img_188
289
+ 2002/08/07/big/img_1453
290
+ 2002/08/02/big/img_760
291
+ 2002/07/28/big/img_416
292
+ 2002/08/07/big/img_1393
293
+ 2002/08/26/big/img_292
294
+ 2002/08/26/big/img_301
295
+ 2003/01/13/big/img_195
296
+ 2002/07/26/big/img_532
297
+ 2002/08/20/big/img_550
298
+ 2002/08/05/big/img_3658
299
+ 2002/08/26/big/img_738
300
+ 2002/09/02/big/img_15750
301
+ 2003/01/17/big/img_451
302
+ 2002/07/23/big/img_339
303
+ 2002/08/16/big/img_637
304
+ 2002/08/14/big/img_748
305
+ 2002/08/06/big/img_2739
306
+ 2002/07/25/big/img_482
307
+ 2002/08/19/big/img_191
308
+ 2002/08/26/big/img_537
309
+ 2003/01/15/big/img_716
310
+ 2003/01/15/big/img_767
311
+ 2002/08/02/big/img_452
312
+ 2002/08/08/big/img_1011
313
+ 2002/08/10/big/img_144
314
+ 2003/01/14/big/img_122
315
+ 2002/07/24/big/img_586
316
+ 2002/07/24/big/img_762
317
+ 2002/08/20/big/img_369
318
+ 2002/07/30/big/img_146
319
+ 2002/08/23/big/img_396
320
+ 2003/01/15/big/img_200
321
+ 2002/08/15/big/img_1183
322
+ 2003/01/14/big/img_698
323
+ 2002/08/09/big/img_792
324
+ 2002/08/06/big/img_2347
325
+ 2002/07/31/big/img_911
326
+ 2002/08/26/big/img_722
327
+ 2002/08/23/big/img_621
328
+ 2002/08/05/big/img_3790
329
+ 2003/01/13/big/img_633
330
+ 2002/08/09/big/img_224
331
+ 2002/07/24/big/img_454
332
+ 2002/07/21/big/img_202
333
+ 2002/08/02/big/img_630
334
+ 2002/08/30/big/img_18315
335
+ 2002/07/19/big/img_491
336
+ 2002/09/01/big/img_16456
337
+ 2002/08/09/big/img_242
338
+ 2002/07/25/big/img_595
339
+ 2002/07/22/big/img_522
340
+ 2002/08/01/big/img_1593
341
+ 2002/07/29/big/img_336
342
+ 2002/08/15/big/img_448
343
+ 2002/08/28/big/img_19281
344
+ 2002/07/29/big/img_342
345
+ 2002/08/12/big/img_78
346
+ 2003/01/14/big/img_525
347
+ 2002/07/28/big/img_147
348
+ 2002/08/11/big/img_353
349
+ 2002/08/22/big/img_513
350
+ 2002/08/04/big/img_721
351
+ 2002/08/17/big/img_247
352
+ 2003/01/14/big/img_891
353
+ 2002/08/20/big/img_853
354
+ 2002/07/19/big/img_414
355
+ 2002/08/01/big/img_1530
356
+ 2003/01/14/big/img_924
357
+ 2002/08/22/big/img_468
358
+ 2002/08/18/big/img_354
359
+ 2002/08/30/big/img_18193
360
+ 2002/08/23/big/img_492
361
+ 2002/08/15/big/img_871
362
+ 2002/08/12/big/img_494
363
+ 2002/08/06/big/img_2470
364
+ 2002/07/23/big/img_923
365
+ 2002/08/26/big/img_155
366
+ 2002/08/08/big/img_669
367
+ 2002/07/23/big/img_404
368
+ 2002/08/28/big/img_19421
369
+ 2002/08/29/big/img_18993
370
+ 2002/08/25/big/img_416
371
+ 2003/01/17/big/img_434
372
+ 2002/07/29/big/img_1370
373
+ 2002/07/28/big/img_483
374
+ 2002/08/11/big/img_50
375
+ 2002/08/10/big/img_404
376
+ 2002/09/02/big/img_15057
377
+ 2003/01/14/big/img_911
378
+ 2002/09/01/big/img_16697
379
+ 2003/01/16/big/img_665
380
+ 2002/09/01/big/img_16708
381
+ 2002/08/22/big/img_612
382
+ 2002/08/28/big/img_19471
383
+ 2002/08/02/big/img_198
384
+ 2003/01/16/big/img_527
385
+ 2002/08/22/big/img_209
386
+ 2002/08/30/big/img_18205
387
+ 2003/01/14/big/img_114
388
+ 2003/01/14/big/img_1028
389
+ 2003/01/16/big/img_894
390
+ 2003/01/14/big/img_837
391
+ 2002/07/30/big/img_9
392
+ 2002/08/06/big/img_2821
393
+ 2002/08/04/big/img_85
394
+ 2003/01/13/big/img_884
395
+ 2002/07/22/big/img_570
396
+ 2002/08/07/big/img_1773
397
+ 2002/07/26/big/img_208
398
+ 2003/01/17/big/img_946
399
+ 2002/07/19/big/img_930
400
+ 2003/01/01/big/img_698
401
+ 2003/01/17/big/img_612
402
+ 2002/07/19/big/img_372
403
+ 2002/07/30/big/img_721
404
+ 2003/01/14/big/img_649
405
+ 2002/08/19/big/img_4
406
+ 2002/07/25/big/img_1024
407
+ 2003/01/15/big/img_601
408
+ 2002/08/30/big/img_18470
409
+ 2002/07/22/big/img_29
410
+ 2002/08/07/big/img_1686
411
+ 2002/07/20/big/img_294
412
+ 2002/08/14/big/img_800
413
+ 2002/08/19/big/img_353
414
+ 2002/08/19/big/img_350
415
+ 2002/08/05/big/img_3392
416
+ 2002/08/09/big/img_622
417
+ 2003/01/15/big/img_236
418
+ 2002/08/11/big/img_643
419
+ 2002/08/05/big/img_3458
420
+ 2002/08/12/big/img_413
421
+ 2002/08/22/big/img_415
422
+ 2002/08/13/big/img_635
423
+ 2002/08/07/big/img_1198
424
+ 2002/08/04/big/img_873
425
+ 2002/08/12/big/img_407
426
+ 2003/01/15/big/img_346
427
+ 2002/08/02/big/img_275
428
+ 2002/08/17/big/img_997
429
+ 2002/08/21/big/img_958
430
+ 2002/08/20/big/img_579
431
+ 2002/07/29/big/img_142
432
+ 2003/01/14/big/img_1115
433
+ 2002/08/16/big/img_365
434
+ 2002/07/29/big/img_1414
435
+ 2002/08/17/big/img_489
436
+ 2002/08/13/big/img_1010
437
+ 2002/07/31/big/img_276
438
+ 2002/07/25/big/img_1000
439
+ 2002/08/23/big/img_524
440
+ 2002/08/28/big/img_19147
441
+ 2003/01/13/big/img_433
442
+ 2002/08/20/big/img_205
443
+ 2003/01/01/big/img_458
444
+ 2002/07/29/big/img_1449
445
+ 2003/01/16/big/img_696
446
+ 2002/08/28/big/img_19296
447
+ 2002/08/29/big/img_18688
448
+ 2002/08/21/big/img_767
449
+ 2002/08/20/big/img_532
450
+ 2002/08/26/big/img_187
451
+ 2002/07/26/big/img_183
452
+ 2002/07/27/big/img_890
453
+ 2003/01/13/big/img_576
454
+ 2002/07/30/big/img_15
455
+ 2002/07/31/big/img_889
456
+ 2002/08/31/big/img_17759
457
+ 2003/01/14/big/img_1114
458
+ 2002/07/19/big/img_445
459
+ 2002/08/03/big/img_593
460
+ 2002/07/24/big/img_750
461
+ 2002/07/30/big/img_133
462
+ 2002/08/25/big/img_671
463
+ 2002/07/20/big/img_351
464
+ 2002/08/31/big/img_17276
465
+ 2002/08/05/big/img_3231
466
+ 2002/09/02/big/img_15882
467
+ 2002/08/14/big/img_115
468
+ 2002/08/02/big/img_1148
469
+ 2002/07/25/big/img_936
470
+ 2002/07/31/big/img_639
471
+ 2002/08/04/big/img_427
472
+ 2002/08/22/big/img_843
473
+ 2003/01/17/big/img_17
474
+ 2003/01/13/big/img_690
475
+ 2002/08/13/big/img_472
476
+ 2002/08/09/big/img_425
477
+ 2002/08/05/big/img_3450
478
+ 2003/01/17/big/img_439
479
+ 2002/08/13/big/img_539
480
+ 2002/07/28/big/img_35
481
+ 2002/08/16/big/img_241
482
+ 2002/08/06/big/img_2898
483
+ 2003/01/16/big/img_429
484
+ 2002/08/05/big/img_3817
485
+ 2002/08/27/big/img_19919
486
+ 2002/07/19/big/img_422
487
+ 2002/08/15/big/img_560
488
+ 2002/07/23/big/img_750
489
+ 2002/07/30/big/img_353
490
+ 2002/08/05/big/img_43
491
+ 2002/08/23/big/img_305
492
+ 2002/08/01/big/img_2137
493
+ 2002/08/30/big/img_18097
494
+ 2002/08/01/big/img_1389
495
+ 2002/08/02/big/img_308
496
+ 2003/01/14/big/img_652
497
+ 2002/08/01/big/img_1798
498
+ 2003/01/14/big/img_732
499
+ 2003/01/16/big/img_294
500
+ 2002/08/26/big/img_213
501
+ 2002/07/24/big/img_842
502
+ 2003/01/13/big/img_630
503
+ 2003/01/13/big/img_634
504
+ 2002/08/06/big/img_2285
505
+ 2002/08/01/big/img_2162
506
+ 2002/08/30/big/img_18134
507
+ 2002/08/02/big/img_1045
508
+ 2002/08/01/big/img_2143
509
+ 2002/07/25/big/img_135
510
+ 2002/07/20/big/img_645
511
+ 2002/08/05/big/img_3666
512
+ 2002/08/14/big/img_523
513
+ 2002/08/04/big/img_425
514
+ 2003/01/14/big/img_137
515
+ 2003/01/01/big/img_176
516
+ 2002/08/15/big/img_505
517
+ 2002/08/24/big/img_386
518
+ 2002/08/05/big/img_3187
519
+ 2002/08/15/big/img_419
520
+ 2003/01/13/big/img_520
521
+ 2002/08/04/big/img_444
522
+ 2002/08/26/big/img_483
523
+ 2002/08/05/big/img_3449
524
+ 2002/08/30/big/img_18409
525
+ 2002/08/28/big/img_19455
526
+ 2002/08/27/big/img_20090
527
+ 2002/07/23/big/img_625
528
+ 2002/08/24/big/img_205
529
+ 2002/08/08/big/img_938
530
+ 2003/01/13/big/img_527
531
+ 2002/08/07/big/img_1712
532
+ 2002/07/24/big/img_801
533
+ 2002/08/09/big/img_579
534
+ 2003/01/14/big/img_41
535
+ 2003/01/15/big/img_1130
536
+ 2002/07/21/big/img_672
537
+ 2002/08/07/big/img_1590
538
+ 2003/01/01/big/img_532
539
+ 2002/08/02/big/img_529
540
+ 2002/08/05/big/img_3591
541
+ 2002/08/23/big/img_5
542
+ 2003/01/14/big/img_882
543
+ 2002/08/28/big/img_19234
544
+ 2002/07/24/big/img_398
545
+ 2003/01/14/big/img_592
546
+ 2002/08/22/big/img_548
547
+ 2002/08/12/big/img_761
548
+ 2003/01/16/big/img_497
549
+ 2002/08/18/big/img_133
550
+ 2002/08/08/big/img_874
551
+ 2002/07/19/big/img_247
552
+ 2002/08/15/big/img_170
553
+ 2002/08/27/big/img_19679
554
+ 2002/08/20/big/img_246
555
+ 2002/08/24/big/img_358
556
+ 2002/07/29/big/img_599
557
+ 2002/08/01/big/img_1555
558
+ 2002/07/30/big/img_491
559
+ 2002/07/30/big/img_371
560
+ 2003/01/16/big/img_682
561
+ 2002/07/25/big/img_619
562
+ 2003/01/15/big/img_587
563
+ 2002/08/02/big/img_1212
564
+ 2002/08/01/big/img_2152
565
+ 2002/07/25/big/img_668
566
+ 2003/01/16/big/img_574
567
+ 2002/08/28/big/img_19464
568
+ 2002/08/11/big/img_536
569
+ 2002/07/24/big/img_201
570
+ 2002/08/05/big/img_3488
571
+ 2002/07/25/big/img_887
572
+ 2002/07/22/big/img_789
573
+ 2002/07/30/big/img_432
574
+ 2002/08/16/big/img_166
575
+ 2002/09/01/big/img_16333
576
+ 2002/07/26/big/img_1010
577
+ 2002/07/21/big/img_793
578
+ 2002/07/22/big/img_720
579
+ 2002/07/31/big/img_337
580
+ 2002/07/27/big/img_185
581
+ 2002/08/23/big/img_440
582
+ 2002/07/31/big/img_801
583
+ 2002/07/25/big/img_478
584
+ 2003/01/14/big/img_171
585
+ 2002/08/07/big/img_1054
586
+ 2002/09/02/big/img_15659
587
+ 2002/07/29/big/img_1348
588
+ 2002/08/09/big/img_337
589
+ 2002/08/26/big/img_684
590
+ 2002/07/31/big/img_537
591
+ 2002/08/15/big/img_808
592
+ 2003/01/13/big/img_740
593
+ 2002/08/07/big/img_1667
594
+ 2002/08/03/big/img_404
595
+ 2002/08/06/big/img_2520
596
+ 2002/07/19/big/img_230
597
+ 2002/07/19/big/img_356
598
+ 2003/01/16/big/img_627
599
+ 2002/08/04/big/img_474
600
+ 2002/07/29/big/img_833
601
+ 2002/07/25/big/img_176
602
+ 2002/08/01/big/img_1684
603
+ 2002/08/21/big/img_643
604
+ 2002/08/27/big/img_19673
605
+ 2002/08/02/big/img_838
606
+ 2002/08/06/big/img_2378
607
+ 2003/01/15/big/img_48
608
+ 2002/07/30/big/img_470
609
+ 2002/08/15/big/img_963
610
+ 2002/08/24/big/img_444
611
+ 2002/08/16/big/img_662
612
+ 2002/08/15/big/img_1209
613
+ 2002/07/24/big/img_25
614
+ 2002/08/06/big/img_2740
615
+ 2002/07/29/big/img_996
616
+ 2002/08/31/big/img_18074
617
+ 2002/08/04/big/img_343
618
+ 2003/01/17/big/img_509
619
+ 2003/01/13/big/img_726
620
+ 2002/08/07/big/img_1466
621
+ 2002/07/26/big/img_307
622
+ 2002/08/10/big/img_598
623
+ 2002/08/13/big/img_890
624
+ 2002/08/14/big/img_997
625
+ 2002/07/19/big/img_392
626
+ 2002/08/02/big/img_475
627
+ 2002/08/29/big/img_19038
628
+ 2002/07/29/big/img_538
629
+ 2002/07/29/big/img_502
630
+ 2002/08/02/big/img_364
631
+ 2002/08/31/big/img_17353
632
+ 2002/08/08/big/img_539
633
+ 2002/08/01/big/img_1449
634
+ 2002/07/22/big/img_363
635
+ 2002/08/02/big/img_90
636
+ 2002/09/01/big/img_16867
637
+ 2002/08/05/big/img_3371
638
+ 2002/07/30/big/img_342
639
+ 2002/08/07/big/img_1363
640
+ 2002/08/22/big/img_790
641
+ 2003/01/15/big/img_404
642
+ 2002/08/05/big/img_3447
643
+ 2002/09/01/big/img_16167
644
+ 2003/01/13/big/img_840
645
+ 2002/08/22/big/img_1001
646
+ 2002/08/09/big/img_431
647
+ 2002/07/27/big/img_618
648
+ 2002/07/31/big/img_741
649
+ 2002/07/30/big/img_964
650
+ 2002/07/25/big/img_86
651
+ 2002/07/29/big/img_275
652
+ 2002/08/21/big/img_921
653
+ 2002/07/26/big/img_892
654
+ 2002/08/21/big/img_663
655
+ 2003/01/13/big/img_567
656
+ 2003/01/14/big/img_719
657
+ 2002/07/28/big/img_251
658
+ 2003/01/15/big/img_1123
659
+ 2002/07/29/big/img_260
660
+ 2002/08/24/big/img_337
661
+ 2002/08/01/big/img_1914
662
+ 2002/08/13/big/img_373
663
+ 2003/01/15/big/img_589
664
+ 2002/08/13/big/img_906
665
+ 2002/07/26/big/img_270
666
+ 2002/08/26/big/img_313
667
+ 2002/08/25/big/img_694
668
+ 2003/01/01/big/img_327
669
+ 2002/07/23/big/img_261
670
+ 2002/08/26/big/img_642
671
+ 2002/07/29/big/img_918
672
+ 2002/07/23/big/img_455
673
+ 2002/07/24/big/img_612
674
+ 2002/07/23/big/img_534
675
+ 2002/07/19/big/img_534
676
+ 2002/07/19/big/img_726
677
+ 2002/08/01/big/img_2146
678
+ 2002/08/02/big/img_543
679
+ 2003/01/16/big/img_777
680
+ 2002/07/30/big/img_484
681
+ 2002/08/13/big/img_1161
682
+ 2002/07/21/big/img_390
683
+ 2002/08/06/big/img_2288
684
+ 2002/08/21/big/img_677
685
+ 2002/08/13/big/img_747
686
+ 2002/08/15/big/img_1248
687
+ 2002/07/31/big/img_416
688
+ 2002/09/02/big/img_15259
689
+ 2002/08/16/big/img_781
690
+ 2002/08/24/big/img_754
691
+ 2002/07/24/big/img_803
692
+ 2002/08/20/big/img_609
693
+ 2002/08/28/big/img_19571
694
+ 2002/09/01/big/img_16140
695
+ 2002/08/26/big/img_769
696
+ 2002/07/20/big/img_588
697
+ 2002/08/02/big/img_898
698
+ 2002/07/21/big/img_466
699
+ 2002/08/14/big/img_1046
700
+ 2002/07/25/big/img_212
701
+ 2002/08/26/big/img_353
702
+ 2002/08/19/big/img_810
703
+ 2002/08/31/big/img_17824
704
+ 2002/08/12/big/img_631
705
+ 2002/07/19/big/img_828
706
+ 2002/07/24/big/img_130
707
+ 2002/08/25/big/img_580
708
+ 2002/07/31/big/img_699
709
+ 2002/07/23/big/img_808
710
+ 2002/07/31/big/img_377
711
+ 2003/01/16/big/img_570
712
+ 2002/09/01/big/img_16254
713
+ 2002/07/21/big/img_471
714
+ 2002/08/01/big/img_1548
715
+ 2002/08/18/big/img_252
716
+ 2002/08/19/big/img_576
717
+ 2002/08/20/big/img_464
718
+ 2002/07/27/big/img_735
719
+ 2002/08/21/big/img_589
720
+ 2003/01/15/big/img_1192
721
+ 2002/08/09/big/img_302
722
+ 2002/07/31/big/img_594
723
+ 2002/08/23/big/img_19
724
+ 2002/08/29/big/img_18819
725
+ 2002/08/19/big/img_293
726
+ 2002/07/30/big/img_331
727
+ 2002/08/23/big/img_607
728
+ 2002/07/30/big/img_363
729
+ 2002/08/16/big/img_766
730
+ 2003/01/13/big/img_481
731
+ 2002/08/06/big/img_2515
732
+ 2002/09/02/big/img_15913
733
+ 2002/09/02/big/img_15827
734
+ 2002/09/02/big/img_15053
735
+ 2002/08/07/big/img_1576
736
+ 2002/07/23/big/img_268
737
+ 2002/08/21/big/img_152
738
+ 2003/01/15/big/img_578
739
+ 2002/07/21/big/img_589
740
+ 2002/07/20/big/img_548
741
+ 2002/08/27/big/img_19693
742
+ 2002/08/31/big/img_17252
743
+ 2002/07/31/big/img_138
744
+ 2002/07/23/big/img_372
745
+ 2002/08/16/big/img_695
746
+ 2002/07/27/big/img_287
747
+ 2002/08/15/big/img_315
748
+ 2002/08/10/big/img_361
749
+ 2002/07/29/big/img_899
750
+ 2002/08/13/big/img_771
751
+ 2002/08/21/big/img_92
752
+ 2003/01/15/big/img_425
753
+ 2003/01/16/big/img_450
754
+ 2002/09/01/big/img_16942
755
+ 2002/08/02/big/img_51
756
+ 2002/09/02/big/img_15379
757
+ 2002/08/24/big/img_147
758
+ 2002/08/30/big/img_18122
759
+ 2002/07/26/big/img_950
760
+ 2002/08/07/big/img_1400
761
+ 2002/08/17/big/img_468
762
+ 2002/08/15/big/img_470
763
+ 2002/07/30/big/img_318
764
+ 2002/07/22/big/img_644
765
+ 2002/08/27/big/img_19732
766
+ 2002/07/23/big/img_601
767
+ 2002/08/26/big/img_398
768
+ 2002/08/21/big/img_428
769
+ 2002/08/06/big/img_2119
770
+ 2002/08/29/big/img_19103
771
+ 2003/01/14/big/img_933
772
+ 2002/08/11/big/img_674
773
+ 2002/08/28/big/img_19420
774
+ 2002/08/03/big/img_418
775
+ 2002/08/17/big/img_312
776
+ 2002/07/25/big/img_1044
777
+ 2003/01/17/big/img_671
778
+ 2002/08/30/big/img_18297
779
+ 2002/07/25/big/img_755
780
+ 2002/07/23/big/img_471
781
+ 2002/08/21/big/img_39
782
+ 2002/07/26/big/img_699
783
+ 2003/01/14/big/img_33
784
+ 2002/07/31/big/img_411
785
+ 2002/08/16/big/img_645
786
+ 2003/01/17/big/img_116
787
+ 2002/09/02/big/img_15903
788
+ 2002/08/20/big/img_120
789
+ 2002/08/22/big/img_176
790
+ 2002/07/29/big/img_1316
791
+ 2002/08/27/big/img_19914
792
+ 2002/07/22/big/img_719
793
+ 2002/08/28/big/img_19239
794
+ 2003/01/13/big/img_385
795
+ 2002/08/08/big/img_525
796
+ 2002/07/19/big/img_782
797
+ 2002/08/13/big/img_843
798
+ 2002/07/30/big/img_107
799
+ 2002/08/11/big/img_752
800
+ 2002/07/29/big/img_383
801
+ 2002/08/26/big/img_249
802
+ 2002/08/29/big/img_18860
803
+ 2002/07/30/big/img_70
804
+ 2002/07/26/big/img_194
805
+ 2002/08/15/big/img_530
806
+ 2002/08/08/big/img_816
807
+ 2002/07/31/big/img_286
808
+ 2003/01/13/big/img_294
809
+ 2002/07/31/big/img_251
810
+ 2002/07/24/big/img_13
811
+ 2002/08/31/big/img_17938
812
+ 2002/07/22/big/img_642
813
+ 2003/01/14/big/img_728
814
+ 2002/08/18/big/img_47
815
+ 2002/08/22/big/img_306
816
+ 2002/08/20/big/img_348
817
+ 2002/08/15/big/img_764
818
+ 2002/08/08/big/img_163
819
+ 2002/07/23/big/img_531
820
+ 2002/07/23/big/img_467
821
+ 2003/01/16/big/img_743
822
+ 2003/01/13/big/img_535
823
+ 2002/08/02/big/img_523
824
+ 2002/08/22/big/img_120
825
+ 2002/08/11/big/img_496
826
+ 2002/08/29/big/img_19075
827
+ 2002/08/08/big/img_465
828
+ 2002/08/09/big/img_790
829
+ 2002/08/19/big/img_588
830
+ 2002/08/23/big/img_407
831
+ 2003/01/17/big/img_435
832
+ 2002/08/24/big/img_398
833
+ 2002/08/27/big/img_19899
834
+ 2003/01/15/big/img_335
835
+ 2002/08/13/big/img_493
836
+ 2002/09/02/big/img_15460
837
+ 2002/07/31/big/img_470
838
+ 2002/08/05/big/img_3550
839
+ 2002/07/28/big/img_123
840
+ 2002/08/01/big/img_1498
841
+ 2002/08/04/big/img_504
842
+ 2003/01/17/big/img_427
843
+ 2002/08/27/big/img_19708
844
+ 2002/07/27/big/img_861
845
+ 2002/07/25/big/img_685
846
+ 2002/07/31/big/img_207
847
+ 2003/01/14/big/img_745
848
+ 2002/08/31/big/img_17756
849
+ 2002/08/24/big/img_288
850
+ 2002/08/18/big/img_181
851
+ 2002/08/10/big/img_520
852
+ 2002/08/25/big/img_705
853
+ 2002/08/23/big/img_226
854
+ 2002/08/04/big/img_727
855
+ 2002/07/24/big/img_625
856
+ 2002/08/28/big/img_19157
857
+ 2002/08/23/big/img_586
858
+ 2002/07/31/big/img_232
859
+ 2003/01/13/big/img_240
860
+ 2003/01/14/big/img_321
861
+ 2003/01/15/big/img_533
862
+ 2002/07/23/big/img_480
863
+ 2002/07/24/big/img_371
864
+ 2002/08/21/big/img_702
865
+ 2002/08/31/big/img_17075
866
+ 2002/09/02/big/img_15278
867
+ 2002/07/29/big/img_246
868
+ 2003/01/15/big/img_829
869
+ 2003/01/15/big/img_1213
870
+ 2003/01/16/big/img_441
871
+ 2002/08/14/big/img_921
872
+ 2002/07/23/big/img_425
873
+ 2002/08/15/big/img_296
874
+ 2002/07/19/big/img_135
875
+ 2002/07/26/big/img_402
876
+ 2003/01/17/big/img_88
877
+ 2002/08/20/big/img_872
878
+ 2002/08/13/big/img_1110
879
+ 2003/01/16/big/img_1040
880
+ 2002/07/23/big/img_9
881
+ 2002/08/13/big/img_700
882
+ 2002/08/16/big/img_371
883
+ 2002/08/27/big/img_19966
884
+ 2003/01/17/big/img_391
885
+ 2002/08/18/big/img_426
886
+ 2002/08/01/big/img_1618
887
+ 2002/07/21/big/img_754
888
+ 2003/01/14/big/img_1101
889
+ 2003/01/16/big/img_1022
890
+ 2002/07/22/big/img_275
891
+ 2002/08/24/big/img_86
892
+ 2002/08/17/big/img_582
893
+ 2003/01/15/big/img_765
894
+ 2003/01/17/big/img_449
895
+ 2002/07/28/big/img_265
896
+ 2003/01/13/big/img_552
897
+ 2002/07/28/big/img_115
898
+ 2003/01/16/big/img_56
899
+ 2002/08/02/big/img_1232
900
+ 2003/01/17/big/img_925
901
+ 2002/07/22/big/img_445
902
+ 2002/07/25/big/img_957
903
+ 2002/07/20/big/img_589
904
+ 2002/08/31/big/img_17107
905
+ 2002/07/29/big/img_483
906
+ 2002/08/14/big/img_1063
907
+ 2002/08/07/big/img_1545
908
+ 2002/08/14/big/img_680
909
+ 2002/09/01/big/img_16694
910
+ 2002/08/14/big/img_257
911
+ 2002/08/11/big/img_726
912
+ 2002/07/26/big/img_681
913
+ 2002/07/25/big/img_481
914
+ 2003/01/14/big/img_737
915
+ 2002/08/28/big/img_19480
916
+ 2003/01/16/big/img_362
917
+ 2002/08/27/big/img_19865
918
+ 2003/01/01/big/img_547
919
+ 2002/09/02/big/img_15074
920
+ 2002/08/01/big/img_1453
921
+ 2002/08/22/big/img_594
922
+ 2002/08/28/big/img_19263
923
+ 2002/08/13/big/img_478
924
+ 2002/07/29/big/img_1358
925
+ 2003/01/14/big/img_1022
926
+ 2002/08/16/big/img_450
927
+ 2002/08/02/big/img_159
928
+ 2002/07/26/big/img_781
929
+ 2003/01/13/big/img_601
930
+ 2002/08/20/big/img_407
931
+ 2002/08/15/big/img_468
932
+ 2002/08/31/big/img_17902
933
+ 2002/08/16/big/img_81
934
+ 2002/07/25/big/img_987
935
+ 2002/07/25/big/img_500
936
+ 2002/08/02/big/img_31
937
+ 2002/08/18/big/img_538
938
+ 2002/08/08/big/img_54
939
+ 2002/07/23/big/img_686
940
+ 2002/07/24/big/img_836
941
+ 2003/01/17/big/img_734
942
+ 2002/08/16/big/img_1055
943
+ 2003/01/16/big/img_521
944
+ 2002/07/25/big/img_612
945
+ 2002/08/22/big/img_778
946
+ 2002/08/03/big/img_251
947
+ 2002/08/12/big/img_436
948
+ 2002/08/23/big/img_705
949
+ 2002/07/28/big/img_243
950
+ 2002/07/25/big/img_1029
951
+ 2002/08/20/big/img_287
952
+ 2002/08/29/big/img_18739
953
+ 2002/08/05/big/img_3272
954
+ 2002/07/27/big/img_214
955
+ 2003/01/14/big/img_5
956
+ 2002/08/01/big/img_1380
957
+ 2002/08/29/big/img_19097
958
+ 2002/07/30/big/img_486
959
+ 2002/08/29/big/img_18707
960
+ 2002/08/10/big/img_559
961
+ 2002/08/15/big/img_365
962
+ 2002/08/09/big/img_525
963
+ 2002/08/10/big/img_689
964
+ 2002/07/25/big/img_502
965
+ 2002/08/03/big/img_667
966
+ 2002/08/10/big/img_855
967
+ 2002/08/10/big/img_706
968
+ 2002/08/18/big/img_603
969
+ 2003/01/16/big/img_1055
970
+ 2002/08/31/big/img_17890
971
+ 2002/08/15/big/img_761
972
+ 2003/01/15/big/img_489
973
+ 2002/08/26/big/img_351
974
+ 2002/08/01/big/img_1772
975
+ 2002/08/31/big/img_17729
976
+ 2002/07/25/big/img_609
977
+ 2003/01/13/big/img_539
978
+ 2002/07/27/big/img_686
979
+ 2002/07/31/big/img_311
980
+ 2002/08/22/big/img_799
981
+ 2003/01/16/big/img_936
982
+ 2002/08/31/big/img_17813
983
+ 2002/08/04/big/img_862
984
+ 2002/08/09/big/img_332
985
+ 2002/07/20/big/img_148
986
+ 2002/08/12/big/img_426
987
+ 2002/07/24/big/img_69
988
+ 2002/07/27/big/img_685
989
+ 2002/08/02/big/img_480
990
+ 2002/08/26/big/img_154
991
+ 2002/07/24/big/img_598
992
+ 2002/08/01/big/img_1881
993
+ 2002/08/20/big/img_667
994
+ 2003/01/14/big/img_495
995
+ 2002/07/21/big/img_744
996
+ 2002/07/30/big/img_150
997
+ 2002/07/23/big/img_924
998
+ 2002/08/08/big/img_272
999
+ 2002/07/23/big/img_310
1000
+ 2002/07/25/big/img_1011
1001
+ 2002/09/02/big/img_15725
1002
+ 2002/07/19/big/img_814
1003
+ 2002/08/20/big/img_936
1004
+ 2002/07/25/big/img_85
1005
+ 2002/08/24/big/img_662
1006
+ 2002/08/09/big/img_495
1007
+ 2003/01/15/big/img_196
1008
+ 2002/08/16/big/img_707
1009
+ 2002/08/28/big/img_19370
1010
+ 2002/08/06/big/img_2366
1011
+ 2002/08/06/big/img_3012
1012
+ 2002/08/01/big/img_1452
1013
+ 2002/07/31/big/img_742
1014
+ 2002/07/27/big/img_914
1015
+ 2003/01/13/big/img_290
1016
+ 2002/07/31/big/img_288
1017
+ 2002/08/02/big/img_171
1018
+ 2002/08/22/big/img_191
1019
+ 2002/07/27/big/img_1066
1020
+ 2002/08/12/big/img_383
1021
+ 2003/01/17/big/img_1018
1022
+ 2002/08/01/big/img_1785
1023
+ 2002/08/11/big/img_390
1024
+ 2002/08/27/big/img_20037
1025
+ 2002/08/12/big/img_38
1026
+ 2003/01/15/big/img_103
1027
+ 2002/08/26/big/img_31
1028
+ 2002/08/18/big/img_660
1029
+ 2002/07/22/big/img_694
1030
+ 2002/08/15/big/img_24
1031
+ 2002/07/27/big/img_1077
1032
+ 2002/08/01/big/img_1943
1033
+ 2002/07/22/big/img_292
1034
+ 2002/09/01/big/img_16857
1035
+ 2002/07/22/big/img_892
1036
+ 2003/01/14/big/img_46
1037
+ 2002/08/09/big/img_469
1038
+ 2002/08/09/big/img_414
1039
+ 2003/01/16/big/img_40
1040
+ 2002/08/28/big/img_19231
1041
+ 2002/07/27/big/img_978
1042
+ 2002/07/23/big/img_475
1043
+ 2002/07/25/big/img_92
1044
+ 2002/08/09/big/img_799
1045
+ 2002/07/25/big/img_491
1046
+ 2002/08/03/big/img_654
1047
+ 2003/01/15/big/img_687
1048
+ 2002/08/11/big/img_478
1049
+ 2002/08/07/big/img_1664
1050
+ 2002/08/20/big/img_362
1051
+ 2002/08/01/big/img_1298
1052
+ 2003/01/13/big/img_500
1053
+ 2002/08/06/big/img_2896
1054
+ 2002/08/30/big/img_18529
1055
+ 2002/08/16/big/img_1020
1056
+ 2002/07/29/big/img_892
1057
+ 2002/08/29/big/img_18726
1058
+ 2002/07/21/big/img_453
1059
+ 2002/08/17/big/img_437
1060
+ 2002/07/19/big/img_665
1061
+ 2002/07/22/big/img_440
1062
+ 2002/07/19/big/img_582
1063
+ 2002/07/21/big/img_233
1064
+ 2003/01/01/big/img_82
1065
+ 2002/07/25/big/img_341
1066
+ 2002/07/29/big/img_864
1067
+ 2002/08/02/big/img_276
1068
+ 2002/08/29/big/img_18654
1069
+ 2002/07/27/big/img_1024
1070
+ 2002/08/19/big/img_373
1071
+ 2003/01/15/big/img_241
1072
+ 2002/07/25/big/img_84
1073
+ 2002/08/13/big/img_834
1074
+ 2002/08/10/big/img_511
1075
+ 2002/08/01/big/img_1627
1076
+ 2002/08/08/big/img_607
1077
+ 2002/08/06/big/img_2083
1078
+ 2002/08/01/big/img_1486
1079
+ 2002/08/08/big/img_700
1080
+ 2002/08/01/big/img_1954
1081
+ 2002/08/21/big/img_54
1082
+ 2002/07/30/big/img_847
1083
+ 2002/08/28/big/img_19169
1084
+ 2002/07/21/big/img_549
1085
+ 2002/08/03/big/img_693
1086
+ 2002/07/31/big/img_1002
1087
+ 2003/01/14/big/img_1035
1088
+ 2003/01/16/big/img_622
1089
+ 2002/07/30/big/img_1201
1090
+ 2002/08/10/big/img_444
1091
+ 2002/07/31/big/img_374
1092
+ 2002/08/21/big/img_301
1093
+ 2002/08/13/big/img_1095
1094
+ 2003/01/13/big/img_288
1095
+ 2002/07/25/big/img_232
1096
+ 2003/01/13/big/img_967
1097
+ 2002/08/26/big/img_360
1098
+ 2002/08/05/big/img_67
1099
+ 2002/08/29/big/img_18969
1100
+ 2002/07/28/big/img_16
1101
+ 2002/08/16/big/img_515
1102
+ 2002/07/20/big/img_708
1103
+ 2002/08/18/big/img_178
1104
+ 2003/01/15/big/img_509
1105
+ 2002/07/25/big/img_430
1106
+ 2002/08/21/big/img_738
1107
+ 2002/08/16/big/img_886
1108
+ 2002/09/02/big/img_15605
1109
+ 2002/09/01/big/img_16242
1110
+ 2002/08/24/big/img_711
1111
+ 2002/07/25/big/img_90
1112
+ 2002/08/09/big/img_491
1113
+ 2002/07/30/big/img_534
1114
+ 2003/01/13/big/img_474
1115
+ 2002/08/25/big/img_510
1116
+ 2002/08/15/big/img_555
1117
+ 2002/08/02/big/img_775
1118
+ 2002/07/23/big/img_975
1119
+ 2002/08/19/big/img_229
1120
+ 2003/01/17/big/img_860
1121
+ 2003/01/02/big/img_10
1122
+ 2002/07/23/big/img_542
1123
+ 2002/08/06/big/img_2535
1124
+ 2002/07/22/big/img_37
1125
+ 2002/08/06/big/img_2342
1126
+ 2002/08/25/big/img_515
1127
+ 2002/08/25/big/img_336
1128
+ 2002/08/18/big/img_837
1129
+ 2002/08/21/big/img_616
1130
+ 2003/01/17/big/img_24
1131
+ 2002/07/26/big/img_936
1132
+ 2002/08/14/big/img_896
1133
+ 2002/07/29/big/img_465
1134
+ 2002/07/31/big/img_543
1135
+ 2002/08/01/big/img_1411
1136
+ 2002/08/02/big/img_423
1137
+ 2002/08/21/big/img_44
1138
+ 2002/07/31/big/img_11
1139
+ 2003/01/15/big/img_628
1140
+ 2003/01/15/big/img_605
1141
+ 2002/07/30/big/img_571
1142
+ 2002/07/23/big/img_428
1143
+ 2002/08/15/big/img_942
1144
+ 2002/07/26/big/img_531
1145
+ 2003/01/16/big/img_59
1146
+ 2002/08/02/big/img_410
1147
+ 2002/07/31/big/img_230
1148
+ 2002/08/19/big/img_806
1149
+ 2003/01/14/big/img_462
1150
+ 2002/08/16/big/img_370
1151
+ 2002/08/13/big/img_380
1152
+ 2002/08/16/big/img_932
1153
+ 2002/07/19/big/img_393
1154
+ 2002/08/20/big/img_764
1155
+ 2002/08/15/big/img_616
1156
+ 2002/07/26/big/img_267
1157
+ 2002/07/27/big/img_1069
1158
+ 2002/08/14/big/img_1041
1159
+ 2003/01/13/big/img_594
1160
+ 2002/09/01/big/img_16845
1161
+ 2002/08/09/big/img_229
1162
+ 2003/01/16/big/img_639
1163
+ 2002/08/19/big/img_398
1164
+ 2002/08/18/big/img_978
1165
+ 2002/08/24/big/img_296
1166
+ 2002/07/29/big/img_415
1167
+ 2002/07/30/big/img_923
1168
+ 2002/08/18/big/img_575
1169
+ 2002/08/22/big/img_182
1170
+ 2002/07/25/big/img_806
1171
+ 2002/07/22/big/img_49
1172
+ 2002/07/29/big/img_989
1173
+ 2003/01/17/big/img_789
1174
+ 2003/01/15/big/img_503
1175
+ 2002/09/01/big/img_16062
1176
+ 2003/01/17/big/img_794
1177
+ 2002/08/15/big/img_564
1178
+ 2003/01/15/big/img_222
1179
+ 2002/08/01/big/img_1656
1180
+ 2003/01/13/big/img_432
1181
+ 2002/07/19/big/img_426
1182
+ 2002/08/17/big/img_244
1183
+ 2002/08/13/big/img_805
1184
+ 2002/09/02/big/img_15067
1185
+ 2002/08/11/big/img_58
1186
+ 2002/08/22/big/img_636
1187
+ 2002/07/22/big/img_416
1188
+ 2002/08/13/big/img_836
1189
+ 2002/08/26/big/img_363
1190
+ 2002/07/30/big/img_917
1191
+ 2003/01/14/big/img_206
1192
+ 2002/08/12/big/img_311
1193
+ 2002/08/31/big/img_17623
1194
+ 2002/07/29/big/img_661
1195
+ 2003/01/13/big/img_417
1196
+ 2002/08/02/big/img_463
1197
+ 2002/08/02/big/img_669
1198
+ 2002/08/26/big/img_670
1199
+ 2002/08/02/big/img_375
1200
+ 2002/07/19/big/img_209
1201
+ 2002/08/08/big/img_115
1202
+ 2002/08/21/big/img_399
1203
+ 2002/08/20/big/img_911
1204
+ 2002/08/07/big/img_1212
1205
+ 2002/08/20/big/img_578
1206
+ 2002/08/22/big/img_554
1207
+ 2002/08/21/big/img_484
1208
+ 2002/07/25/big/img_450
1209
+ 2002/08/03/big/img_542
1210
+ 2002/08/15/big/img_561
1211
+ 2002/07/23/big/img_360
1212
+ 2002/08/30/big/img_18137
1213
+ 2002/07/25/big/img_250
1214
+ 2002/08/03/big/img_647
1215
+ 2002/08/20/big/img_375
1216
+ 2002/08/14/big/img_387
1217
+ 2002/09/01/big/img_16990
1218
+ 2002/08/28/big/img_19341
1219
+ 2003/01/15/big/img_239
1220
+ 2002/08/20/big/img_528
1221
+ 2002/08/12/big/img_130
1222
+ 2002/09/02/big/img_15108
1223
+ 2003/01/15/big/img_372
1224
+ 2002/08/16/big/img_678
1225
+ 2002/08/04/big/img_623
1226
+ 2002/07/23/big/img_477
1227
+ 2002/08/28/big/img_19590
1228
+ 2003/01/17/big/img_978
1229
+ 2002/09/01/big/img_16692
1230
+ 2002/07/20/big/img_109
1231
+ 2002/08/06/big/img_2660
1232
+ 2003/01/14/big/img_464
1233
+ 2002/08/09/big/img_618
1234
+ 2002/07/22/big/img_722
1235
+ 2002/08/25/big/img_419
1236
+ 2002/08/03/big/img_314
1237
+ 2002/08/25/big/img_40
1238
+ 2002/07/27/big/img_430
1239
+ 2002/08/10/big/img_569
1240
+ 2002/08/23/big/img_398
1241
+ 2002/07/23/big/img_893
1242
+ 2002/08/16/big/img_261
1243
+ 2002/08/06/big/img_2668
1244
+ 2002/07/22/big/img_835
1245
+ 2002/09/02/big/img_15093
1246
+ 2003/01/16/big/img_65
1247
+ 2002/08/21/big/img_448
1248
+ 2003/01/14/big/img_351
1249
+ 2003/01/17/big/img_133
1250
+ 2002/07/28/big/img_493
1251
+ 2003/01/15/big/img_640
1252
+ 2002/09/01/big/img_16880
1253
+ 2002/08/15/big/img_350
1254
+ 2002/08/20/big/img_624
1255
+ 2002/08/25/big/img_604
1256
+ 2002/08/06/big/img_2200
1257
+ 2002/08/23/big/img_290
1258
+ 2002/08/13/big/img_1152
1259
+ 2003/01/14/big/img_251
1260
+ 2002/08/02/big/img_538
1261
+ 2002/08/22/big/img_613
1262
+ 2003/01/13/big/img_351
1263
+ 2002/08/18/big/img_368
1264
+ 2002/07/23/big/img_392
1265
+ 2002/07/25/big/img_198
1266
+ 2002/07/25/big/img_418
1267
+ 2002/08/26/big/img_614
1268
+ 2002/07/23/big/img_405
1269
+ 2003/01/14/big/img_445
1270
+ 2002/07/25/big/img_326
1271
+ 2002/08/10/big/img_734
1272
+ 2003/01/14/big/img_530
1273
+ 2002/08/08/big/img_561
1274
+ 2002/08/29/big/img_18990
1275
+ 2002/08/10/big/img_576
1276
+ 2002/07/29/big/img_1494
1277
+ 2002/07/19/big/img_198
1278
+ 2002/08/10/big/img_562
1279
+ 2002/07/22/big/img_901
1280
+ 2003/01/14/big/img_37
1281
+ 2002/09/02/big/img_15629
1282
+ 2003/01/14/big/img_58
1283
+ 2002/08/01/big/img_1364
1284
+ 2002/07/27/big/img_636
1285
+ 2003/01/13/big/img_241
1286
+ 2002/09/01/big/img_16988
1287
+ 2003/01/13/big/img_560
1288
+ 2002/08/09/big/img_533
1289
+ 2002/07/31/big/img_249
1290
+ 2003/01/17/big/img_1007
1291
+ 2002/07/21/big/img_64
1292
+ 2003/01/13/big/img_537
1293
+ 2003/01/15/big/img_606
1294
+ 2002/08/18/big/img_651
1295
+ 2002/08/24/big/img_405
1296
+ 2002/07/26/big/img_837
1297
+ 2002/08/09/big/img_562
1298
+ 2002/08/01/big/img_1983
1299
+ 2002/08/03/big/img_514
1300
+ 2002/07/29/big/img_314
1301
+ 2002/08/12/big/img_493
1302
+ 2003/01/14/big/img_121
1303
+ 2003/01/14/big/img_479
1304
+ 2002/08/04/big/img_410
1305
+ 2002/07/22/big/img_607
1306
+ 2003/01/17/big/img_417
1307
+ 2002/07/20/big/img_547
1308
+ 2002/08/13/big/img_396
1309
+ 2002/08/31/big/img_17538
1310
+ 2002/08/13/big/img_187
1311
+ 2002/08/12/big/img_328
1312
+ 2003/01/14/big/img_569
1313
+ 2002/07/27/big/img_1081
1314
+ 2002/08/14/big/img_504
1315
+ 2002/08/23/big/img_785
1316
+ 2002/07/26/big/img_339
1317
+ 2002/08/07/big/img_1156
1318
+ 2002/08/07/big/img_1456
1319
+ 2002/08/23/big/img_378
1320
+ 2002/08/27/big/img_19719
1321
+ 2002/07/31/big/img_39
1322
+ 2002/07/31/big/img_883
1323
+ 2003/01/14/big/img_676
1324
+ 2002/07/29/big/img_214
1325
+ 2002/07/26/big/img_669
1326
+ 2002/07/25/big/img_202
1327
+ 2002/08/08/big/img_259
1328
+ 2003/01/17/big/img_943
1329
+ 2003/01/15/big/img_512
1330
+ 2002/08/05/big/img_3295
1331
+ 2002/08/27/big/img_19685
1332
+ 2002/08/08/big/img_277
1333
+ 2002/08/30/big/img_18154
1334
+ 2002/07/22/big/img_663
1335
+ 2002/08/29/big/img_18914
1336
+ 2002/07/31/big/img_908
1337
+ 2002/08/27/big/img_19926
1338
+ 2003/01/13/big/img_791
1339
+ 2003/01/15/big/img_827
1340
+ 2002/08/18/big/img_878
1341
+ 2002/08/14/big/img_670
1342
+ 2002/07/20/big/img_182
1343
+ 2002/08/15/big/img_291
1344
+ 2002/08/06/big/img_2600
1345
+ 2002/07/23/big/img_587
1346
+ 2002/08/14/big/img_577
1347
+ 2003/01/15/big/img_585
1348
+ 2002/07/30/big/img_310
1349
+ 2002/08/03/big/img_658
1350
+ 2002/08/10/big/img_157
1351
+ 2002/08/19/big/img_811
1352
+ 2002/07/29/big/img_1318
1353
+ 2002/08/04/big/img_104
1354
+ 2002/07/30/big/img_332
1355
+ 2002/07/24/big/img_789
1356
+ 2002/07/29/big/img_516
1357
+ 2002/07/23/big/img_843
1358
+ 2002/08/01/big/img_1528
1359
+ 2002/08/13/big/img_798
1360
+ 2002/08/07/big/img_1729
1361
+ 2002/08/28/big/img_19448
1362
+ 2003/01/16/big/img_95
1363
+ 2002/08/12/big/img_473
1364
+ 2002/07/27/big/img_269
1365
+ 2003/01/16/big/img_621
1366
+ 2002/07/29/big/img_772
1367
+ 2002/07/24/big/img_171
1368
+ 2002/07/19/big/img_429
1369
+ 2002/08/07/big/img_1933
1370
+ 2002/08/27/big/img_19629
1371
+ 2002/08/05/big/img_3688
1372
+ 2002/08/07/big/img_1691
1373
+ 2002/07/23/big/img_600
1374
+ 2002/07/29/big/img_666
1375
+ 2002/08/25/big/img_566
1376
+ 2002/08/06/big/img_2659
1377
+ 2002/08/29/big/img_18929
1378
+ 2002/08/16/big/img_407
1379
+ 2002/08/18/big/img_774
1380
+ 2002/08/19/big/img_249
1381
+ 2002/08/06/big/img_2427
1382
+ 2002/08/29/big/img_18899
1383
+ 2002/08/01/big/img_1818
1384
+ 2002/07/31/big/img_108
1385
+ 2002/07/29/big/img_500
1386
+ 2002/08/11/big/img_115
1387
+ 2002/07/19/big/img_521
1388
+ 2002/08/02/big/img_1163
1389
+ 2002/07/22/big/img_62
1390
+ 2002/08/13/big/img_466
1391
+ 2002/08/21/big/img_956
1392
+ 2002/08/23/big/img_602
1393
+ 2002/08/20/big/img_858
1394
+ 2002/07/25/big/img_690
1395
+ 2002/07/19/big/img_130
1396
+ 2002/08/04/big/img_874
1397
+ 2002/07/26/big/img_489
1398
+ 2002/07/22/big/img_548
1399
+ 2002/08/10/big/img_191
1400
+ 2002/07/25/big/img_1051
1401
+ 2002/08/18/big/img_473
1402
+ 2002/08/12/big/img_755
1403
+ 2002/08/18/big/img_413
1404
+ 2002/08/08/big/img_1044
1405
+ 2002/08/17/big/img_680
1406
+ 2002/08/26/big/img_235
1407
+ 2002/08/20/big/img_330
1408
+ 2002/08/22/big/img_344
1409
+ 2002/08/09/big/img_593
1410
+ 2002/07/31/big/img_1006
1411
+ 2002/08/14/big/img_337
1412
+ 2002/08/16/big/img_728
1413
+ 2002/07/24/big/img_834
1414
+ 2002/08/04/big/img_552
1415
+ 2002/09/02/big/img_15213
1416
+ 2002/07/25/big/img_725
1417
+ 2002/08/30/big/img_18290
1418
+ 2003/01/01/big/img_475
1419
+ 2002/07/27/big/img_1083
1420
+ 2002/08/29/big/img_18955
1421
+ 2002/08/31/big/img_17232
1422
+ 2002/08/08/big/img_480
1423
+ 2002/08/01/big/img_1311
1424
+ 2002/07/30/big/img_745
1425
+ 2002/08/03/big/img_649
1426
+ 2002/08/12/big/img_193
1427
+ 2002/07/29/big/img_228
1428
+ 2002/07/25/big/img_836
1429
+ 2002/08/20/big/img_400
1430
+ 2002/07/30/big/img_507
1431
+ 2002/09/02/big/img_15072
1432
+ 2002/07/26/big/img_658
1433
+ 2002/07/28/big/img_503
1434
+ 2002/08/05/big/img_3814
1435
+ 2002/08/24/big/img_745
1436
+ 2003/01/13/big/img_817
1437
+ 2002/08/08/big/img_579
1438
+ 2002/07/22/big/img_251
1439
+ 2003/01/13/big/img_689
1440
+ 2002/07/25/big/img_407
1441
+ 2002/08/13/big/img_1050
1442
+ 2002/08/14/big/img_733
1443
+ 2002/07/24/big/img_82
1444
+ 2003/01/17/big/img_288
1445
+ 2003/01/15/big/img_475
1446
+ 2002/08/14/big/img_620
1447
+ 2002/08/21/big/img_167
1448
+ 2002/07/19/big/img_300
1449
+ 2002/07/26/big/img_219
1450
+ 2002/08/01/big/img_1468
1451
+ 2002/07/23/big/img_260
1452
+ 2002/08/09/big/img_555
1453
+ 2002/07/19/big/img_160
1454
+ 2002/08/02/big/img_1060
1455
+ 2003/01/14/big/img_149
1456
+ 2002/08/15/big/img_346
1457
+ 2002/08/24/big/img_597
1458
+ 2002/08/22/big/img_502
1459
+ 2002/08/30/big/img_18228
1460
+ 2002/07/21/big/img_766
1461
+ 2003/01/15/big/img_841
1462
+ 2002/07/24/big/img_516
1463
+ 2002/08/02/big/img_265
1464
+ 2002/08/15/big/img_1243
1465
+ 2003/01/15/big/img_223
1466
+ 2002/08/04/big/img_236
1467
+ 2002/07/22/big/img_309
1468
+ 2002/07/20/big/img_656
1469
+ 2002/07/31/big/img_412
1470
+ 2002/09/01/big/img_16462
1471
+ 2003/01/16/big/img_431
1472
+ 2002/07/22/big/img_793
1473
+ 2002/08/15/big/img_877
1474
+ 2002/07/26/big/img_282
1475
+ 2002/07/25/big/img_529
1476
+ 2002/08/24/big/img_613
1477
+ 2003/01/17/big/img_700
1478
+ 2002/08/06/big/img_2526
1479
+ 2002/08/24/big/img_394
1480
+ 2002/08/21/big/img_521
1481
+ 2002/08/25/big/img_560
1482
+ 2002/07/29/big/img_966
1483
+ 2002/07/25/big/img_448
1484
+ 2003/01/13/big/img_782
1485
+ 2002/08/21/big/img_296
1486
+ 2002/09/01/big/img_16755
1487
+ 2002/08/05/big/img_3552
1488
+ 2002/09/02/big/img_15823
1489
+ 2003/01/14/big/img_193
1490
+ 2002/07/21/big/img_159
1491
+ 2002/08/02/big/img_564
1492
+ 2002/08/16/big/img_300
1493
+ 2002/07/19/big/img_269
1494
+ 2002/08/13/big/img_676
1495
+ 2002/07/28/big/img_57
1496
+ 2002/08/05/big/img_3318
1497
+ 2002/07/31/big/img_218
1498
+ 2002/08/21/big/img_898
1499
+ 2002/07/29/big/img_109
1500
+ 2002/07/19/big/img_854
1501
+ 2002/08/23/big/img_311
1502
+ 2002/08/14/big/img_318
1503
+ 2002/07/25/big/img_523
1504
+ 2002/07/21/big/img_678
1505
+ 2003/01/17/big/img_690
1506
+ 2002/08/28/big/img_19503
1507
+ 2002/08/18/big/img_251
1508
+ 2002/08/22/big/img_672
1509
+ 2002/08/20/big/img_663
1510
+ 2002/08/02/big/img_148
1511
+ 2002/09/02/big/img_15580
1512
+ 2002/07/25/big/img_778
1513
+ 2002/08/14/big/img_565
1514
+ 2002/08/12/big/img_374
1515
+ 2002/08/13/big/img_1018
1516
+ 2002/08/20/big/img_474
1517
+ 2002/08/25/big/img_33
1518
+ 2002/08/02/big/img_1190
1519
+ 2002/08/08/big/img_864
1520
+ 2002/08/14/big/img_1071
1521
+ 2002/08/30/big/img_18103
1522
+ 2002/08/18/big/img_533
1523
+ 2003/01/16/big/img_650
1524
+ 2002/07/25/big/img_108
1525
+ 2002/07/26/big/img_81
1526
+ 2002/07/27/big/img_543
1527
+ 2002/07/29/big/img_521
1528
+ 2003/01/13/big/img_434
1529
+ 2002/08/26/big/img_674
1530
+ 2002/08/06/big/img_2932
1531
+ 2002/08/07/big/img_1262
1532
+ 2003/01/15/big/img_201
1533
+ 2003/01/16/big/img_673
1534
+ 2002/09/02/big/img_15988
1535
+ 2002/07/29/big/img_1306
1536
+ 2003/01/14/big/img_1072
1537
+ 2002/08/30/big/img_18232
1538
+ 2002/08/05/big/img_3711
1539
+ 2002/07/23/big/img_775
1540
+ 2002/08/01/big/img_16
1541
+ 2003/01/16/big/img_630
1542
+ 2002/08/22/big/img_695
1543
+ 2002/08/14/big/img_51
1544
+ 2002/08/14/big/img_782
1545
+ 2002/08/24/big/img_742
1546
+ 2003/01/14/big/img_512
1547
+ 2003/01/15/big/img_1183
1548
+ 2003/01/15/big/img_714
1549
+ 2002/08/01/big/img_2078
1550
+ 2002/07/31/big/img_682
1551
+ 2002/09/02/big/img_15687
1552
+ 2002/07/26/big/img_518
1553
+ 2002/08/27/big/img_19676
1554
+ 2002/09/02/big/img_15969
1555
+ 2002/08/02/big/img_931
1556
+ 2002/08/25/big/img_508
1557
+ 2002/08/29/big/img_18616
1558
+ 2002/07/22/big/img_839
1559
+ 2002/07/28/big/img_313
1560
+ 2003/01/14/big/img_155
1561
+ 2002/08/02/big/img_1105
1562
+ 2002/08/09/big/img_53
1563
+ 2002/08/16/big/img_469
1564
+ 2002/08/15/big/img_502
1565
+ 2002/08/20/big/img_575
1566
+ 2002/07/25/big/img_138
1567
+ 2003/01/16/big/img_579
1568
+ 2002/07/19/big/img_352
1569
+ 2003/01/14/big/img_762
1570
+ 2003/01/01/big/img_588
1571
+ 2002/08/02/big/img_981
1572
+ 2002/08/21/big/img_447
1573
+ 2002/09/01/big/img_16151
1574
+ 2003/01/14/big/img_769
1575
+ 2002/08/23/big/img_461
1576
+ 2002/08/17/big/img_240
1577
+ 2002/09/02/big/img_15220
1578
+ 2002/07/19/big/img_408
1579
+ 2002/09/02/big/img_15496
1580
+ 2002/07/29/big/img_758
1581
+ 2002/08/28/big/img_19392
1582
+ 2002/08/06/big/img_2723
1583
+ 2002/08/31/big/img_17752
1584
+ 2002/08/23/big/img_469
1585
+ 2002/08/13/big/img_515
1586
+ 2002/09/02/big/img_15551
1587
+ 2002/08/03/big/img_462
1588
+ 2002/07/24/big/img_613
1589
+ 2002/07/22/big/img_61
1590
+ 2002/08/08/big/img_171
1591
+ 2002/08/21/big/img_177
1592
+ 2003/01/14/big/img_105
1593
+ 2002/08/02/big/img_1017
1594
+ 2002/08/22/big/img_106
1595
+ 2002/07/27/big/img_542
1596
+ 2002/07/21/big/img_665
1597
+ 2002/07/23/big/img_595
1598
+ 2002/08/04/big/img_657
1599
+ 2002/08/29/big/img_19002
1600
+ 2003/01/15/big/img_550
1601
+ 2002/08/14/big/img_662
1602
+ 2002/07/20/big/img_425
1603
+ 2002/08/30/big/img_18528
1604
+ 2002/07/26/big/img_611
1605
+ 2002/07/22/big/img_849
1606
+ 2002/08/07/big/img_1655
1607
+ 2002/08/21/big/img_638
1608
+ 2003/01/17/big/img_732
1609
+ 2003/01/01/big/img_496
1610
+ 2002/08/18/big/img_713
1611
+ 2002/08/08/big/img_109
1612
+ 2002/07/27/big/img_1008
1613
+ 2002/07/20/big/img_559
1614
+ 2002/08/16/big/img_699
1615
+ 2002/08/31/big/img_17702
1616
+ 2002/07/31/big/img_1013
1617
+ 2002/08/01/big/img_2027
1618
+ 2002/08/02/big/img_1001
1619
+ 2002/08/03/big/img_210
1620
+ 2002/08/01/big/img_2087
1621
+ 2003/01/14/big/img_199
1622
+ 2002/07/29/big/img_48
1623
+ 2002/07/19/big/img_727
1624
+ 2002/08/09/big/img_249
1625
+ 2002/08/04/big/img_632
1626
+ 2002/08/22/big/img_620
1627
+ 2003/01/01/big/img_457
1628
+ 2002/08/05/big/img_3223
1629
+ 2002/07/27/big/img_240
1630
+ 2002/07/25/big/img_797
1631
+ 2002/08/13/big/img_430
1632
+ 2002/07/25/big/img_615
1633
+ 2002/08/12/big/img_28
1634
+ 2002/07/30/big/img_220
1635
+ 2002/07/24/big/img_89
1636
+ 2002/08/21/big/img_357
1637
+ 2002/08/09/big/img_590
1638
+ 2003/01/13/big/img_525
1639
+ 2002/08/17/big/img_818
1640
+ 2003/01/02/big/img_7
1641
+ 2002/07/26/big/img_636
1642
+ 2003/01/13/big/img_1122
1643
+ 2002/07/23/big/img_810
1644
+ 2002/08/20/big/img_888
1645
+ 2002/07/27/big/img_3
1646
+ 2002/08/15/big/img_451
1647
+ 2002/09/02/big/img_15787
1648
+ 2002/07/31/big/img_281
1649
+ 2002/08/05/big/img_3274
1650
+ 2002/08/07/big/img_1254
1651
+ 2002/07/31/big/img_27
1652
+ 2002/08/01/big/img_1366
1653
+ 2002/07/30/big/img_182
1654
+ 2002/08/27/big/img_19690
1655
+ 2002/07/29/big/img_68
1656
+ 2002/08/23/big/img_754
1657
+ 2002/07/30/big/img_540
1658
+ 2002/08/27/big/img_20063
1659
+ 2002/08/14/big/img_471
1660
+ 2002/08/02/big/img_615
1661
+ 2002/07/30/big/img_186
1662
+ 2002/08/25/big/img_150
1663
+ 2002/07/27/big/img_626
1664
+ 2002/07/20/big/img_225
1665
+ 2003/01/15/big/img_1252
1666
+ 2002/07/19/big/img_367
1667
+ 2003/01/15/big/img_582
1668
+ 2002/08/09/big/img_572
1669
+ 2002/08/08/big/img_428
1670
+ 2003/01/15/big/img_639
1671
+ 2002/08/28/big/img_19245
1672
+ 2002/07/24/big/img_321
1673
+ 2002/08/02/big/img_662
1674
+ 2002/08/08/big/img_1033
1675
+ 2003/01/17/big/img_867
1676
+ 2002/07/22/big/img_652
1677
+ 2003/01/14/big/img_224
1678
+ 2002/08/18/big/img_49
1679
+ 2002/07/26/big/img_46
1680
+ 2002/08/31/big/img_18021
1681
+ 2002/07/25/big/img_151
1682
+ 2002/08/23/big/img_540
1683
+ 2002/08/25/big/img_693
1684
+ 2002/07/23/big/img_340
1685
+ 2002/07/28/big/img_117
1686
+ 2002/09/02/big/img_15768
1687
+ 2002/08/26/big/img_562
1688
+ 2002/07/24/big/img_480
1689
+ 2003/01/15/big/img_341
1690
+ 2002/08/10/big/img_783
1691
+ 2002/08/20/big/img_132
1692
+ 2003/01/14/big/img_370
1693
+ 2002/07/20/big/img_720
1694
+ 2002/08/03/big/img_144
1695
+ 2002/08/20/big/img_538
1696
+ 2002/08/01/big/img_1745
1697
+ 2002/08/11/big/img_683
1698
+ 2002/08/03/big/img_328
1699
+ 2002/08/10/big/img_793
1700
+ 2002/08/14/big/img_689
1701
+ 2002/08/02/big/img_162
1702
+ 2003/01/17/big/img_411
1703
+ 2002/07/31/big/img_361
1704
+ 2002/08/15/big/img_289
1705
+ 2002/08/08/big/img_254
1706
+ 2002/08/15/big/img_996
1707
+ 2002/08/20/big/img_785
1708
+ 2002/07/24/big/img_511
1709
+ 2002/08/06/big/img_2614
1710
+ 2002/08/29/big/img_18733
1711
+ 2002/08/17/big/img_78
1712
+ 2002/07/30/big/img_378
1713
+ 2002/08/31/big/img_17947
1714
+ 2002/08/26/big/img_88
1715
+ 2002/07/30/big/img_558
1716
+ 2002/08/02/big/img_67
1717
+ 2003/01/14/big/img_325
1718
+ 2002/07/29/big/img_1357
1719
+ 2002/07/19/big/img_391
1720
+ 2002/07/30/big/img_307
1721
+ 2003/01/13/big/img_219
1722
+ 2002/07/24/big/img_807
1723
+ 2002/08/23/big/img_543
1724
+ 2002/08/29/big/img_18620
1725
+ 2002/07/22/big/img_769
1726
+ 2002/08/26/big/img_503
1727
+ 2002/07/30/big/img_78
1728
+ 2002/08/14/big/img_1036
1729
+ 2002/08/09/big/img_58
1730
+ 2002/07/24/big/img_616
1731
+ 2002/08/02/big/img_464
1732
+ 2002/07/26/big/img_576
1733
+ 2002/07/22/big/img_273
1734
+ 2003/01/16/big/img_470
1735
+ 2002/07/29/big/img_329
1736
+ 2002/07/30/big/img_1086
1737
+ 2002/07/31/big/img_353
1738
+ 2002/09/02/big/img_15275
1739
+ 2003/01/17/big/img_555
1740
+ 2002/08/26/big/img_212
1741
+ 2002/08/01/big/img_1692
1742
+ 2003/01/15/big/img_600
1743
+ 2002/07/29/big/img_825
1744
+ 2002/08/08/big/img_68
1745
+ 2002/08/10/big/img_719
1746
+ 2002/07/31/big/img_636
1747
+ 2002/07/29/big/img_325
1748
+ 2002/07/21/big/img_515
1749
+ 2002/07/22/big/img_705
1750
+ 2003/01/13/big/img_818
1751
+ 2002/08/09/big/img_486
1752
+ 2002/08/22/big/img_141
1753
+ 2002/07/22/big/img_303
1754
+ 2002/08/09/big/img_393
1755
+ 2002/07/29/big/img_963
1756
+ 2002/08/02/big/img_1215
1757
+ 2002/08/19/big/img_674
1758
+ 2002/08/12/big/img_690
1759
+ 2002/08/21/big/img_637
1760
+ 2002/08/21/big/img_841
1761
+ 2002/08/24/big/img_71
1762
+ 2002/07/25/big/img_596
1763
+ 2002/07/24/big/img_864
1764
+ 2002/08/18/big/img_293
1765
+ 2003/01/14/big/img_657
1766
+ 2002/08/15/big/img_411
1767
+ 2002/08/16/big/img_348
1768
+ 2002/08/05/big/img_3157
1769
+ 2002/07/20/big/img_663
1770
+ 2003/01/13/big/img_654
1771
+ 2003/01/16/big/img_433
1772
+ 2002/08/30/big/img_18200
1773
+ 2002/08/12/big/img_226
1774
+ 2003/01/16/big/img_491
1775
+ 2002/08/08/big/img_666
1776
+ 2002/07/19/big/img_576
1777
+ 2003/01/15/big/img_776
1778
+ 2003/01/16/big/img_899
1779
+ 2002/07/19/big/img_397
1780
+ 2002/08/14/big/img_44
1781
+ 2003/01/15/big/img_762
1782
+ 2002/08/02/big/img_982
1783
+ 2002/09/02/big/img_15234
1784
+ 2002/08/17/big/img_556
1785
+ 2002/08/21/big/img_410
1786
+ 2002/08/21/big/img_386
1787
+ 2002/07/19/big/img_690
1788
+ 2002/08/05/big/img_3052
1789
+ 2002/08/14/big/img_219
1790
+ 2002/08/16/big/img_273
1791
+ 2003/01/15/big/img_752
1792
+ 2002/08/08/big/img_184
1793
+ 2002/07/31/big/img_743
1794
+ 2002/08/23/big/img_338
1795
+ 2003/01/14/big/img_1055
1796
+ 2002/08/05/big/img_3405
1797
+ 2003/01/15/big/img_17
1798
+ 2002/08/03/big/img_141
1799
+ 2002/08/14/big/img_549
1800
+ 2002/07/27/big/img_1034
1801
+ 2002/07/31/big/img_932
1802
+ 2002/08/30/big/img_18487
1803
+ 2002/09/02/big/img_15814
1804
+ 2002/08/01/big/img_2086
1805
+ 2002/09/01/big/img_16535
1806
+ 2002/07/22/big/img_500
1807
+ 2003/01/13/big/img_400
1808
+ 2002/08/25/big/img_607
1809
+ 2002/08/30/big/img_18384
1810
+ 2003/01/14/big/img_951
1811
+ 2002/08/13/big/img_1150
1812
+ 2002/08/08/big/img_1022
1813
+ 2002/08/10/big/img_428
1814
+ 2002/08/28/big/img_19242
1815
+ 2002/08/05/big/img_3098
1816
+ 2002/07/23/big/img_400
1817
+ 2002/08/26/big/img_365
1818
+ 2002/07/20/big/img_318
1819
+ 2002/08/13/big/img_740
1820
+ 2003/01/16/big/img_37
1821
+ 2002/08/26/big/img_274
1822
+ 2002/08/02/big/img_205
1823
+ 2002/08/21/big/img_695
1824
+ 2002/08/06/big/img_2289
1825
+ 2002/08/20/big/img_794
1826
+ 2002/08/18/big/img_438
1827
+ 2002/08/07/big/img_1380
1828
+ 2002/08/02/big/img_737
1829
+ 2002/08/07/big/img_1651
1830
+ 2002/08/15/big/img_1238
1831
+ 2002/08/01/big/img_1681
1832
+ 2002/08/06/big/img_3017
1833
+ 2002/07/23/big/img_706
1834
+ 2002/07/31/big/img_392
1835
+ 2002/08/09/big/img_539
1836
+ 2002/07/29/big/img_835
1837
+ 2002/08/26/big/img_723
1838
+ 2002/08/28/big/img_19235
1839
+ 2003/01/16/big/img_353
1840
+ 2002/08/10/big/img_150
1841
+ 2002/08/29/big/img_19025
1842
+ 2002/08/21/big/img_310
1843
+ 2002/08/10/big/img_823
1844
+ 2002/07/26/big/img_981
1845
+ 2002/08/11/big/img_288
1846
+ 2002/08/19/big/img_534
1847
+ 2002/08/21/big/img_300
1848
+ 2002/07/31/big/img_49
1849
+ 2002/07/30/big/img_469
1850
+ 2002/08/28/big/img_19197
1851
+ 2002/08/25/big/img_205
1852
+ 2002/08/10/big/img_390
1853
+ 2002/08/23/big/img_291
1854
+ 2002/08/26/big/img_230
1855
+ 2002/08/18/big/img_76
1856
+ 2002/07/23/big/img_409
1857
+ 2002/08/14/big/img_1053
1858
+ 2003/01/14/big/img_291
1859
+ 2002/08/10/big/img_503
1860
+ 2002/08/27/big/img_19928
1861
+ 2002/08/03/big/img_563
1862
+ 2002/08/17/big/img_250
1863
+ 2002/08/06/big/img_2381
1864
+ 2002/08/17/big/img_948
1865
+ 2002/08/06/big/img_2710
1866
+ 2002/07/22/big/img_696
1867
+ 2002/07/31/big/img_670
1868
+ 2002/08/12/big/img_594
1869
+ 2002/07/29/big/img_624
1870
+ 2003/01/17/big/img_934
1871
+ 2002/08/03/big/img_584
1872
+ 2002/08/22/big/img_1003
1873
+ 2002/08/05/big/img_3396
1874
+ 2003/01/13/big/img_570
1875
+ 2002/08/02/big/img_219
1876
+ 2002/09/02/big/img_15774
1877
+ 2002/08/16/big/img_818
1878
+ 2002/08/23/big/img_402
1879
+ 2003/01/14/big/img_552
1880
+ 2002/07/29/big/img_71
1881
+ 2002/08/05/big/img_3592
1882
+ 2002/08/16/big/img_80
1883
+ 2002/07/27/big/img_672
1884
+ 2003/01/13/big/img_470
1885
+ 2003/01/16/big/img_702
1886
+ 2002/09/01/big/img_16130
1887
+ 2002/08/08/big/img_240
1888
+ 2002/09/01/big/img_16338
1889
+ 2002/07/26/big/img_312
1890
+ 2003/01/14/big/img_538
1891
+ 2002/07/20/big/img_695
1892
+ 2002/08/30/big/img_18098
1893
+ 2002/08/25/big/img_259
1894
+ 2002/08/16/big/img_1042
1895
+ 2002/08/09/big/img_837
1896
+ 2002/08/31/big/img_17760
1897
+ 2002/07/31/big/img_14
1898
+ 2002/08/09/big/img_361
1899
+ 2003/01/16/big/img_107
1900
+ 2002/08/14/big/img_124
1901
+ 2002/07/19/big/img_463
1902
+ 2003/01/15/big/img_275
1903
+ 2002/07/25/big/img_1151
1904
+ 2002/07/29/big/img_1501
1905
+ 2002/08/27/big/img_19889
1906
+ 2002/08/29/big/img_18603
1907
+ 2003/01/17/big/img_601
1908
+ 2002/08/25/big/img_355
1909
+ 2002/08/08/big/img_297
1910
+ 2002/08/20/big/img_290
1911
+ 2002/07/31/big/img_195
1912
+ 2003/01/01/big/img_336
1913
+ 2002/08/18/big/img_369
1914
+ 2002/07/25/big/img_621
1915
+ 2002/08/11/big/img_508
1916
+ 2003/01/14/big/img_458
1917
+ 2003/01/15/big/img_795
1918
+ 2002/08/12/big/img_498
1919
+ 2002/08/01/big/img_1734
1920
+ 2002/08/02/big/img_246
1921
+ 2002/08/16/big/img_565
1922
+ 2002/08/11/big/img_475
1923
+ 2002/08/22/big/img_408
1924
+ 2002/07/28/big/img_78
1925
+ 2002/07/21/big/img_81
1926
+ 2003/01/14/big/img_697
1927
+ 2002/08/14/big/img_661
1928
+ 2002/08/15/big/img_507
1929
+ 2002/08/19/big/img_55
1930
+ 2002/07/22/big/img_152
1931
+ 2003/01/14/big/img_470
1932
+ 2002/08/03/big/img_379
1933
+ 2002/08/22/big/img_506
1934
+ 2003/01/16/big/img_966
1935
+ 2002/08/18/big/img_698
1936
+ 2002/08/24/big/img_528
1937
+ 2002/08/23/big/img_10
1938
+ 2002/08/01/big/img_1655
1939
+ 2002/08/22/big/img_953
1940
+ 2002/07/19/big/img_630
1941
+ 2002/07/22/big/img_889
1942
+ 2002/08/16/big/img_351
1943
+ 2003/01/16/big/img_83
1944
+ 2002/07/19/big/img_805
1945
+ 2002/08/14/big/img_704
1946
+ 2002/07/19/big/img_389
1947
+ 2002/08/31/big/img_17765
1948
+ 2002/07/29/big/img_606
1949
+ 2003/01/17/big/img_939
1950
+ 2002/09/02/big/img_15081
1951
+ 2002/08/21/big/img_181
1952
+ 2002/07/29/big/img_1321
1953
+ 2002/07/21/big/img_497
1954
+ 2002/07/20/big/img_539
1955
+ 2002/08/24/big/img_119
1956
+ 2002/08/01/big/img_1281
1957
+ 2002/07/26/big/img_207
1958
+ 2002/07/26/big/img_432
1959
+ 2002/07/27/big/img_1006
1960
+ 2002/08/05/big/img_3087
1961
+ 2002/08/14/big/img_252
1962
+ 2002/08/14/big/img_798
1963
+ 2002/07/24/big/img_538
1964
+ 2002/09/02/big/img_15507
1965
+ 2002/08/08/big/img_901
1966
+ 2003/01/14/big/img_557
1967
+ 2002/08/07/big/img_1819
1968
+ 2002/08/04/big/img_470
1969
+ 2002/08/01/big/img_1504
1970
+ 2002/08/16/big/img_1070
1971
+ 2002/08/16/big/img_372
1972
+ 2002/08/23/big/img_416
1973
+ 2002/08/30/big/img_18208
1974
+ 2002/08/01/big/img_2043
1975
+ 2002/07/22/big/img_385
1976
+ 2002/08/22/big/img_466
1977
+ 2002/08/21/big/img_869
1978
+ 2002/08/28/big/img_19429
1979
+ 2002/08/02/big/img_770
1980
+ 2002/07/23/big/img_433
1981
+ 2003/01/14/big/img_13
1982
+ 2002/07/27/big/img_953
1983
+ 2002/09/02/big/img_15728
1984
+ 2002/08/01/big/img_1361
1985
+ 2002/08/29/big/img_18897
1986
+ 2002/08/26/big/img_534
1987
+ 2002/08/11/big/img_121
1988
+ 2002/08/26/big/img_20130
1989
+ 2002/07/31/big/img_363
1990
+ 2002/08/13/big/img_978
1991
+ 2002/07/25/big/img_835
1992
+ 2002/08/02/big/img_906
1993
+ 2003/01/14/big/img_548
1994
+ 2002/07/30/big/img_80
1995
+ 2002/07/26/big/img_982
1996
+ 2003/01/16/big/img_99
1997
+ 2002/08/19/big/img_362
1998
+ 2002/08/24/big/img_376
1999
+ 2002/08/07/big/img_1264
2000
+ 2002/07/27/big/img_938
2001
+ 2003/01/17/big/img_535
2002
+ 2002/07/26/big/img_457
2003
+ 2002/08/08/big/img_848
2004
+ 2003/01/15/big/img_859
2005
+ 2003/01/15/big/img_622
2006
+ 2002/07/30/big/img_403
2007
+ 2002/07/29/big/img_217
2008
+ 2002/07/26/big/img_891
2009
+ 2002/07/24/big/img_70
2010
+ 2002/08/25/big/img_619
2011
+ 2002/08/05/big/img_3375
2012
+ 2002/08/01/big/img_2160
2013
+ 2002/08/06/big/img_2227
2014
+ 2003/01/14/big/img_117
2015
+ 2002/08/14/big/img_227
2016
+ 2002/08/13/big/img_565
2017
+ 2002/08/19/big/img_625
2018
+ 2002/08/03/big/img_812
2019
+ 2002/07/24/big/img_41
2020
+ 2002/08/16/big/img_235
2021
+ 2002/07/29/big/img_759
2022
+ 2002/07/21/big/img_433
2023
+ 2002/07/29/big/img_190
2024
+ 2003/01/16/big/img_435
2025
+ 2003/01/13/big/img_708
2026
+ 2002/07/30/big/img_57
2027
+ 2002/08/22/big/img_162
2028
+ 2003/01/01/big/img_558
2029
+ 2003/01/15/big/img_604
2030
+ 2002/08/16/big/img_935
2031
+ 2002/08/20/big/img_394
2032
+ 2002/07/28/big/img_465
2033
+ 2002/09/02/big/img_15534
2034
+ 2002/08/16/big/img_87
2035
+ 2002/07/22/big/img_469
2036
+ 2002/08/12/big/img_245
2037
+ 2003/01/13/big/img_236
2038
+ 2002/08/06/big/img_2736
2039
+ 2002/08/03/big/img_348
2040
+ 2003/01/14/big/img_218
2041
+ 2002/07/26/big/img_232
2042
+ 2003/01/15/big/img_244
2043
+ 2002/07/25/big/img_1121
2044
+ 2002/08/01/big/img_1484
2045
+ 2002/07/26/big/img_541
2046
+ 2002/08/07/big/img_1244
2047
+ 2002/07/31/big/img_3
2048
+ 2002/08/30/big/img_18437
2049
+ 2002/08/29/big/img_19094
2050
+ 2002/08/01/big/img_1355
2051
+ 2002/08/19/big/img_338
2052
+ 2002/07/19/big/img_255
2053
+ 2002/07/21/big/img_76
2054
+ 2002/08/25/big/img_199
2055
+ 2002/08/12/big/img_740
2056
+ 2002/07/30/big/img_852
2057
+ 2002/08/15/big/img_599
2058
+ 2002/08/23/big/img_254
2059
+ 2002/08/19/big/img_125
2060
+ 2002/07/24/big/img_2
2061
+ 2002/08/04/big/img_145
2062
+ 2002/08/05/big/img_3137
2063
+ 2002/07/28/big/img_463
2064
+ 2003/01/14/big/img_801
2065
+ 2002/07/23/big/img_366
2066
+ 2002/08/26/big/img_600
2067
+ 2002/08/26/big/img_649
2068
+ 2002/09/02/big/img_15849
2069
+ 2002/07/26/big/img_248
2070
+ 2003/01/13/big/img_200
2071
+ 2002/08/07/big/img_1794
2072
+ 2002/08/31/big/img_17270
2073
+ 2002/08/23/big/img_608
2074
+ 2003/01/13/big/img_837
2075
+ 2002/08/23/big/img_581
2076
+ 2002/08/20/big/img_754
2077
+ 2002/08/18/big/img_183
2078
+ 2002/08/20/big/img_328
2079
+ 2002/07/22/big/img_494
2080
+ 2002/07/29/big/img_399
2081
+ 2002/08/28/big/img_19284
2082
+ 2002/08/08/big/img_566
2083
+ 2002/07/25/big/img_376
2084
+ 2002/07/23/big/img_138
2085
+ 2002/07/25/big/img_435
2086
+ 2002/08/17/big/img_685
2087
+ 2002/07/19/big/img_90
2088
+ 2002/07/20/big/img_716
2089
+ 2002/08/31/big/img_17458
2090
+ 2002/08/26/big/img_461
2091
+ 2002/07/25/big/img_355
2092
+ 2002/08/06/big/img_2152
2093
+ 2002/07/27/big/img_932
2094
+ 2002/07/23/big/img_232
2095
+ 2002/08/08/big/img_1020
2096
+ 2002/07/31/big/img_366
2097
+ 2002/08/06/big/img_2667
2098
+ 2002/08/21/big/img_465
2099
+ 2002/08/15/big/img_305
2100
+ 2002/08/02/big/img_247
2101
+ 2002/07/28/big/img_46
2102
+ 2002/08/27/big/img_19922
2103
+ 2002/08/23/big/img_643
2104
+ 2003/01/13/big/img_624
2105
+ 2002/08/23/big/img_625
2106
+ 2002/08/05/big/img_3787
2107
+ 2003/01/13/big/img_627
2108
+ 2002/09/01/big/img_16381
2109
+ 2002/08/05/big/img_3668
2110
+ 2002/07/21/big/img_535
2111
+ 2002/08/27/big/img_19680
2112
+ 2002/07/22/big/img_413
2113
+ 2002/07/29/big/img_481
2114
+ 2003/01/15/big/img_496
2115
+ 2002/07/23/big/img_701
2116
+ 2002/08/29/big/img_18670
2117
+ 2002/07/28/big/img_319
2118
+ 2003/01/14/big/img_517
2119
+ 2002/07/26/big/img_256
2120
+ 2003/01/16/big/img_593
2121
+ 2002/07/30/big/img_956
2122
+ 2002/07/30/big/img_667
2123
+ 2002/07/25/big/img_100
2124
+ 2002/08/11/big/img_570
2125
+ 2002/07/26/big/img_745
2126
+ 2002/08/04/big/img_834
2127
+ 2002/08/25/big/img_521
2128
+ 2002/08/01/big/img_2148
2129
+ 2002/09/02/big/img_15183
2130
+ 2002/08/22/big/img_514
2131
+ 2002/08/23/big/img_477
2132
+ 2002/07/23/big/img_336
2133
+ 2002/07/26/big/img_481
2134
+ 2002/08/20/big/img_409
2135
+ 2002/07/23/big/img_918
2136
+ 2002/08/09/big/img_474
2137
+ 2002/08/02/big/img_929
2138
+ 2002/08/31/big/img_17932
2139
+ 2002/08/19/big/img_161
2140
+ 2002/08/09/big/img_667
2141
+ 2002/07/31/big/img_805
2142
+ 2002/09/02/big/img_15678
2143
+ 2002/08/31/big/img_17509
2144
+ 2002/08/29/big/img_18998
2145
+ 2002/07/23/big/img_301
2146
+ 2002/08/07/big/img_1612
2147
+ 2002/08/06/big/img_2472
2148
+ 2002/07/23/big/img_466
2149
+ 2002/08/27/big/img_19634
2150
+ 2003/01/16/big/img_16
2151
+ 2002/08/14/big/img_193
2152
+ 2002/08/21/big/img_340
2153
+ 2002/08/27/big/img_19799
2154
+ 2002/08/01/big/img_1345
2155
+ 2002/08/07/big/img_1448
2156
+ 2002/08/11/big/img_324
2157
+ 2003/01/16/big/img_754
2158
+ 2002/08/13/big/img_418
2159
+ 2003/01/16/big/img_544
2160
+ 2002/08/19/big/img_135
2161
+ 2002/08/10/big/img_455
2162
+ 2002/08/10/big/img_693
2163
+ 2002/08/31/big/img_17967
2164
+ 2002/08/28/big/img_19229
2165
+ 2002/08/04/big/img_811
2166
+ 2002/09/01/big/img_16225
2167
+ 2003/01/16/big/img_428
2168
+ 2002/09/02/big/img_15295
2169
+ 2002/07/26/big/img_108
2170
+ 2002/07/21/big/img_477
2171
+ 2002/08/07/big/img_1354
2172
+ 2002/08/23/big/img_246
2173
+ 2002/08/16/big/img_652
2174
+ 2002/07/27/big/img_553
2175
+ 2002/07/31/big/img_346
2176
+ 2002/08/04/big/img_537
2177
+ 2002/08/08/big/img_498
2178
+ 2002/08/29/big/img_18956
2179
+ 2003/01/13/big/img_922
2180
+ 2002/08/31/big/img_17425
2181
+ 2002/07/26/big/img_438
2182
+ 2002/08/19/big/img_185
2183
+ 2003/01/16/big/img_33
2184
+ 2002/08/10/big/img_252
2185
+ 2002/07/29/big/img_598
2186
+ 2002/08/27/big/img_19820
2187
+ 2002/08/06/big/img_2664
2188
+ 2002/08/20/big/img_705
2189
+ 2003/01/14/big/img_816
2190
+ 2002/08/03/big/img_552
2191
+ 2002/07/25/big/img_561
2192
+ 2002/07/25/big/img_934
2193
+ 2002/08/01/big/img_1893
2194
+ 2003/01/14/big/img_746
2195
+ 2003/01/16/big/img_519
2196
+ 2002/08/03/big/img_681
2197
+ 2002/07/24/big/img_808
2198
+ 2002/08/14/big/img_803
2199
+ 2002/08/25/big/img_155
2200
+ 2002/07/30/big/img_1107
2201
+ 2002/08/29/big/img_18882
2202
+ 2003/01/15/big/img_598
2203
+ 2002/08/19/big/img_122
2204
+ 2002/07/30/big/img_428
2205
+ 2002/07/24/big/img_684
2206
+ 2002/08/22/big/img_192
2207
+ 2002/08/22/big/img_543
2208
+ 2002/08/07/big/img_1318
2209
+ 2002/08/18/big/img_25
2210
+ 2002/07/26/big/img_583
2211
+ 2002/07/20/big/img_464
2212
+ 2002/08/19/big/img_664
2213
+ 2002/08/24/big/img_861
2214
+ 2002/09/01/big/img_16136
2215
+ 2002/08/22/big/img_400
2216
+ 2002/08/12/big/img_445
2217
+ 2003/01/14/big/img_174
2218
+ 2002/08/27/big/img_19677
2219
+ 2002/08/31/big/img_17214
2220
+ 2002/08/30/big/img_18175
2221
+ 2003/01/17/big/img_402
2222
+ 2002/08/06/big/img_2396
2223
+ 2002/08/18/big/img_448
2224
+ 2002/08/21/big/img_165
2225
+ 2002/08/31/big/img_17609
2226
+ 2003/01/01/big/img_151
2227
+ 2002/08/26/big/img_372
2228
+ 2002/09/02/big/img_15994
2229
+ 2002/07/26/big/img_660
2230
+ 2002/09/02/big/img_15197
2231
+ 2002/07/29/big/img_258
2232
+ 2002/08/30/big/img_18525
2233
+ 2003/01/13/big/img_368
2234
+ 2002/07/29/big/img_1538
2235
+ 2002/07/21/big/img_787
2236
+ 2002/08/18/big/img_152
2237
+ 2002/08/06/big/img_2379
2238
+ 2003/01/17/big/img_864
2239
+ 2002/08/27/big/img_19998
2240
+ 2002/08/01/big/img_1634
2241
+ 2002/07/25/big/img_414
2242
+ 2002/08/22/big/img_627
2243
+ 2002/08/07/big/img_1669
2244
+ 2002/08/16/big/img_1052
2245
+ 2002/08/31/big/img_17796
2246
+ 2002/08/18/big/img_199
2247
+ 2002/09/02/big/img_15147
2248
+ 2002/08/09/big/img_460
2249
+ 2002/08/14/big/img_581
2250
+ 2002/08/30/big/img_18286
2251
+ 2002/07/26/big/img_337
2252
+ 2002/08/18/big/img_589
2253
+ 2003/01/14/big/img_866
2254
+ 2002/07/20/big/img_624
2255
+ 2002/08/01/big/img_1801
2256
+ 2002/07/24/big/img_683
2257
+ 2002/08/09/big/img_725
2258
+ 2003/01/14/big/img_34
2259
+ 2002/07/30/big/img_144
2260
+ 2002/07/30/big/img_706
2261
+ 2002/08/08/big/img_394
2262
+ 2002/08/19/big/img_619
2263
+ 2002/08/06/big/img_2703
2264
+ 2002/08/29/big/img_19034
2265
+ 2002/07/24/big/img_67
2266
+ 2002/08/27/big/img_19841
2267
+ 2002/08/19/big/img_427
2268
+ 2003/01/14/big/img_333
2269
+ 2002/09/01/big/img_16406
2270
+ 2002/07/19/big/img_882
2271
+ 2002/08/17/big/img_238
2272
+ 2003/01/14/big/img_739
2273
+ 2002/07/22/big/img_151
2274
+ 2002/08/21/big/img_743
2275
+ 2002/07/25/big/img_1048
2276
+ 2002/07/30/big/img_395
2277
+ 2003/01/13/big/img_584
2278
+ 2002/08/13/big/img_742
2279
+ 2002/08/13/big/img_1168
2280
+ 2003/01/14/big/img_147
2281
+ 2002/07/26/big/img_803
2282
+ 2002/08/05/big/img_3298
2283
+ 2002/08/07/big/img_1451
2284
+ 2002/08/16/big/img_424
2285
+ 2002/07/29/big/img_1069
2286
+ 2002/09/01/big/img_16735
2287
+ 2002/07/21/big/img_637
2288
+ 2003/01/14/big/img_585
2289
+ 2002/08/02/big/img_358
2290
+ 2003/01/13/big/img_358
2291
+ 2002/08/14/big/img_198
2292
+ 2002/08/17/big/img_935
2293
+ 2002/08/04/big/img_42
2294
+ 2002/08/30/big/img_18245
2295
+ 2002/07/25/big/img_158
2296
+ 2002/08/22/big/img_744
2297
+ 2002/08/06/big/img_2291
2298
+ 2002/08/05/big/img_3044
2299
+ 2002/07/30/big/img_272
2300
+ 2002/08/23/big/img_641
2301
+ 2002/07/24/big/img_797
2302
+ 2002/07/30/big/img_392
2303
+ 2003/01/14/big/img_447
2304
+ 2002/07/31/big/img_898
2305
+ 2002/08/06/big/img_2812
2306
+ 2002/08/13/big/img_564
2307
+ 2002/07/22/big/img_43
2308
+ 2002/07/26/big/img_634
2309
+ 2002/07/19/big/img_843
2310
+ 2002/08/26/big/img_58
2311
+ 2002/07/21/big/img_375
2312
+ 2002/08/25/big/img_729
2313
+ 2002/07/19/big/img_561
2314
+ 2003/01/15/big/img_884
2315
+ 2002/07/25/big/img_891
2316
+ 2002/08/09/big/img_558
2317
+ 2002/08/26/big/img_587
2318
+ 2002/08/13/big/img_1146
2319
+ 2002/09/02/big/img_15153
2320
+ 2002/07/26/big/img_316
2321
+ 2002/08/01/big/img_1940
2322
+ 2002/08/26/big/img_90
2323
+ 2003/01/13/big/img_347
2324
+ 2002/07/25/big/img_520
2325
+ 2002/08/29/big/img_18718
2326
+ 2002/08/28/big/img_19219
2327
+ 2002/08/13/big/img_375
2328
+ 2002/07/20/big/img_719
2329
+ 2002/08/31/big/img_17431
2330
+ 2002/07/28/big/img_192
2331
+ 2002/08/26/big/img_259
2332
+ 2002/08/18/big/img_484
2333
+ 2002/07/29/big/img_580
2334
+ 2002/07/26/big/img_84
2335
+ 2002/08/02/big/img_302
2336
+ 2002/08/31/big/img_17007
2337
+ 2003/01/15/big/img_543
2338
+ 2002/09/01/big/img_16488
2339
+ 2002/08/22/big/img_798
2340
+ 2002/07/30/big/img_383
2341
+ 2002/08/04/big/img_668
2342
+ 2002/08/13/big/img_156
2343
+ 2002/08/07/big/img_1353
2344
+ 2002/07/25/big/img_281
2345
+ 2003/01/14/big/img_587
2346
+ 2003/01/15/big/img_524
2347
+ 2002/08/19/big/img_726
2348
+ 2002/08/21/big/img_709
2349
+ 2002/08/26/big/img_465
2350
+ 2002/07/31/big/img_658
2351
+ 2002/08/28/big/img_19148
2352
+ 2002/07/23/big/img_423
2353
+ 2002/08/16/big/img_758
2354
+ 2002/08/22/big/img_523
2355
+ 2002/08/16/big/img_591
2356
+ 2002/08/23/big/img_845
2357
+ 2002/07/26/big/img_678
2358
+ 2002/08/09/big/img_806
2359
+ 2002/08/06/big/img_2369
2360
+ 2002/07/29/big/img_457
2361
+ 2002/07/19/big/img_278
2362
+ 2002/08/30/big/img_18107
2363
+ 2002/07/26/big/img_444
2364
+ 2002/08/20/big/img_278
2365
+ 2002/08/26/big/img_92
2366
+ 2002/08/26/big/img_257
2367
+ 2002/07/25/big/img_266
2368
+ 2002/08/05/big/img_3829
2369
+ 2002/07/26/big/img_757
2370
+ 2002/07/29/big/img_1536
2371
+ 2002/08/09/big/img_472
2372
+ 2003/01/17/big/img_480
2373
+ 2002/08/28/big/img_19355
2374
+ 2002/07/26/big/img_97
2375
+ 2002/08/06/big/img_2503
2376
+ 2002/07/19/big/img_254
2377
+ 2002/08/01/big/img_1470
2378
+ 2002/08/21/big/img_42
2379
+ 2002/08/20/big/img_217
2380
+ 2002/08/06/big/img_2459
2381
+ 2002/07/19/big/img_552
2382
+ 2002/08/13/big/img_717
2383
+ 2002/08/12/big/img_586
2384
+ 2002/08/20/big/img_411
2385
+ 2003/01/13/big/img_768
2386
+ 2002/08/07/big/img_1747
2387
+ 2002/08/15/big/img_385
2388
+ 2002/08/01/big/img_1648
2389
+ 2002/08/15/big/img_311
2390
+ 2002/08/21/big/img_95
2391
+ 2002/08/09/big/img_108
2392
+ 2002/08/21/big/img_398
2393
+ 2002/08/17/big/img_340
2394
+ 2002/08/14/big/img_474
2395
+ 2002/08/13/big/img_294
2396
+ 2002/08/24/big/img_840
2397
+ 2002/08/09/big/img_808
2398
+ 2002/08/23/big/img_491
2399
+ 2002/07/28/big/img_33
2400
+ 2003/01/13/big/img_664
2401
+ 2002/08/02/big/img_261
2402
+ 2002/08/09/big/img_591
2403
+ 2002/07/26/big/img_309
2404
+ 2003/01/14/big/img_372
2405
+ 2002/08/19/big/img_581
2406
+ 2002/08/19/big/img_168
2407
+ 2002/08/26/big/img_422
2408
+ 2002/07/24/big/img_106
2409
+ 2002/08/01/big/img_1936
2410
+ 2002/08/05/big/img_3764
2411
+ 2002/08/21/big/img_266
2412
+ 2002/08/31/big/img_17968
2413
+ 2002/08/01/big/img_1941
2414
+ 2002/08/15/big/img_550
2415
+ 2002/08/14/big/img_13
2416
+ 2002/07/30/big/img_171
2417
+ 2003/01/13/big/img_490
2418
+ 2002/07/25/big/img_427
2419
+ 2002/07/19/big/img_770
2420
+ 2002/08/12/big/img_759
2421
+ 2003/01/15/big/img_1360
2422
+ 2002/08/05/big/img_3692
2423
+ 2003/01/16/big/img_30
2424
+ 2002/07/25/big/img_1026
2425
+ 2002/07/22/big/img_288
2426
+ 2002/08/29/big/img_18801
2427
+ 2002/07/24/big/img_793
2428
+ 2002/08/13/big/img_178
2429
+ 2002/08/06/big/img_2322
2430
+ 2003/01/14/big/img_560
2431
+ 2002/08/18/big/img_408
2432
+ 2003/01/16/big/img_915
2433
+ 2003/01/16/big/img_679
2434
+ 2002/08/07/big/img_1552
2435
+ 2002/08/29/big/img_19050
2436
+ 2002/08/01/big/img_2172
2437
+ 2002/07/31/big/img_30
2438
+ 2002/07/30/big/img_1019
2439
+ 2002/07/30/big/img_587
2440
+ 2003/01/13/big/img_773
2441
+ 2002/07/30/big/img_410
2442
+ 2002/07/28/big/img_65
2443
+ 2002/08/05/big/img_3138
2444
+ 2002/07/23/big/img_541
2445
+ 2002/08/22/big/img_963
2446
+ 2002/07/27/big/img_657
2447
+ 2002/07/30/big/img_1051
2448
+ 2003/01/16/big/img_150
2449
+ 2002/07/31/big/img_519
2450
+ 2002/08/01/big/img_1961
2451
+ 2002/08/05/big/img_3752
2452
+ 2002/07/23/big/img_631
2453
+ 2003/01/14/big/img_237
2454
+ 2002/07/28/big/img_21
2455
+ 2002/07/22/big/img_813
2456
+ 2002/08/05/big/img_3563
2457
+ 2003/01/17/big/img_620
2458
+ 2002/07/19/big/img_523
2459
+ 2002/07/30/big/img_904
2460
+ 2002/08/29/big/img_18642
2461
+ 2002/08/11/big/img_492
2462
+ 2002/08/01/big/img_2130
2463
+ 2002/07/25/big/img_618
2464
+ 2002/08/17/big/img_305
2465
+ 2003/01/16/big/img_520
2466
+ 2002/07/26/big/img_495
2467
+ 2002/08/17/big/img_164
2468
+ 2002/08/03/big/img_440
2469
+ 2002/07/24/big/img_441
2470
+ 2002/08/06/big/img_2146
2471
+ 2002/08/11/big/img_558
2472
+ 2002/08/02/big/img_545
2473
+ 2002/08/31/big/img_18090
2474
+ 2003/01/01/big/img_136
2475
+ 2002/07/25/big/img_1099
2476
+ 2003/01/13/big/img_728
2477
+ 2003/01/16/big/img_197
2478
+ 2002/07/26/big/img_651
2479
+ 2002/08/11/big/img_676
2480
+ 2003/01/15/big/img_10
2481
+ 2002/08/21/big/img_250
2482
+ 2002/08/14/big/img_325
2483
+ 2002/08/04/big/img_390
2484
+ 2002/07/24/big/img_554
2485
+ 2003/01/16/big/img_333
2486
+ 2002/07/31/big/img_922
2487
+ 2002/09/02/big/img_15586
2488
+ 2003/01/16/big/img_184
2489
+ 2002/07/22/big/img_766
2490
+ 2002/07/21/big/img_608
2491
+ 2002/08/07/big/img_1578
2492
+ 2002/08/17/big/img_961
2493
+ 2002/07/27/big/img_324
2494
+ 2002/08/05/big/img_3765
2495
+ 2002/08/23/big/img_462
2496
+ 2003/01/16/big/img_382
2497
+ 2002/08/27/big/img_19838
2498
+ 2002/08/01/big/img_1505
2499
+ 2002/08/21/big/img_662
2500
+ 2002/08/14/big/img_605
2501
+ 2002/08/19/big/img_816
2502
+ 2002/07/29/big/img_136
2503
+ 2002/08/20/big/img_719
2504
+ 2002/08/06/big/img_2826
2505
+ 2002/08/10/big/img_630
2506
+ 2003/01/17/big/img_973
2507
+ 2002/08/14/big/img_116
2508
+ 2002/08/02/big/img_666
2509
+ 2002/08/21/big/img_710
2510
+ 2002/08/05/big/img_55
2511
+ 2002/07/31/big/img_229
2512
+ 2002/08/01/big/img_1549
2513
+ 2002/07/23/big/img_432
2514
+ 2002/07/21/big/img_430
2515
+ 2002/08/21/big/img_549
2516
+ 2002/08/08/big/img_985
2517
+ 2002/07/20/big/img_610
2518
+ 2002/07/23/big/img_978
2519
+ 2002/08/23/big/img_219
2520
+ 2002/07/25/big/img_175
2521
+ 2003/01/15/big/img_230
2522
+ 2002/08/23/big/img_385
2523
+ 2002/07/31/big/img_879
2524
+ 2002/08/12/big/img_495
2525
+ 2002/08/22/big/img_499
2526
+ 2002/08/30/big/img_18322
2527
+ 2002/08/15/big/img_795
2528
+ 2002/08/13/big/img_835
2529
+ 2003/01/17/big/img_930
2530
+ 2002/07/30/big/img_873
2531
+ 2002/08/11/big/img_257
2532
+ 2002/07/31/big/img_593
2533
+ 2002/08/21/big/img_916
2534
+ 2003/01/13/big/img_814
2535
+ 2002/07/25/big/img_722
2536
+ 2002/08/16/big/img_379
2537
+ 2002/07/31/big/img_497
2538
+ 2002/07/22/big/img_602
2539
+ 2002/08/21/big/img_642
2540
+ 2002/08/21/big/img_614
2541
+ 2002/08/23/big/img_482
2542
+ 2002/07/29/big/img_603
2543
+ 2002/08/13/big/img_705
2544
+ 2002/07/23/big/img_833
2545
+ 2003/01/14/big/img_511
2546
+ 2002/07/24/big/img_376
2547
+ 2002/08/17/big/img_1030
2548
+ 2002/08/05/big/img_3576
2549
+ 2002/08/16/big/img_540
2550
+ 2002/07/22/big/img_630
2551
+ 2002/08/10/big/img_180
2552
+ 2002/08/14/big/img_905
2553
+ 2002/08/29/big/img_18777
2554
+ 2002/08/22/big/img_693
2555
+ 2003/01/16/big/img_933
2556
+ 2002/08/20/big/img_555
2557
+ 2002/08/15/big/img_549
2558
+ 2003/01/14/big/img_830
2559
+ 2003/01/16/big/img_64
2560
+ 2002/08/27/big/img_19670
2561
+ 2002/08/22/big/img_729
2562
+ 2002/07/27/big/img_981
2563
+ 2002/08/09/big/img_458
2564
+ 2003/01/17/big/img_884
2565
+ 2002/07/25/big/img_639
2566
+ 2002/08/31/big/img_18008
2567
+ 2002/08/22/big/img_249
2568
+ 2002/08/17/big/img_971
2569
+ 2002/08/04/big/img_308
2570
+ 2002/07/28/big/img_362
2571
+ 2002/08/12/big/img_142
2572
+ 2002/08/26/big/img_61
2573
+ 2002/08/14/big/img_422
2574
+ 2002/07/19/big/img_607
2575
+ 2003/01/15/big/img_717
2576
+ 2002/08/01/big/img_1475
2577
+ 2002/08/29/big/img_19061
2578
+ 2003/01/01/big/img_346
2579
+ 2002/07/20/big/img_315
2580
+ 2003/01/15/big/img_756
2581
+ 2002/08/15/big/img_879
2582
+ 2002/08/08/big/img_615
2583
+ 2003/01/13/big/img_431
2584
+ 2002/08/05/big/img_3233
2585
+ 2002/08/24/big/img_526
2586
+ 2003/01/13/big/img_717
2587
+ 2002/09/01/big/img_16408
2588
+ 2002/07/22/big/img_217
2589
+ 2002/07/31/big/img_960
2590
+ 2002/08/21/big/img_610
2591
+ 2002/08/05/big/img_3753
2592
+ 2002/08/03/big/img_151
2593
+ 2002/08/21/big/img_267
2594
+ 2002/08/01/big/img_2175
2595
+ 2002/08/04/big/img_556
2596
+ 2002/08/21/big/img_527
2597
+ 2002/09/02/big/img_15800
2598
+ 2002/07/27/big/img_156
2599
+ 2002/07/20/big/img_590
2600
+ 2002/08/15/big/img_700
2601
+ 2002/08/08/big/img_444
2602
+ 2002/07/25/big/img_94
2603
+ 2002/07/24/big/img_778
2604
+ 2002/08/14/big/img_694
2605
+ 2002/07/20/big/img_666
2606
+ 2002/08/02/big/img_200
2607
+ 2002/08/02/big/img_578
2608
+ 2003/01/17/big/img_332
2609
+ 2002/09/01/big/img_16352
2610
+ 2002/08/27/big/img_19668
2611
+ 2002/07/23/big/img_823
2612
+ 2002/08/13/big/img_431
2613
+ 2003/01/16/big/img_463
2614
+ 2002/08/27/big/img_19711
2615
+ 2002/08/23/big/img_154
2616
+ 2002/07/31/big/img_360
2617
+ 2002/08/23/big/img_555
2618
+ 2002/08/10/big/img_561
2619
+ 2003/01/14/big/img_550
2620
+ 2002/08/07/big/img_1370
2621
+ 2002/07/30/big/img_1184
2622
+ 2002/08/01/big/img_1445
2623
+ 2002/08/23/big/img_22
2624
+ 2002/07/30/big/img_606
2625
+ 2003/01/17/big/img_271
2626
+ 2002/08/31/big/img_17316
2627
+ 2002/08/16/big/img_973
2628
+ 2002/07/26/big/img_77
2629
+ 2002/07/20/big/img_788
2630
+ 2002/08/06/big/img_2426
2631
+ 2002/08/07/big/img_1498
2632
+ 2002/08/16/big/img_358
2633
+ 2002/08/06/big/img_2851
2634
+ 2002/08/12/big/img_359
2635
+ 2002/08/01/big/img_1521
2636
+ 2002/08/02/big/img_709
2637
+ 2002/08/20/big/img_935
2638
+ 2002/08/12/big/img_188
2639
+ 2002/08/24/big/img_411
2640
+ 2002/08/22/big/img_680
2641
+ 2002/08/06/big/img_2480
2642
+ 2002/07/20/big/img_627
2643
+ 2002/07/30/big/img_214
2644
+ 2002/07/25/big/img_354
2645
+ 2002/08/02/big/img_636
2646
+ 2003/01/15/big/img_661
2647
+ 2002/08/07/big/img_1327
2648
+ 2002/08/01/big/img_2108
2649
+ 2002/08/31/big/img_17919
2650
+ 2002/08/29/big/img_18768
2651
+ 2002/08/05/big/img_3840
2652
+ 2002/07/26/big/img_242
2653
+ 2003/01/14/big/img_451
2654
+ 2002/08/20/big/img_923
2655
+ 2002/08/27/big/img_19908
2656
+ 2002/08/16/big/img_282
2657
+ 2002/08/19/big/img_440
2658
+ 2003/01/01/big/img_230
2659
+ 2002/08/08/big/img_212
2660
+ 2002/07/20/big/img_443
2661
+ 2002/08/25/big/img_635
2662
+ 2003/01/13/big/img_1169
2663
+ 2002/07/26/big/img_998
2664
+ 2002/08/15/big/img_995
2665
+ 2002/08/06/big/img_3002
2666
+ 2002/07/29/big/img_460
2667
+ 2003/01/14/big/img_925
2668
+ 2002/07/23/big/img_539
2669
+ 2002/08/16/big/img_694
2670
+ 2003/01/13/big/img_459
2671
+ 2002/07/23/big/img_249
2672
+ 2002/08/20/big/img_539
2673
+ 2002/08/04/big/img_186
2674
+ 2002/08/26/big/img_264
2675
+ 2002/07/22/big/img_704
2676
+ 2002/08/25/big/img_277
2677
+ 2002/08/22/big/img_988
2678
+ 2002/07/29/big/img_504
2679
+ 2002/08/05/big/img_3600
2680
+ 2002/08/30/big/img_18380
2681
+ 2003/01/14/big/img_937
2682
+ 2002/08/21/big/img_254
2683
+ 2002/08/10/big/img_130
2684
+ 2002/08/20/big/img_339
2685
+ 2003/01/14/big/img_428
2686
+ 2002/08/20/big/img_889
2687
+ 2002/08/31/big/img_17637
2688
+ 2002/07/26/big/img_644
2689
+ 2002/09/01/big/img_16776
2690
+ 2002/08/06/big/img_2239
2691
+ 2002/08/06/big/img_2646
2692
+ 2003/01/13/big/img_491
2693
+ 2002/08/10/big/img_579
2694
+ 2002/08/21/big/img_713
2695
+ 2002/08/22/big/img_482
2696
+ 2002/07/22/big/img_167
2697
+ 2002/07/24/big/img_539
2698
+ 2002/08/14/big/img_721
2699
+ 2002/07/25/big/img_389
2700
+ 2002/09/01/big/img_16591
2701
+ 2002/08/13/big/img_543
2702
+ 2003/01/14/big/img_432
2703
+ 2002/08/09/big/img_287
2704
+ 2002/07/26/big/img_126
2705
+ 2002/08/23/big/img_412
2706
+ 2002/08/15/big/img_1034
2707
+ 2002/08/28/big/img_19485
2708
+ 2002/07/31/big/img_236
2709
+ 2002/07/30/big/img_523
2710
+ 2002/07/19/big/img_141
2711
+ 2003/01/17/big/img_957
2712
+ 2002/08/04/big/img_81
2713
+ 2002/07/25/big/img_206
2714
+ 2002/08/15/big/img_716
2715
+ 2002/08/13/big/img_403
2716
+ 2002/08/15/big/img_685
2717
+ 2002/07/26/big/img_884
2718
+ 2002/07/19/big/img_499
2719
+ 2002/07/23/big/img_772
2720
+ 2002/07/27/big/img_752
2721
+ 2003/01/14/big/img_493
2722
+ 2002/08/25/big/img_664
2723
+ 2002/07/31/big/img_334
2724
+ 2002/08/26/big/img_678
2725
+ 2002/09/01/big/img_16541
2726
+ 2003/01/14/big/img_347
2727
+ 2002/07/23/big/img_187
2728
+ 2002/07/30/big/img_1163
2729
+ 2002/08/05/big/img_35
2730
+ 2002/08/22/big/img_944
2731
+ 2002/08/07/big/img_1239
2732
+ 2002/07/29/big/img_1215
2733
+ 2002/08/03/big/img_312
2734
+ 2002/08/05/big/img_3523
2735
+ 2002/07/29/big/img_218
2736
+ 2002/08/13/big/img_672
2737
+ 2002/08/16/big/img_205
2738
+ 2002/08/17/big/img_594
2739
+ 2002/07/29/big/img_1411
2740
+ 2002/07/30/big/img_942
2741
+ 2003/01/16/big/img_312
2742
+ 2002/08/08/big/img_312
2743
+ 2002/07/25/big/img_15
2744
+ 2002/08/09/big/img_839
2745
+ 2002/08/01/big/img_2069
2746
+ 2002/08/31/big/img_17512
2747
+ 2002/08/01/big/img_3
2748
+ 2002/07/31/big/img_320
2749
+ 2003/01/15/big/img_1265
2750
+ 2002/08/14/big/img_563
2751
+ 2002/07/31/big/img_167
2752
+ 2002/08/20/big/img_374
2753
+ 2002/08/13/big/img_406
2754
+ 2002/08/08/big/img_625
2755
+ 2002/08/02/big/img_314
2756
+ 2002/08/27/big/img_19964
2757
+ 2002/09/01/big/img_16670
2758
+ 2002/07/31/big/img_599
2759
+ 2002/08/29/big/img_18906
2760
+ 2002/07/24/big/img_373
2761
+ 2002/07/26/big/img_513
2762
+ 2002/09/02/big/img_15497
2763
+ 2002/08/19/big/img_117
2764
+ 2003/01/01/big/img_158
2765
+ 2002/08/24/big/img_178
2766
+ 2003/01/13/big/img_935
2767
+ 2002/08/13/big/img_609
2768
+ 2002/08/30/big/img_18341
2769
+ 2002/08/25/big/img_674
2770
+ 2003/01/13/big/img_209
2771
+ 2002/08/13/big/img_258
2772
+ 2002/08/05/big/img_3543
2773
+ 2002/08/07/big/img_1970
2774
+ 2002/08/06/big/img_3004
2775
+ 2003/01/17/big/img_487
2776
+ 2002/08/24/big/img_873
2777
+ 2002/08/29/big/img_18730
2778
+ 2002/08/09/big/img_375
2779
+ 2003/01/16/big/img_751
2780
+ 2002/08/02/big/img_603
2781
+ 2002/08/19/big/img_325
2782
+ 2002/09/01/big/img_16420
2783
+ 2002/08/05/big/img_3633
2784
+ 2002/08/21/big/img_516
2785
+ 2002/07/19/big/img_501
2786
+ 2002/07/26/big/img_688
2787
+ 2002/07/24/big/img_256
2788
+ 2002/07/25/big/img_438
2789
+ 2002/07/31/big/img_1017
2790
+ 2002/08/22/big/img_512
2791
+ 2002/07/21/big/img_543
2792
+ 2002/08/08/big/img_223
2793
+ 2002/08/19/big/img_189
2794
+ 2002/08/12/big/img_630
2795
+ 2002/07/30/big/img_958
2796
+ 2002/07/28/big/img_208
2797
+ 2002/08/31/big/img_17691
2798
+ 2002/07/22/big/img_542
2799
+ 2002/07/19/big/img_741
2800
+ 2002/07/19/big/img_158
2801
+ 2002/08/15/big/img_399
2802
+ 2002/08/01/big/img_2159
2803
+ 2002/08/14/big/img_455
2804
+ 2002/08/17/big/img_1011
2805
+ 2002/08/26/big/img_744
2806
+ 2002/08/12/big/img_624
2807
+ 2003/01/17/big/img_821
2808
+ 2002/08/16/big/img_980
2809
+ 2002/07/28/big/img_281
2810
+ 2002/07/25/big/img_171
2811
+ 2002/08/03/big/img_116
2812
+ 2002/07/22/big/img_467
2813
+ 2002/07/31/big/img_750
2814
+ 2002/07/26/big/img_435
2815
+ 2002/07/19/big/img_822
2816
+ 2002/08/13/big/img_626
2817
+ 2002/08/11/big/img_344
2818
+ 2002/08/02/big/img_473
2819
+ 2002/09/01/big/img_16817
2820
+ 2002/08/01/big/img_1275
2821
+ 2002/08/28/big/img_19270
2822
+ 2002/07/23/big/img_607
2823
+ 2002/08/09/big/img_316
2824
+ 2002/07/29/big/img_626
2825
+ 2002/07/24/big/img_824
2826
+ 2002/07/22/big/img_342
2827
+ 2002/08/08/big/img_794
2828
+ 2002/08/07/big/img_1209
2829
+ 2002/07/19/big/img_18
2830
+ 2002/08/25/big/img_634
2831
+ 2002/07/24/big/img_730
2832
+ 2003/01/17/big/img_356
2833
+ 2002/07/23/big/img_305
2834
+ 2002/07/30/big/img_453
2835
+ 2003/01/13/big/img_972
2836
+ 2002/08/06/big/img_2610
2837
+ 2002/08/29/big/img_18920
2838
+ 2002/07/31/big/img_123
2839
+ 2002/07/26/big/img_979
2840
+ 2002/08/24/big/img_635
2841
+ 2002/08/05/big/img_3704
2842
+ 2002/08/07/big/img_1358
2843
+ 2002/07/22/big/img_306
2844
+ 2002/08/13/big/img_619
2845
+ 2002/08/02/big/img_366
face_detect/data/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .wider_face import WiderFaceDetection, detection_collate
2
+ from .data_augment import *
3
+ from .config import *
face_detect/data/config.py ADDED
@@ -0,0 +1,42 @@
1
+ # config.py
2
+
3
+ cfg_mnet = {
4
+ 'name': 'mobilenet0.25',
5
+ 'min_sizes': [[16, 32], [64, 128], [256, 512]],
6
+ 'steps': [8, 16, 32],
7
+ 'variance': [0.1, 0.2],
8
+ 'clip': False,
9
+ 'loc_weight': 2.0,
10
+ 'gpu_train': True,
11
+ 'batch_size': 32,
12
+ 'ngpu': 1,
13
+ 'epoch': 250,
14
+ 'decay1': 190,
15
+ 'decay2': 220,
16
+ 'image_size': 640,
17
+ 'pretrain': False,
18
+ 'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
19
+ 'in_channel': 32,
20
+ 'out_channel': 64
21
+ }
22
+
23
+ cfg_re50 = {
24
+ 'name': 'Resnet50',
25
+ 'min_sizes': [[16, 32], [64, 128], [256, 512]],
26
+ 'steps': [8, 16, 32],
27
+ 'variance': [0.1, 0.2],
28
+ 'clip': False,
29
+ 'loc_weight': 2.0,
30
+ 'gpu_train': True,
31
+ 'batch_size': 24,
32
+ 'ngpu': 4,
33
+ 'epoch': 100,
34
+ 'decay1': 70,
35
+ 'decay2': 90,
36
+ 'image_size': 840,
37
+ 'pretrain': False,
38
+ 'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3},
39
+ 'in_channel': 256,
40
+ 'out_channel': 256
41
+ }
42
+
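Note: these two dicts drive both training and anchor generation. As a self-contained sketch (assuming only the min_sizes, steps, and image_size values copied from cfg_mnet above; the helper name total_anchors is invented for illustration), the number of anchors the detector will emit can be derived directly:

    from math import ceil

    cfg = {'min_sizes': [[16, 32], [64, 128], [256, 512]],
           'steps': [8, 16, 32], 'image_size': 640}  # trimmed copy of cfg_mnet

    def total_anchors(cfg, size):
        # one anchor per min_sizes[k] entry at every cell of the k-th feature map
        return sum(ceil(size / step) ** 2 * len(cfg['min_sizes'][k])
                   for k, step in enumerate(cfg['steps']))

    print(total_anchors(cfg, cfg['image_size']))  # 16800 for a 640x640 input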
face_detect/data/data_augment.py ADDED
@@ -0,0 +1,237 @@
1
+ import cv2
2
+ import numpy as np
3
+ import random
4
+ from utils.box_utils import matrix_iof
5
+
6
+
7
+ def _crop(image, boxes, labels, landm, img_dim):
8
+ height, width, _ = image.shape
9
+ pad_image_flag = True
10
+
11
+ for _ in range(250):
12
+ """
13
+ if random.uniform(0, 1) <= 0.2:
14
+ scale = 1.0
15
+ else:
16
+ scale = random.uniform(0.3, 1.0)
17
+ """
18
+ PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0]
19
+ scale = random.choice(PRE_SCALES)
20
+ short_side = min(width, height)
21
+ w = int(scale * short_side)
22
+ h = w
23
+
24
+ if width == w:
25
+ l = 0
26
+ else:
27
+ l = random.randrange(width - w)
28
+ if height == h:
29
+ t = 0
30
+ else:
31
+ t = random.randrange(height - h)
32
+ roi = np.array((l, t, l + w, t + h))
33
+
34
+ value = matrix_iof(boxes, roi[np.newaxis])
35
+ flag = (value >= 1)
36
+ if not flag.any():
37
+ continue
38
+
39
+ centers = (boxes[:, :2] + boxes[:, 2:]) / 2
40
+ mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
41
+ boxes_t = boxes[mask_a].copy()
42
+ labels_t = labels[mask_a].copy()
43
+ landms_t = landm[mask_a].copy()
44
+ landms_t = landms_t.reshape([-1, 5, 2])
45
+
46
+ if boxes_t.shape[0] == 0:
47
+ continue
48
+
49
+ image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
50
+
51
+ boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
52
+ boxes_t[:, :2] -= roi[:2]
53
+ boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
54
+ boxes_t[:, 2:] -= roi[:2]
55
+
56
+ # landm
57
+ landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]
58
+ landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))
59
+ landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])
60
+ landms_t = landms_t.reshape([-1, 10])
61
+
62
+
63
+ # keep only boxes whose shorter side is positive at the training image scale (the original 16-pixel minimum was relaxed to 0 here)
64
+ b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim
65
+ b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim
66
+ mask_b = np.minimum(b_w_t, b_h_t) > 0.0
67
+ boxes_t = boxes_t[mask_b]
68
+ labels_t = labels_t[mask_b]
69
+ landms_t = landms_t[mask_b]
70
+
71
+ if boxes_t.shape[0] == 0:
72
+ continue
73
+
74
+ pad_image_flag = False
75
+
76
+ return image_t, boxes_t, labels_t, landms_t, pad_image_flag
77
+ return image, boxes, labels, landm, pad_image_flag
78
+
79
+
80
+ def _distort(image):
81
+
82
+ def _convert(image, alpha=1, beta=0):
83
+ tmp = image.astype(float) * alpha + beta
84
+ tmp[tmp < 0] = 0
85
+ tmp[tmp > 255] = 255
86
+ image[:] = tmp
87
+
88
+ image = image.copy()
89
+
90
+ if random.randrange(2):
91
+
92
+ #brightness distortion
93
+ if random.randrange(2):
94
+ _convert(image, beta=random.uniform(-32, 32))
95
+
96
+ #contrast distortion
97
+ if random.randrange(2):
98
+ _convert(image, alpha=random.uniform(0.5, 1.5))
99
+
100
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
101
+
102
+ #saturation distortion
103
+ if random.randrange(2):
104
+ _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
105
+
106
+ #hue distortion
107
+ if random.randrange(2):
108
+ tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
109
+ tmp %= 180
110
+ image[:, :, 0] = tmp
111
+
112
+ image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
113
+
114
+ else:
115
+
116
+ #brightness distortion
117
+ if random.randrange(2):
118
+ _convert(image, beta=random.uniform(-32, 32))
119
+
120
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
121
+
122
+ #saturation distortion
123
+ if random.randrange(2):
124
+ _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
125
+
126
+ #hue distortion
127
+ if random.randrange(2):
128
+ tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
129
+ tmp %= 180
130
+ image[:, :, 0] = tmp
131
+
132
+ image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
133
+
134
+ #contrast distortion
135
+ if random.randrange(2):
136
+ _convert(image, alpha=random.uniform(0.5, 1.5))
137
+
138
+ return image
139
+
140
+
141
+ def _expand(image, boxes, fill, p):
142
+ if random.randrange(2):
143
+ return image, boxes
144
+
145
+ height, width, depth = image.shape
146
+
147
+ scale = random.uniform(1, p)
148
+ w = int(scale * width)
149
+ h = int(scale * height)
150
+
151
+ left = random.randint(0, w - width)
152
+ top = random.randint(0, h - height)
153
+
154
+ boxes_t = boxes.copy()
155
+ boxes_t[:, :2] += (left, top)
156
+ boxes_t[:, 2:] += (left, top)
157
+ expand_image = np.empty(
158
+ (h, w, depth),
159
+ dtype=image.dtype)
160
+ expand_image[:, :] = fill
161
+ expand_image[top:top + height, left:left + width] = image
162
+ image = expand_image
163
+
164
+ return image, boxes_t
165
+
166
+
167
+ def _mirror(image, boxes, landms):
168
+ _, width, _ = image.shape
169
+ if random.randrange(2):
170
+ image = image[:, ::-1]
171
+ boxes = boxes.copy()
172
+ boxes[:, 0::2] = width - boxes[:, 2::-2]
173
+
174
+ # landm
175
+ landms = landms.copy()
176
+ landms = landms.reshape([-1, 5, 2])
177
+ landms[:, :, 0] = width - landms[:, :, 0]
178
+ tmp = landms[:, 1, :].copy()
179
+ landms[:, 1, :] = landms[:, 0, :]
180
+ landms[:, 0, :] = tmp
181
+ tmp1 = landms[:, 4, :].copy()
182
+ landms[:, 4, :] = landms[:, 3, :]
183
+ landms[:, 3, :] = tmp1
184
+ landms = landms.reshape([-1, 10])
185
+
186
+ return image, boxes, landms
187
+
188
+
189
+ def _pad_to_square(image, rgb_mean, pad_image_flag):
190
+ if not pad_image_flag:
191
+ return image
192
+ height, width, _ = image.shape
193
+ long_side = max(width, height)
194
+ image_t = np.empty((long_side, long_side, 3), dtype=image.dtype)
195
+ image_t[:, :] = rgb_mean
196
+ image_t[0:0 + height, 0:0 + width] = image
197
+ return image_t
198
+
199
+
200
+ def _resize_subtract_mean(image, insize, rgb_mean):
201
+ interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
202
+ interp_method = interp_methods[random.randrange(5)]
203
+ image = cv2.resize(image, (insize, insize), interpolation=interp_method)
204
+ image = image.astype(np.float32)
205
+ image -= rgb_mean
206
+ return image.transpose(2, 0, 1)
207
+
208
+
209
+ class preproc(object):
210
+
211
+ def __init__(self, img_dim, rgb_means):
212
+ self.img_dim = img_dim
213
+ self.rgb_means = rgb_means
214
+
215
+ def __call__(self, image, targets):
216
+ assert targets.shape[0] > 0, "this image does not have gt"
217
+
218
+ boxes = targets[:, :4].copy()
219
+ labels = targets[:, -1].copy()
220
+ landm = targets[:, 4:-1].copy()
221
+
222
+ image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(image, boxes, labels, landm, self.img_dim)
223
+ image_t = _distort(image_t)
224
+ image_t = _pad_to_square(image_t, self.rgb_means, pad_image_flag)
225
+ image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t)
226
+ height, width, _ = image_t.shape
227
+ image_t = _resize_subtract_mean(image_t, self.img_dim, self.rgb_means)
228
+ boxes_t[:, 0::2] /= width
229
+ boxes_t[:, 1::2] /= height
230
+
231
+ landm_t[:, 0::2] /= width
232
+ landm_t[:, 1::2] /= height
233
+
234
+ labels_t = np.expand_dims(labels_t, 1)
235
+ targets_t = np.hstack((boxes_t, landm_t, labels_t))
236
+
237
+ return image_t, targets_t
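A minimal usage sketch for preproc, assuming this module and its utils.box_utils dependency are importable. The synthetic image and the 15-column target row (x1, y1, x2, y2, ten landmark coordinates, class label) are invented here purely for illustration; real rows come from WiderFaceDetection below:

    import numpy as np

    image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # fake BGR frame
    target = np.array([[100., 80., 220., 230.,                  # box corners
                        130., 120., 190., 120., 160., 160.,
                        140., 200., 185., 200.,                 # five (x, y) landmarks
                        1.]])                                   # class label

    p = preproc(img_dim=640, rgb_means=(104, 117, 123))
    image_t, targets_t = p(image, target)
    print(image_t.shape)    # (3, 640, 640): CHW float32 with the mean subtracted
    print(targets_t.shape)  # (N, 15): boxes and landmarks normalized to [0, 1]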
face_detect/data/wider_face.py ADDED
@@ -0,0 +1,101 @@
1
+ import torch
5
+ import torch.utils.data as data
6
+ import cv2
7
+ import numpy as np
8
+
9
+ class WiderFaceDetection(data.Dataset):
10
+ def __init__(self, txt_path, preproc=None):
11
+ self.preproc = preproc
12
+ self.imgs_path = []
13
+ self.words = []
14
+ f = open(txt_path,'r')
15
+ lines = f.readlines()
16
+ isFirst = True
17
+ labels = []
18
+ for line in lines:
19
+ line = line.rstrip()
20
+ if line.startswith('#'):
21
+ if isFirst:
22
+ isFirst = False
23
+ else:
24
+ labels_copy = labels.copy()
25
+ self.words.append(labels_copy)
26
+ labels.clear()
27
+ path = line[2:]
28
+ path = txt_path.replace('label.txt','images/') + path
29
+ self.imgs_path.append(path)
30
+ else:
31
+ line = line.split(' ')
32
+ label = [float(x) for x in line]
33
+ labels.append(label)
34
+
35
+ self.words.append(labels)
36
+
37
+ def __len__(self):
38
+ return len(self.imgs_path)
39
+
40
+ def __getitem__(self, index):
41
+ img = cv2.imread(self.imgs_path[index])
42
+ height, width, _ = img.shape
43
+
44
+ labels = self.words[index]
45
+ annotations = np.zeros((0, 15))
46
+ if len(labels) == 0:
47
+ return annotations
48
+ for idx, label in enumerate(labels):
49
+ annotation = np.zeros((1, 15))
50
+ # bbox
51
+ annotation[0, 0] = label[0] # x1
52
+ annotation[0, 1] = label[1] # y1
53
+ annotation[0, 2] = label[0] + label[2] # x2
54
+ annotation[0, 3] = label[1] + label[3] # y2
55
+
56
+ # landmarks
57
+ annotation[0, 4] = label[4] # l0_x
58
+ annotation[0, 5] = label[5] # l0_y
59
+ annotation[0, 6] = label[7] # l1_x
60
+ annotation[0, 7] = label[8] # l1_y
61
+ annotation[0, 8] = label[10] # l2_x
62
+ annotation[0, 9] = label[11] # l2_y
63
+ annotation[0, 10] = label[13] # l3_x
64
+ annotation[0, 11] = label[14] # l3_y
65
+ annotation[0, 12] = label[16] # l4_x
66
+ annotation[0, 13] = label[17] # l4_y
67
+ if (annotation[0, 4]<0):
68
+ annotation[0, 14] = -1
69
+ else:
70
+ annotation[0, 14] = 1
71
+
72
+ annotations = np.append(annotations, annotation, axis=0)
73
+ target = np.array(annotations)
74
+ if self.preproc is not None:
75
+ img, target = self.preproc(img, target)
76
+
77
+ return torch.from_numpy(img), target
78
+
79
+ def detection_collate(batch):
80
+ """Custom collate fn for dealing with batches of images that have a different
81
+ number of associated object annotations (bounding boxes).
82
+
83
+ Arguments:
84
+ batch: (tuple) A tuple of tensor images and lists of annotations
85
+
86
+ Return:
87
+ A tuple containing:
88
+ 1) (tensor) batch of images stacked on their 0 dim
89
+ 2) (list of tensors) annotations for a given image are stacked on 0 dim
90
+ """
91
+ targets = []
92
+ imgs = []
93
+ for _, sample in enumerate(batch):
94
+ for _, tup in enumerate(sample):
95
+ if torch.is_tensor(tup):
96
+ imgs.append(tup)
97
+ elif isinstance(tup, np.ndarray):
98
+ annos = torch.from_numpy(tup).float()
99
+ targets.append(annos)
100
+
101
+ return (torch.stack(imgs, 0), targets)
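How the dataset and collate function plug into a DataLoader, as a sketch. The label path below assumes the usual WIDER FACE layout this loader expects (a label.txt sitting next to an images/ directory; see the path replacement in __init__); adjust it to your checkout:

    import torch.utils.data as data

    dataset = WiderFaceDetection('./data/widerface/train/label.txt',
                                 preproc(640, (104, 117, 123)))
    loader = data.DataLoader(dataset, batch_size=32, shuffle=True,
                             num_workers=4, collate_fn=detection_collate)

    for images, targets in loader:
        # images: float tensor [32, 3, 640, 640]
        # targets: list of 32 tensors, one [num_faces, 15] tensor per image
        break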
face_detect/facemodels/__init__.py ADDED
File without changes
face_detect/facemodels/net.py ADDED
@@ -0,0 +1,137 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
8
+
9
+ def conv_bn(inp, oup, stride = 1, leaky = 0):
10
+ return nn.Sequential(
11
+ nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
12
+ nn.BatchNorm2d(oup),
13
+ nn.LeakyReLU(negative_slope=leaky, inplace=True)
14
+ )
15
+
16
+ def conv_bn_no_relu(inp, oup, stride):
17
+ return nn.Sequential(
18
+ nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
19
+ nn.BatchNorm2d(oup),
20
+ )
21
+
22
+ def conv_bn1X1(inp, oup, stride, leaky=0):
23
+ return nn.Sequential(
24
+ nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
25
+ nn.BatchNorm2d(oup),
26
+ nn.LeakyReLU(negative_slope=leaky, inplace=True)
27
+ )
28
+
29
+ def conv_dw(inp, oup, stride, leaky=0.1):
30
+ return nn.Sequential(
31
+ nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
32
+ nn.BatchNorm2d(inp),
33
+ nn.LeakyReLU(negative_slope= leaky,inplace=True),
34
+
35
+ nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
36
+ nn.BatchNorm2d(oup),
37
+ nn.LeakyReLU(negative_slope= leaky,inplace=True),
38
+ )
39
+
40
+ class SSH(nn.Module):
41
+ def __init__(self, in_channel, out_channel):
42
+ super(SSH, self).__init__()
43
+ assert out_channel % 4 == 0
44
+ leaky = 0
45
+ if (out_channel <= 64):
46
+ leaky = 0.1
47
+ self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1)
48
+
49
+ self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky = leaky)
50
+ self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
51
+
52
+ self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky = leaky)
53
+ self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1)
54
+
55
+ def forward(self, input):
56
+ conv3X3 = self.conv3X3(input)
57
+
58
+ conv5X5_1 = self.conv5X5_1(input)
59
+ conv5X5 = self.conv5X5_2(conv5X5_1)
60
+
61
+ conv7X7_2 = self.conv7X7_2(conv5X5_1)
62
+ conv7X7 = self.conv7x7_3(conv7X7_2)
63
+
64
+ out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
65
+ out = F.relu(out)
66
+ return out
67
+
68
+ class FPN(nn.Module):
69
+ def __init__(self,in_channels_list,out_channels):
70
+ super(FPN,self).__init__()
71
+ leaky = 0
72
+ if (out_channels <= 64):
73
+ leaky = 0.1
74
+ self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride = 1, leaky = leaky)
75
+ self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride = 1, leaky = leaky)
76
+ self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride = 1, leaky = leaky)
77
+
78
+ self.merge1 = conv_bn(out_channels, out_channels, leaky = leaky)
79
+ self.merge2 = conv_bn(out_channels, out_channels, leaky = leaky)
80
+
81
+ def forward(self, input):
82
+ # names = list(input.keys())
83
+ input = list(input.values())
84
+
85
+ output1 = self.output1(input[0])
86
+ output2 = self.output2(input[1])
87
+ output3 = self.output3(input[2])
88
+
89
+ up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest")
90
+ output2 = output2 + up3
91
+ output2 = self.merge2(output2)
92
+
93
+ up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest")
94
+ output1 = output1 + up2
95
+ output1 = self.merge1(output1)
96
+
97
+ out = [output1, output2, output3]
98
+ return out
99
+
100
+
101
+
102
+ class MobileNetV1(nn.Module):
103
+ def __init__(self):
104
+ super(MobileNetV1, self).__init__()
105
+ self.stage1 = nn.Sequential(
106
+ conv_bn(3, 8, 2, leaky = 0.1), # 3
107
+ conv_dw(8, 16, 1), # 7
108
+ conv_dw(16, 32, 2), # 11
109
+ conv_dw(32, 32, 1), # 19
110
+ conv_dw(32, 64, 2), # 27
111
+ conv_dw(64, 64, 1), # 43
112
+ )
113
+ self.stage2 = nn.Sequential(
114
+ conv_dw(64, 128, 2), # 43 + 16 = 59
115
+ conv_dw(128, 128, 1), # 59 + 32 = 91
116
+ conv_dw(128, 128, 1), # 91 + 32 = 123
117
+ conv_dw(128, 128, 1), # 123 + 32 = 155
118
+ conv_dw(128, 128, 1), # 155 + 32 = 187
119
+ conv_dw(128, 128, 1), # 187 + 32 = 219
120
+ )
121
+ self.stage3 = nn.Sequential(
122
+ conv_dw(128, 256, 2), # 219 + 32 = 251
123
+ conv_dw(256, 256, 1), # 251 + 64 = 315
124
+ )
125
+ self.avg = nn.AdaptiveAvgPool2d((1,1))
126
+ self.fc = nn.Linear(256, 1000)
127
+
128
+ def forward(self, x):
129
+ x = self.stage1(x)
130
+ x = self.stage2(x)
131
+ x = self.stage3(x)
132
+ x = self.avg(x)
133
+ # x = self.model(x)
134
+ x = x.view(-1, 256)
135
+ x = self.fc(x)
136
+ return x
137
+
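A quick shape check for MobileNetV1 (a sketch; the fully connected head presumably exists only for ImageNet pretraining, since RetinaFace taps the three stages instead of the classifier):

    import torch

    net = MobileNetV1().eval()
    x = torch.randn(1, 3, 640, 640)
    print(net(x).shape)    # torch.Size([1, 1000]), the classifier output

    f1 = net.stage1(x)     # [1, 64, 80, 80], stride 8
    f2 = net.stage2(f1)    # [1, 128, 40, 40], stride 16
    f3 = net.stage3(f2)    # [1, 256, 20, 20], stride 32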
face_detect/facemodels/retinaface.py ADDED
@@ -0,0 +1,127 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torchvision.models._utils as _utils
5
+ import torch.nn.functional as F
6
+ from collections import OrderedDict
7
+
8
+ from facemodels.net import MobileNetV1 as MobileNetV1
9
+ from facemodels.net import FPN as FPN
10
+ from facemodels.net import SSH as SSH
11
+
12
+
13
+
14
+ class ClassHead(nn.Module):
15
+ def __init__(self,inchannels=512,num_anchors=3):
16
+ super(ClassHead,self).__init__()
17
+ self.num_anchors = num_anchors
18
+ self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0)
19
+
20
+ def forward(self,x):
21
+ out = self.conv1x1(x)
22
+ out = out.permute(0,2,3,1).contiguous()
23
+
24
+ return out.view(out.shape[0], -1, 2)
25
+
26
+ class BboxHead(nn.Module):
27
+ def __init__(self,inchannels=512,num_anchors=3):
28
+ super(BboxHead,self).__init__()
29
+ self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0)
30
+
31
+ def forward(self,x):
32
+ out = self.conv1x1(x)
33
+ out = out.permute(0,2,3,1).contiguous()
34
+
35
+ return out.view(out.shape[0], -1, 4)
36
+
37
+ class LandmarkHead(nn.Module):
38
+ def __init__(self,inchannels=512,num_anchors=3):
39
+ super(LandmarkHead,self).__init__()
40
+ self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0)
41
+
42
+ def forward(self,x):
43
+ out = self.conv1x1(x)
44
+ out = out.permute(0,2,3,1).contiguous()
45
+
46
+ return out.view(out.shape[0], -1, 10)
47
+
48
+ class RetinaFace(nn.Module):
49
+ def __init__(self, cfg = None, phase = 'train'):
50
+ """
51
+ :param cfg: Network related settings.
52
+ :param phase: train or test.
53
+ """
54
+ super(RetinaFace,self).__init__()
55
+ self.phase = phase
56
+ backbone = None
57
+ if cfg['name'] == 'mobilenet0.25':
58
+ backbone = MobileNetV1()
59
+ if cfg['pretrain']:
60
+ checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu'))
61
+ new_state_dict = OrderedDict()
63
+ for k, v in checkpoint['state_dict'].items():
64
+ name = k[7:] # remove module.
65
+ new_state_dict[name] = v
66
+ # load params
67
+ backbone.load_state_dict(new_state_dict)
68
+ elif cfg['name'] == 'Resnet50':
69
+ import torchvision.models as models
70
+ backbone = models.resnet50(pretrained=cfg['pretrain'])
71
+
72
+ self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
73
+ in_channels_stage2 = cfg['in_channel']
74
+ in_channels_list = [
75
+ in_channels_stage2 * 2,
76
+ in_channels_stage2 * 4,
77
+ in_channels_stage2 * 8,
78
+ ]
79
+ out_channels = cfg['out_channel']
80
+ self.fpn = FPN(in_channels_list,out_channels)
81
+ self.ssh1 = SSH(out_channels, out_channels)
82
+ self.ssh2 = SSH(out_channels, out_channels)
83
+ self.ssh3 = SSH(out_channels, out_channels)
84
+
85
+ self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
86
+ self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
87
+ self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
88
+
89
+ def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2):
90
+ classhead = nn.ModuleList()
91
+ for i in range(fpn_num):
92
+ classhead.append(ClassHead(inchannels,anchor_num))
93
+ return classhead
94
+
95
+ def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2):
96
+ bboxhead = nn.ModuleList()
97
+ for i in range(fpn_num):
98
+ bboxhead.append(BboxHead(inchannels,anchor_num))
99
+ return bboxhead
100
+
101
+ def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2):
102
+ landmarkhead = nn.ModuleList()
103
+ for i in range(fpn_num):
104
+ landmarkhead.append(LandmarkHead(inchannels,anchor_num))
105
+ return landmarkhead
106
+
107
+ def forward(self,inputs):
108
+ out = self.body(inputs)
109
+
110
+ # FPN
111
+ fpn = self.fpn(out)
112
+
113
+ # SSH
114
+ feature1 = self.ssh1(fpn[0])
115
+ feature2 = self.ssh2(fpn[1])
116
+ feature3 = self.ssh3(fpn[2])
117
+ features = [feature1, feature2, feature3]
118
+
119
+ bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
120
+ classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1)
121
+ ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
122
+
123
+ if self.phase == 'train':
124
+ output = (bbox_regressions, classifications, ldm_regressions)
125
+ else:
126
+ output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
127
+ return output
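Building the detector in test mode, as a sketch (assumes cfg_mnet from config.py above, whose pretrain flag is False, so no backbone checkpoint is required):

    import torch

    net = RetinaFace(cfg=cfg_mnet, phase='test').eval()
    with torch.no_grad():
        loc, conf, landms = net(torch.randn(1, 3, 640, 640))
    print(loc.shape, conf.shape, landms.shape)
    # torch.Size([1, 16800, 4]) torch.Size([1, 16800, 2]) torch.Size([1, 16800, 10])

In the 'test' phase the class scores are already softmaxed, so conf[..., 1] can be thresholded directly as a face probability.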
face_detect/layers/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .functions import *
2
+ from .modules import *
face_detect/layers/functions/prior_box.py ADDED
@@ -0,0 +1,34 @@
1
+ import torch
2
+ from itertools import product
3
+ from math import ceil
5
+
6
+
7
+ class PriorBox(object):
8
+ def __init__(self, cfg, image_size=None, phase='train'):
9
+ super(PriorBox, self).__init__()
10
+ self.min_sizes = cfg['min_sizes']
11
+ self.steps = cfg['steps']
12
+ self.clip = cfg['clip']
13
+ self.image_size = image_size
14
+ self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
15
+ self.name = "s"
16
+
17
+ def forward(self):
18
+ anchors = []
19
+ for k, f in enumerate(self.feature_maps):
20
+ min_sizes = self.min_sizes[k]
21
+ for i, j in product(range(f[0]), range(f[1])):
22
+ for min_size in min_sizes:
23
+ s_kx = min_size / self.image_size[1]
24
+ s_ky = min_size / self.image_size[0]
25
+ dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
26
+ dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
27
+ for cy, cx in product(dense_cy, dense_cx):
28
+ anchors += [cx, cy, s_kx, s_ky]
29
+
30
+ # back to torch land
31
+ output = torch.Tensor(anchors).view(-1, 4)
32
+ if self.clip:
33
+ output.clamp_(max=1, min=0)
34
+ return output
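Priors are generated once per input resolution; a usage sketch with cfg_mnet:

    priorbox = PriorBox(cfg_mnet, image_size=(640, 640))
    priors = priorbox.forward()  # called directly; PriorBox is not an nn.Module
    print(priors.shape)          # torch.Size([16800, 4]) as (cx, cy, w, h) in [0, 1]
    print(priors[0])             # first anchor: cx = cy = 0.5 * 8 / 640, w = h = 16 / 640

The row count matches the 16800 regressions RetinaFace returns above, which is what lets decode() pair them row for row.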
face_detect/layers/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .multibox_loss import MultiBoxLoss
2
+
3
+ __all__ = ['MultiBoxLoss']
face_detect/layers/modules/multibox_loss.py ADDED
@@ -0,0 +1,125 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from utils.box_utils import match, log_sum_exp
6
+ from data import cfg_mnet
7
+ GPU = cfg_mnet['gpu_train']
8
+
9
+ class MultiBoxLoss(nn.Module):
10
+ """SSD Weighted Loss Function
11
+ Compute Targets:
12
+ 1) Produce Confidence Target Indices by matching ground truth boxes
13
+ with (default) 'priorboxes' that have jaccard index > threshold parameter
14
+ (default threshold: 0.5).
15
+ 2) Produce localization target by 'encoding' variance into offsets of ground
16
+ truth boxes and their matched 'priorboxes'.
17
+ 3) Hard negative mining to filter the excessive number of negative examples
18
+ that comes with using a large number of default bounding boxes.
19
+ (default negative:positive ratio 3:1)
20
+ Objective Loss:
21
+ L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
22
+ Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
23
+ weighted by α which is set to 1 by cross val.
24
+ Args:
25
+ c: class confidences,
26
+ l: predicted boxes,
27
+ g: ground truth boxes
28
+ N: number of matched default boxes
29
+ See: https://arxiv.org/pdf/1512.02325.pdf for more details.
30
+ """
31
+
32
+ def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
33
+ super(MultiBoxLoss, self).__init__()
34
+ self.num_classes = num_classes
35
+ self.threshold = overlap_thresh
36
+ self.background_label = bkg_label
37
+ self.encode_target = encode_target
38
+ self.use_prior_for_matching = prior_for_matching
39
+ self.do_neg_mining = neg_mining
40
+ self.negpos_ratio = neg_pos
41
+ self.neg_overlap = neg_overlap
42
+ self.variance = [0.1, 0.2]
43
+
44
+ def forward(self, predictions, priors, targets):
45
+ """Multibox Loss
46
+ Args:
47
+ predictions (tuple): A tuple containing loc preds, conf preds,
48
+ and prior boxes from SSD net.
49
+ conf shape: torch.size(batch_size,num_priors,num_classes)
50
+ loc shape: torch.size(batch_size,num_priors,4)
51
+ priors shape: torch.size(num_priors,4)
52
+
53
+ ground_truth (tensor): Ground truth boxes and labels for a batch,
54
+ shape: [batch_size,num_objs,5] (last idx is the label).
55
+ """
56
+
57
+ loc_data, conf_data, landm_data = predictions
58
+ priors = priors
59
+ num = loc_data.size(0)
60
+ num_priors = (priors.size(0))
61
+
62
+ # match priors (default boxes) and ground truth boxes
63
+ loc_t = torch.Tensor(num, num_priors, 4)
64
+ landm_t = torch.Tensor(num, num_priors, 10)
65
+ conf_t = torch.LongTensor(num, num_priors)
66
+ for idx in range(num):
67
+ truths = targets[idx][:, :4].data
68
+ labels = targets[idx][:, -1].data
69
+ landms = targets[idx][:, 4:14].data
70
+ defaults = priors.data
71
+ match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx)
72
+ if GPU:
73
+ loc_t = loc_t.cuda()
74
+ conf_t = conf_t.cuda()
75
+ landm_t = landm_t.cuda()
76
+
77
+ zeros = torch.tensor(0).cuda()
78
+ # landm Loss (Smooth L1)
79
+ # Shape: [batch,num_priors,10]
80
+ pos1 = conf_t > zeros
81
+ num_pos_landm = pos1.long().sum(1, keepdim=True)
82
+ N1 = max(num_pos_landm.data.sum().float(), 1)
83
+ pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data)
84
+ landm_p = landm_data[pos_idx1].view(-1, 10)
85
+ landm_t = landm_t[pos_idx1].view(-1, 10)
86
+ loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum')
87
+
88
+
89
+ pos = conf_t != zeros
90
+ conf_t[pos] = 1
91
+
92
+ # Localization Loss (Smooth L1)
93
+ # Shape: [batch,num_priors,4]
94
+ pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
95
+ loc_p = loc_data[pos_idx].view(-1, 4)
96
+ loc_t = loc_t[pos_idx].view(-1, 4)
97
+ loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
98
+
99
+ # Compute max conf across batch for hard negative mining
100
+ batch_conf = conf_data.view(-1, self.num_classes)
101
+ loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
102
+
103
+ # Hard Negative Mining
104
+ loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now
105
+ loss_c = loss_c.view(num, -1)
106
+ _, loss_idx = loss_c.sort(1, descending=True)
107
+ _, idx_rank = loss_idx.sort(1)
108
+ num_pos = pos.long().sum(1, keepdim=True)
109
+ num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
110
+ neg = idx_rank < num_neg.expand_as(idx_rank)
111
+
112
+ # Confidence Loss Including Positive and Negative Examples
113
+ pos_idx = pos.unsqueeze(2).expand_as(conf_data)
114
+ neg_idx = neg.unsqueeze(2).expand_as(conf_data)
115
+ conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes)
116
+ targets_weighted = conf_t[(pos+neg).gt(0)]
117
+ loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
118
+
119
+ # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
120
+ N = max(num_pos.data.sum().float(), 1)
121
+ loss_l /= N
122
+ loss_c /= N
123
+ loss_landm /= N1
124
+
125
+ return loss_l, loss_c, loss_landm
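Instantiating the loss, as a sketch. The hyperparameter values below are the conventional RetinaFace training settings, not something this commit ships (it includes no train.py), so treat them as assumptions:

    criterion = MultiBoxLoss(num_classes=2, overlap_thresh=0.35,
                             prior_for_matching=True, bkg_label=0,
                             neg_mining=True, neg_pos=7,
                             neg_overlap=0.35, encode_target=False)

    # loss_l, loss_c, loss_landm = criterion(net(images), priors, targets)
    # total = cfg['loc_weight'] * loss_l + loss_c + loss_landm

Note that forward() calls .cuda() unconditionally for its zeros tensor, so this loss assumes a CUDA-enabled build.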
face_detect/retinaface_detection.py ADDED
@@ -0,0 +1,192 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os
+ import torch
+ import torch.backends.cudnn as cudnn
+ import numpy as np
+ from data import cfg_re50
+ from layers.functions.prior_box import PriorBox
+ from utils.nms.py_cpu_nms import py_cpu_nms
+ import cv2
+ from facemodels.retinaface import RetinaFace
+ from utils.box_utils import decode, decode_landm
+ import time
+ import torch.nn.functional as F
+
+
+ class RetinaFaceDetection(object):
+     def __init__(self, base_dir, device='cuda', network='RetinaFace-R50'):
+         torch.set_grad_enabled(False)
+         cudnn.benchmark = True
+         self.pretrained_path = os.path.join(base_dir, 'weights', network+'.pth')
+         self.device = device  #torch.cuda.current_device()
+         self.cfg = cfg_re50
+         self.net = RetinaFace(cfg=self.cfg, phase='test')
+         self.load_model()
+         self.net = self.net.to(device)
+
+         self.mean = torch.tensor([[[[104]], [[117]], [[123]]]]).to(device)
+
+     def check_keys(self, pretrained_state_dict):
+         ckpt_keys = set(pretrained_state_dict.keys())
+         model_keys = set(self.net.state_dict().keys())
+         used_pretrained_keys = model_keys & ckpt_keys
+         unused_pretrained_keys = ckpt_keys - model_keys
+         missing_keys = model_keys - ckpt_keys
+         assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
+         return True
+
+     def remove_prefix(self, state_dict, prefix):
+         ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
+         f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
+         return {f(key): value for key, value in state_dict.items()}
+
+     def load_model(self, load_to_cpu=False):
+         #if load_to_cpu:
+         #    pretrained_dict = torch.load(self.pretrained_path, map_location=lambda storage, loc: storage)
+         #else:
+         #    pretrained_dict = torch.load(self.pretrained_path, map_location=lambda storage, loc: storage.cuda())
+         pretrained_dict = torch.load(self.pretrained_path, map_location=torch.device('cpu'))
+         if "state_dict" in pretrained_dict.keys():
+             pretrained_dict = self.remove_prefix(pretrained_dict['state_dict'], 'module.')
+         else:
+             pretrained_dict = self.remove_prefix(pretrained_dict, 'module.')
+         self.check_keys(pretrained_dict)
+         self.net.load_state_dict(pretrained_dict, strict=False)
+         self.net.eval()
+
+     def detect(self, img_raw, resize=1, confidence_threshold=0.9, nms_threshold=0.4, top_k=5000, keep_top_k=750, save_image=False):
+         img = np.float32(img_raw)
+
+         im_height, im_width = img.shape[:2]
+         ss = 1.0
+         # tricky: downscale very large inputs to keep detection fast
+         if max(im_height, im_width) > 1500:
+             ss = 1000.0/max(im_height, im_width)
+             img = cv2.resize(img, (0, 0), fx=ss, fy=ss)
+             im_height, im_width = img.shape[:2]
+
+         scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
+         img -= (104, 117, 123)
+         img = img.transpose(2, 0, 1)
+         img = torch.from_numpy(img).unsqueeze(0)
+         img = img.to(self.device)
+         scale = scale.to(self.device)
+
+         loc, conf, landms = self.net(img)  # forward pass
+
+         priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
+         priors = priorbox.forward()
+         priors = priors.to(self.device)
+         prior_data = priors.data
+         boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
+         boxes = boxes * scale / resize
+         boxes = boxes.cpu().numpy()
+         scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+         landms = decode_landm(landms.data.squeeze(0), prior_data, self.cfg['variance'])
+         scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2]])
+         scale1 = scale1.to(self.device)
+         landms = landms * scale1 / resize
+         landms = landms.cpu().numpy()
+
+         # ignore low scores
+         inds = np.where(scores > confidence_threshold)[0]
+         boxes = boxes[inds]
+         landms = landms[inds]
+         scores = scores[inds]
+
+         # keep top-K before NMS
+         order = scores.argsort()[::-1][:top_k]
+         boxes = boxes[order]
+         landms = landms[order]
+         scores = scores[order]
+
+         # do NMS
+         dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+         keep = py_cpu_nms(dets, nms_threshold)
+         # keep = nms(dets, nms_threshold, force_cpu=args.cpu)
+         dets = dets[keep, :]
+         landms = landms[keep]
+
+         # keep top-K after NMS
+         dets = dets[:keep_top_k, :]
+         landms = landms[:keep_top_k, :]
+
+         # sort faces (disabled)
+         '''
+         fscores = [det[4] for det in dets]
+         sorted_idx = sorted(range(len(fscores)), key=lambda k:fscores[k], reverse=False) # sort index
+         tmp = [landms[idx] for idx in sorted_idx]
+         landms = np.asarray(tmp)
+         '''
+
+         landms = landms.reshape((-1, 5, 2))
+         landms = landms.transpose((0, 2, 1))
+         landms = landms.reshape(-1, 10)
+         return dets/ss, landms/ss
+
+     def detect_tensor(self, img, resize=1, confidence_threshold=0.9, nms_threshold=0.4, top_k=5000, keep_top_k=750, save_image=False):
+         im_height, im_width = img.shape[-2:]
+         ss = 1000/max(im_height, im_width)
+         img = F.interpolate(img, scale_factor=ss)
+         im_height, im_width = img.shape[-2:]
+         scale = torch.Tensor([im_width, im_height, im_width, im_height]).to(self.device)
+         img -= self.mean
+
+         loc, conf, landms = self.net(img)  # forward pass
+
+         priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
+         priors = priorbox.forward()
+         priors = priors.to(self.device)
+         prior_data = priors.data
+         boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
+         boxes = boxes * scale / resize
+         boxes = boxes.cpu().numpy()
+         scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+         landms = decode_landm(landms.data.squeeze(0), prior_data, self.cfg['variance'])
+         scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2], img.shape[3], img.shape[2],
+                                img.shape[3], img.shape[2]])
+         scale1 = scale1.to(self.device)
+         landms = landms * scale1 / resize
+         landms = landms.cpu().numpy()
+
+         # ignore low scores
+         inds = np.where(scores > confidence_threshold)[0]
+         boxes = boxes[inds]
+         landms = landms[inds]
+         scores = scores[inds]
+
+         # keep top-K before NMS
+         order = scores.argsort()[::-1][:top_k]
+         boxes = boxes[order]
+         landms = landms[order]
+         scores = scores[order]
+
+         # do NMS
+         dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+         keep = py_cpu_nms(dets, nms_threshold)
+         # keep = nms(dets, nms_threshold, force_cpu=args.cpu)
+         dets = dets[keep, :]
+         landms = landms[keep]
+
+         # keep top-K after NMS
+         dets = dets[:keep_top_k, :]
+         landms = landms[:keep_top_k, :]
+
+         # sort faces (disabled)
+         '''
+         fscores = [det[4] for det in dets]
+         sorted_idx = sorted(range(len(fscores)), key=lambda k:fscores[k], reverse=False) # sort index
+         tmp = [landms[idx] for idx in sorted_idx]
+         landms = np.asarray(tmp)
+         '''
+
+         landms = landms.reshape((-1, 5, 2))
+         landms = landms.transpose((0, 2, 1))
+         landms = landms.reshape(-1, 10)
+         return dets/ss, landms/ss
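For reference, a minimal usage sketch of this class; the weights path and test image below are assumptions, not part of this commit:

    import cv2
    import __init_paths  # puts face_detect on sys.path so the imports above resolve
    from retinaface_detection import RetinaFaceDetection

    detector = RetinaFaceDetection('./', device='cpu')  # expects ./weights/RetinaFace-R50.pth
    img = cv2.imread('examples/imgs/test.jpg')          # BGR uint8, HxWx3
    dets, landms = detector.detect(img)                 # dets: Nx5 (x1, y1, x2, y2, score); landms: Nx10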
face_detect/utils/__init__.py ADDED
File without changes
face_detect/utils/box_utils.py ADDED
@@ -0,0 +1,330 @@
+ import torch
+ import numpy as np
+
+
+ def point_form(boxes):
+     """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
+     representation for comparison to point form ground truth data.
+     Args:
+         boxes: (tensor) center-size default boxes from priorbox layers.
+     Return:
+         boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
+     """
+     return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,      # xmin, ymin
+                       boxes[:, :2] + boxes[:, 2:]/2), 1)  # xmax, ymax
+
+
+ def center_size(boxes):
+     """ Convert prior_boxes to (cx, cy, w, h)
+     representation for comparison to center-size form ground truth data.
+     Args:
+         boxes: (tensor) point_form boxes
+     Return:
+         boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
+     """
+     return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
+                       boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
+
+
+ def intersect(box_a, box_b):
+     """ We resize both tensors to [A,B,2] without new malloc:
+     [A,2] -> [A,1,2] -> [A,B,2]
+     [B,2] -> [1,B,2] -> [A,B,2]
+     Then we compute the area of intersect between box_a and box_b.
+     Args:
+       box_a: (tensor) bounding boxes, Shape: [A,4].
+       box_b: (tensor) bounding boxes, Shape: [B,4].
+     Return:
+       (tensor) intersection area, Shape: [A,B].
+     """
+     A = box_a.size(0)
+     B = box_b.size(0)
+     max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
+                        box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
+     min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
+                        box_b[:, :2].unsqueeze(0).expand(A, B, 2))
+     inter = torch.clamp((max_xy - min_xy), min=0)
+     return inter[:, :, 0] * inter[:, :, 1]
+
+
+ def jaccard(box_a, box_b):
+     """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
+     is simply the intersection over union of two boxes. Here we operate on
+     ground truth boxes and default boxes.
+     E.g.:
+         A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
+     Args:
+         box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
+         box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
+     Return:
+         jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
+     """
+     inter = intersect(box_a, box_b)
+     area_a = ((box_a[:, 2]-box_a[:, 0]) *
+               (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
+     area_b = ((box_b[:, 2]-box_b[:, 0]) *
+               (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
+     union = area_a + area_b - inter
+     return inter / union  # [A,B]
+
+
+ def matrix_iou(a, b):
+     """
+     return iou of a and b, numpy version for data augmentation
+     """
+     lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+     rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+     area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+     area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+     area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
+     return area_i / (area_a[:, np.newaxis] + area_b - area_i)
+
+
+ def matrix_iof(a, b):
+     """
+     return iof of a and b, numpy version for data augmentation
+     """
+     lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
+     rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
+
+     area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
+     area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
+     return area_i / np.maximum(area_a[:, np.newaxis], 1)
+
+
+ def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
+     """Match each prior box with the ground truth box of the highest jaccard
+     overlap, encode the bounding boxes, then return the matched indices
+     corresponding to both confidence and location preds.
+     Args:
+         threshold: (float) The overlap threshold used when matching boxes.
+         truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
+         priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
+         variances: (tensor) Variances corresponding to each prior coord,
+             Shape: [num_priors, 4].
+         labels: (tensor) All the class labels for the image, Shape: [num_obj].
+         landms: (tensor) Ground truth landms, Shape [num_obj, 10].
+         loc_t: (tensor) Tensor to be filled w/ encoded location targets.
+         conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
+         landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
+         idx: (int) current batch index
+     Return:
+         The matched indices corresponding to 1)location 2)confidence 3)landm preds.
+     """
+     # jaccard index
+     overlaps = jaccard(
+         truths,
+         point_form(priors)
+     )
+     # (Bipartite Matching)
+     # [1,num_objects] best prior for each ground truth
+     best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
+
+     # ignore hard gt
+     valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
+     best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
+     if best_prior_idx_filter.shape[0] <= 0:
+         loc_t[idx] = 0
+         conf_t[idx] = 0
+         return
+
+     # [1,num_priors] best ground truth for each prior
+     best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
+     best_truth_idx.squeeze_(0)
+     best_truth_overlap.squeeze_(0)
+     best_prior_idx.squeeze_(1)
+     best_prior_idx_filter.squeeze_(1)
+     best_prior_overlap.squeeze_(1)
+     best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
+     # TODO refactor: index best_prior_idx with long tensor
+     # ensure every gt matches with its prior of max overlap
+     for j in range(best_prior_idx.size(0)):   # decide which gt box each of these anchors predicts
+         best_truth_idx[best_prior_idx[j]] = j
+     matches = truths[best_truth_idx]          # Shape: [num_priors,4]; the gt bbox matched to each anchor
+     conf = labels[best_truth_idx]             # Shape: [num_priors]; the gt label matched to each anchor
+     conf[best_truth_overlap < threshold] = 0  # label as background; anchors below the overlap threshold become negatives
+     loc = encode(matches, priors, variances)
+
+     matches_landm = landms[best_truth_idx]
+     landm = encode_landm(matches_landm, priors, variances)
+     loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
+     conf_t[idx] = conf  # [num_priors] top class label for each prior
+     landm_t[idx] = landm
+
+
+ def encode(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth boxes
+     we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 4].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded boxes (tensor), Shape: [num_priors, 4]
+     """
+
+     # dist b/t match center and prior's center
+     g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, 2:])
+     # match wh / prior wh
+     g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
+     g_wh = torch.log(g_wh) / variances[1]
+     # return target for smooth_l1_loss
+     return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
+
+ def encode_landm(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth
+     landmarks we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth landmarks for each prior
+             Shape: [num_priors, 10].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded landm (tensor), Shape: [num_priors, 10]
+     """
+
+     # dist b/t match center and prior's center
+     matched = torch.reshape(matched, (matched.size(0), 5, 2))
+     priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
+     priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
+     g_cxcy = matched[:, :, :2] - priors[:, :, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, :, 2:])
+     # g_cxcy /= priors[:, :, 2:]
+     g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
+     # return target for smooth_l1_loss
+     return g_cxcy
+
+
+ # Adapted from https://github.com/Hakuyume/chainer-ssd
+ def decode(loc, priors, variances):
+     """Decode locations from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         loc (tensor): location predictions for loc layers,
+             Shape: [num_priors,4]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded bounding box predictions
+     """
+
+     boxes = torch.cat((
+         priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
+         priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
+     boxes[:, :2] -= boxes[:, 2:] / 2
+     boxes[:, 2:] += boxes[:, :2]
+     return boxes
+
+ def decode_landm(pre, priors, variances):
+     """Decode landm from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         pre (tensor): landm predictions for loc layers,
+             Shape: [num_priors,10]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded landm predictions
+     """
+     landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
+                         priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
+                         ), dim=1)
+     return landms
+
+
+ def log_sum_exp(x):
+     """Utility function for computing log_sum_exp in a numerically stable way.
+     This is used to compute the unaveraged confidence loss across
+     all examples in a batch.
+     Args:
+         x (Variable(tensor)): conf_preds from conf layers
+     """
+     x_max = x.data.max()
+     return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
+
+
+ # Original author: Francisco Massa:
+ # https://github.com/fmassa/object-detection.torch
+ # Ported to PyTorch by Max deGroot (02/01/2017)
+ def nms(boxes, scores, overlap=0.5, top_k=200):
+     """Apply non-maximum suppression at test time to avoid detecting too many
+     overlapping bounding boxes for a given object.
+     Args:
+         boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
+         scores: (tensor) The class predscores for the img, Shape: [num_priors].
+         overlap: (float) The overlap thresh for suppressing unnecessary boxes.
+         top_k: (int) The maximum number of box preds to consider.
+     Return:
+         The indices of the kept boxes with respect to num_priors.
+     """
+
+     keep = torch.Tensor(scores.size(0)).fill_(0).long()
+     if boxes.numel() == 0:
+         return keep
+     x1 = boxes[:, 0]
+     y1 = boxes[:, 1]
+     x2 = boxes[:, 2]
+     y2 = boxes[:, 3]
+     area = torch.mul(x2 - x1, y2 - y1)
+     v, idx = scores.sort(0)  # sort in ascending order
+     # I = I[v >= 0.01]
+     idx = idx[-top_k:]  # indices of the top-k largest vals
+     xx1 = boxes.new()
+     yy1 = boxes.new()
+     xx2 = boxes.new()
+     yy2 = boxes.new()
+     w = boxes.new()
+     h = boxes.new()
+
+     # keep = torch.Tensor()
+     count = 0
+     while idx.numel() > 0:
+         i = idx[-1]  # index of current largest val
+         # keep.append(i)
+         keep[count] = i
+         count += 1
+         if idx.size(0) == 1:
+             break
+         idx = idx[:-1]  # remove kept element from view
+         # load bboxes of next highest vals
+         torch.index_select(x1, 0, idx, out=xx1)
+         torch.index_select(y1, 0, idx, out=yy1)
+         torch.index_select(x2, 0, idx, out=xx2)
+         torch.index_select(y2, 0, idx, out=yy2)
+         # store element-wise max with next highest score
+         xx1 = torch.clamp(xx1, min=x1[i])
+         yy1 = torch.clamp(yy1, min=y1[i])
+         xx2 = torch.clamp(xx2, max=x2[i])
+         yy2 = torch.clamp(yy2, max=y2[i])
+         w.resize_as_(xx2)
+         h.resize_as_(yy2)
+         w = xx2 - xx1
+         h = yy2 - yy1
+         # check sizes of xx1 and xx2.. after each iteration
+         w = torch.clamp(w, min=0.0)
+         h = torch.clamp(h, min=0.0)
+         inter = w*h
+         # IoU = i / (area(a) + area(b) - i)
+         rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
+         union = (rem_areas - inter) + area[i]
+         IoU = inter/union  # store result in iou
+         # keep only elements with an IoU <= overlap
+         idx = idx[IoU.le(overlap)]
+     return keep, count
+
+
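`encode` and `decode` are exact inverses, which makes a quick sanity check possible on toy data (the values below are illustrative only, not part of this commit):

    import torch
    priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])  # one prior in (cx, cy, w, h) form
    gt = torch.tensor([[0.45, 0.45, 0.65, 0.70]])  # matched gt in (xmin, ymin, xmax, ymax) form
    variances = [0.1, 0.2]
    offsets = encode(gt, priors, variances)
    assert torch.allclose(decode(offsets, priors, variances), gt, atol=1e-6)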
face_detect/utils/nms/__init__.py ADDED
File without changes
face_detect/utils/nms/py_cpu_nms.py ADDED
@@ -0,0 +1,38 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import numpy as np
+
+ def py_cpu_nms(dets, thresh):
+     """Pure Python NMS baseline."""
+     x1 = dets[:, 0]
+     y1 = dets[:, 1]
+     x2 = dets[:, 2]
+     y2 = dets[:, 3]
+     scores = dets[:, 4]
+
+     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     order = scores.argsort()[::-1]
+
+     keep = []
+     while order.size > 0:
+         i = order[0]
+         keep.append(i)
+         xx1 = np.maximum(x1[i], x1[order[1:]])
+         yy1 = np.maximum(y1[i], y1[order[1:]])
+         xx2 = np.minimum(x2[i], x2[order[1:]])
+         yy2 = np.minimum(y2[i], y2[order[1:]])
+
+         w = np.maximum(0.0, xx2 - xx1 + 1)
+         h = np.maximum(0.0, yy2 - yy1 + 1)
+         inter = w * h
+         ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+         inds = np.where(ovr <= thresh)[0]
+         order = order[inds + 1]
+
+     return keep
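A quick check of the routine on two heavily overlapping boxes plus one distant box (toy values):

    import numpy as np
    dets = np.array([[10, 10, 50, 50, 0.9],
                     [12, 12, 52, 52, 0.8],       # near-duplicate of the first box
                     [100, 100, 140, 140, 0.7]], dtype=np.float32)
    keep = py_cpu_nms(dets, thresh=0.4)           # [0, 2]: the lower-scored duplicate is dropped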
face_detect/utils/timer.py ADDED
@@ -0,0 +1,40 @@
+ # --------------------------------------------------------
+ # Fast R-CNN
+ # Copyright (c) 2015 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Ross Girshick
+ # --------------------------------------------------------
+
+ import time
+
+
+ class Timer(object):
+     """A simple timer."""
+     def __init__(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
+
+     def tic(self):
+         # using time.time instead of time.clock because time.clock
+         # does not normalize for multithreading
+         self.start_time = time.time()
+
+     def toc(self, average=True):
+         self.diff = time.time() - self.start_time
+         self.total_time += self.diff
+         self.calls += 1
+         self.average_time = self.total_time / self.calls
+         if average:
+             return self.average_time
+         else:
+             return self.diff
+
+     def clear(self):
+         self.total_time = 0.
+         self.calls = 0
+         self.start_time = 0.
+         self.diff = 0.
+         self.average_time = 0.
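Typical use wraps the timed call between `tic` and `toc` (the function name below is a placeholder, not part of this commit):

    timer = Timer()
    for _ in range(10):
        timer.tic()
        run_inference()   # placeholder for whatever is being timed
        timer.toc()
    print('%.4fs avg over %d calls' % (timer.average_time, timer.calls))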
face_enhancement.py ADDED
@@ -0,0 +1,145 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os
+ import cv2
+ import glob
+ import time
+ import argparse
+ import numpy as np
+ from PIL import Image
+ import __init_paths
+ from face_detect.retinaface_detection import RetinaFaceDetection
+ from face_parse.face_parsing import FaceParse
+ from face_model.face_gan import FaceGAN
+ from sr_model.real_esrnet import RealESRNet
+ from align_faces import warp_and_crop_face, get_reference_facial_points
+
+ class FaceEnhancement(object):
+     def __init__(self, base_dir='./', size=512, model=None, use_sr=True, sr_model=None, channel_multiplier=2, narrow=1, key=None, device='cuda'):
+         self.facedetector = RetinaFaceDetection(base_dir, device)
+         self.facegan = FaceGAN(base_dir, size, model, channel_multiplier, narrow, key, device=device)
+         self.srmodel = RealESRNet(base_dir, sr_model, device=device)
+         self.faceparser = FaceParse(base_dir, device=device)
+         self.use_sr = use_sr
+         self.size = size
+         self.threshold = 0.9
+
+         # the mask for pasting restored faces back
+         self.mask = np.zeros((512, 512), np.float32)
+         cv2.rectangle(self.mask, (26, 26), (486, 486), (1, 1, 1), -1, cv2.LINE_AA)
+         self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+         self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)
+
+         self.kernel = np.array((
+             [0.0625, 0.125, 0.0625],
+             [0.125, 0.25, 0.125],
+             [0.0625, 0.125, 0.0625]), dtype="float32")
+
+         # get the reference 5 landmarks position in the crop settings
+         default_square = True
+         inner_padding_factor = 0.25
+         outer_padding = (0, 0)
+         self.reference_5pts = get_reference_facial_points(
+             (self.size, self.size), inner_padding_factor, outer_padding, default_square)
+
+     def mask_postprocess(self, mask, thres=20):
+         mask[:thres, :] = 0; mask[-thres:, :] = 0
+         mask[:, :thres] = 0; mask[:, -thres:] = 0
+         mask = cv2.GaussianBlur(mask, (101, 101), 11)
+         mask = cv2.GaussianBlur(mask, (101, 101), 11)
+         return mask.astype(np.float32)
+
+     def process(self, img):
+         if self.use_sr:
+             img_sr = self.srmodel.process(img)
+             if img_sr is not None:
+                 img = cv2.resize(img, img_sr.shape[:2][::-1])
+
+         facebs, landms = self.facedetector.detect(img)
+
+         orig_faces, enhanced_faces = [], []
+         height, width = img.shape[:2]
+         full_mask = np.zeros((height, width), dtype=np.float32)
+         full_img = np.zeros(img.shape, dtype=np.uint8)
+
+         for i, (faceb, facial5points) in enumerate(zip(facebs, landms)):
+             if faceb[4]<self.threshold: continue
+             fh, fw = (faceb[3]-faceb[1]), (faceb[2]-faceb[0])
+
+             facial5points = np.reshape(facial5points, (2, 5))
+
+             of, tfm_inv = warp_and_crop_face(img, facial5points, reference_pts=self.reference_5pts, crop_size=(self.size, self.size))
+
+             # enhance the face
+             ef = self.facegan.process(of)
+
+             orig_faces.append(of)
+             enhanced_faces.append(ef)
+
+             #tmp_mask = self.mask
+             tmp_mask = self.mask_postprocess(self.faceparser.process(ef)[0]/255.)
+             tmp_mask = cv2.resize(tmp_mask, ef.shape[:2])
+             tmp_mask = cv2.warpAffine(tmp_mask, tfm_inv, (width, height), flags=3)
+
+             if min(fh, fw)<100: # gaussian filter for small faces
+                 ef = cv2.filter2D(ef, -1, self.kernel)
+
+             tmp_img = cv2.warpAffine(ef, tfm_inv, (width, height), flags=3)
+
+             mask = tmp_mask - full_mask
+             full_mask[np.where(mask>0)] = tmp_mask[np.where(mask>0)]
+             full_img[np.where(mask>0)] = tmp_img[np.where(mask>0)]
+
+         full_mask = full_mask[:, :, np.newaxis]
+         if self.use_sr and img_sr is not None:
+             img = cv2.convertScaleAbs(img_sr*(1-full_mask) + full_img*full_mask)
+         else:
+             img = cv2.convertScaleAbs(img*(1-full_mask) + full_img*full_mask)
+
+         return img, orig_faces, enhanced_faces
+
+
+ if __name__=='__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--model', type=str, default='GPEN-BFR-512', help='GPEN model')
+     parser.add_argument('--key', type=str, default=None, help='key of GPEN model')
+     parser.add_argument('--size', type=int, default=512, help='resolution of GPEN')
+     parser.add_argument('--channel_multiplier', type=int, default=2, help='channel multiplier of GPEN')
+     parser.add_argument('--narrow', type=float, default=1, help='channel narrow scale')
+     parser.add_argument('--use_sr', action='store_true', help='use sr or not')
+     parser.add_argument('--use_cuda', action='store_true', help='use cuda or not')
+     parser.add_argument('--sr_model', type=str, default='rrdb_realesrnet_psnr', help='SR model')
+     parser.add_argument('--sr_scale', type=int, default=2, help='SR scale')
+     parser.add_argument('--indir', type=str, default='examples/imgs', help='input folder')
+     parser.add_argument('--outdir', type=str, default='results/outs-BFR', help='output folder')
+     args = parser.parse_args()
+
+     #model = {'name':'GPEN-BFR-512', 'size':512, 'channel_multiplier':2, 'narrow':1}
+     #model = {'name':'GPEN-BFR-256', 'size':256, 'channel_multiplier':1, 'narrow':0.5}
+
+     os.makedirs(args.outdir, exist_ok=True)
+
+     faceenhancer = FaceEnhancement(size=args.size, model=args.model, use_sr=args.use_sr, sr_model=args.sr_model, channel_multiplier=args.channel_multiplier, narrow=args.narrow, key=args.key, device='cuda' if args.use_cuda else 'cpu')
+
+     files = sorted(glob.glob(os.path.join(args.indir, '*.*g')))
+     for n, file in enumerate(files[:]):
+         filename = os.path.basename(file)
+
+         im = cv2.imread(file, cv2.IMREAD_COLOR) # BGR
+         if not isinstance(im, np.ndarray): print(filename, 'error'); continue
+         #im = cv2.resize(im, (0,0), fx=2, fy=2) # optional
+
+         img, orig_faces, enhanced_faces = faceenhancer.process(im)
+
+         im = cv2.resize(im, img.shape[:2][::-1])
+         cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_COMP.jpg'), np.hstack((im, img)))
+         cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_GPEN.jpg'), img)
+
+         for m, (ef, of) in enumerate(zip(enhanced_faces, orig_faces)):
+             of = cv2.resize(of, ef.shape[:2])
+             cv2.imwrite(os.path.join(args.outdir, '.'.join(filename.split('.')[:-1])+'_face%02d'%m+'.jpg'), np.hstack((of, ef)))
+
+         if n%10==0: print(n, filename)
+
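The paste-back step at the end of `process` is a per-pixel alpha blend, out = background*(1-m) + face*m, with m the blurred parsing mask. On toy values (illustrative only):

    import numpy as np
    m = np.array([[0.0, 0.5, 1.0]])[..., None]  # blend weights per pixel
    bg = np.full((1, 3, 3), 100, np.float32)    # background image
    face = np.full((1, 3, 3), 200, np.float32)  # pasted face pixels
    out = bg*(1 - m) + face*m                   # columns blend to 100, 150, 200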
face_inpainting.py ADDED
@@ -0,0 +1,101 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os
+ import cv2
+ import glob
+ import time
+ import math
+ import numpy as np
+ from PIL import Image, ImageDraw
+ import __init_paths
+ from face_model.face_gan import FaceGAN
+
+ # modified by yangxy
+ def brush_stroke_mask(img, color=(255,255,255)):
+     min_num_vertex = 8
+     max_num_vertex = 28
+     mean_angle = 2*math.pi / 5
+     angle_range = 2*math.pi / 15
+     min_width = 12
+     max_width = 80
+     def generate_mask(H, W, img=None):
+         average_radius = math.sqrt(H*H+W*W) / 8
+         mask = Image.new('RGB', (W, H), 0)
+         if img is not None: mask = img #Image.fromarray(img)
+
+         for _ in range(np.random.randint(1, 4)):
+             num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
+             angle_min = mean_angle - np.random.uniform(0, angle_range)
+             angle_max = mean_angle + np.random.uniform(0, angle_range)
+             angles = []
+             vertex = []
+             for i in range(num_vertex):
+                 if i % 2 == 0:
+                     angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
+                 else:
+                     angles.append(np.random.uniform(angle_min, angle_max))
+
+             w, h = mask.size  # PIL size is (width, height)
+             vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
+             for i in range(num_vertex):
+                 r = np.clip(
+                     np.random.normal(loc=average_radius, scale=average_radius//2),
+                     0, 2*average_radius)
+                 new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
+                 new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
+                 vertex.append((int(new_x), int(new_y)))
+
+             draw = ImageDraw.Draw(mask)
+             width = int(np.random.uniform(min_width, max_width))
+             draw.line(vertex, fill=color, width=width)
+             for v in vertex:
+                 draw.ellipse((v[0] - width//2,
+                               v[1] - width//2,
+                               v[0] + width//2,
+                               v[1] + width//2),
+                              fill=color)
+
+         return mask
+
+     width, height = img.size
+     mask = generate_mask(height, width, img)
+     return mask
+
+ class FaceInpainting(object):
+     def __init__(self, base_dir='./', size=1024, model=None, channel_multiplier=2):
+         self.facegan = FaceGAN(base_dir, size, model, channel_multiplier)
+
+     # make sure the face image is well aligned. Please refer to face_enhancement.py
+     def process(self, brokenf):
+         # complete the face
+         out = self.facegan.process(brokenf)
+
+         return out
+
+ if __name__=='__main__':
+     model = {'name':'GPEN-Inpainting-1024', 'size':1024}
+
+     indir = 'examples/ffhq-10'
+     outdir = 'examples/outs-inpainting'
+     os.makedirs(outdir, exist_ok=True)
+
+     faceinpainter = FaceInpainting(size=model['size'], model=model['name'], channel_multiplier=2)
+
+     files = sorted(glob.glob(os.path.join(indir, '*.*g')))
+     for n, file in enumerate(files[:]):
+         filename = os.path.basename(file)
+
+         originf = cv2.imread(file, cv2.IMREAD_COLOR)
+
+         brokenf = np.asarray(brush_stroke_mask(Image.fromarray(originf)))
+
+         completef = faceinpainter.process(brokenf)
+
+         originf = cv2.resize(originf, completef.shape[:2])
+         brokenf = cv2.resize(brokenf, completef.shape[:2])
+         cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'.jpg'), np.hstack((brokenf, completef, originf)))
+
+         if n%10==0: print(n, file)
+
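To eyeball the random brush masks this script draws, one can run `brush_stroke_mask` on a flat test image (the filename below is illustrative, not part of this commit):

    from PIL import Image
    img = Image.new('RGB', (256, 256), (128, 128, 128))
    masked = brush_stroke_mask(img)   # draws white strokes onto the image it is given
    masked.save('mask_preview.png')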
face_model/face_gan.py ADDED
@@ -0,0 +1,57 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import torch
+ import os
+ import cv2
+ import glob
+ import numpy as np
+ from torch import nn
+ import torch.nn.functional as F
+ from torchvision import transforms, utils
+ from gpen_model import FullGenerator
+
+ class FaceGAN(object):
+     def __init__(self, base_dir='./', size=512, model=None, channel_multiplier=2, narrow=1, key=None, is_norm=True, device='cuda'):
+         self.mfile = os.path.join(base_dir, 'weights', model+'.pth')
+         self.n_mlp = 8
+         self.device = device
+         self.is_norm = is_norm
+         self.resolution = size
+         self.key = key
+         self.load_model(channel_multiplier, narrow)
+
+     def load_model(self, channel_multiplier=2, narrow=1):
+         self.model = FullGenerator(self.resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow, device=self.device)
+         pretrained_dict = torch.load(self.mfile, map_location=torch.device('cpu'))
+         if self.key is not None: pretrained_dict = pretrained_dict[self.key]
+         self.model.load_state_dict(pretrained_dict)
+         self.model.to(self.device)
+         self.model.eval()
+
+     def process(self, img):
+         img = cv2.resize(img, (self.resolution, self.resolution))
+         img_t = self.img2tensor(img)
+
+         with torch.no_grad():
+             out, __ = self.model(img_t)
+
+         out = self.tensor2img(out)
+
+         return out
+
+     def img2tensor(self, img):
+         img_t = torch.from_numpy(img).to(self.device)/255.
+         if self.is_norm:
+             img_t = (img_t - 0.5) / 0.5
+         img_t = img_t.permute(2, 0, 1).unsqueeze(0).flip(1) # BGR->RGB
+         return img_t
+
+     def tensor2img(self, img_t, pmax=255.0, imtype=np.uint8):
+         if self.is_norm:
+             img_t = img_t * 0.5 + 0.5
+         img_t = img_t.squeeze(0).permute(1, 2, 0).flip(2) # RGB->BGR
+         img_np = np.clip(img_t.float().cpu().numpy(), 0, 1) * pmax
+
+         return img_np.astype(imtype)
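`img2tensor` and `tensor2img` are symmetric (BGR->RGB channel flip plus [-1, 1] normalization), so a round trip reproduces the input up to uint8 rounding. A sketch assuming the GPEN-BFR-512 weights are present under ./weights (an assumption, not part of this commit):

    import numpy as np
    fg = FaceGAN(base_dir='./', size=512, model='GPEN-BFR-512')
    img = np.random.randint(0, 256, (512, 512, 3), np.uint8)  # fake BGR face crop
    back = fg.tensor2img(fg.img2tensor(img))
    assert np.abs(back.astype(int) - img.astype(int)).max() <= 1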
face_model/gpen_model.py ADDED
@@ -0,0 +1,747 @@
1
+ '''
2
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
3
+ @author: yangxy (yangtao9009@gmail.com)
4
+ '''
5
+ import math
6
+ import random
7
+ import functools
8
+ import operator
9
+ import itertools
10
+
11
+ import torch
12
+ from torch import nn
13
+ from torch.nn import functional as F
14
+ from torch.autograd import Function
15
+
16
+ from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
17
+
18
+ class PixelNorm(nn.Module):
19
+ def __init__(self):
20
+ super().__init__()
21
+
22
+ def forward(self, input):
23
+ return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
24
+
25
+
26
+ def make_kernel(k):
27
+ k = torch.tensor(k, dtype=torch.float32)
28
+
29
+ if k.ndim == 1:
30
+ k = k[None, :] * k[:, None]
31
+
32
+ k /= k.sum()
33
+
34
+ return k
35
+
36
+
37
+ class Upsample(nn.Module):
38
+ def __init__(self, kernel, factor=2, device='cpu'):
39
+ super().__init__()
40
+
41
+ self.factor = factor
42
+ kernel = make_kernel(kernel) * (factor ** 2)
43
+ self.register_buffer('kernel', kernel)
44
+
45
+ p = kernel.shape[0] - factor
46
+
47
+ pad0 = (p + 1) // 2 + factor - 1
48
+ pad1 = p // 2
49
+
50
+ self.pad = (pad0, pad1)
51
+ self.device = device
52
+
53
+ def forward(self, input):
54
+ out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad, device=self.device)
55
+
56
+ return out
57
+
58
+
59
+ class Downsample(nn.Module):
60
+ def __init__(self, kernel, factor=2, device='cpu'):
61
+ super().__init__()
62
+
63
+ self.factor = factor
64
+ kernel = make_kernel(kernel)
65
+ self.register_buffer('kernel', kernel)
66
+
67
+ p = kernel.shape[0] - factor
68
+
69
+ pad0 = (p + 1) // 2
70
+ pad1 = p // 2
71
+
72
+ self.pad = (pad0, pad1)
73
+ self.device = device
74
+
75
+ def forward(self, input):
76
+ out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad, device=self.device)
77
+
78
+ return out
79
+
80
+
81
+ class Blur(nn.Module):
82
+ def __init__(self, kernel, pad, upsample_factor=1, device='cpu'):
83
+ super().__init__()
84
+
85
+ kernel = make_kernel(kernel)
86
+
87
+ if upsample_factor > 1:
88
+ kernel = kernel * (upsample_factor ** 2)
89
+
90
+ self.register_buffer('kernel', kernel)
91
+
92
+ self.pad = pad
93
+ self.device = device
94
+
95
+ def forward(self, input):
96
+ out = upfirdn2d(input, self.kernel, pad=self.pad, device=self.device)
97
+
98
+ return out
99
+
100
+
101
+ class EqualConv2d(nn.Module):
102
+ def __init__(
103
+ self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
104
+ ):
105
+ super().__init__()
106
+
107
+ self.weight = nn.Parameter(
108
+ torch.randn(out_channel, in_channel, kernel_size, kernel_size)
109
+ )
110
+ self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
111
+
112
+ self.stride = stride
113
+ self.padding = padding
114
+
115
+ if bias:
116
+ self.bias = nn.Parameter(torch.zeros(out_channel))
117
+
118
+ else:
119
+ self.bias = None
120
+
121
+ def forward(self, input):
122
+ out = F.conv2d(
123
+ input,
124
+ self.weight * self.scale,
125
+ bias=self.bias,
126
+ stride=self.stride,
127
+ padding=self.padding,
128
+ )
129
+
130
+ return out
131
+
132
+ def __repr__(self):
133
+ return (
134
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
135
+ f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
136
+ )
137
+
138
+
139
+ class EqualLinear(nn.Module):
140
+ def __init__(
141
+ self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None, device='cpu'
142
+ ):
143
+ super().__init__()
144
+
145
+ self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
146
+
147
+ if bias:
148
+ self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
149
+
150
+ else:
151
+ self.bias = None
152
+
153
+ self.activation = activation
154
+ self.device = device
155
+
156
+ self.scale = (1 / math.sqrt(in_dim)) * lr_mul
157
+ self.lr_mul = lr_mul
158
+
159
+ def forward(self, input):
160
+ if self.activation:
161
+ out = F.linear(input, self.weight * self.scale)
162
+ out = fused_leaky_relu(out, self.bias * self.lr_mul, device=self.device)
163
+
164
+ else:
165
+ out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul)
166
+
167
+ return out
168
+
169
+ def __repr__(self):
170
+ return (
171
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
172
+ )
173
+
174
+
175
+ class ScaledLeakyReLU(nn.Module):
176
+ def __init__(self, negative_slope=0.2):
177
+ super().__init__()
178
+
179
+ self.negative_slope = negative_slope
180
+
181
+ def forward(self, input):
182
+ out = F.leaky_relu(input, negative_slope=self.negative_slope)
183
+
184
+ return out * math.sqrt(2)
185
+
186
+
187
+ class ModulatedConv2d(nn.Module):
188
+ def __init__(
189
+ self,
190
+ in_channel,
191
+ out_channel,
192
+ kernel_size,
193
+ style_dim,
194
+ demodulate=True,
195
+ upsample=False,
196
+ downsample=False,
197
+ blur_kernel=[1, 3, 3, 1],
198
+ device='cpu'
199
+ ):
200
+ super().__init__()
201
+
202
+ self.eps = 1e-8
203
+ self.kernel_size = kernel_size
204
+ self.in_channel = in_channel
205
+ self.out_channel = out_channel
206
+ self.upsample = upsample
207
+ self.downsample = downsample
208
+
209
+ if upsample:
210
+ factor = 2
211
+ p = (len(blur_kernel) - factor) - (kernel_size - 1)
212
+ pad0 = (p + 1) // 2 + factor - 1
213
+ pad1 = p // 2 + 1
214
+
215
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor, device=device)
216
+
217
+ if downsample:
218
+ factor = 2
219
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
220
+ pad0 = (p + 1) // 2
221
+ pad1 = p // 2
222
+
223
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1), device=device)
224
+
225
+ fan_in = in_channel * kernel_size ** 2
226
+ self.scale = 1 / math.sqrt(fan_in)
227
+ self.padding = kernel_size // 2
228
+
229
+ self.weight = nn.Parameter(
230
+ torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
231
+ )
232
+
233
+ self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
234
+
235
+ self.demodulate = demodulate
236
+
237
+ def __repr__(self):
238
+ return (
239
+ f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
240
+ f'upsample={self.upsample}, downsample={self.downsample})'
241
+ )
242
+
243
+ def forward(self, input, style):
244
+ batch, in_channel, height, width = input.shape
245
+
246
+ style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
247
+ weight = self.scale * self.weight * style
248
+
249
+ if self.demodulate:
250
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
251
+ weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
252
+
253
+ weight = weight.view(
254
+ batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
255
+ )
256
+
257
+ if self.upsample:
258
+ input = input.view(1, batch * in_channel, height, width)
259
+ weight = weight.view(
260
+ batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
261
+ )
262
+ weight = weight.transpose(1, 2).reshape(
263
+ batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
264
+ )
265
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
266
+ _, _, height, width = out.shape
267
+ out = out.view(batch, self.out_channel, height, width)
268
+ out = self.blur(out)
269
+
270
+ elif self.downsample:
271
+ input = self.blur(input)
272
+ _, _, height, width = input.shape
273
+ input = input.view(1, batch * in_channel, height, width)
274
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
275
+ _, _, height, width = out.shape
276
+ out = out.view(batch, self.out_channel, height, width)
277
+
278
+ else:
279
+ input = input.view(1, batch * in_channel, height, width)
280
+ out = F.conv2d(input, weight, padding=self.padding, groups=batch)
281
+ _, _, height, width = out.shape
282
+ out = out.view(batch, self.out_channel, height, width)
283
+
284
+ return out
285
+
286
+
287
+ class NoiseInjection(nn.Module):
288
+ def __init__(self, isconcat=True):
289
+ super().__init__()
290
+
291
+ self.isconcat = isconcat
292
+ self.weight = nn.Parameter(torch.zeros(1))
293
+
294
+ def forward(self, image, noise=None):
295
+ if noise is None:
296
+ batch, _, height, width = image.shape
297
+ noise = image.new_empty(batch, 1, height, width).normal_()
298
+
299
+ if self.isconcat:
300
+ return torch.cat((image, self.weight * noise), dim=1)
301
+ else:
302
+ return image + self.weight * noise
303
+
304
+
305
+ class ConstantInput(nn.Module):
306
+ def __init__(self, channel, size=4):
307
+ super().__init__()
308
+
309
+ self.input = nn.Parameter(torch.randn(1, channel, size, size))
310
+
311
+ def forward(self, input):
312
+ batch = input.shape[0]
313
+ out = self.input.repeat(batch, 1, 1, 1)
314
+
315
+ return out
316
+
317
+
318
+ class StyledConv(nn.Module):
319
+ def __init__(
320
+ self,
321
+ in_channel,
322
+ out_channel,
323
+ kernel_size,
324
+ style_dim,
325
+ upsample=False,
326
+ blur_kernel=[1, 3, 3, 1],
327
+ demodulate=True,
328
+ isconcat=True,
329
+ device='cpu'
330
+ ):
331
+ super().__init__()
332
+
333
+ self.conv = ModulatedConv2d(
334
+ in_channel,
335
+ out_channel,
336
+ kernel_size,
337
+ style_dim,
338
+ upsample=upsample,
339
+ blur_kernel=blur_kernel,
340
+ demodulate=demodulate,
341
+ device=device
342
+ )
343
+
344
+ self.noise = NoiseInjection(isconcat)
345
+ #self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
346
+ #self.activate = ScaledLeakyReLU(0.2)
347
+ feat_multiplier = 2 if isconcat else 1
348
+ self.activate = FusedLeakyReLU(out_channel*feat_multiplier, device=device)
349
+
350
+ def forward(self, input, style, noise=None):
351
+ out = self.conv(input, style)
352
+ out = self.noise(out, noise=noise)
353
+ # out = out + self.bias
354
+ out = self.activate(out)
355
+
356
+ return out
357
+
358
+
359
+ class ToRGB(nn.Module):
360
+ def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1], device='cpu'):
361
+ super().__init__()
362
+
363
+ if upsample:
364
+ self.upsample = Upsample(blur_kernel, device=device)
365
+
366
+ self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False, device=device)
367
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
368
+
369
+ def forward(self, input, style, skip=None):
370
+ out = self.conv(input, style)
371
+ out = out + self.bias
372
+
373
+ if skip is not None:
374
+ skip = self.upsample(skip)
375
+
376
+ out = out + skip
377
+
378
+ return out
379
+
380
+ class Generator(nn.Module):
381
+ def __init__(
382
+ self,
383
+ size,
384
+ style_dim,
385
+ n_mlp,
386
+ channel_multiplier=2,
387
+ blur_kernel=[1, 3, 3, 1],
388
+ lr_mlp=0.01,
389
+ isconcat=True,
390
+ narrow=1,
391
+ device='cpu'
392
+ ):
393
+ super().__init__()
394
+
395
+ self.size = size
396
+ self.n_mlp = n_mlp
397
+ self.style_dim = style_dim
398
+ self.feat_multiplier = 2 if isconcat else 1
399
+
400
+ layers = [PixelNorm()]
401
+
402
+ for i in range(n_mlp):
403
+ layers.append(
404
+ EqualLinear(
405
+ style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu', device=device
406
+ )
407
+ )
408
+
409
+ self.style = nn.Sequential(*layers)
410
+
411
+ self.channels = {
412
+ 4: int(512 * narrow),
413
+ 8: int(512 * narrow),
414
+ 16: int(512 * narrow),
415
+ 32: int(512 * narrow),
416
+ 64: int(256 * channel_multiplier * narrow),
417
+ 128: int(128 * channel_multiplier * narrow),
418
+ 256: int(64 * channel_multiplier * narrow),
419
+ 512: int(32 * channel_multiplier * narrow),
420
+ 1024: int(16 * channel_multiplier * narrow)
421
+ }
422
+
423
+ self.input = ConstantInput(self.channels[4])
424
+ self.conv1 = StyledConv(
425
+ self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device
426
+ )
427
+ self.to_rgb1 = ToRGB(self.channels[4]*self.feat_multiplier, style_dim, upsample=False, device=device)
428
+
429
+ self.log_size = int(math.log(size, 2))
430
+
431
+ self.convs = nn.ModuleList()
432
+ self.upsamples = nn.ModuleList()
433
+ self.to_rgbs = nn.ModuleList()
434
+
435
+ in_channel = self.channels[4]
436
+
437
+ for i in range(3, self.log_size + 1):
438
+ out_channel = self.channels[2 ** i]
439
+
440
+ self.convs.append(
441
+ StyledConv(
442
+ in_channel*self.feat_multiplier,
443
+ out_channel,
444
+ 3,
445
+ style_dim,
446
+ upsample=True,
447
+ blur_kernel=blur_kernel,
448
+ isconcat=isconcat,
449
+ device=device
450
+ )
451
+ )
452
+
453
+ self.convs.append(
454
+ StyledConv(
455
+ out_channel*self.feat_multiplier, out_channel, 3, style_dim, blur_kernel=blur_kernel, isconcat=isconcat, device=device
456
+ )
457
+ )
458
+
459
+ self.to_rgbs.append(ToRGB(out_channel*self.feat_multiplier, style_dim, device=device))
460
+
461
+ in_channel = out_channel
462
+
463
+ self.n_latent = self.log_size * 2 - 2
464
+
465
+ def make_noise(self):
466
+ device = self.input.input.device
467
+
468
+ noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
469
+
470
+ for i in range(3, self.log_size + 1):
471
+ for _ in range(2):
472
+ noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
473
+
474
+ return noises
475
+
476
+ def mean_latent(self, n_latent):
477
+ latent_in = torch.randn(
478
+ n_latent, self.style_dim, device=self.input.input.device
479
+ )
480
+ latent = self.style(latent_in).mean(0, keepdim=True)
481
+
482
+ return latent
483
+
484
+ def get_latent(self, input):
485
+ return self.style(input)
486
+
487
+ def forward(
488
+ self,
489
+ styles,
490
+ return_latents=False,
491
+ inject_index=None,
492
+ truncation=1,
493
+ truncation_latent=None,
494
+ input_is_latent=False,
495
+ noise=None,
496
+ ):
497
+ if not input_is_latent:
498
+ styles = [self.style(s) for s in styles]
499
+
500
+ if noise is None:
501
+ '''
502
+ noise = [None] * (2 * (self.log_size - 2) + 1)
503
+ '''
504
+ noise = []
505
+ batch = styles[0].shape[0]
506
+ for i in range(self.n_mlp + 1):
507
+ size = 2 ** (i+2)
508
+ noise.append(torch.randn(batch, self.channels[size], size, size, device=styles[0].device))
509
+
510
+ if truncation < 1:
511
+ style_t = []
512
+
513
+ for style in styles:
514
+ style_t.append(
515
+ truncation_latent + truncation * (style - truncation_latent)
516
+ )
517
+
518
+ styles = style_t
519
+
520
+ if len(styles) < 2:
521
+ inject_index = self.n_latent
522
+
523
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
524
+
525
+ else:
526
+ if inject_index is None:
527
+ inject_index = random.randint(1, self.n_latent - 1)
528
+
529
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
530
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
531
+
532
+ latent = torch.cat([latent, latent2], 1)
533
+
534
+ out = self.input(latent)
535
+ out = self.conv1(out, latent[:, 0], noise=noise[0])
536
+
537
+ skip = self.to_rgb1(out, latent[:, 1])
538
+
539
+ i = 1
540
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
541
+ self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
542
+ ):
543
+ out = conv1(out, latent[:, i], noise=noise1)
544
+ out = conv2(out, latent[:, i + 1], noise=noise2)
545
+ skip = to_rgb(out, latent[:, i + 2], skip)
546
+
547
+ i += 2
548
+
549
+ image = skip
550
+
551
+ if return_latents:
552
+ return image, latent
553
+
554
+ else:
555
+ return image, None
556
+
557
+ class ConvLayer(nn.Sequential):
558
+ def __init__(
559
+ self,
560
+ in_channel,
561
+ out_channel,
562
+ kernel_size,
563
+ downsample=False,
564
+ blur_kernel=[1, 3, 3, 1],
565
+ bias=True,
566
+ activate=True,
567
+ device='cpu'
568
+ ):
569
+ layers = []
570
+
571
+ if downsample:
572
+ factor = 2
573
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
574
+ pad0 = (p + 1) // 2
575
+ pad1 = p // 2
576
+
577
+ layers.append(Blur(blur_kernel, pad=(pad0, pad1), device=device))
578
+
579
+ stride = 2
580
+ self.padding = 0
581
+
582
+ else:
583
+ stride = 1
584
+ self.padding = kernel_size // 2
585
+
586
+ layers.append(
587
+ EqualConv2d(
588
+ in_channel,
589
+ out_channel,
590
+ kernel_size,
591
+ padding=self.padding,
592
+ stride=stride,
593
+ bias=bias and not activate,
594
+ )
595
+ )
596
+
597
+ if activate:
598
+ if bias:
599
+ layers.append(FusedLeakyReLU(out_channel, device=device))
600
+
601
+ else:
602
+ layers.append(ScaledLeakyReLU(0.2))
603
+
604
+ super().__init__(*layers)
605
+
606
+
607
+ class ResBlock(nn.Module):
608
+ def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], device='cpu'):
609
+ super().__init__()
610
+
611
+ self.conv1 = ConvLayer(in_channel, in_channel, 3, device=device)
612
+ self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)
613
+
614
+ self.skip = ConvLayer(
615
+ in_channel, out_channel, 1, downsample=True, activate=False, bias=False
616
+ )
617
+
618
+ def forward(self, input):
619
+ out = self.conv1(input)
620
+ out = self.conv2(out)
621
+
622
+ skip = self.skip(input)
623
+ out = (out + skip) / math.sqrt(2)
624
+
625
+ return out
626
+
627
+ class FullGenerator(nn.Module):
628
+ def __init__(
629
+ self,
630
+ size,
631
+ style_dim,
632
+ n_mlp,
633
+ channel_multiplier=2,
634
+ blur_kernel=[1, 3, 3, 1],
635
+ lr_mlp=0.01,
636
+ isconcat=True,
637
+ narrow=1,
638
+ device='cpu'
639
+ ):
640
+ super().__init__()
641
+ channels = {
642
+ 4: int(512 * narrow),
643
+ 8: int(512 * narrow),
644
+ 16: int(512 * narrow),
645
+ 32: int(512 * narrow),
646
+ 64: int(256 * channel_multiplier * narrow),
647
+ 128: int(128 * channel_multiplier * narrow),
648
+ 256: int(64 * channel_multiplier * narrow),
649
+ 512: int(32 * channel_multiplier * narrow),
650
+ 1024: int(16 * channel_multiplier * narrow)
651
+ }
652
+
653
+ self.log_size = int(math.log(size, 2))
654
+ self.generator = Generator(size, style_dim, n_mlp, channel_multiplier=channel_multiplier, blur_kernel=blur_kernel, lr_mlp=lr_mlp, isconcat=isconcat, narrow=narrow, device=device)
655
+
656
+ conv = [ConvLayer(3, channels[size], 1, device=device)]
657
+ self.ecd0 = nn.Sequential(*conv)
658
+ in_channel = channels[size]
659
+
660
+ self.names = ['ecd%d'%i for i in range(self.log_size-1)]
661
+ for i in range(self.log_size, 2, -1):
662
+ out_channel = channels[2 ** (i - 1)]
663
+ #conv = [ResBlock(in_channel, out_channel, blur_kernel)]
664
+ conv = [ConvLayer(in_channel, out_channel, 3, downsample=True, device=device)]
665
+ setattr(self, self.names[self.log_size-i+1], nn.Sequential(*conv))
666
+ in_channel = out_channel
667
+ self.final_linear = nn.Sequential(EqualLinear(channels[4] * 4 * 4, style_dim, activation='fused_lrelu', device=device))
668
+
669
+ def forward(self,
670
+ inputs,
671
+ return_latents=False,
672
+ inject_index=None,
673
+ truncation=1,
674
+ truncation_latent=None,
675
+ input_is_latent=False,
676
+ ):
677
+ noise = []
678
+ for i in range(self.log_size-1):
679
+ ecd = getattr(self, self.names[i])
680
+ inputs = ecd(inputs)
681
+ noise.append(inputs)
682
+ #print(inputs.shape)
683
+ inputs = inputs.view(inputs.shape[0], -1)
684
+ outs = self.final_linear(inputs)
685
+ #print(outs.shape)
686
+ noise = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in noise))[::-1]
687
+ outs = self.generator([outs], return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise=noise[1:])
688
+ return outs
689
+
690
+ class Discriminator(nn.Module):
691
+ def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], narrow=1, device='cpu'):
692
+ super().__init__()
693
+
694
+ channels = {
695
+ 4: int(512 * narrow),
696
+ 8: int(512 * narrow),
697
+ 16: int(512 * narrow),
698
+ 32: int(512 * narrow),
699
+ 64: int(256 * channel_multiplier * narrow),
700
+ 128: int(128 * channel_multiplier * narrow),
701
+ 256: int(64 * channel_multiplier * narrow),
702
+ 512: int(32 * channel_multiplier * narrow),
703
+ 1024: int(16 * channel_multiplier * narrow)
704
+ }
705
+
706
+ convs = [ConvLayer(3, channels[size], 1, device=device)]
707
+
708
+ log_size = int(math.log(size, 2))
709
+
710
+ in_channel = channels[size]
711
+
712
+ for i in range(log_size, 2, -1):
713
+ out_channel = channels[2 ** (i - 1)]
714
+
715
+ convs.append(ResBlock(in_channel, out_channel, blur_kernel, device=device))
716
+
717
+ in_channel = out_channel
718
+
719
+ self.convs = nn.Sequential(*convs)
720
+
721
+ self.stddev_group = 4
722
+ self.stddev_feat = 1
723
+
724
+ self.final_conv = ConvLayer(in_channel + 1, channels[4], 3, device=device)
725
+ self.final_linear = nn.Sequential(
726
+ EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu', device=device),
727
+ EqualLinear(channels[4], 1),
728
+ )
729
+
730
+ def forward(self, input):
731
+ out = self.convs(input)
732
+
733
+ batch, channel, height, width = out.shape
734
+ group = min(batch, self.stddev_group)
735
+ stddev = out.view(
736
+ group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
737
+ )
738
+ stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
739
+ stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
740
+ stddev = stddev.repeat(group, 1, height, width)
741
+ out = torch.cat([out, stddev], 1)
742
+
743
+ out = self.final_conv(out)
744
+
745
+ out = out.view(batch, -1)
746
+ out = self.final_linear(out)
747
+ return out
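The encoder of FullGenerator above collects one feature map per resolution and reuses them, duplicated and reversed, as the per-layer noise inputs of the GAN-prior decoder. A minimal sketch of that bookkeeping (sizes are illustrative, assuming size=512, so log_size=9 and the encoder emits 8 feature maps):

# illustrative shapes only; mirrors the itertools line in FullGenerator.forward
import itertools

feats = ['512x512', '256x256', '128x128', '64x64', '32x32', '16x16', '8x8', '4x4']
noise = list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in feats))[::-1]
print(len(noise), noise[:3])  # 16 entries, starting ['4x4', '4x4', '8x8']
# the generator then receives noise[1:], one map per styled conv layer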
face_model/op/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .fused_act import FusedLeakyReLU, fused_leaky_relu
+ from .upfirdn2d import upfirdn2d
face_model/op/fused_act.py ADDED
@@ -0,0 +1,96 @@
1
+ import os
2
+ import platform
3
+
4
+ import torch
5
+ from torch import nn
6
+ import torch.nn.functional as F
7
+ from torch.autograd import Function
8
+ from torch.utils.cpp_extension import load, _import_module_from_library
9
+
10
+ # if running GPEN without CUDA, please comment out lines 11-19
11
+ if platform.system() == 'Linux' and torch.cuda.is_available():
12
+ module_path = os.path.dirname(__file__)
13
+ fused = load(
14
+ 'fused',
15
+ sources=[
16
+ os.path.join(module_path, 'fused_bias_act.cpp'),
17
+ os.path.join(module_path, 'fused_bias_act_kernel.cu'),
18
+ ],
19
+ )
20
+
21
+
22
+ #fused = _import_module_from_library('fused', '/tmp/torch_extensions/fused', True)
23
+
24
+
25
+ class FusedLeakyReLUFunctionBackward(Function):
26
+ @staticmethod
27
+ def forward(ctx, grad_output, out, negative_slope, scale):
28
+ ctx.save_for_backward(out)
29
+ ctx.negative_slope = negative_slope
30
+ ctx.scale = scale
31
+
32
+ empty = grad_output.new_empty(0)
33
+
34
+ grad_input = fused.fused_bias_act(
35
+ grad_output, empty, out, 3, 1, negative_slope, scale
36
+ )
37
+
38
+ dim = [0]
39
+
40
+ if grad_input.ndim > 2:
41
+ dim += list(range(2, grad_input.ndim))
42
+
43
+ grad_bias = grad_input.sum(dim).detach()
44
+
45
+ return grad_input, grad_bias
46
+
47
+ @staticmethod
48
+ def backward(ctx, gradgrad_input, gradgrad_bias):
49
+ out, = ctx.saved_tensors
50
+ gradgrad_out = fused.fused_bias_act(
51
+ gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
52
+ )
53
+
54
+ return gradgrad_out, None, None, None
55
+
56
+
57
+ class FusedLeakyReLUFunction(Function):
58
+ @staticmethod
59
+ def forward(ctx, input, bias, negative_slope, scale):
60
+ empty = input.new_empty(0)
61
+ out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
62
+ ctx.save_for_backward(out)
63
+ ctx.negative_slope = negative_slope
64
+ ctx.scale = scale
65
+
66
+ return out
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output):
70
+ out, = ctx.saved_tensors
71
+
72
+ grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
73
+ grad_output, out, ctx.negative_slope, ctx.scale
74
+ )
75
+
76
+ return grad_input, grad_bias, None, None
77
+
78
+
79
+ class FusedLeakyReLU(nn.Module):
80
+ def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5, device='cpu'):
81
+ super().__init__()
82
+
83
+ self.bias = nn.Parameter(torch.zeros(channel))
84
+ self.negative_slope = negative_slope
85
+ self.scale = scale
86
+ self.device = device
87
+
88
+ def forward(self, input):
89
+ return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale, self.device)
90
+
91
+
92
+ def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5, device='cpu'):
93
+ if platform.system() == 'Linux' and torch.cuda.is_available() and device != 'cpu':
94
+ return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
95
+ else:
96
+ return scale * F.leaky_relu(input + bias.view((1, -1)+(1,)*(len(input.shape)-2)), negative_slope=negative_slope)
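On systems without a CUDA toolchain, fused_leaky_relu above takes the pure-PyTorch fallback branch. A quick equivalence sketch of that branch (shapes illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 4)
bias = torch.zeros(8)
# the fallback: broadcast a per-channel bias, apply leaky ReLU, rescale by sqrt(2)
out = (2 ** 0.5) * F.leaky_relu(x + bias.view(1, -1, 1, 1), negative_slope=0.2)
assert out.shape == x.shape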
face_model/op/fused_bias_act.cpp ADDED
@@ -0,0 +1,21 @@
+ #include <torch/extension.h>
+
+
+ torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+ int act, int grad, float alpha, float scale);
+
+ #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+ #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+ torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
+ int act, int grad, float alpha, float scale) {
+ CHECK_CUDA(input);
+ CHECK_CUDA(bias);
+
+ return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
+ }
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
+ }
face_model/op/fused_bias_act_kernel.cu ADDED
@@ -0,0 +1,99 @@
1
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
2
+ //
3
+ // This work is made available under the Nvidia Source Code License-NC.
4
+ // To view a copy of this license, visit
5
+ // https://nvlabs.github.io/stylegan2/license.html
6
+
7
+ #include <torch/types.h>
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/AccumulateType.h>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+
18
+ template <typename scalar_t>
19
+ static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
20
+ int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
21
+ int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
22
+
23
+ scalar_t zero = 0.0;
24
+
25
+ for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
26
+ scalar_t x = p_x[xi];
27
+
28
+ if (use_bias) {
29
+ x += p_b[(xi / step_b) % size_b];
30
+ }
31
+
32
+ scalar_t ref = use_ref ? p_ref[xi] : zero;
33
+
34
+ scalar_t y;
35
+
36
+ switch (act * 10 + grad) {
37
+ default:
38
+ case 10: y = x; break;
39
+ case 11: y = x; break;
40
+ case 12: y = 0.0; break;
41
+
42
+ case 30: y = (x > 0.0) ? x : x * alpha; break;
43
+ case 31: y = (ref > 0.0) ? x : x * alpha; break;
44
+ case 32: y = 0.0; break;
45
+ }
46
+
47
+ out[xi] = y * scale;
48
+ }
49
+ }
50
+
51
+
52
+ torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
53
+ int act, int grad, float alpha, float scale) {
54
+ int curDevice = -1;
55
+ cudaGetDevice(&curDevice);
56
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
57
+
58
+ auto x = input.contiguous();
59
+ auto b = bias.contiguous();
60
+ auto ref = refer.contiguous();
61
+
62
+ int use_bias = b.numel() ? 1 : 0;
63
+ int use_ref = ref.numel() ? 1 : 0;
64
+
65
+ int size_x = x.numel();
66
+ int size_b = b.numel();
67
+ int step_b = 1;
68
+
69
+ for (int i = 1 + 1; i < x.dim(); i++) {
70
+ step_b *= x.size(i);
71
+ }
72
+
73
+ int loop_x = 4;
74
+ int block_size = 4 * 32;
75
+ int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
76
+
77
+ auto y = torch::empty_like(x);
78
+
79
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
80
+ fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
81
+ y.data_ptr<scalar_t>(),
82
+ x.data_ptr<scalar_t>(),
83
+ b.data_ptr<scalar_t>(),
84
+ ref.data_ptr<scalar_t>(),
85
+ act,
86
+ grad,
87
+ alpha,
88
+ scale,
89
+ loop_x,
90
+ size_x,
91
+ step_b,
92
+ size_b,
93
+ use_bias,
94
+ use_ref
95
+ );
96
+ });
97
+
98
+ return y;
99
+ }
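In the kernel above, the switch key act * 10 + grad encodes the activation (act=1 linear, act=3 leaky ReLU) and the derivative order (grad=0 forward, 1 first derivative against the saved reference, 2 second derivative, which is zero for piecewise-linear functions). A scalar Python rendering of the act=3 cases (a sketch of the semantics, not the CUDA code):

def fused_bias_act_ref(x, ref, alpha, scale, grad):
    # case 30: forward leaky ReLU; case 31: gradient masked by the sign of ref;
    # case 32: second derivative, identically zero
    if grad == 0:
        y = x if x > 0.0 else x * alpha
    elif grad == 1:
        y = x if ref > 0.0 else x * alpha
    else:
        y = 0.0
    return y * scale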
face_model/op/upfirdn2d.cpp ADDED
@@ -0,0 +1,23 @@
+ #include <torch/extension.h>
+
+
+ torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
+ int up_x, int up_y, int down_x, int down_y,
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1);
+
+ #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+ #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+ torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
+ int up_x, int up_y, int down_x, int down_y,
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
+ CHECK_CUDA(input);
+ CHECK_CUDA(kernel);
+
+ return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
+ }
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
+ }
face_model/op/upfirdn2d.py ADDED
@@ -0,0 +1,194 @@
1
+ import os
2
+ import platform
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from torch.autograd import Function
7
+ from torch.utils.cpp_extension import load, _import_module_from_library
8
+
9
+ # if running GPEN without CUDA, please comment out lines 10-18
10
+ if platform.system() == 'Linux' and torch.cuda.is_available():
11
+ module_path = os.path.dirname(__file__)
12
+ upfirdn2d_op = load(
13
+ 'upfirdn2d',
14
+ sources=[
15
+ os.path.join(module_path, 'upfirdn2d.cpp'),
16
+ os.path.join(module_path, 'upfirdn2d_kernel.cu'),
17
+ ],
18
+ )
19
+
20
+
21
+ #upfirdn2d_op = _import_module_from_library('upfirdn2d', '/tmp/torch_extensions/upfirdn2d', True)
22
+
23
+ class UpFirDn2dBackward(Function):
24
+ @staticmethod
25
+ def forward(
26
+ ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
27
+ ):
28
+
29
+ up_x, up_y = up
30
+ down_x, down_y = down
31
+ g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
32
+
33
+ grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
34
+
35
+ grad_input = upfirdn2d_op.upfirdn2d(
36
+ grad_output,
37
+ grad_kernel,
38
+ down_x,
39
+ down_y,
40
+ up_x,
41
+ up_y,
42
+ g_pad_x0,
43
+ g_pad_x1,
44
+ g_pad_y0,
45
+ g_pad_y1,
46
+ )
47
+ grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
48
+
49
+ ctx.save_for_backward(kernel)
50
+
51
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
52
+
53
+ ctx.up_x = up_x
54
+ ctx.up_y = up_y
55
+ ctx.down_x = down_x
56
+ ctx.down_y = down_y
57
+ ctx.pad_x0 = pad_x0
58
+ ctx.pad_x1 = pad_x1
59
+ ctx.pad_y0 = pad_y0
60
+ ctx.pad_y1 = pad_y1
61
+ ctx.in_size = in_size
62
+ ctx.out_size = out_size
63
+
64
+ return grad_input
65
+
66
+ @staticmethod
67
+ def backward(ctx, gradgrad_input):
68
+ kernel, = ctx.saved_tensors
69
+
70
+ gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
71
+
72
+ gradgrad_out = upfirdn2d_op.upfirdn2d(
73
+ gradgrad_input,
74
+ kernel,
75
+ ctx.up_x,
76
+ ctx.up_y,
77
+ ctx.down_x,
78
+ ctx.down_y,
79
+ ctx.pad_x0,
80
+ ctx.pad_x1,
81
+ ctx.pad_y0,
82
+ ctx.pad_y1,
83
+ )
84
+ # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
85
+ gradgrad_out = gradgrad_out.view(
86
+ ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
87
+ )
88
+
89
+ return gradgrad_out, None, None, None, None, None, None, None, None
90
+
91
+
92
+ class UpFirDn2d(Function):
93
+ @staticmethod
94
+ def forward(ctx, input, kernel, up, down, pad):
95
+ up_x, up_y = up
96
+ down_x, down_y = down
97
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
98
+
99
+ kernel_h, kernel_w = kernel.shape
100
+ batch, channel, in_h, in_w = input.shape
101
+ ctx.in_size = input.shape
102
+
103
+ input = input.reshape(-1, in_h, in_w, 1)
104
+
105
+ ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
106
+
107
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
108
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
109
+ ctx.out_size = (out_h, out_w)
110
+
111
+ ctx.up = (up_x, up_y)
112
+ ctx.down = (down_x, down_y)
113
+ ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
114
+
115
+ g_pad_x0 = kernel_w - pad_x0 - 1
116
+ g_pad_y0 = kernel_h - pad_y0 - 1
117
+ g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
118
+ g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
119
+
120
+ ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
121
+
122
+ out = upfirdn2d_op.upfirdn2d(
123
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
124
+ )
125
+ # out = out.view(major, out_h, out_w, minor)
126
+ out = out.view(-1, channel, out_h, out_w)
127
+
128
+ return out
129
+
130
+ @staticmethod
131
+ def backward(ctx, grad_output):
132
+ kernel, grad_kernel = ctx.saved_tensors
133
+
134
+ grad_input = UpFirDn2dBackward.apply(
135
+ grad_output,
136
+ kernel,
137
+ grad_kernel,
138
+ ctx.up,
139
+ ctx.down,
140
+ ctx.pad,
141
+ ctx.g_pad,
142
+ ctx.in_size,
143
+ ctx.out_size,
144
+ )
145
+
146
+ return grad_input, None, None, None, None
147
+
148
+
149
+ def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0), device='cpu'):
150
+ if platform.system() == 'Linux' and torch.cuda.is_available() and device != 'cpu':
151
+ out = UpFirDn2d.apply(
152
+ input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
153
+ )
154
+ else:
155
+ out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
156
+
157
+ return out
158
+
159
+
160
+ def upfirdn2d_native(
161
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
162
+ ):
163
+ input = input.permute(0, 2, 3, 1)
164
+ _, in_h, in_w, minor = input.shape
165
+ kernel_h, kernel_w = kernel.shape
166
+ out = input.view(-1, in_h, 1, in_w, 1, minor)
167
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
168
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
169
+
170
+ out = F.pad(
171
+ out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
172
+ )
173
+ out = out[
174
+ :,
175
+ max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
176
+ max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
177
+ :,
178
+ ]
179
+
180
+ out = out.permute(0, 3, 1, 2)
181
+ out = out.reshape(
182
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
183
+ )
184
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
185
+ out = F.conv2d(out, w)
186
+ out = out.reshape(
187
+ -1,
188
+ minor,
189
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
190
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
191
+ )
192
+ # out = out.permute(0, 2, 3, 1)
193
+ return out[:, :, ::down_y, ::down_x]
194
+
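upfirdn2d performs upsample-by-zero-insertion, pad, FIR filter, then downsample; on CPU (or without the compiled extension) it routes to upfirdn2d_native above. A small blur sketch using the separable [1, 3, 3, 1] kernel (shapes illustrative):

import torch

k1d = torch.tensor([1., 3., 3., 1.])
kernel = k1d[:, None] * k1d[None, :]
kernel = kernel / kernel.sum()
x = torch.randn(1, 3, 8, 8)
out = upfirdn2d(x, kernel, up=1, down=1, pad=(2, 1))  # device='cpu' -> native path
# out_h = (8*1 + 2 + 1 - 4) // 1 + 1 = 8, so the blur is shape-preserving
print(out.shape)  # torch.Size([1, 3, 8, 8])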
face_model/op/upfirdn2d_kernel.cu ADDED
@@ -0,0 +1,272 @@
1
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
2
+ //
3
+ // This work is made available under the Nvidia Source Code License-NC.
4
+ // To view a copy of this license, visit
5
+ // https://nvlabs.github.io/stylegan2/license.html
6
+
7
+ #include <torch/types.h>
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/AccumulateType.h>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+
18
+ static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
19
+ int c = a / b;
20
+
21
+ if (c * b > a) {
22
+ c--;
23
+ }
24
+
25
+ return c;
26
+ }
27
+
28
+
29
+ struct UpFirDn2DKernelParams {
30
+ int up_x;
31
+ int up_y;
32
+ int down_x;
33
+ int down_y;
34
+ int pad_x0;
35
+ int pad_x1;
36
+ int pad_y0;
37
+ int pad_y1;
38
+
39
+ int major_dim;
40
+ int in_h;
41
+ int in_w;
42
+ int minor_dim;
43
+ int kernel_h;
44
+ int kernel_w;
45
+ int out_h;
46
+ int out_w;
47
+ int loop_major;
48
+ int loop_x;
49
+ };
50
+
51
+
52
+ template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
53
+ __global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) {
54
+ const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
55
+ const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
56
+
57
+ __shared__ volatile float sk[kernel_h][kernel_w];
58
+ __shared__ volatile float sx[tile_in_h][tile_in_w];
59
+
60
+ int minor_idx = blockIdx.x;
61
+ int tile_out_y = minor_idx / p.minor_dim;
62
+ minor_idx -= tile_out_y * p.minor_dim;
63
+ tile_out_y *= tile_out_h;
64
+ int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
65
+ int major_idx_base = blockIdx.z * p.loop_major;
66
+
67
+ if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) {
68
+ return;
69
+ }
70
+
71
+ for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) {
72
+ int ky = tap_idx / kernel_w;
73
+ int kx = tap_idx - ky * kernel_w;
74
+ scalar_t v = 0.0;
75
+
76
+ if (kx < p.kernel_w & ky < p.kernel_h) {
77
+ v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
78
+ }
79
+
80
+ sk[ky][kx] = v;
81
+ }
82
+
83
+ for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) {
84
+ for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) {
85
+ int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
86
+ int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
87
+ int tile_in_x = floor_div(tile_mid_x, up_x);
88
+ int tile_in_y = floor_div(tile_mid_y, up_y);
89
+
90
+ __syncthreads();
91
+
92
+ for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) {
93
+ int rel_in_y = in_idx / tile_in_w;
94
+ int rel_in_x = in_idx - rel_in_y * tile_in_w;
95
+ int in_x = rel_in_x + tile_in_x;
96
+ int in_y = rel_in_y + tile_in_y;
97
+
98
+ scalar_t v = 0.0;
99
+
100
+ if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
101
+ v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];
102
+ }
103
+
104
+ sx[rel_in_y][rel_in_x] = v;
105
+ }
106
+
107
+ __syncthreads();
108
+ for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) {
109
+ int rel_out_y = out_idx / tile_out_w;
110
+ int rel_out_x = out_idx - rel_out_y * tile_out_w;
111
+ int out_x = rel_out_x + tile_out_x;
112
+ int out_y = rel_out_y + tile_out_y;
113
+
114
+ int mid_x = tile_mid_x + rel_out_x * down_x;
115
+ int mid_y = tile_mid_y + rel_out_y * down_y;
116
+ int in_x = floor_div(mid_x, up_x);
117
+ int in_y = floor_div(mid_y, up_y);
118
+ int rel_in_x = in_x - tile_in_x;
119
+ int rel_in_y = in_y - tile_in_y;
120
+ int kernel_x = (in_x + 1) * up_x - mid_x - 1;
121
+ int kernel_y = (in_y + 1) * up_y - mid_y - 1;
122
+
123
+ scalar_t v = 0.0;
124
+
125
+ #pragma unroll
126
+ for (int y = 0; y < kernel_h / up_y; y++)
127
+ #pragma unroll
128
+ for (int x = 0; x < kernel_w / up_x; x++)
129
+ v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x];
130
+
131
+ if (out_x < p.out_w & out_y < p.out_h) {
132
+ out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;
133
+ }
134
+ }
135
+ }
136
+ }
137
+ }
138
+
139
+
140
+ torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
141
+ int up_x, int up_y, int down_x, int down_y,
142
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
143
+ int curDevice = -1;
144
+ cudaGetDevice(&curDevice);
145
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
146
+
147
+ UpFirDn2DKernelParams p;
148
+
149
+ auto x = input.contiguous();
150
+ auto k = kernel.contiguous();
151
+
152
+ p.major_dim = x.size(0);
153
+ p.in_h = x.size(1);
154
+ p.in_w = x.size(2);
155
+ p.minor_dim = x.size(3);
156
+ p.kernel_h = k.size(0);
157
+ p.kernel_w = k.size(1);
158
+ p.up_x = up_x;
159
+ p.up_y = up_y;
160
+ p.down_x = down_x;
161
+ p.down_y = down_y;
162
+ p.pad_x0 = pad_x0;
163
+ p.pad_x1 = pad_x1;
164
+ p.pad_y0 = pad_y0;
165
+ p.pad_y1 = pad_y1;
166
+
167
+ p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;
168
+ p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;
169
+
170
+ auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
171
+
172
+ int mode = -1;
173
+
174
+ int tile_out_h;
175
+ int tile_out_w;
176
+
177
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
178
+ mode = 1;
179
+ tile_out_h = 16;
180
+ tile_out_w = 64;
181
+ }
182
+
183
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) {
184
+ mode = 2;
185
+ tile_out_h = 16;
186
+ tile_out_w = 64;
187
+ }
188
+
189
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
190
+ mode = 3;
191
+ tile_out_h = 16;
192
+ tile_out_w = 64;
193
+ }
194
+
195
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) {
196
+ mode = 4;
197
+ tile_out_h = 16;
198
+ tile_out_w = 64;
199
+ }
200
+
201
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) {
202
+ mode = 5;
203
+ tile_out_h = 8;
204
+ tile_out_w = 32;
205
+ }
206
+
207
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) {
208
+ mode = 6;
209
+ tile_out_h = 8;
210
+ tile_out_w = 32;
211
+ }
212
+
213
+ dim3 block_size;
214
+ dim3 grid_size;
215
+
216
+ if (tile_out_h > 0 && tile_out_w) {
217
+ p.loop_major = (p.major_dim - 1) / 16384 + 1;
218
+ p.loop_x = 1;
219
+ block_size = dim3(32 * 8, 1, 1);
220
+ grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
221
+ (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
222
+ (p.major_dim - 1) / p.loop_major + 1);
223
+ }
224
+
225
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
226
+ switch (mode) {
227
+ case 1:
228
+ upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
229
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
230
+ );
231
+
232
+ break;
233
+
234
+ case 2:
235
+ upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>(
236
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
237
+ );
238
+
239
+ break;
240
+
241
+ case 3:
242
+ upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
243
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
244
+ );
245
+
246
+ break;
247
+
248
+ case 4:
249
+ upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>(
250
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
251
+ );
252
+
253
+ break;
254
+
255
+ case 5:
256
+ upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
257
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
258
+ );
259
+
260
+ break;
261
+
262
+ case 6:
263
+ // note: reuses the 4x4-tap template; tap loads are bounds-guarded, so kernels <= 2 still work
+ upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
264
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
265
+ );
266
+
267
+ break;
268
+ }
269
+ });
270
+
271
+ return out;
272
+ }
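The mode dispatch above specializes the kernel per up/down factor and tap count, and out_h/out_w follow the standard upfirdn size formula. A worked instance (values illustrative):

# e.g. 2x upsampling with a 4-tap kernel, pads (1, 2), no downsampling:
in_h, up_y, pad_y0, pad_y1, kernel_h, down_y = 64, 2, 1, 2, 4, 1
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y
print(out_h)  # 128; this parameter pattern is served by mode 3 (up 2x2, kernel <= 4)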
face_parse/blocks.py ADDED
@@ -0,0 +1,127 @@
1
+ # -*- coding: utf-8 -*-
2
+ import torch
3
+ import torch.nn as nn
4
+ from torch.nn.parameter import Parameter
5
+ from torch.nn import functional as F
6
+ import numpy as np
7
+
8
+ class NormLayer(nn.Module):
9
+ """Normalization Layers.
10
+ ------------
11
+ # Arguments
12
+ - channels: input channels, for batch norm and instance norm.
13
+ - input_size: input shape without batch size, for layer norm.
14
+ """
15
+ def __init__(self, channels, normalize_shape=None, norm_type='bn', ref_channels=None):
16
+ super(NormLayer, self).__init__()
17
+ norm_type = norm_type.lower()
18
+ self.norm_type = norm_type
19
+ if norm_type == 'bn':
20
+ self.norm = nn.BatchNorm2d(channels, affine=True)
21
+ elif norm_type == 'in':
22
+ self.norm = nn.InstanceNorm2d(channels, affine=False)
23
+ elif norm_type == 'gn':
24
+ self.norm = nn.GroupNorm(32, channels, affine=True)
25
+ elif norm_type == 'pixel':
26
+ self.norm = lambda x: F.normalize(x, p=2, dim=1)
27
+ elif norm_type == 'layer':
28
+ self.norm = nn.LayerNorm(normalize_shape)
29
+ elif norm_type == 'none':
30
+ self.norm = lambda x: x*1.0
31
+ else:
32
+ raise ValueError('Norm type {} not supported.'.format(norm_type))
33
+
34
+ def forward(self, x, ref=None):
35
+ if self.norm_type == 'spade':
36
+ return self.norm(x, ref)
37
+ else:
38
+ return self.norm(x)
39
+
40
+
41
+ class ReluLayer(nn.Module):
42
+ """Relu Layer.
43
+ ------------
44
+ # Arguments
45
+ - relu type: type of relu layer, candidates are
46
+ - ReLU
47
+ - LeakyReLU: default relu slope 0.2
48
+ - PRelu
49
+ - SELU
50
+ - none: direct pass
51
+ """
52
+ def __init__(self, channels, relu_type='relu'):
53
+ super(ReluLayer, self).__init__()
54
+ relu_type = relu_type.lower()
55
+ if relu_type == 'relu':
56
+ self.func = nn.ReLU(True)
57
+ elif relu_type == 'leakyrelu':
58
+ self.func = nn.LeakyReLU(0.2, inplace=True)
59
+ elif relu_type == 'prelu':
60
+ self.func = nn.PReLU(channels)
61
+ elif relu_type == 'selu':
62
+ self.func = nn.SELU(True)
63
+ elif relu_type == 'none':
64
+ self.func = lambda x: x*1.0
65
+ else:
66
+ raise ValueError('Relu type {} not supported.'.format(relu_type))
67
+
68
+ def forward(self, x):
69
+ return self.func(x)
70
+
71
+
72
+ class ConvLayer(nn.Module):
73
+ def __init__(self, in_channels, out_channels, kernel_size=3, scale='none', norm_type='none', relu_type='none', use_pad=True, bias=True):
74
+ super(ConvLayer, self).__init__()
75
+ self.use_pad = use_pad
76
+ self.norm_type = norm_type
77
+ if norm_type in ['bn']:
78
+ bias = False
79
+
80
+ stride = 2 if scale == 'down' else 1
81
+
82
+ self.scale_func = lambda x: x
83
+ if scale == 'up':
84
+ self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')
85
+
86
+ self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.)/2)))
87
+ self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
88
+
89
+ self.relu = ReluLayer(out_channels, relu_type)
90
+ self.norm = NormLayer(out_channels, norm_type=norm_type)
91
+
92
+ def forward(self, x):
93
+ out = self.scale_func(x)
94
+ if self.use_pad:
95
+ out = self.reflection_pad(out)
96
+ out = self.conv2d(out)
97
+ out = self.norm(out)
98
+ out = self.relu(out)
99
+ return out
100
+
101
+
102
+ class ResidualBlock(nn.Module):
103
+ """
104
+ Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
105
+ """
106
+ def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
107
+ super(ResidualBlock, self).__init__()
108
+
109
+ if scale == 'none' and c_in == c_out:
110
+ self.shortcut_func = lambda x: x
111
+ else:
112
+ self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
113
+
114
+ scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
115
+ scale_conf = scale_config_dict[scale]
116
+
117
+ self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
118
+ self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')
119
+
120
+ def forward(self, x):
121
+ identity = self.shortcut_func(x)
122
+
123
+ res = self.conv1(x)
124
+ res = self.conv2(res)
125
+ return identity + res
126
+
127
+
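ResidualBlock wires a resampling shortcut around two ConvLayers; scale_config_dict decides whether the first conv upsamples or the second downsamples. A small shape check (a sketch):

import torch

block_down = ResidualBlock(64, 128, scale='down')  # second conv downsamples 2x
block_up = ResidualBlock(128, 64, scale='up')      # first conv upsamples 2x
x = torch.randn(1, 64, 32, 32)
y = block_down(x)
print(y.shape)             # torch.Size([1, 128, 16, 16])
print(block_up(y).shape)   # torch.Size([1, 64, 32, 32])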
face_parse/face_parsing.py ADDED
@@ -0,0 +1,78 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ import os
+ import cv2
+ import torch
+ import numpy as np
+ from parse_model import ParseNet
+ import torch.nn.functional as F
+
+ class FaceParse(object):
+ def __init__(self, base_dir='./', model='ParseNet-latest', device='cuda'):
+ self.mfile = os.path.join(base_dir, 'weights', model+'.pth')
+ self.size = 512
+ self.device = device
+
+ '''
+ 0: 'background' 1: 'skin' 2: 'nose'
+ 3: 'eye_g' 4: 'l_eye' 5: 'r_eye'
+ 6: 'l_brow' 7: 'r_brow' 8: 'l_ear'
+ 9: 'r_ear' 10: 'mouth' 11: 'u_lip'
+ 12: 'l_lip' 13: 'hair' 14: 'hat'
+ 15: 'ear_r' 16: 'neck_l' 17: 'neck'
+ 18: 'cloth'
+ '''
+ #self.MASK_COLORMAP = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
+ self.MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
+ self.load_model()
+
+ def load_model(self):
+ self.faceparse = ParseNet(self.size, self.size, 32, 64, 19, norm_type='bn', relu_type='LeakyReLU', ch_range=[32, 256])
+ self.faceparse.load_state_dict(torch.load(self.mfile, map_location=torch.device('cpu')))
+ self.faceparse.to(self.device)
+ self.faceparse.eval()
+
+ def process(self, im):
+ im = cv2.resize(im, (self.size, self.size))
+ imt = self.img2tensor(im)
+ pred_mask, sr_img_tensor = self.faceparse(imt)
+ mask = self.tensor2mask(pred_mask)
+
+ return mask
+
+ def process_tensor(self, imt):
+ imt = F.interpolate(imt.flip(1)*2-1, (self.size, self.size))
+ pred_mask, sr_img_tensor = self.faceparse(imt)
+
+ mask = pred_mask.argmax(dim=1)
+ for idx, color in enumerate(self.MASK_COLORMAP):
+ mask = torch.where(mask==idx, color, mask)
+ #mask = mask.repeat(3, 1, 1).unsqueeze(0) #.cpu().float().numpy()
+ mask = mask.unsqueeze(0)
+
+ return mask
+
+ def img2tensor(self, img):
+ img = img[..., ::-1]
+ img = img / 255. * 2 - 1
+ img_tensor = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0).to(self.device)
+ return img_tensor.float()
+
+ def tensor2mask(self, tensor):
+ if len(tensor.shape) < 4:
+ tensor = tensor.unsqueeze(0)
+ if tensor.shape[1] > 1:
+ tensor = tensor.argmax(dim=1)
+
+ tensor = tensor.squeeze(1).data.cpu().numpy()
+ color_maps = []
+ for t in tensor:
+ #tmp_img = np.zeros(tensor.shape[1:] + (3,))
+ tmp_img = np.zeros(tensor.shape[1:])
+ for idx, color in enumerate(self.MASK_COLORMAP):
+ tmp_img[t == idx] = color
+ color_maps.append(tmp_img.astype(np.uint8))
+ return color_maps
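A usage sketch for FaceParse (assumes the ParseNet weights exist at weights/ParseNet-latest.pth under base_dir, per load_model above, and that a CUDA device is available given the device='cuda' default; the input filename is hypothetical):

import cv2

faceparser = FaceParse(base_dir='./', device='cuda')
im = cv2.imread('face.png')        # BGR image; resized to 512x512 internally
mask = faceparser.process(im)[0]   # uint8 512x512 map, 255 marks face classes
cv2.imwrite('face_mask.png', mask)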
face_parse/mask.png ADDED
face_parse/parse_model.py ADDED
@@ -0,0 +1,77 @@
1
+ '''
2
+ @Created by chaofengc (chaofenghust@gmail.com)
3
+
4
+ @Modified by yangxy (yangtao9009@gmail.com)
5
+ '''
6
+
7
+ from blocks import *
8
+ import torch
9
+ from torch import nn
10
+ import numpy as np
11
+
12
+ def define_P(in_size=512, out_size=512, min_feat_size=32, relu_type='LeakyReLU', isTrain=False, weight_path=None):
13
+ net = ParseNet(in_size, out_size, min_feat_size, 64, 19, norm_type='bn', relu_type=relu_type, ch_range=[32, 256])
14
+ if not isTrain:
15
+ net.eval()
16
+ if weight_path is not None:
17
+ net.load_state_dict(torch.load(weight_path))
18
+ return net
19
+
20
+
21
+ class ParseNet(nn.Module):
22
+ def __init__(self,
23
+ in_size=128,
24
+ out_size=128,
25
+ min_feat_size=32,
26
+ base_ch=64,
27
+ parsing_ch=19,
28
+ res_depth=10,
29
+ relu_type='prelu',
30
+ norm_type='bn',
31
+ ch_range=[32, 512],
32
+ ):
33
+ super().__init__()
34
+ self.res_depth = res_depth
35
+ act_args = {'norm_type': norm_type, 'relu_type': relu_type}
36
+ min_ch, max_ch = ch_range
37
+
38
+ ch_clip = lambda x: max(min_ch, min(x, max_ch))
39
+ min_feat_size = min(in_size, min_feat_size)
40
+
41
+ down_steps = int(np.log2(in_size//min_feat_size))
42
+ up_steps = int(np.log2(out_size//min_feat_size))
43
+
44
+ # =============== define encoder-body-decoder ====================
45
+ self.encoder = []
46
+ self.encoder.append(ConvLayer(3, base_ch, 3, 1))
47
+ head_ch = base_ch
48
+ for i in range(down_steps):
49
+ cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
50
+ self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
51
+ head_ch = head_ch * 2
52
+
53
+ self.body = []
54
+ for i in range(res_depth):
55
+ self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))
56
+
57
+ self.decoder = []
58
+ for i in range(up_steps):
59
+ cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
60
+ self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
61
+ head_ch = head_ch // 2
62
+
63
+ self.encoder = nn.Sequential(*self.encoder)
64
+ self.body = nn.Sequential(*self.body)
65
+ self.decoder = nn.Sequential(*self.decoder)
66
+ self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
67
+ self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)
68
+
69
+ def forward(self, x):
70
+ feat = self.encoder(x)
71
+ x = feat + self.body(feat)
72
+ x = self.decoder(x)
73
+ out_img = self.out_img_conv(x)
74
+ out_mask = self.out_mask_conv(x)
75
+ return out_mask, out_img
76
+
77
+
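ParseNet above is an encoder / residual-body / decoder hourglass whose channel widths are clamped to ch_range; as configured by FaceParse it maps a 512x512 image to a 19-channel parsing map plus a reconstructed image. A shape sketch (random weights, CPU):

import torch

net = ParseNet(512, 512, 32, 64, 19, norm_type='bn', relu_type='LeakyReLU', ch_range=[32, 256])
out_mask, out_img = net(torch.randn(1, 3, 512, 512))
print(out_mask.shape)  # torch.Size([1, 19, 512, 512])
print(out_img.shape)   # torch.Size([1, 3, 512, 512])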
face_parse/test.png ADDED
loss/helpers.py ADDED
@@ -0,0 +1,119 @@
1
+ from collections import namedtuple
2
+ import torch
3
+ from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
4
+
5
+ """
6
+ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
7
+ """
8
+
9
+
10
+ class Flatten(Module):
11
+ def forward(self, input):
12
+ return input.view(input.size(0), -1)
13
+
14
+
15
+ def l2_norm(input, axis=1):
16
+ norm = torch.norm(input, 2, axis, True)
17
+ output = torch.div(input, norm)
18
+ return output
19
+
20
+
21
+ class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
22
+ """ A named tuple describing a ResNet block. """
23
+
24
+
25
+ def get_block(in_channel, depth, num_units, stride=2):
26
+ return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
27
+
28
+
29
+ def get_blocks(num_layers):
30
+ if num_layers == 50:
31
+ blocks = [
32
+ get_block(in_channel=64, depth=64, num_units=3),
33
+ get_block(in_channel=64, depth=128, num_units=4),
34
+ get_block(in_channel=128, depth=256, num_units=14),
35
+ get_block(in_channel=256, depth=512, num_units=3)
36
+ ]
37
+ elif num_layers == 100:
38
+ blocks = [
39
+ get_block(in_channel=64, depth=64, num_units=3),
40
+ get_block(in_channel=64, depth=128, num_units=13),
41
+ get_block(in_channel=128, depth=256, num_units=30),
42
+ get_block(in_channel=256, depth=512, num_units=3)
43
+ ]
44
+ elif num_layers == 152:
45
+ blocks = [
46
+ get_block(in_channel=64, depth=64, num_units=3),
47
+ get_block(in_channel=64, depth=128, num_units=8),
48
+ get_block(in_channel=128, depth=256, num_units=36),
49
+ get_block(in_channel=256, depth=512, num_units=3)
50
+ ]
51
+ else:
52
+ raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
53
+ return blocks
54
+
55
+
56
+ class SEModule(Module):
57
+ def __init__(self, channels, reduction):
58
+ super(SEModule, self).__init__()
59
+ self.avg_pool = AdaptiveAvgPool2d(1)
60
+ self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
61
+ self.relu = ReLU(inplace=True)
62
+ self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
63
+ self.sigmoid = Sigmoid()
64
+
65
+ def forward(self, x):
66
+ module_input = x
67
+ x = self.avg_pool(x)
68
+ x = self.fc1(x)
69
+ x = self.relu(x)
70
+ x = self.fc2(x)
71
+ x = self.sigmoid(x)
72
+ return module_input * x
73
+
74
+
75
+ class bottleneck_IR(Module):
76
+ def __init__(self, in_channel, depth, stride):
77
+ super(bottleneck_IR, self).__init__()
78
+ if in_channel == depth:
79
+ self.shortcut_layer = MaxPool2d(1, stride)
80
+ else:
81
+ self.shortcut_layer = Sequential(
82
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
83
+ BatchNorm2d(depth)
84
+ )
85
+ self.res_layer = Sequential(
86
+ BatchNorm2d(in_channel),
87
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
88
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
89
+ )
90
+
91
+ def forward(self, x):
92
+ shortcut = self.shortcut_layer(x)
93
+ res = self.res_layer(x)
94
+ return res + shortcut
95
+
96
+
97
+ class bottleneck_IR_SE(Module):
98
+ def __init__(self, in_channel, depth, stride):
99
+ super(bottleneck_IR_SE, self).__init__()
100
+ if in_channel == depth:
101
+ self.shortcut_layer = MaxPool2d(1, stride)
102
+ else:
103
+ self.shortcut_layer = Sequential(
104
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
105
+ BatchNorm2d(depth)
106
+ )
107
+ self.res_layer = Sequential(
108
+ BatchNorm2d(in_channel),
109
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
110
+ PReLU(depth),
111
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
112
+ BatchNorm2d(depth),
113
+ SEModule(depth, 16)
114
+ )
115
+
116
+ def forward(self, x):
117
+ shortcut = self.shortcut_layer(x)
118
+ res = self.res_layer(x)
119
+ return res + shortcut
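get_blocks returns the per-stage bottleneck specs consumed by Backbone in model_irse.py. For the 50-layer variant:

blocks = get_blocks(50)
print(len(blocks))                   # 4 stages
print(sum(len(b) for b in blocks))   # 3 + 4 + 14 + 3 = 24 bottleneck units
print(blocks[1][0])                  # Block(in_channel=64, depth=128, stride=2)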
loss/id_loss.py ADDED
@@ -0,0 +1,50 @@
+ import os
+ import torch
+ from torch import nn
+ from model_irse import Backbone
+
+ class IDLoss(nn.Module):
+ def __init__(self, base_dir='./', device='cuda', ckpt_dict=None):
+ super(IDLoss, self).__init__()
+ print('Loading ResNet ArcFace')
+ self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se').to(device)
+ if ckpt_dict is None:
+ self.facenet.load_state_dict(torch.load(os.path.join(base_dir, 'weights', 'model_ir_se50.pth'), map_location=torch.device('cpu')))
+ else:
+ self.facenet.load_state_dict(ckpt_dict)
+ self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
+ self.facenet.eval()
+
+ def extract_feats(self, x):
+ _, _, h, w = x.shape
+ assert h==w
+ ss = h//256
+ x = x[:, :, 35*ss:-33*ss, 32*ss:-36*ss] # Crop interesting region
+ x = self.face_pool(x)
+ x_feats = self.facenet(x)
+ return x_feats
+
+ def forward(self, y_hat, y, x):
+ n_samples = x.shape[0]
+ x_feats = self.extract_feats(x)
+ y_feats = self.extract_feats(y) # Otherwise use the feature from there
+ y_hat_feats = self.extract_feats(y_hat)
+ y_feats = y_feats.detach()
+ loss = 0
+ sim_improvement = 0
+ id_logs = []
+ count = 0
+ for i in range(n_samples):
+ diff_target = y_hat_feats[i].dot(y_feats[i])
+ diff_input = y_hat_feats[i].dot(x_feats[i])
+ diff_views = y_feats[i].dot(x_feats[i])
+ id_logs.append({'diff_target': float(diff_target),
+ 'diff_input': float(diff_input),
+ 'diff_views': float(diff_views)})
+ loss += 1 - diff_target
+ id_diff = float(diff_target) - float(diff_views)
+ sim_improvement += id_diff
+ count += 1
+
+ return loss / count, sim_improvement / count, id_logs
+
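Because the ArcFace embeddings from Backbone are l2-normalized, the dot products above are cosine similarities, and each sample contributes 1 - cos(y_hat, y) to the loss. In miniature:

import torch
import torch.nn.functional as F

e_restored = F.normalize(torch.randn(512), dim=0)  # stand-ins for facenet embeddings
e_target = F.normalize(torch.randn(512), dim=0)
loss_term = 1 - e_restored.dot(e_target)  # 0 when the identities match exactly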
loss/model_irse.py ADDED
@@ -0,0 +1,85 @@
1
+ from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
+ #from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
+ from helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
4
+
5
+ """
6
+ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
7
+ """
8
+
9
+
10
+ class Backbone(Module):
11
+ def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
12
+ super(Backbone, self).__init__()
13
+ assert input_size in [112, 224], "input_size should be 112 or 224"
14
+ assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
15
+ assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
16
+ blocks = get_blocks(num_layers)
17
+ if mode == 'ir':
18
+ unit_module = bottleneck_IR
19
+ elif mode == 'ir_se':
20
+ unit_module = bottleneck_IR_SE
21
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
22
+ BatchNorm2d(64),
23
+ PReLU(64))
24
+ if input_size == 112:
25
+ self.output_layer = Sequential(BatchNorm2d(512),
26
+ Dropout(drop_ratio),
27
+ Flatten(),
28
+ Linear(512 * 7 * 7, 512),
29
+ BatchNorm1d(512, affine=affine))
30
+ else:
31
+ self.output_layer = Sequential(BatchNorm2d(512),
32
+ Dropout(drop_ratio),
33
+ Flatten(),
34
+ Linear(512 * 14 * 14, 512),
35
+ BatchNorm1d(512, affine=affine))
36
+
37
+ modules = []
38
+ for block in blocks:
39
+ for bottleneck in block:
40
+ modules.append(unit_module(bottleneck.in_channel,
41
+ bottleneck.depth,
42
+ bottleneck.stride))
43
+ self.body = Sequential(*modules)
44
+
45
+ def forward(self, x):
46
+ x = self.input_layer(x)
47
+ x = self.body(x)
48
+ x = self.output_layer(x)
49
+ return l2_norm(x)
50
+
51
+
52
+ def IR_50(input_size):
53
+ """Constructs a ir-50 model."""
54
+ model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
55
+ return model
56
+
57
+
58
+ def IR_101(input_size):
59
+ """Constructs a ir-101 model."""
60
+ model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
61
+ return model
62
+
63
+
64
+ def IR_152(input_size):
65
+ """Constructs a ir-152 model."""
66
+ model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
67
+ return model
68
+
69
+
70
+ def IR_SE_50(input_size):
71
+ """Constructs a ir_se-50 model."""
72
+ model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
73
+ return model
74
+
75
+
76
+ def IR_SE_101(input_size):
77
+ """Constructs a ir_se-101 model."""
78
+ model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
79
+ return model
80
+
81
+
82
+ def IR_SE_152(input_size):
83
+ """Constructs a ir_se-152 model."""
84
+ model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
85
+ return model
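A minimal sketch of the backbone IDLoss instantiates (ir_se mode, 50 layers, 112x112 crops; random weights here, so the embeddings are only meaningful after loading model_ir_se50.pth):

import torch

net = IR_SE_50(112).eval()
with torch.no_grad():
    emb = net(torch.randn(1, 3, 112, 112))
print(emb.shape, float(emb.norm()))  # torch.Size([1, 512]) 1.0 (l2-normalized)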
lpips/__init__.py ADDED
@@ -0,0 +1,178 @@
1
+
2
+ from __future__ import absolute_import
3
+ from __future__ import division
4
+ from __future__ import print_function
5
+
6
+ import numpy as np
7
+ import torch
8
+ # from torch.autograd import Variable
9
+
10
+ from lpips.trainer import *
11
+ from lpips.lpips import *
12
+
13
+ # class PerceptualLoss(torch.nn.Module):
14
+ # def __init__(self, model='lpips', net='alex', spatial=False, use_gpu=False, gpu_ids=[0], version='0.1'): # VGG using our perceptually-learned weights (LPIPS metric)
15
+ # # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss
16
+ # super(PerceptualLoss, self).__init__()
17
+ # print('Setting up Perceptual loss...')
18
+ # self.use_gpu = use_gpu
19
+ # self.spatial = spatial
20
+ # self.gpu_ids = gpu_ids
21
+ # self.model = dist_model.DistModel()
22
+ # self.model.initialize(model=model, net=net, use_gpu=use_gpu, spatial=self.spatial, gpu_ids=gpu_ids, version=version)
23
+ # print('...[%s] initialized'%self.model.name())
24
+ # print('...Done')
25
+
26
+ # def forward(self, pred, target, normalize=False):
27
+ # """
28
+ # Pred and target are Variables.
29
+ # If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
30
+ # If normalize is False, assumes the images are already between [-1,+1]
31
+
32
+ # Inputs pred and target are Nx3xHxW
33
+ # Output pytorch Variable N long
34
+ # """
35
+
36
+ # if normalize:
37
+ # target = 2 * target - 1
38
+ # pred = 2 * pred - 1
39
+
40
+ # return self.model.forward(target, pred)
41
+
42
+ def normalize_tensor(in_feat,eps=1e-10):
43
+ norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))
44
+ return in_feat/(norm_factor+eps)
45
+
46
+ def l2(p0, p1, range=255.):
47
+ return .5*np.mean((p0 / range - p1 / range)**2)
48
+
49
+ def psnr(p0, p1, peak=255.):
50
+ return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))
51
+
52
+ def dssim(p0, p1, range=255.):
53
+ from skimage.measure import compare_ssim
54
+ return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
55
+
56
+ def rgb2lab(in_img,mean_cent=False):
57
+ from skimage import color
58
+ img_lab = color.rgb2lab(in_img)
59
+ if(mean_cent):
60
+ img_lab[:,:,0] = img_lab[:,:,0]-50
61
+ return img_lab
62
+
63
+ def tensor2np(tensor_obj):
64
+ # change dimension of a tensor object into a numpy array
65
+ return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
66
+
67
+ def np2tensor(np_obj):
68
+ # change dimenion of np array into tensor array
69
+ return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
70
+
71
+ def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
72
+ # image tensor to lab tensor
73
+ from skimage import color
74
+
75
+ img = tensor2im(image_tensor)
76
+ img_lab = color.rgb2lab(img)
77
+ if(mc_only):
78
+ img_lab[:,:,0] = img_lab[:,:,0]-50
79
+ if(to_norm and not mc_only):
80
+ img_lab[:,:,0] = img_lab[:,:,0]-50
81
+ img_lab = img_lab/100.
82
+
83
+ return np2tensor(img_lab)
84
+
85
+ def tensorlab2tensor(lab_tensor,return_inbnd=False):
86
+ from skimage import color
87
+ import warnings
88
+ warnings.filterwarnings("ignore")
89
+
90
+ lab = tensor2np(lab_tensor)*100.
91
+ lab[:,:,0] = lab[:,:,0]+50
92
+
93
+ rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
94
+ if(return_inbnd):
95
+ # convert back to lab, see if we match
96
+ lab_back = color.rgb2lab(rgb_back.astype('uint8'))
97
+ mask = 1.*np.isclose(lab_back,lab,atol=2.)
98
+ mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
99
+ return (im2tensor(rgb_back),mask)
100
+ else:
101
+ return im2tensor(rgb_back)
102
+
103
+ def load_image(path):
104
+ if(path[-3:] == 'dng'):
105
+ import rawpy
106
+ with rawpy.imread(path) as raw:
107
+ img = raw.postprocess()
108
+ elif(path[-3:]=='bmp' or path[-3:]=='jpg' or path[-3:]=='png'):
109
+ import cv2
110
+ return cv2.imread(path)[:,:,::-1]
111
+ else:
+ import matplotlib.pyplot as plt
+ img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
113
+
114
+ return img
115
+
116
+ def rgb2lab(input):
117
+ from skimage import color
118
+ return color.rgb2lab(input / 255.)
119
+
120
+ def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
121
+ image_numpy = image_tensor[0].cpu().float().numpy()
122
+ image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
123
+ return image_numpy.astype(imtype)
124
+
125
+ def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
126
+ return torch.Tensor((image / factor - cent)
127
+ [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
128
+
129
+ def tensor2vec(vector_tensor):
130
+ return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
131
+
143
+
144
+
145
+
146
+ def voc_ap(rec, prec, use_07_metric=False):
147
+ """ ap = voc_ap(rec, prec, [use_07_metric])
148
+ Compute VOC AP given precision and recall.
149
+ If use_07_metric is true, uses the
150
+ VOC 07 11 point method (default:False).
151
+ """
152
+ if use_07_metric:
153
+ # 11 point metric
154
+ ap = 0.
155
+ for t in np.arange(0., 1.1, 0.1):
156
+ if np.sum(rec >= t) == 0:
157
+ p = 0
158
+ else:
159
+ p = np.max(prec[rec >= t])
160
+ ap = ap + p / 11.
161
+ else:
162
+ # correct AP calculation
163
+ # first append sentinel values at the end
164
+ mrec = np.concatenate(([0.], rec, [1.]))
165
+ mpre = np.concatenate(([0.], prec, [0.]))
166
+
167
+ # compute the precision envelope
168
+ for i in range(mpre.size - 1, 0, -1):
169
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
170
+
171
+ # to calculate area under PR curve, look for points
172
+ # where X axis (recall) changes value
173
+ i = np.where(mrec[1:] != mrec[:-1])[0]
174
+
175
+ # and sum (\Delta recall) * prec
176
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
177
+ return ap
178
+
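Quick numeric check of the psnr helper above (arrays in [0, 255], per its peak default):

import numpy as np

a = np.random.rand(16, 16, 3) * 255
b = np.clip(a + 1.0, 0, 255)  # perturb to avoid infinite PSNR on identical inputs
print(psnr(a, b))  # about 48 dB: 10*log10(255**2 / mse) with mse close to 1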
lpips/lpips.py ADDED
@@ -0,0 +1,219 @@
+
+from __future__ import absolute_import
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from torch.autograd import Variable
+import numpy as np
+from . import pretrained_networks as pn
+
+import lpips
+
+def spatial_average(in_tens, keepdim=True):
+    return in_tens.mean([2, 3], keepdim=keepdim)
+
+def upsample(in_tens, out_HW=(64, 64)):  # assumes scale factor is same for H and W
+    return nn.Upsample(size=out_HW, mode='bilinear', align_corners=False)(in_tens)
+
+# Learned perceptual metric
+class LPIPS(nn.Module):
+    def __init__(self, pretrained=True, net='alex', version='0.1', lpips=True, spatial=False,
+                 pnet_rand=False, pnet_tune=False, use_dropout=True, model_path=None, eval_mode=True, verbose=True):
+        # lpips - [True] means with linear calibration on top of base network
+        # pretrained - [True] means load linear weights
+
+        super(LPIPS, self).__init__()
+        if(verbose):
+            print('Setting up [%s] perceptual loss: trunk [%s], v[%s], spatial [%s]' %
+                  ('LPIPS' if lpips else 'baseline', net, version, 'on' if spatial else 'off'))
+
+        self.pnet_type = net
+        self.pnet_tune = pnet_tune
+        self.pnet_rand = pnet_rand
+        self.spatial = spatial
+        self.lpips = lpips  # False means baseline of just averaging all layers
+        self.version = version
+        self.scaling_layer = ScalingLayer()
+
+        if(self.pnet_type in ['vgg', 'vgg16']):
+            net_type = pn.vgg16
+            self.chns = [64, 128, 256, 512, 512]
+        elif(self.pnet_type == 'alex'):
+            net_type = pn.alexnet
+            self.chns = [64, 192, 384, 256, 256]
+        elif(self.pnet_type == 'squeeze'):
+            net_type = pn.squeezenet
+            self.chns = [64, 128, 256, 384, 384, 512, 512]
+        self.L = len(self.chns)
+
+        self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
+
+        if(lpips):
+            self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
+            self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
+            self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
+            self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
+            self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
+            self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
+            if(self.pnet_type == 'squeeze'):  # 7 layers for squeezenet
+                self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
+                self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
+                self.lins += [self.lin5, self.lin6]
+            self.lins = nn.ModuleList(self.lins)
+
+            if(pretrained):
+                if(model_path is None):
+                    import inspect
+                    import os
+                    model_path = os.path.abspath(
+                        os.path.join(inspect.getfile(self.__init__), '..', 'weights/v%s/%s.pth' % (version, net)))
+
+                if(verbose):
+                    print('Loading model from: %s' % model_path)
+                self.load_state_dict(torch.load(model_path, map_location='cpu'), strict=False)
+
+        if(eval_mode):
+            self.eval()
+
+    def forward(self, in0, in1, retPerLayer=False, normalize=False):
+        if normalize:  # turn on this flag if input is [0,1] so it can be adjusted to [-1, +1]
+            in0 = 2 * in0 - 1
+            in1 = 2 * in1 - 1
+
+        # v0.0 - original release had a bug, where input was not scaled
+        in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else (in0, in1)
+        outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
+        feats0, feats1, diffs = {}, {}, {}
+
+        for kk in range(self.L):
+            feats0[kk], feats1[kk] = lpips.normalize_tensor(outs0[kk]), lpips.normalize_tensor(outs1[kk])
+            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
+
+        if(self.lpips):
+            if(self.spatial):
+                res = [upsample(self.lins[kk](diffs[kk]), out_HW=in0.shape[2:]) for kk in range(self.L)]
+            else:
+                res = [spatial_average(self.lins[kk](diffs[kk]), keepdim=True) for kk in range(self.L)]
+        else:
+            if(self.spatial):
+                res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_HW=in0.shape[2:]) for kk in range(self.L)]
+            else:
+                res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)]
+
+        val = res[0]
+        for l in range(1, self.L):
+            val += res[l]
+
+        if(retPerLayer):
+            return (val, res)
+        else:
+            return val
+
+
+class ScalingLayer(nn.Module):
+    def __init__(self):
+        super(ScalingLayer, self).__init__()
+        self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
+        self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
+
+    def forward(self, inp):
+        return (inp - self.shift) / self.scale
+
+
+class NetLinLayer(nn.Module):
+    ''' A single linear layer which does a 1x1 conv '''
+    def __init__(self, chn_in, chn_out=1, use_dropout=False):
+        super(NetLinLayer, self).__init__()
+
+        layers = [nn.Dropout(), ] if(use_dropout) else []
+        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.model(x)
+
+class Dist2LogitLayer(nn.Module):
+    ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
+    def __init__(self, chn_mid=32, use_sigmoid=True):
+        super(Dist2LogitLayer, self).__init__()
+
+        layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ]
+        layers += [nn.LeakyReLU(0.2, True), ]
+        layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ]
+        layers += [nn.LeakyReLU(0.2, True), ]
+        layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ]
+        if(use_sigmoid):
+            layers += [nn.Sigmoid(), ]
+        self.model = nn.Sequential(*layers)
+
+    def forward(self, d0, d1, eps=0.1):
+        return self.model.forward(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1))
+
+class BCERankingLoss(nn.Module):
+    def __init__(self, chn_mid=32):
+        super(BCERankingLoss, self).__init__()
+        self.net = Dist2LogitLayer(chn_mid=chn_mid)
+        self.loss = torch.nn.BCELoss()
+
+    def forward(self, d0, d1, judge):
+        per = (judge + 1.) / 2.
+        self.logit = self.net.forward(d0, d1)
+        return self.loss(self.logit, per)
+
+# L2, DSSIM metrics
+class FakeNet(nn.Module):
+    def __init__(self, use_gpu=True, colorspace='Lab'):
+        super(FakeNet, self).__init__()
+        self.use_gpu = use_gpu
+        self.colorspace = colorspace
+
+class L2(FakeNet):
+    def forward(self, in0, in1, retPerLayer=None):
+        assert(in0.size()[0] == 1)  # currently only supports batchSize 1
+
+        if(self.colorspace == 'RGB'):
+            (N, C, X, Y) = in0.size()
+            value = torch.mean(torch.mean(torch.mean((in0 - in1)**2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y), dim=3).view(N)
+            return value
+        elif(self.colorspace == 'Lab'):
+            value = lpips.l2(lpips.tensor2np(lpips.tensor2tensorlab(in0.data, to_norm=False)),
+                             lpips.tensor2np(lpips.tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
+            ret_var = Variable(torch.Tensor((value,)))
+            if(self.use_gpu):
+                ret_var = ret_var.cuda()
+            return ret_var
+
+class DSSIM(FakeNet):
+    def forward(self, in0, in1, retPerLayer=None):
+        assert(in0.size()[0] == 1)  # currently only supports batchSize 1
+
+        if(self.colorspace == 'RGB'):
+            value = lpips.dssim(1. * lpips.tensor2im(in0.data), 1. * lpips.tensor2im(in1.data), range=255.).astype('float')
+        elif(self.colorspace == 'Lab'):
+            value = lpips.dssim(lpips.tensor2np(lpips.tensor2tensorlab(in0.data, to_norm=False)),
+                                lpips.tensor2np(lpips.tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
+        ret_var = Variable(torch.Tensor((value,)))
+        if(self.use_gpu):
+            ret_var = ret_var.cuda()
+        return ret_var
+
+def print_network(net):
+    num_params = 0
+    for param in net.parameters():
+        num_params += param.numel()
+    print('Network', net)
+    print('Total number of parameters: %d' % num_params)
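Taken together, a minimal usage sketch of the metric defined in this file (assuming the package is importable as lpips and the linear weights shipped in this commit are present on disk; the tensors are random stand-ins for real images):

import torch
import lpips

loss_fn = lpips.LPIPS(net='alex')        # calibrated AlexNet trunk
img0 = torch.rand(1, 3, 64, 64) * 2 - 1  # inputs are expected in [-1, 1]
img1 = torch.rand(1, 3, 64, 64) * 2 - 1
d = loss_fn(img0, img1)                  # (1,1,1,1) tensor; lower = more similar
# for tensors in [0,1], let the flag rescale instead:
d01 = loss_fn(img0 * .5 + .5, img1 * .5 + .5, normalize=True)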
lpips/pretrained_networks.py ADDED
@@ -0,0 +1,180 @@
+from collections import namedtuple
+import torch
+from torchvision import models as tv
+
+class squeezenet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(squeezenet, self).__init__()
+        pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.slice6 = torch.nn.Sequential()
+        self.slice7 = torch.nn.Sequential()
+        self.N_slices = 7
+        for x in range(2):
+            self.slice1.add_module(str(x), pretrained_features[x])
+        for x in range(2, 5):
+            self.slice2.add_module(str(x), pretrained_features[x])
+        for x in range(5, 8):
+            self.slice3.add_module(str(x), pretrained_features[x])
+        for x in range(8, 10):
+            self.slice4.add_module(str(x), pretrained_features[x])
+        for x in range(10, 11):
+            self.slice5.add_module(str(x), pretrained_features[x])
+        for x in range(11, 12):
+            self.slice6.add_module(str(x), pretrained_features[x])
+        for x in range(12, 13):
+            self.slice7.add_module(str(x), pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1 = h
+        h = self.slice2(h)
+        h_relu2 = h
+        h = self.slice3(h)
+        h_relu3 = h
+        h = self.slice4(h)
+        h_relu4 = h
+        h = self.slice5(h)
+        h_relu5 = h
+        h = self.slice6(h)
+        h_relu6 = h
+        h = self.slice7(h)
+        h_relu7 = h
+        squeeze_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7'])
+        out = squeeze_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7)
+
+        return out
+
+
+class alexnet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(alexnet, self).__init__()
+        alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.N_slices = 5
+        for x in range(2):
+            self.slice1.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(2, 5):
+            self.slice2.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(5, 8):
+            self.slice3.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(8, 10):
+            self.slice4.add_module(str(x), alexnet_pretrained_features[x])
+        for x in range(10, 12):
+            self.slice5.add_module(str(x), alexnet_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1 = h
+        h = self.slice2(h)
+        h_relu2 = h
+        h = self.slice3(h)
+        h_relu3 = h
+        h = self.slice4(h)
+        h_relu4 = h
+        h = self.slice5(h)
+        h_relu5 = h
+        alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
+        out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
+
+        return out
+
+class vgg16(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True):
+        super(vgg16, self).__init__()
+        vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        self.N_slices = 5
+        for x in range(4):
+            self.slice1.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(4, 9):
+            self.slice2.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(9, 16):
+            self.slice3.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(16, 23):
+            self.slice4.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(23, 30):
+            self.slice5.add_module(str(x), vgg_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu1_2 = h
+        h = self.slice2(h)
+        h_relu2_2 = h
+        h = self.slice3(h)
+        h_relu3_3 = h
+        h = self.slice4(h)
+        h_relu4_3 = h
+        h = self.slice5(h)
+        h_relu5_3 = h
+        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
+        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
+
+        return out
+
+
+class resnet(torch.nn.Module):
+    def __init__(self, requires_grad=False, pretrained=True, num=18):
+        super(resnet, self).__init__()
+        if(num == 18):
+            self.net = tv.resnet18(pretrained=pretrained)
+        elif(num == 34):
+            self.net = tv.resnet34(pretrained=pretrained)
+        elif(num == 50):
+            self.net = tv.resnet50(pretrained=pretrained)
+        elif(num == 101):
+            self.net = tv.resnet101(pretrained=pretrained)
+        elif(num == 152):
+            self.net = tv.resnet152(pretrained=pretrained)
+        self.N_slices = 5
+
+        self.conv1 = self.net.conv1
+        self.bn1 = self.net.bn1
+        self.relu = self.net.relu
+        self.maxpool = self.net.maxpool
+        self.layer1 = self.net.layer1
+        self.layer2 = self.net.layer2
+        self.layer3 = self.net.layer3
+        self.layer4 = self.net.layer4
+
+    def forward(self, X):
+        h = self.conv1(X)
+        h = self.bn1(h)
+        h = self.relu(h)
+        h_relu1 = h
+        h = self.maxpool(h)
+        h = self.layer1(h)
+        h_conv2 = h
+        h = self.layer2(h)
+        h_conv3 = h
+        h = self.layer3(h)
+        h_conv4 = h
+        h = self.layer4(h)
+        h_conv5 = h
+
+        outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5'])
+        out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
+
+        return out
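All four wrappers share one contract: forward returns a namedtuple of intermediate activations, one field per slice, which is what LPIPS consumes layer by layer. A brief sketch, assuming torchvision can supply the pretrained weights:

import torch
from lpips.pretrained_networks import vgg16

feat_net = vgg16(pretrained=True, requires_grad=False)
x = torch.rand(1, 3, 64, 64)
outs = feat_net(x)
# five activation maps, outs.relu1_2 ... outs.relu5_3, with
# channel counts [64, 128, 256, 512, 512] matching self.chns above
print([tuple(o.shape) for o in outs])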
lpips/trainer.py ADDED
@@ -0,0 +1,280 @@
+
+from __future__ import absolute_import
+
+import numpy as np
+import torch
+from torch import nn
+from collections import OrderedDict
+from torch.autograd import Variable
+from scipy.ndimage import zoom
+from tqdm import tqdm
+import lpips
+import os
+
+
+class Trainer():
+    def name(self):
+        return self.model_name
+
+    def initialize(self, model='lpips', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
+                   use_gpu=True, printNet=False, spatial=False,
+                   is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
+        '''
+        INPUTS
+            model - ['lpips'] for linearly calibrated network
+                    ['baseline'] for off-the-shelf network
+                    ['L2'] for L2 distance in Lab colorspace
+                    ['SSIM'] for ssim in RGB colorspace
+            net - ['squeeze','alex','vgg']
+            model_path - if None, will look in weights/[NET_NAME].pth
+            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
+            use_gpu - bool - whether or not to use a GPU
+            printNet - bool - whether or not to print network architecture out
+            spatial - bool - whether to output an array containing varying distances across spatial dimensions
+            is_train - bool - [True] for training mode
+            lr - float - initial learning rate
+            beta1 - float - initial momentum term for adam
+            version - 0.1 for latest, 0.0 was original (with a bug)
+            gpu_ids - int array - [0] by default, gpus to use
+        '''
+        self.use_gpu = use_gpu
+        self.gpu_ids = gpu_ids
+        self.model = model
+        self.net = net
+        self.is_train = is_train
+        self.spatial = spatial
+        self.model_name = '%s [%s]' % (model, net)
+
+        if(self.model == 'lpips'):  # pretrained net + linear layer
+            self.net = lpips.LPIPS(pretrained=not is_train, net=net, version=version, lpips=True, spatial=spatial,
+                                   pnet_rand=pnet_rand, pnet_tune=pnet_tune,
+                                   use_dropout=True, model_path=model_path, eval_mode=False)
+        elif(self.model == 'baseline'):  # pretrained network
+            self.net = lpips.LPIPS(pnet_rand=pnet_rand, net=net, lpips=False)
+        elif(self.model in ['L2', 'l2']):
+            self.net = lpips.L2(use_gpu=use_gpu, colorspace=colorspace)  # not really a network, only for testing
+            self.model_name = 'L2'
+        elif(self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
+            self.net = lpips.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
+            self.model_name = 'SSIM'
+        else:
+            raise ValueError("Model [%s] not recognized." % self.model)
+
+        self.parameters = list(self.net.parameters())
+
+        if self.is_train:  # training mode
+            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
+            self.rankLoss = lpips.BCERankingLoss()
+            self.parameters += list(self.rankLoss.net.parameters())
+            self.lr = lr
+            self.old_lr = lr
+            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
+        else:  # test mode
+            self.net.eval()
+
+        if(use_gpu):
+            self.net.to(gpu_ids[0])
+            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
+            if(self.is_train):
+                self.rankLoss = self.rankLoss.to(device=gpu_ids[0])  # just put this on GPU0
+
+        if(printNet):
+            print('---------- Networks initialized -------------')
+            lpips.print_network(self.net)  # print_network lives in lpips/lpips.py; the original called an undefined `networks` module
+            print('-----------------------------------------------')
+
+    def forward(self, in0, in1, retPerLayer=False):
+        ''' Function computes the distance between image patches in0 and in1
+        INPUTS
+            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
+        OUTPUT
+            computed distances between in0 and in1
+        '''
+
+        return self.net.forward(in0, in1, retPerLayer=retPerLayer)
+
+    # ***** TRAINING FUNCTIONS *****
+    def optimize_parameters(self):
+        self.forward_train()
+        self.optimizer_net.zero_grad()
+        self.backward_train()
+        self.optimizer_net.step()
+        self.clamp_weights()
+
+    def clamp_weights(self):
+        for module in self.net.modules():
+            if(hasattr(module, 'weight') and module.kernel_size == (1, 1)):
+                module.weight.data = torch.clamp(module.weight.data, min=0)
+
+    def set_input(self, data):
+        self.input_ref = data['ref']
+        self.input_p0 = data['p0']
+        self.input_p1 = data['p1']
+        self.input_judge = data['judge']
+
+        if(self.use_gpu):
+            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
+            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
+            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
+            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
+
+        self.var_ref = Variable(self.input_ref, requires_grad=True)
+        self.var_p0 = Variable(self.input_p0, requires_grad=True)
+        self.var_p1 = Variable(self.input_p1, requires_grad=True)
+
+    def forward_train(self):  # run forward pass
+        self.d0 = self.forward(self.var_ref, self.var_p0)
+        self.d1 = self.forward(self.var_ref, self.var_p1)
+        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)
+
+        self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())
+
+        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2. - 1.)
+
+        return self.loss_total
+
+    def backward_train(self):
+        torch.mean(self.loss_total).backward()
+
+    def compute_accuracy(self, d0, d1, judge):
+        ''' d0, d1 are Variables, judge is a Tensor '''
+        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
+        judge_per = judge.cpu().numpy().flatten()
+        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)
+
+    def get_current_errors(self):
+        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
+                               ('acc_r', self.acc_r)])
+
+        for key in retDict.keys():
+            retDict[key] = np.mean(retDict[key])
+
+        return retDict
+
+    def get_current_visuals(self):
+        zoom_factor = 256 / self.var_ref.data.size()[2]
+
+        ref_img = lpips.tensor2im(self.var_ref.data)
+        p0_img = lpips.tensor2im(self.var_p0.data)
+        p1_img = lpips.tensor2im(self.var_p1.data)
+
+        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
+        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
+        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
+
+        return OrderedDict([('ref', ref_img_vis),
+                            ('p0', p0_img_vis),
+                            ('p1', p1_img_vis)])
+
+    def save(self, path, label):
+        if(self.use_gpu):
+            self.save_network(self.net.module, path, '', label)
+        else:
+            self.save_network(self.net, path, '', label)
+        self.save_network(self.rankLoss.net, path, 'rank', label)
+
+    # helper saving function that can be used by subclasses
+    def save_network(self, network, path, network_label, epoch_label):
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        save_path = os.path.join(path, save_filename)
+        torch.save(network.state_dict(), save_path)
+
+    # helper loading function that can be used by subclasses
+    def load_network(self, network, network_label, epoch_label):
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        save_path = os.path.join(self.save_dir, save_filename)
+        print('Loading network from %s' % save_path)
+        network.load_state_dict(torch.load(save_path))
+
+    def update_learning_rate(self, nepoch_decay):
+        lrd = self.lr / nepoch_decay
+        lr = self.old_lr - lrd
+
+        for param_group in self.optimizer_net.param_groups:
+            param_group['lr'] = lr
+
+        # the original format string interpolated the builtin `type`; use the model name instead
+        print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
+        self.old_lr = lr
+
+    def get_image_paths(self):
+        return self.image_paths
+
+    def save_done(self, flag=False):
+        np.save(os.path.join(self.save_dir, 'done_flag'), flag)
+        np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i')
+
+
+def score_2afc_dataset(data_loader, func, name=''):
+    ''' Function computes Two Alternative Forced Choice (2AFC) score using
+        distance function 'func' in dataset 'data_loader'
+    INPUTS
+        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
+        func - callable distance function - calling d=func(in0,in1) should take 2
+            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
+    OUTPUTS
+        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
+        [1] - dictionary with following elements
+            d0s,d1s - N arrays containing distances between reference patch to perturbed patches
+            gts - N array in [0,1], preferred patch selected by human evaluators
+                (closer to "0" for left patch p0, "1" for right patch p1,
+                "0.6" means 60pct people preferred right patch, 40pct preferred left)
+            scores - N array in [0,1], corresponding to what percentage function agreed with humans
+    CONSTS
+        N - number of test triplets in data_loader
+    '''
+
+    d0s = []
+    d1s = []
+    gts = []
+
+    for data in tqdm(data_loader.load_data(), desc=name):
+        d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist()
+        d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist()
+        gts += data['judge'].cpu().numpy().flatten().tolist()
+
+    d0s = np.array(d0s)
+    d1s = np.array(d1s)
+    gts = np.array(gts)
+    scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5
+
+    return(np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
+
+def score_jnd_dataset(data_loader, func, name=''):
+    ''' Function computes JND score using distance function 'func' in dataset 'data_loader'
+    INPUTS
+        data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
+        func - callable distance function - calling d=func(in0,in1) should take 2
+            pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
+    OUTPUTS
+        [0] - JND score in [0,1], mAP score (area under precision-recall curve)
+        [1] - dictionary with following elements
+            ds - N array containing distances between two patches shown to human evaluator
+            sames - N array containing fraction of people who thought the two patches were identical
+    CONSTS
+        N - number of test pairs in data_loader
+    '''
+
+    ds = []
+    gts = []
+
+    for data in tqdm(data_loader.load_data(), desc=name):
+        ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist()
+        gts += data['same'].cpu().numpy().flatten().tolist()
+
+    sames = np.array(gts)
+    ds = np.array(ds)
+
+    sorted_inds = np.argsort(ds)
+    sames_sorted = sames[sorted_inds]
+
+    TPs = np.cumsum(sames_sorted)
+    FPs = np.cumsum(1 - sames_sorted)
+    FNs = np.sum(sames_sorted) - TPs
+
+    precs = TPs / (TPs + FPs)
+    recs = TPs / (TPs + FNs)
+    score = lpips.voc_ap(recs, precs)
+
+    return(score, dict(ds=ds, sames=sames))
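A sketch of driving score_2afc_dataset outside the full training pipeline; the loader below is a hypothetical stand-in for the CustomDatasetDataLoader the docstring assumes, exposing the same load_data() interface, and the judges here are random rather than human:

import torch
import lpips
from lpips.trainer import score_2afc_dataset

class ToyLoader:
    # yields dicts shaped like TwoAFCDataset batches
    def load_data(self):
        for _ in range(4):
            yield {'ref':   torch.rand(1, 3, 64, 64) * 2 - 1,
                   'p0':    torch.rand(1, 3, 64, 64) * 2 - 1,
                   'p1':    torch.rand(1, 3, 64, 64) * 2 - 1,
                   'judge': torch.rand(1)}  # fraction preferring p1

loss_fn = lpips.LPIPS(net='alex')
score, results = score_2afc_dataset(ToyLoader(), lambda a, b: loss_fn(a, b))
print('2AFC agreement with (random) judges: %.3f' % score)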
lpips/weights/v0.0/alex.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18720f55913d0af89042f13faa7e536a6ce1444a0914e6db9461355ece1e8cd5
+size 5455
lpips/weights/v0.0/squeeze.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c27abd3a0145541baa50990817df58d3759c3f8154949f42af3b59b4e042d0bf
+size 10057
lpips/weights/v0.0/vgg.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9e4236260c3dd988fc79d2a48d645d885afcbb21f9fd595e6744cf7419b582c
+size 6735