Zongsheng committed on
Commit
06f26d7
1 Parent(s): 3b1aa86

first upload

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ResizeRight/LICENSE +21 -0
  2. ResizeRight/README.md +87 -0
  3. ResizeRight/interp_methods.py +65 -0
  4. ResizeRight/resize_right.py +341 -0
  5. basicsr/__init__.py +12 -0
  6. basicsr/archs/__init__.py +24 -0
  7. basicsr/archs/arch_util.py +313 -0
  8. basicsr/archs/basicvsr_arch.py +336 -0
  9. basicsr/archs/basicvsrpp_arch.py +417 -0
  10. basicsr/archs/dfdnet_arch.py +169 -0
  11. basicsr/archs/dfdnet_util.py +162 -0
  12. basicsr/archs/discriminator_arch.py +150 -0
  13. basicsr/archs/duf_arch.py +276 -0
  14. basicsr/archs/ecbsr_arch.py +275 -0
  15. basicsr/archs/edsr_arch.py +61 -0
  16. basicsr/archs/edvr_arch.py +382 -0
  17. basicsr/archs/hifacegan_arch.py +260 -0
  18. basicsr/archs/hifacegan_util.py +255 -0
  19. basicsr/archs/inception.py +307 -0
  20. basicsr/archs/rcan_arch.py +135 -0
  21. basicsr/archs/ridnet_arch.py +180 -0
  22. basicsr/archs/rrdbnet_arch.py +119 -0
  23. basicsr/archs/spynet_arch.py +96 -0
  24. basicsr/archs/srresnet_arch.py +65 -0
  25. basicsr/archs/srvgg_arch.py +70 -0
  26. basicsr/archs/stylegan2_arch.py +799 -0
  27. basicsr/archs/stylegan2_bilinear_arch.py +614 -0
  28. basicsr/archs/swinir_arch.py +956 -0
  29. basicsr/archs/tof_arch.py +172 -0
  30. basicsr/archs/vgg_arch.py +161 -0
  31. basicsr/data/__init__.py +101 -0
  32. basicsr/data/data_sampler.py +48 -0
  33. basicsr/data/data_util.py +315 -0
  34. basicsr/data/degradations.py +764 -0
  35. basicsr/data/ffhq_dataset.py +80 -0
  36. basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt +0 -0
  37. basicsr/data/meta_info/meta_info_REDS4_test_GT.txt +4 -0
  38. basicsr/data/meta_info/meta_info_REDS_GT.txt +270 -0
  39. basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt +4 -0
  40. basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt +30 -0
  41. basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt +0 -0
  42. basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt +1225 -0
  43. basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt +0 -0
  44. basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt +1613 -0
  45. basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt +0 -0
  46. basicsr/data/paired_image_dataset.py +106 -0
  47. basicsr/data/prefetch_dataloader.py +122 -0
  48. basicsr/data/realesrgan_dataset.py +181 -0
  49. basicsr/data/realesrgan_paired_dataset.py +106 -0
  50. basicsr/data/reds_dataset.py +352 -0
ResizeRight/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2020 Assaf Shocher
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
ResizeRight/README.md ADDED
@@ -0,0 +1,87 @@
+ # ResizeRight
+ This is a resizing package for images or tensors that supports both Numpy and PyTorch seamlessly. The main motivation for creating it is to address some **crucial incorrectness issues** that exist in all other resizing packages I am aware of. As far as I know, it is the only one that performs correctly in all cases. ResizeRight is specially made for machine learning, image enhancement and restoration challenges.
+
+ In the PyTorch case, it is **fully differentiable and can be used inside a neural network**, both as a dynamic function (e.g. inside some "forward" method) or as a PyTorch layer (nn.Module).
+
+ The code is inspired by MATLAB's imresize function, but with crucial differences. It is specifically useful for the following reasons:
+
+ 1. ResizeRight produces results **identical (PSNR > 60 dB) to MATLAB for the simple cases** (scale_factor * in_size is an integer). None of the Python packages I am aware of currently resize images with results similar to MATLAB's imresize (which is a common benchmark for image restoration tasks, especially super-resolution).
+
+ 2. No other **differentiable** method I am aware of supports **AntiAliasing** as in MATLAB. Actually very few non-differentiable ones, including popular ones, do. This causes artifacts and inconsistency in downscaling (see [this tweet](https://twitter.com/jaakkolehtinen/status/1258102168176951299) by [Jaakko Lehtinen](https://users.aalto.fi/~lehtinj7/)
+ for example).
+
+ 3. The most important part: in the general case where scale_factor * in_size is non-integer, **no existing resizing method I am aware of (including MATLAB) performs consistently.** ResizeRight is accurate and consistent due to its ability to process **both the scale-factor and the output-size** provided by the user. This is a super important feature for super-resolution and learning. One must acknowledge that the same output-size can result from varying scale-factors, since the output-size is usually determined by *ceil(input_size * scale_factor)*. This situation creates a dangerous lack of consistency. It is best explained by example: say you have an image of size 9x9 and you resize by a scale-factor of 0.5. The result size is 5x5. Now you resize with a scale-factor of 2 and get a result sized 10x10. "No big deal", you must be thinking, "I can resize it according to output-size 9x9", right? But then you will not get the correct scale-factor, which would be calculated as output-size / input-size = 1.8.
+ Due to a simple observation regarding the projection of the output grid to the input grid, ResizeRight is the only one that consistently keeps the image centered, as in optical zoom, while complying with the exact scale-factor and output size the user requires.
+ This is one of the main reasons for creating this repository. This downscale-upscale consistency is often crucial for learning-based tasks (e.g. ["Zero-Shot Super-Resolution"](http://www.wisdom.weizmann.ac.il/~vision/zssr/)), and does not exist in other Python packages nor in MATLAB.
+
+ 4. Misalignment in resizing is a pandemic! Many existing packages actually return misaligned results. It is not visually apparent but can cause great damage to image enhancement tasks (for example, see [how tensorflow's image resize stole 60 days of my life](https://hackernoon.com/how-tensorflows-tf-image-resize-stole-60-days-of-my-life-aba5eb093f35)). I personally also suffered from many unfortunate consequences of such misalignment before and throughout making this method.
+
+ 5. Resizing supports **both Numpy and PyTorch** tensors seamlessly, just by the type of the input tensor given. Results are checked to be identical in both modes, so you can safely apply it to different tensor types and maintain consistency. No Numpy <-> Torch conversion takes place at any step. The process is done exclusively with one of the frameworks. No direct dependency is needed, so you can run it without having PyTorch installed at all, or without Numpy. You only need one of them.
+
+ 6. For PyTorch you can either use a **ResizeLayer (nn.Module)** that calculates the resizing weights once on initialization and then applies them at each network pass. This also means resizing can be performed efficiently on **GPU** and on batches of images.
+
+ 7. On the other hand, you can use a **dynamic resize** function to scale to different scale-factors at each pass. Both ways are built upon the same building blocks and produce identical results. Both are differentiable and can be used inside a neural network.
+
+ 8. Differently from some existing methods, including MATLAB, you can **resize N-D tensors in M-D dimensions**, for any M<=N.
+
+ 9. You can specify a list of scale-factors to resize each dimension using a different scale-factor.
+
+ 10. You can easily add and embed your own interpolation methods for the resizer to use (see interp_methods.py).
+
+ 11. Some calculations are done more efficiently than in the MATLAB version (one example is that MATLAB extends the kernel size by 2 and then searches for zero columns in the weights and cancels them. ResizeRight uses the observation that resizing is actually a continuous convolution and avoids these redundancies ahead of time, see Shocher et al. ["From Discrete to Continuous Convolution Layers"](https://arxiv.org/abs/2006.11120)).
+ --------
+
+ ### Usage:
+ For dynamic resize using either Numpy or PyTorch:
+ ```
+ resize_right.resize(input, scale_factors=None, out_shape=None,
+                     interp_method=interp_methods.cubic, support_sz=None,
+                     antialiasing=True)
+ ```
+ For a PyTorch layer (nn.Module):
+ ```
+ resize_layer = resize_right.ResizeLayer(in_shape, scale_factors=None, out_shape=None,
+                                         interp_method=interp_methods.cubic, support_sz=None,
+                                         antialiasing=True)
+
+ resize_layer(input)
+ ```
+
+ __input__ :
+ the input image/tensor, a Numpy or Torch tensor.
+
+ __in_shape__ (only specified for a static layer):
+ the input tensor shape. A list of integers.
+
+ __scale_factors__:
+ can be specified as-
+ 1. one scalar scale - then it will be assumed that you want to resize the first two dims with this scale for Numpy, or the last two dims for PyTorch.
+ 2. a list or tuple of scales - one for each dimension you want to resize. Note: if the length of the list is L, then the first L dims will be rescaled for Numpy and the last L for PyTorch.
+ 3. not specified - then it will be calculated using the output size. This is not recommended (see advantage 3 in the list above).
+
+ __out_shape__:
+ A list or tuple. If shorter than input.shape then only the first/last (depending on Numpy/PyTorch) dims are resized. If not specified, it is calculated from scale_factors.
+
+ __interp_method__:
+ The type of interpolation used to calculate the weights. This is a scalar-to-scalar function that can be applied to tensors pointwise. The classical methods are implemented and can be found in interp_methods.py (cubic, linear, lanczos2, lanczos3, box).
+
+ __support_sz__:
+ This is the support of the interpolation function, i.e. the length of the non-zero segment over its 1d input domain. This is a characteristic of the function, e.g. cubic: 4, linear: 2, lanczos2: 4, lanczos3: 6, box: 1.
+
+ __antialiasing__:
+ This is an option similar to MATLAB's default. It is only relevant for downscaling. If true, it basically means that the kernel is stretched by 1/scale_factor to prevent aliasing (low-pass filtering).
+
+ --------
+
+ ### Cite / credit
+ If you find our work useful in your research or publication, please cite this work:
+ ```
+ @misc{ResizeRight,
+   author = {Shocher, Assaf},
+   title = {ResizeRight},
+   year = {2018},
+   publisher = {GitHub},
+   journal = {GitHub repository},
+   howpublished = {\url{https://github.com/assafshocher/ResizeRight}},
+ }
+ ```
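A minimal usage sketch based on the API documented in this README (not part of the upload itself), assuming the ResizeRight folder is importable as a package and the PyTorch backend is installed; it replays the 9x9 example from point 3 by passing both the scale-factor and the output shape on the way back up:

```
import torch
from ResizeRight import resize_right, interp_methods

im = torch.rand(9, 9)

# downscale by 0.5: out_shape defaults to ceil(9 * 0.5) = 5 per dim
small = resize_right.resize(im, scale_factors=0.5)

# upscale back: give BOTH the scale-factor (2.0) and the target shape (9, 9),
# so the projection stays consistent with the original grid (point 3 above)
restored = resize_right.resize(small, scale_factors=2.0, out_shape=(9, 9),
                               interp_method=interp_methods.cubic,
                               antialiasing=True)
print(small.shape, restored.shape)  # torch.Size([5, 5]) torch.Size([9, 9])
```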
ResizeRight/interp_methods.py ADDED
@@ -0,0 +1,65 @@
+ from math import pi
+
+ try:
+     import torch
+ except ImportError:
+     torch = None
+
+ try:
+     import numpy
+ except ImportError:
+     numpy = None
+
+ if numpy is None and torch is None:
+     raise ImportError("Must have either Numpy or PyTorch but both not found")
+
+
+ def set_framework_dependencies(x):
+     if type(x) is numpy.ndarray:
+         to_dtype = lambda a: a
+         fw = numpy
+     else:
+         to_dtype = lambda a: a.to(x.dtype)
+         fw = torch
+     eps = fw.finfo(fw.float32).eps
+     return fw, to_dtype, eps
+
+
+ def support_sz(sz):
+     def wrapper(f):
+         f.support_sz = sz
+         return f
+     return wrapper
+
+ @support_sz(4)
+ def cubic(x):
+     fw, to_dtype, eps = set_framework_dependencies(x)
+     absx = fw.abs(x)
+     absx2 = absx ** 2
+     absx3 = absx ** 3
+     return ((1.5 * absx3 - 2.5 * absx2 + 1.) * to_dtype(absx <= 1.) +
+             (-0.5 * absx3 + 2.5 * absx2 - 4. * absx + 2.) *
+             to_dtype((1. < absx) & (absx <= 2.)))
+
+ @support_sz(4)
+ def lanczos2(x):
+     fw, to_dtype, eps = set_framework_dependencies(x)
+     return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) /
+              ((pi**2 * x**2 / 2) + eps)) * to_dtype(abs(x) < 2))
+
+ @support_sz(6)
+ def lanczos3(x):
+     fw, to_dtype, eps = set_framework_dependencies(x)
+     return (((fw.sin(pi * x) * fw.sin(pi * x / 3) + eps) /
+              ((pi**2 * x**2 / 3) + eps)) * to_dtype(abs(x) < 3))
+
+ @support_sz(2)
+ def linear(x):
+     fw, to_dtype, eps = set_framework_dependencies(x)
+     return ((x + 1) * to_dtype((-1 <= x) & (x < 0)) + (1 - x) *
+             to_dtype((0 <= x) & (x <= 1)))
+
+ @support_sz(1)
+ def box(x):
+     fw, to_dtype, eps = set_framework_dependencies(x)
+     return to_dtype((-1 <= x) & (x < 0)) + to_dtype((0 <= x) & (x <= 1))
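Point 10 of the README says you can plug in your own kernels. As an illustration only (this function is mine, not part of the upload), a raised-cosine kernel written in the same pattern as the methods above; it relies only on `support_sz` and `set_framework_dependencies` from this file:

```
@support_sz(2)
def hann(x):
    # raised-cosine window, nonzero on [-1, 1]; resize_right's get_weights()
    # normalizes the weights per output pixel, so it need not integrate to 1
    fw, to_dtype, eps = set_framework_dependencies(x)
    return (0.5 + 0.5 * fw.cos(pi * x)) * to_dtype(fw.abs(x) <= 1)
```

It would then be passed as `interp_method=interp_methods.hann` to `resize()` or `ResizeLayer`.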
ResizeRight/resize_right.py ADDED
@@ -0,0 +1,341 @@
1
+ import warnings
2
+ from math import ceil
3
+ from . import interp_methods
4
+
5
+
6
+ class NoneClass:
7
+ pass
8
+
9
+ try:
10
+ import torch
11
+ from torch import nn
12
+ nnModuleWrapped = nn.Module
13
+ except ImportError:
14
+ warnings.warn('No PyTorch found, will work only with Numpy')
15
+ torch = None
16
+ nnModuleWrapped = NoneClass
17
+
18
+ try:
19
+ import numpy
20
+ except ImportError:
21
+ warnings.warn('No Numpy found, will work only with PyTorch')
22
+ numpy = None
23
+
24
+
25
+ if numpy is None and torch is None:
26
+ raise ImportError("Must have either Numpy or PyTorch but both not found")
27
+
28
+
29
+ def resize(input, scale_factors=None, out_shape=None,
30
+ interp_method=interp_methods.cubic, support_sz=None,
31
+ antialiasing=True):
32
+ # get properties of the input tensor
33
+ in_shape, n_dims = input.shape, input.ndim
34
+
35
+ # fw stands for framework that can be either numpy or torch,
36
+ # determined by the input type
37
+ fw = numpy if type(input) is numpy.ndarray else torch
38
+ eps = fw.finfo(fw.float32).eps
39
+
40
+ # set missing scale factors or output shape, one according to the other,
41
+ # scream if both missing
42
+ scale_factors, out_shape = set_scale_and_out_sz(in_shape, out_shape,
43
+ scale_factors, fw)
44
+
45
+ # sort indices of dimensions according to scale of each dimension.
46
+ # since we are going dim by dim this is efficient
47
+ sorted_filtered_dims_and_scales = [(dim, scale_factors[dim])
48
+ for dim in sorted(range(n_dims),
49
+ key=lambda ind: scale_factors[ind])
50
+ if scale_factors[dim] != 1.]
51
+
52
+ # unless support size is specified by the user, it is an attribute
53
+ # of the interpolation method
54
+ if support_sz is None:
55
+ support_sz = interp_method.support_sz
56
+
57
+ # when using pytorch, we need to know what is the input tensor device
58
+ device = input.device if fw is torch else None
59
+
60
+ # output begins identical to input and changes with each iteration
61
+ output = input
62
+
63
+ # iterate over dims
64
+ for dim, scale_factor in sorted_filtered_dims_and_scales:
65
+
66
+ # get 1d set of weights and fields of view for each output location
67
+ # along this dim
68
+ field_of_view, weights = prepare_weights_and_field_of_view_1d(
69
+ dim, scale_factor, in_shape[dim], out_shape[dim], interp_method,
70
+ support_sz, antialiasing, fw, eps, device)
71
+
72
+ # multiply the weights by the values in the field of view and
73
+ # aggregate
74
+ output = apply_weights(output, field_of_view, weights, dim, n_dims,
75
+ fw)
76
+ return output
77
+
78
+
79
+ class ResizeLayer(nnModuleWrapped):
80
+ def __init__(self, in_shape, scale_factors=None, out_shape=None,
81
+ interp_method=interp_methods.cubic, support_sz=None,
82
+ antialiasing=True):
83
+ super(ResizeLayer, self).__init__()
84
+
85
+ # fw stands for framework, that can be either numpy or torch. since
86
+ # this is a torch layer, only one option in this case.
87
+ fw = torch
88
+ eps = fw.finfo(fw.float32).eps
89
+
90
+ # set missing scale factors or output shape, one according to the other,
91
+ # scream if both missing
92
+ scale_factors, out_shape = set_scale_and_out_sz(in_shape, out_shape,
93
+ scale_factors, fw)
94
+
95
+ # unless support size is specified by the user, it is an attribute
96
+ # of the interpolation method
97
+ if support_sz is None:
98
+ support_sz = interp_method.support_sz
99
+
100
+ self.n_dims = len(in_shape)
101
+
102
+ # sort indices of dimensions according to scale of each dimension.
103
+ # since we are going dim by dim this is efficient
104
+ self.sorted_filtered_dims_and_scales = [(dim, scale_factors[dim])
105
+ for dim in
106
+ sorted(range(self.n_dims),
107
+ key=lambda ind:
108
+ scale_factors[ind])
109
+ if scale_factors[dim] != 1.]
110
+
111
+ # iterate over dims
112
+ field_of_view_list = []
113
+ weights_list = []
114
+ for dim, scale_factor in self.sorted_filtered_dims_and_scales:
115
+
116
+ # get 1d set of weights and fields of view for each output
117
+ # location along this dim
118
+ field_of_view, weights = prepare_weights_and_field_of_view_1d(
119
+ dim, scale_factor, in_shape[dim], out_shape[dim],
120
+ interp_method, support_sz, antialiasing, fw, eps, input.device)
121
+
122
+ # keep weights and fields of views for all dims
123
+ weights_list.append(nn.Parameter(weights, requires_grad=False))
124
+ field_of_view_list.append(nn.Parameter(field_of_view,
125
+ requires_grad=False))
126
+
127
+ self.field_of_view = nn.ParameterList(field_of_view_list)
128
+ self.weights = nn.ParameterList(weights_list)
129
+ self.in_shape = in_shape
130
+
131
+ def forward(self, input):
132
+ # output begins identical to input and changes with each iteration
133
+ output = input
134
+
135
+ for (dim, scale_factor), field_of_view, weights in zip(
136
+ self.sorted_filtered_dims_and_scales,
137
+ self.field_of_view,
138
+ self.weights):
139
+ # multiply the weights by the values in the field of view and
140
+ # aggregate
141
+ output = apply_weights(output, field_of_view, weights, dim,
142
+ self.n_dims, torch)
143
+ return output
144
+
145
+
146
+ def prepare_weights_and_field_of_view_1d(dim, scale_factor, in_sz, out_sz,
147
+ interp_method, support_sz,
148
+ antialiasing, fw, eps, device=None):
149
+ # If antialiasing is taking place, we modify the window size and the
150
+ # interpolation method (see inside function)
151
+ interp_method, cur_support_sz = apply_antialiasing_if_needed(
152
+ interp_method,
153
+ support_sz,
154
+ scale_factor,
155
+ antialiasing)
156
+
157
+ # STEP 1- PROJECTED GRID: The non-integer locations of the projection of
158
+ # output pixel locations to the input tensor
159
+ projected_grid = get_projected_grid(in_sz, out_sz, scale_factor, fw, device)
160
+
161
+ # STEP 2- FIELDS OF VIEW: for each output pixels, map the input pixels
162
+ # that influence it
163
+ field_of_view = get_field_of_view(projected_grid, cur_support_sz, in_sz,
164
+ fw, eps, device)
165
+
166
+ # STEP 3- CALCULATE WEIGHTS: Match a set of weights to the pixels in the
167
+ # field of view for each output pixel
168
+ weights = get_weights(interp_method, projected_grid, field_of_view)
169
+
170
+ return field_of_view, weights
171
+
172
+
173
+ def apply_weights(input, field_of_view, weights, dim, n_dims, fw):
174
+ # STEP 4- APPLY WEIGHTS: Each output pixel is calculated by multiplying
175
+ # its set of weights with the pixel values in its field of view.
176
+ # We now multiply the fields of view with their matching weights.
177
+ # We do this by tensor multiplication and broadcasting.
178
+ # this step is separated to a different function, so that it can be
179
+ # repeated with the same calculated weights and fields.
180
+
181
+ # for this operation we assume the resized dim is the first one.
182
+ # so we transpose and will transpose back after multiplying
183
+ tmp_input = fw_swapaxes(input, dim, 0, fw)
184
+
185
+ # field_of_view is a tensor of order 2: for each output (1d location
186
+ # along cur dim)- a list of 1d neighbors locations.
187
+ # note that this whole operation is applied to each dim separately,
188
+ # this is why it is all in 1d.
189
+ # neighbors = tmp_input[field_of_view] is a tensor of order image_dims+1:
190
+ # for each output pixel (this time indicated in all dims), these are the
191
+ # values of the neighbors in the 1d field of view. note that we only
192
+ # consider neighbors along the current dim, but such set exists for every
193
+ # multi-dim location, hence the final tensor order is image_dims+1.
194
+ neighbors = tmp_input[field_of_view]
195
+
196
+ # weights is an order 2 tensor: for each output location along 1d- a list
197
+ # of weights matching the field of view. we augment it with ones, for
198
+ # broadcasting, so that when it multiplies some tensor the weights affect
199
+ # only its first dim.
200
+ tmp_weights = fw.reshape(weights, (*weights.shape, * [1] * (n_dims - 1)))
201
+
202
+ # now we simply multiply the weights with the neighbors, and then sum
203
+ # along the field of view, to get a single value per out pixel
204
+ tmp_output = (neighbors * tmp_weights).sum(1)
205
+
206
+ # we transpose back the resized dim to its original position
207
+ return fw_swapaxes(tmp_output, 0, dim, fw)
208
+
209
+
210
+ def set_scale_and_out_sz(in_shape, out_shape, scale_factors, fw):
211
+ # eventually we must have both scale-factors and out-sizes for all in/out
212
+ # dims. however, we support many possible partial arguments
213
+ if scale_factors is None and out_shape is None:
214
+ raise ValueError("either scale_factors or out_shape should be "
215
+ "provided")
216
+ if out_shape is not None:
217
+ # if out_shape has less dims than in_shape, we defaultly resize the
218
+ # first dims for numpy and last dims for torch
219
+ # out_shape = (list(out_shape) + list(in_shape[:-len(out_shape)])
220
+ # if fw is numpy
221
+ # else list(in_shape[:-len(out_shape)]) + list(out_shape))
222
+ out_shape = (list(out_shape) + list(in_shape[-len(out_shape):])
223
+ if fw is numpy
224
+ else list(in_shape[:-len(out_shape)]) + list(out_shape))
225
+ if scale_factors is None:
226
+ # if no scale given, we calculate it as the out to in ratio
227
+ # (not recommended)
228
+ scale_factors = [out_sz / in_sz for out_sz, in_sz
229
+ in zip(out_shape, in_shape)]
230
+ if scale_factors is not None:
231
+ # by default, if a single number is given as scale, we assume resizing
232
+ # two dims (most common are images with 2 spatial dims)
233
+ scale_factors = (scale_factors
234
+ if isinstance(scale_factors, (list, tuple))
235
+ else [scale_factors, scale_factors])
236
+ # if less scale_factors than in_shape dims, we defaultly resize the
237
+ # first dims for numpy and last dims for torch
238
+ scale_factors = (list(scale_factors) + [1] *
239
+ (len(in_shape) - len(scale_factors)) if fw is numpy
240
+ else [1] * (len(in_shape) - len(scale_factors)) +
241
+ list(scale_factors))
242
+ if out_shape is None:
243
+ # when no out_shape given, it is calculated by multiplying the
244
+ # scale by the in_shape (not recommended)
245
+ out_shape = [ceil(scale_factor * in_sz)
246
+ for scale_factor, in_sz in
247
+ zip(scale_factors, in_shape)]
248
+ # next line intentionally after out_shape determined for stability
249
+ scale_factors = [float(sf) for sf in scale_factors]
250
+ return scale_factors, out_shape
251
+
252
+
253
+ def get_projected_grid(in_sz, out_sz, scale_factor, fw, device=None):
254
+ # we start by having the output coordinates, which are just integer locations
255
+ out_coordinates = fw.arange(out_sz)
256
+
257
+ # if using torch we need to match the grid tensor device to the input device
258
+ out_coordinates = fw_set_device(out_coordinates, device, fw)
259
+
260
+ # This is projecting the output pixel locations in 1d to the input tensor,
261
+ # as non-integer locations.
262
+ # the following formula is derived in the paper
263
+ # "From Discrete to Continuous Convolutions" by Shocher et al.
264
+ return (out_coordinates / scale_factor +
265
+ (in_sz - 1) / 2 - (out_sz - 1) / (2 * scale_factor))
266
+
267
+
268
+ def get_field_of_view(projected_grid, cur_support_sz, in_sz, fw, eps, device):
269
+ # for each output pixel, map which input pixels influence it, in 1d.
270
+ # we start by calculating the leftmost neighbor, using half of the window
271
+ # size (eps is for when boundary is exact int)
272
+ left_boundaries = fw_ceil(projected_grid - cur_support_sz / 2 - eps, fw)
273
+
274
+ # then we simply take all the pixel centers in the field by counting
275
+ # window size pixels from the left boundary
276
+ ordinal_numbers = fw.arange(ceil(cur_support_sz - eps))
277
+ # in case using torch we need to match the device
278
+ ordinal_numbers = fw_set_device(ordinal_numbers, device, fw)
279
+ field_of_view = left_boundaries[:, None] + ordinal_numbers
280
+
281
+ # next we do a trick instead of padding, we map the field of view so that
282
+ # it would be like mirror padding, without actually padding
283
+ # (which would require enlarging the input tensor)
284
+ mirror = fw_cat((fw.arange(in_sz), fw.arange(in_sz - 1, -1, step=-1)), fw)
285
+ field_of_view = mirror[fw.remainder(field_of_view, mirror.shape[0])]
286
+ field_of_view = fw_set_device(field_of_view, device, fw)
287
+ return field_of_view
288
+
289
+
290
+ def get_weights(interp_method, projected_grid, field_of_view):
291
+ # the set of weights per each output pixels is the result of the chosen
292
+ # interpolation method applied to the distances between projected grid
293
+ # locations and the pixel-centers in the field of view (distances are
294
+ # directed, can be positive or negative)
295
+ weights = interp_method(projected_grid[:, None] - field_of_view)
296
+
297
+ # we now carefully normalize the weights to sum to 1 per each output pixel
298
+ sum_weights = weights.sum(1, keepdims=True)
299
+ sum_weights[sum_weights == 0] = 1
300
+ return weights / sum_weights
301
+
302
+
303
+ def apply_antialiasing_if_needed(interp_method, support_sz, scale_factor,
304
+ antialiasing):
305
+ # antialiasing is "stretching" the field of view according to the scale
306
+ # factor (only for downscaling). this is low-pass filtering. this
307
+ # requires modifying both the interpolation (stretching the 1d
308
+ # function and multiplying by the scale-factor) and the window size.
309
+ if scale_factor >= 1.0 or not antialiasing:
310
+ return interp_method, support_sz
311
+ cur_interp_method = (lambda arg: scale_factor *
312
+ interp_method(scale_factor * arg))
313
+ cur_support_sz = support_sz / scale_factor
314
+ return cur_interp_method, cur_support_sz
315
+
316
+
317
+ def fw_ceil(x, fw):
318
+ if fw is numpy:
319
+ return fw.int_(fw.ceil(x))
320
+ else:
321
+ return x.ceil().long()
322
+
323
+
324
+ def fw_cat(x, fw):
325
+ if fw is numpy:
326
+ return fw.concatenate(x)
327
+ else:
328
+ return fw.cat(x)
329
+
330
+
331
+ def fw_swapaxes(x, ax_1, ax_2, fw):
332
+ if fw is numpy:
333
+ return fw.swapaxes(x, ax_1, ax_2)
334
+ else:
335
+ return x.transpose(ax_1, ax_2)
336
+
337
+ def fw_set_device(x, device, fw):
338
+ if fw is numpy:
339
+ return x
340
+ else:
341
+ return x.to(device)
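A small, self-contained numeric check (plain NumPy, reimplementing the formula inline rather than importing this file) of the projection computed by `get_projected_grid` above: for `in_sz=9`, `scale_factor=0.5`, `out_sz=5` the projected grid stays centered on the input grid, which is the "optical zoom" consistency the README describes.

```
import numpy as np

def projected_grid(in_sz, out_sz, scale_factor):
    # same expression as get_projected_grid() above
    out_coordinates = np.arange(out_sz)
    return (out_coordinates / scale_factor +
            (in_sz - 1) / 2 - (out_sz - 1) / (2 * scale_factor))

grid = projected_grid(in_sz=9, out_sz=5, scale_factor=0.5)
print(grid)                          # [0. 2. 4. 6. 8.]
print(grid.mean() == (9 - 1) / 2)    # True: centered on the input grid
```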
basicsr/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # https://github.com/xinntao/BasicSR
+ # flake8: noqa
+ from .archs import *
+ from .data import *
+ from .losses import *
+ from .metrics import *
+ from .models import *
+ from .ops import *
+ from .test import *
+ from .train import *
+ from .utils import *
+ # from .version import __gitsha__, __version__
basicsr/archs/__init__.py ADDED
@@ -0,0 +1,24 @@
+ import importlib
+ from copy import deepcopy
+ from os import path as osp
+
+ from basicsr.utils import get_root_logger, scandir
+ from basicsr.utils.registry import ARCH_REGISTRY
+
+ __all__ = ['build_network']
+
+ # automatically scan and import arch modules for registry
+ # scan all the files under the 'archs' folder and collect files ending with '_arch.py'
+ arch_folder = osp.dirname(osp.abspath(__file__))
+ arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
+ # import all the arch modules
+ _arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames]
+
+
+ def build_network(opt):
+     opt = deepcopy(opt)
+     network_type = opt.pop('type')
+     net = ARCH_REGISTRY.get(network_type)(**opt)
+     logger = get_root_logger()
+     logger.info(f'Network [{net.__class__.__name__}] is created.')
+     return net
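A minimal sketch of how `build_network` consumes an options dict, assuming the basicsr package in this upload (and its compiled ops) imports cleanly and that the parameter names match `rrdbnet_arch.py`; the concrete values are placeholders:

```
from basicsr.archs import build_network

opt = {
    'type': 'RRDBNet',   # key looked up in ARCH_REGISTRY
    'num_in_ch': 3,      # remaining entries are passed to the arch as kwargs
    'num_out_ch': 3,
    'num_feat': 64,
    'num_block': 23,
}
net = build_network(opt)  # 'type' is popped from a deepcopy; opt itself is untouched
```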
basicsr/archs/arch_util.py ADDED
@@ -0,0 +1,313 @@
1
+ import collections.abc
2
+ import math
3
+ import torch
4
+ import torchvision
5
+ import warnings
6
+ from distutils.version import LooseVersion
7
+ from itertools import repeat
8
+ from torch import nn as nn
9
+ from torch.nn import functional as F
10
+ from torch.nn import init as init
11
+ from torch.nn.modules.batchnorm import _BatchNorm
12
+
13
+ from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv
14
+ from basicsr.utils import get_root_logger
15
+
16
+
17
+ @torch.no_grad()
18
+ def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
19
+ """Initialize network weights.
20
+
21
+ Args:
22
+ module_list (list[nn.Module] | nn.Module): Modules to be initialized.
23
+ scale (float): Scale initialized weights, especially for residual
24
+ blocks. Default: 1.
25
+ bias_fill (float): The value to fill bias. Default: 0
26
+ kwargs (dict): Other arguments for initialization function.
27
+ """
28
+ if not isinstance(module_list, list):
29
+ module_list = [module_list]
30
+ for module in module_list:
31
+ for m in module.modules():
32
+ if isinstance(m, nn.Conv2d):
33
+ init.kaiming_normal_(m.weight, **kwargs)
34
+ m.weight.data *= scale
35
+ if m.bias is not None:
36
+ m.bias.data.fill_(bias_fill)
37
+ elif isinstance(m, nn.Linear):
38
+ init.kaiming_normal_(m.weight, **kwargs)
39
+ m.weight.data *= scale
40
+ if m.bias is not None:
41
+ m.bias.data.fill_(bias_fill)
42
+ elif isinstance(m, _BatchNorm):
43
+ init.constant_(m.weight, 1)
44
+ if m.bias is not None:
45
+ m.bias.data.fill_(bias_fill)
46
+
47
+
48
+ def make_layer(basic_block, num_basic_block, **kwarg):
49
+ """Make layers by stacking the same blocks.
50
+
51
+ Args:
52
+ basic_block (nn.Module): nn.Module class for the basic block.
53
+ num_basic_block (int): number of blocks.
54
+
55
+ Returns:
56
+ nn.Sequential: Stacked blocks in nn.Sequential.
57
+ """
58
+ layers = []
59
+ for _ in range(num_basic_block):
60
+ layers.append(basic_block(**kwarg))
61
+ return nn.Sequential(*layers)
62
+
63
+
64
+ class ResidualBlockNoBN(nn.Module):
65
+ """Residual block without BN.
66
+
67
+ Args:
68
+ num_feat (int): Channel number of intermediate features.
69
+ Default: 64.
70
+ res_scale (float): Residual scale. Default: 1.
71
+ pytorch_init (bool): If set to True, use pytorch default init,
72
+ otherwise, use default_init_weights. Default: False.
73
+ """
74
+
75
+ def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
76
+ super(ResidualBlockNoBN, self).__init__()
77
+ self.res_scale = res_scale
78
+ self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
79
+ self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
80
+ self.relu = nn.ReLU(inplace=True)
81
+
82
+ if not pytorch_init:
83
+ default_init_weights([self.conv1, self.conv2], 0.1)
84
+
85
+ def forward(self, x):
86
+ identity = x
87
+ out = self.conv2(self.relu(self.conv1(x)))
88
+ return identity + out * self.res_scale
89
+
90
+
91
+ class Upsample(nn.Sequential):
92
+ """Upsample module.
93
+
94
+ Args:
95
+ scale (int): Scale factor. Supported scales: 2^n and 3.
96
+ num_feat (int): Channel number of intermediate features.
97
+ """
98
+
99
+ def __init__(self, scale, num_feat):
100
+ m = []
101
+ if (scale & (scale - 1)) == 0: # scale = 2^n
102
+ for _ in range(int(math.log(scale, 2))):
103
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
104
+ m.append(nn.PixelShuffle(2))
105
+ elif scale == 3:
106
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
107
+ m.append(nn.PixelShuffle(3))
108
+ else:
109
+ raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
110
+ super(Upsample, self).__init__(*m)
111
+
112
+
113
+ def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
114
+ """Warp an image or feature map with optical flow.
115
+
116
+ Args:
117
+ x (Tensor): Tensor with size (n, c, h, w).
118
+ flow (Tensor): Tensor with size (n, h, w, 2), normal value.
119
+ interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
120
+ padding_mode (str): 'zeros' or 'border' or 'reflection'.
121
+ Default: 'zeros'.
122
+ align_corners (bool): Before pytorch 1.3, the default value is
123
+ align_corners=True. After pytorch 1.3, the default value is
124
+ align_corners=False. Here, we use the True as default.
125
+
126
+ Returns:
127
+ Tensor: Warped image or feature map.
128
+ """
129
+ assert x.size()[-2:] == flow.size()[1:3]
130
+ _, _, h, w = x.size()
131
+ # create mesh grid
132
+ grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
133
+ grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
134
+ grid.requires_grad = False
135
+
136
+ vgrid = grid + flow
137
+ # scale grid to [-1,1]
138
+ vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
139
+ vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
140
+ vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
141
+ output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)
142
+
143
+ # TODO, what if align_corners=False
144
+ return output
145
+
146
+
147
+ def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
148
+ """Resize a flow according to ratio or shape.
149
+
150
+ Args:
151
+ flow (Tensor): Precomputed flow. shape [N, 2, H, W].
152
+ size_type (str): 'ratio' or 'shape'.
153
+ sizes (list[int | float]): the ratio for resizing or the final output
154
+ shape.
155
+ 1) The order of ratio should be [ratio_h, ratio_w]. For
156
+ downsampling, the ratio should be smaller than 1.0 (i.e., ratio
157
+ < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
158
+ ratio > 1.0).
159
+ 2) The order of output_size should be [out_h, out_w].
160
+ interp_mode (str): The mode of interpolation for resizing.
161
+ Default: 'bilinear'.
162
+ align_corners (bool): Whether align corners. Default: False.
163
+
164
+ Returns:
165
+ Tensor: Resized flow.
166
+ """
167
+ _, _, flow_h, flow_w = flow.size()
168
+ if size_type == 'ratio':
169
+ output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
170
+ elif size_type == 'shape':
171
+ output_h, output_w = sizes[0], sizes[1]
172
+ else:
173
+ raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')
174
+
175
+ input_flow = flow.clone()
176
+ ratio_h = output_h / flow_h
177
+ ratio_w = output_w / flow_w
178
+ input_flow[:, 0, :, :] *= ratio_w
179
+ input_flow[:, 1, :, :] *= ratio_h
180
+ resized_flow = F.interpolate(
181
+ input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
182
+ return resized_flow
183
+
184
+
185
+ # TODO: may write a cpp file
186
+ def pixel_unshuffle(x, scale):
187
+ """ Pixel unshuffle.
188
+
189
+ Args:
190
+ x (Tensor): Input feature with shape (b, c, hh, hw).
191
+ scale (int): Downsample ratio.
192
+
193
+ Returns:
194
+ Tensor: the pixel unshuffled feature.
195
+ """
196
+ b, c, hh, hw = x.size()
197
+ out_channel = c * (scale**2)
198
+ assert hh % scale == 0 and hw % scale == 0
199
+ h = hh // scale
200
+ w = hw // scale
201
+ x_view = x.view(b, c, h, scale, w, scale)
202
+ return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
203
+
204
+
205
+ class DCNv2Pack(ModulatedDeformConvPack):
206
+ """Modulated deformable conv for deformable alignment.
207
+
208
+ Different from the official DCNv2Pack, which generates offsets and masks
209
+ from the preceding features, this DCNv2Pack takes another different
210
+ features to generate offsets and masks.
211
+
212
+ ``Paper: Delving Deep into Deformable Alignment in Video Super-Resolution``
213
+ """
214
+
215
+ def forward(self, x, feat):
216
+ out = self.conv_offset(feat)
217
+ o1, o2, mask = torch.chunk(out, 3, dim=1)
218
+ offset = torch.cat((o1, o2), dim=1)
219
+ mask = torch.sigmoid(mask)
220
+
221
+ offset_absmean = torch.mean(torch.abs(offset))
222
+ if offset_absmean > 50:
223
+ logger = get_root_logger()
224
+ logger.warning(f'Offset abs mean is {offset_absmean}, larger than 50.')
225
+
226
+ if LooseVersion(torchvision.__version__) >= LooseVersion('0.9.0'):
227
+ return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
228
+ self.dilation, mask)
229
+ else:
230
+ return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding,
231
+ self.dilation, self.groups, self.deformable_groups)
232
+
233
+
234
+ def _no_grad_trunc_normal_(tensor, mean, std, a, b):
235
+ # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
236
+ # Cut & paste from PyTorch official master until it's in a few official releases - RW
237
+ # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
238
+ def norm_cdf(x):
239
+ # Computes standard normal cumulative distribution function
240
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
241
+
242
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
243
+ warnings.warn(
244
+ 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
245
+ 'The distribution of values may be incorrect.',
246
+ stacklevel=2)
247
+
248
+ with torch.no_grad():
249
+ # Values are generated by using a truncated uniform distribution and
250
+ # then using the inverse CDF for the normal distribution.
251
+ # Get upper and lower cdf values
252
+ low = norm_cdf((a - mean) / std)
253
+ up = norm_cdf((b - mean) / std)
254
+
255
+ # Uniformly fill tensor with values from [low, up], then translate to
256
+ # [2l-1, 2u-1].
257
+ tensor.uniform_(2 * low - 1, 2 * up - 1)
258
+
259
+ # Use inverse cdf transform for normal distribution to get truncated
260
+ # standard normal
261
+ tensor.erfinv_()
262
+
263
+ # Transform to proper mean, std
264
+ tensor.mul_(std * math.sqrt(2.))
265
+ tensor.add_(mean)
266
+
267
+ # Clamp to ensure it's in the proper range
268
+ tensor.clamp_(min=a, max=b)
269
+ return tensor
270
+
271
+
272
+ def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
273
+ r"""Fills the input Tensor with values drawn from a truncated
274
+ normal distribution.
275
+
276
+ From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
277
+
278
+ The values are effectively drawn from the
279
+ normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
280
+ with values outside :math:`[a, b]` redrawn until they are within
281
+ the bounds. The method used for generating the random values works
282
+ best when :math:`a \leq \text{mean} \leq b`.
283
+
284
+ Args:
285
+ tensor: an n-dimensional `torch.Tensor`
286
+ mean: the mean of the normal distribution
287
+ std: the standard deviation of the normal distribution
288
+ a: the minimum cutoff value
289
+ b: the maximum cutoff value
290
+
291
+ Examples:
292
+ >>> w = torch.empty(3, 5)
293
+ >>> nn.init.trunc_normal_(w)
294
+ """
295
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
296
+
297
+
298
+ # From PyTorch
299
+ def _ntuple(n):
300
+
301
+ def parse(x):
302
+ if isinstance(x, collections.abc.Iterable):
303
+ return x
304
+ return tuple(repeat(x, n))
305
+
306
+ return parse
307
+
308
+
309
+ to_1tuple = _ntuple(1)
310
+ to_2tuple = _ntuple(2)
311
+ to_3tuple = _ntuple(3)
312
+ to_4tuple = _ntuple(4)
313
+ to_ntuple = _ntuple
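For orientation, a standalone shape check of what `pixel_unshuffle` above computes (the function body is mirrored here instead of imported, to keep the snippet independent of the DCN import chain at the top of this file):

```
import torch

def pixel_unshuffle_ref(x, scale):
    # (b, c, h*scale, w*scale) -> (b, c*scale^2, h, w): the inverse of nn.PixelShuffle
    b, c, hh, hw = x.size()
    h, w = hh // scale, hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, c * scale ** 2, h, w)

x = torch.rand(2, 3, 64, 64)
print(pixel_unshuffle_ref(x, 2).shape)   # torch.Size([2, 12, 32, 32])
```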
basicsr/archs/basicvsr_arch.py ADDED
@@ -0,0 +1,336 @@
1
+ import torch
2
+ from torch import nn as nn
3
+ from torch.nn import functional as F
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+ from .arch_util import ResidualBlockNoBN, flow_warp, make_layer
7
+ from .edvr_arch import PCDAlignment, TSAFusion
8
+ from .spynet_arch import SpyNet
9
+
10
+
11
+ @ARCH_REGISTRY.register()
12
+ class BasicVSR(nn.Module):
13
+ """A recurrent network for video SR. Now only x4 is supported.
14
+
15
+ Args:
16
+ num_feat (int): Number of channels. Default: 64.
17
+ num_block (int): Number of residual blocks for each branch. Default: 15
18
+ spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
19
+ """
20
+
21
+ def __init__(self, num_feat=64, num_block=15, spynet_path=None):
22
+ super().__init__()
23
+ self.num_feat = num_feat
24
+
25
+ # alignment
26
+ self.spynet = SpyNet(spynet_path)
27
+
28
+ # propagation
29
+ self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
30
+ self.forward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
31
+
32
+ # reconstruction
33
+ self.fusion = nn.Conv2d(num_feat * 2, num_feat, 1, 1, 0, bias=True)
34
+ self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
35
+ self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
36
+ self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
37
+ self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
38
+
39
+ self.pixel_shuffle = nn.PixelShuffle(2)
40
+
41
+ # activation functions
42
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
43
+
44
+ def get_flow(self, x):
45
+ b, n, c, h, w = x.size()
46
+
47
+ x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
48
+ x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)
49
+
50
+ flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
51
+ flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)
52
+
53
+ return flows_forward, flows_backward
54
+
55
+ def forward(self, x):
56
+ """Forward function of BasicVSR.
57
+
58
+ Args:
59
+ x: Input frames with shape (b, n, c, h, w). n is the temporal dimension / number of frames.
60
+ """
61
+ flows_forward, flows_backward = self.get_flow(x)
62
+ b, n, _, h, w = x.size()
63
+
64
+ # backward branch
65
+ out_l = []
66
+ feat_prop = x.new_zeros(b, self.num_feat, h, w)
67
+ for i in range(n - 1, -1, -1):
68
+ x_i = x[:, i, :, :, :]
69
+ if i < n - 1:
70
+ flow = flows_backward[:, i, :, :, :]
71
+ feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
72
+ feat_prop = torch.cat([x_i, feat_prop], dim=1)
73
+ feat_prop = self.backward_trunk(feat_prop)
74
+ out_l.insert(0, feat_prop)
75
+
76
+ # forward branch
77
+ feat_prop = torch.zeros_like(feat_prop)
78
+ for i in range(0, n):
79
+ x_i = x[:, i, :, :, :]
80
+ if i > 0:
81
+ flow = flows_forward[:, i - 1, :, :, :]
82
+ feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
83
+
84
+ feat_prop = torch.cat([x_i, feat_prop], dim=1)
85
+ feat_prop = self.forward_trunk(feat_prop)
86
+
87
+ # upsample
88
+ out = torch.cat([out_l[i], feat_prop], dim=1)
89
+ out = self.lrelu(self.fusion(out))
90
+ out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
91
+ out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
92
+ out = self.lrelu(self.conv_hr(out))
93
+ out = self.conv_last(out)
94
+ base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
95
+ out += base
96
+ out_l[i] = out
97
+
98
+ return torch.stack(out_l, dim=1)
99
+
100
+
101
+ class ConvResidualBlocks(nn.Module):
102
+ """Conv and residual block used in BasicVSR.
103
+
104
+ Args:
105
+ num_in_ch (int): Number of input channels. Default: 3.
106
+ num_out_ch (int): Number of output channels. Default: 64.
107
+ num_block (int): Number of residual blocks. Default: 15.
108
+ """
109
+
110
+ def __init__(self, num_in_ch=3, num_out_ch=64, num_block=15):
111
+ super().__init__()
112
+ self.main = nn.Sequential(
113
+ nn.Conv2d(num_in_ch, num_out_ch, 3, 1, 1, bias=True), nn.LeakyReLU(negative_slope=0.1, inplace=True),
114
+ make_layer(ResidualBlockNoBN, num_block, num_feat=num_out_ch))
115
+
116
+ def forward(self, fea):
117
+ return self.main(fea)
118
+
119
+
120
+ @ARCH_REGISTRY.register()
121
+ class IconVSR(nn.Module):
122
+ """IconVSR, proposed also in the BasicVSR paper.
123
+
124
+ Args:
125
+ num_feat (int): Number of channels. Default: 64.
126
+ num_block (int): Number of residual blocks for each branch. Default: 15.
127
+ keyframe_stride (int): Keyframe stride. Default: 5.
128
+ temporal_padding (int): Temporal padding. Default: 2.
129
+ spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
130
+ edvr_path (str): Path to the pretrained EDVR model. Default: None.
131
+ """
132
+
133
+ def __init__(self,
134
+ num_feat=64,
135
+ num_block=15,
136
+ keyframe_stride=5,
137
+ temporal_padding=2,
138
+ spynet_path=None,
139
+ edvr_path=None):
140
+ super().__init__()
141
+
142
+ self.num_feat = num_feat
143
+ self.temporal_padding = temporal_padding
144
+ self.keyframe_stride = keyframe_stride
145
+
146
+ # keyframe_branch
147
+ self.edvr = EDVRFeatureExtractor(temporal_padding * 2 + 1, num_feat, edvr_path)
148
+ # alignment
149
+ self.spynet = SpyNet(spynet_path)
150
+
151
+ # propagation
152
+ self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
153
+ self.backward_trunk = ConvResidualBlocks(num_feat + 3, num_feat, num_block)
154
+
155
+ self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)
156
+ self.forward_trunk = ConvResidualBlocks(2 * num_feat + 3, num_feat, num_block)
157
+
158
+ # reconstruction
159
+ self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1, bias=True)
160
+ self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1, bias=True)
161
+ self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
162
+ self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
163
+
164
+ self.pixel_shuffle = nn.PixelShuffle(2)
165
+
166
+ # activation functions
167
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
168
+
169
+ def pad_spatial(self, x):
170
+ """Apply padding spatially.
171
+
172
+ Since the PCD module in EDVR requires that the resolution is a multiple
173
+ of 4, we apply padding to the input LR images if their resolution is
174
+ not divisible by 4.
175
+
176
+ Args:
177
+ x (Tensor): Input LR sequence with shape (n, t, c, h, w).
178
+ Returns:
179
+ Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).
180
+ """
181
+ n, t, c, h, w = x.size()
182
+
183
+ pad_h = (4 - h % 4) % 4
184
+ pad_w = (4 - w % 4) % 4
185
+
186
+ # padding
187
+ x = x.view(-1, c, h, w)
188
+ x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
189
+
190
+ return x.view(n, t, c, h + pad_h, w + pad_w)
191
+
192
+ def get_flow(self, x):
193
+ b, n, c, h, w = x.size()
194
+
195
+ x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
196
+ x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)
197
+
198
+ flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)
199
+ flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)
200
+
201
+ return flows_forward, flows_backward
202
+
203
+ def get_keyframe_feature(self, x, keyframe_idx):
204
+ if self.temporal_padding == 2:
205
+ x = [x[:, [4, 3]], x, x[:, [-4, -5]]]
206
+ elif self.temporal_padding == 3:
207
+ x = [x[:, [6, 5, 4]], x, x[:, [-5, -6, -7]]]
208
+ x = torch.cat(x, dim=1)
209
+
210
+ num_frames = 2 * self.temporal_padding + 1
211
+ feats_keyframe = {}
212
+ for i in keyframe_idx:
213
+ feats_keyframe[i] = self.edvr(x[:, i:i + num_frames].contiguous())
214
+ return feats_keyframe
215
+
216
+ def forward(self, x):
217
+ b, n, _, h_input, w_input = x.size()
218
+
219
+ x = self.pad_spatial(x)
220
+ h, w = x.shape[3:]
221
+
222
+ keyframe_idx = list(range(0, n, self.keyframe_stride))
223
+ if keyframe_idx[-1] != n - 1:
224
+ keyframe_idx.append(n - 1) # last frame is a keyframe
225
+
226
+ # compute flow and keyframe features
227
+ flows_forward, flows_backward = self.get_flow(x)
228
+ feats_keyframe = self.get_keyframe_feature(x, keyframe_idx)
229
+
230
+ # backward branch
231
+ out_l = []
232
+ feat_prop = x.new_zeros(b, self.num_feat, h, w)
233
+ for i in range(n - 1, -1, -1):
234
+ x_i = x[:, i, :, :, :]
235
+ if i < n - 1:
236
+ flow = flows_backward[:, i, :, :, :]
237
+ feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
238
+ if i in keyframe_idx:
239
+ feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
240
+ feat_prop = self.backward_fusion(feat_prop)
241
+ feat_prop = torch.cat([x_i, feat_prop], dim=1)
242
+ feat_prop = self.backward_trunk(feat_prop)
243
+ out_l.insert(0, feat_prop)
244
+
245
+ # forward branch
246
+ feat_prop = torch.zeros_like(feat_prop)
247
+ for i in range(0, n):
248
+ x_i = x[:, i, :, :, :]
249
+ if i > 0:
250
+ flow = flows_forward[:, i - 1, :, :, :]
251
+ feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
252
+ if i in keyframe_idx:
253
+ feat_prop = torch.cat([feat_prop, feats_keyframe[i]], dim=1)
254
+ feat_prop = self.forward_fusion(feat_prop)
255
+
256
+ feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)
257
+ feat_prop = self.forward_trunk(feat_prop)
258
+
259
+ # upsample
260
+ out = self.lrelu(self.pixel_shuffle(self.upconv1(feat_prop)))
261
+ out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
262
+ out = self.lrelu(self.conv_hr(out))
263
+ out = self.conv_last(out)
264
+ base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)
265
+ out += base
266
+ out_l[i] = out
267
+
268
+ return torch.stack(out_l, dim=1)[..., :4 * h_input, :4 * w_input]
269
+
270
+
271
+ class EDVRFeatureExtractor(nn.Module):
272
+ """EDVR feature extractor used in IconVSR.
273
+
274
+ Args:
275
+ num_input_frame (int): Number of input frames.
276
+ num_feat (int): Number of feature channels
277
+ load_path (str): Path to the pretrained weights of EDVR. Default: None.
278
+ """
279
+
280
+ def __init__(self, num_input_frame, num_feat, load_path):
281
+
282
+ super(EDVRFeatureExtractor, self).__init__()
283
+
284
+ self.center_frame_idx = num_input_frame // 2
285
+
286
+ # extract pyramid features
287
+ self.conv_first = nn.Conv2d(3, num_feat, 3, 1, 1)
288
+ self.feature_extraction = make_layer(ResidualBlockNoBN, 5, num_feat=num_feat)
289
+ self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
290
+ self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
291
+ self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
292
+ self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
293
+
294
+ # pcd and tsa module
295
+ self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=8)
296
+ self.fusion = TSAFusion(num_feat=num_feat, num_frame=num_input_frame, center_frame_idx=self.center_frame_idx)
297
+
298
+ # activation function
299
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
300
+
301
+ if load_path:
302
+ self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])
303
+
304
+ def forward(self, x):
305
+ b, n, c, h, w = x.size()
306
+
307
+ # extract features for each frame
308
+ # L1
309
+ feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
310
+ feat_l1 = self.feature_extraction(feat_l1)
311
+ # L2
312
+ feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
313
+ feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
314
+ # L3
315
+ feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
316
+ feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))
317
+
318
+ feat_l1 = feat_l1.view(b, n, -1, h, w)
319
+ feat_l2 = feat_l2.view(b, n, -1, h // 2, w // 2)
320
+ feat_l3 = feat_l3.view(b, n, -1, h // 4, w // 4)
321
+
322
+ # PCD alignment
323
+ ref_feat_l = [ # reference feature list
324
+ feat_l1[:, self.center_frame_idx, :, :, :].clone(), feat_l2[:, self.center_frame_idx, :, :, :].clone(),
325
+ feat_l3[:, self.center_frame_idx, :, :, :].clone()
326
+ ]
327
+ aligned_feat = []
328
+ for i in range(n):
329
+ nbr_feat_l = [ # neighboring feature list
330
+ feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(), feat_l3[:, i, :, :, :].clone()
331
+ ]
332
+ aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
333
+ aligned_feat = torch.stack(aligned_feat, dim=1) # (b, t, c, h, w)
334
+
335
+ # TSA fusion
336
+ return self.fusion(aligned_feat)
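To make the bidirectional propagation in `BasicVSR.forward` easier to follow, a toy shape walk-through in plain PyTorch (nothing here is imported from the repo; the zero flows and the tiny feature width are placeholders for the SpyNet output and the residual trunks):

```
import torch

b, n, c, h, w = 1, 5, 3, 32, 32
x = torch.rand(b, n, c, h, w)                    # input clip, as in forward()
flows_backward = torch.zeros(b, n - 1, 2, h, w)  # stands in for get_flow()

num_feat = 8                                     # 64 in the real model
feat_prop = x.new_zeros(b, num_feat, h, w)
out_l = []
for i in range(n - 1, -1, -1):                   # backward-in-time pass
    x_i = x[:, i]
    # the model would warp feat_prop here: flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
    feat_prop = torch.cat([x_i, feat_prop], dim=1)   # (b, c + num_feat, h, w)
    feat_prop = feat_prop[:, :num_feat]              # stand-in for backward_trunk
    out_l.insert(0, feat_prop)
print(len(out_l), out_l[0].shape)                # 5 torch.Size([1, 8, 32, 32])
```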
basicsr/archs/basicvsrpp_arch.py ADDED
@@ -0,0 +1,417 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import torchvision
5
+ import warnings
6
+
7
+ from basicsr.archs.arch_util import flow_warp
8
+ from basicsr.archs.basicvsr_arch import ConvResidualBlocks
9
+ from basicsr.archs.spynet_arch import SpyNet
10
+ from basicsr.ops.dcn import ModulatedDeformConvPack
11
+ from basicsr.utils.registry import ARCH_REGISTRY
12
+
13
+
14
+ @ARCH_REGISTRY.register()
15
+ class BasicVSRPlusPlus(nn.Module):
16
+ """BasicVSR++ network structure.
17
+
18
+ Support either x4 upsampling or same size output. Since DCN is used in this
19
+ model, it can only be used with CUDA enabled. If CUDA is not enabled,
20
+ feature alignment will be skipped. Besides, we adopt the official DCN
21
+ implementation and the torch version needs to be higher than 1.9.
22
+
23
+ ``Paper: BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation and Alignment``
24
+
25
+ Args:
26
+ mid_channels (int, optional): Channel number of the intermediate
27
+ features. Default: 64.
28
+ num_blocks (int, optional): The number of residual blocks in each
29
+ propagation branch. Default: 7.
30
+ max_residue_magnitude (int): The maximum magnitude of the offset
31
+ residue (Eq. 6 in paper). Default: 10.
32
+ is_low_res_input (bool, optional): Whether the input is low-resolution
33
+ or not. If False, the output resolution is equal to the input
34
+ resolution. Default: True.
35
+ spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.
36
+ cpu_cache_length (int, optional): When the length of sequence is larger
37
+ than this value, the intermediate features are sent to CPU. This
38
+ saves GPU memory, but slows down the inference speed. You can
39
+ increase this number if you have a GPU with large memory.
40
+ Default: 100.
41
+ """
42
+
43
+ def __init__(self,
44
+ mid_channels=64,
45
+ num_blocks=7,
46
+ max_residue_magnitude=10,
47
+ is_low_res_input=True,
48
+ spynet_path=None,
49
+ cpu_cache_length=100):
50
+
51
+ super().__init__()
52
+ self.mid_channels = mid_channels
53
+ self.is_low_res_input = is_low_res_input
54
+ self.cpu_cache_length = cpu_cache_length
55
+
56
+ # optical flow
57
+ self.spynet = SpyNet(spynet_path)
58
+
59
+ # feature extraction module
60
+ if is_low_res_input:
61
+ self.feat_extract = ConvResidualBlocks(3, mid_channels, 5)
62
+ else:
63
+ self.feat_extract = nn.Sequential(
64
+ nn.Conv2d(3, mid_channels, 3, 2, 1), nn.LeakyReLU(negative_slope=0.1, inplace=True),
65
+ nn.Conv2d(mid_channels, mid_channels, 3, 2, 1), nn.LeakyReLU(negative_slope=0.1, inplace=True),
66
+ ConvResidualBlocks(mid_channels, mid_channels, 5))
67
+
68
+ # propagation branches
69
+ self.deform_align = nn.ModuleDict()
70
+ self.backbone = nn.ModuleDict()
71
+ modules = ['backward_1', 'forward_1', 'backward_2', 'forward_2']
72
+ for i, module in enumerate(modules):
73
+ if torch.cuda.is_available():
74
+ self.deform_align[module] = SecondOrderDeformableAlignment(
75
+ 2 * mid_channels,
76
+ mid_channels,
77
+ 3,
78
+ padding=1,
79
+ deformable_groups=16,
80
+ max_residue_magnitude=max_residue_magnitude)
81
+ self.backbone[module] = ConvResidualBlocks((2 + i) * mid_channels, mid_channels, num_blocks)
82
+
83
+ # upsampling module
84
+ self.reconstruction = ConvResidualBlocks(5 * mid_channels, mid_channels, 5)
85
+
86
+ self.upconv1 = nn.Conv2d(mid_channels, mid_channels * 4, 3, 1, 1, bias=True)
87
+ self.upconv2 = nn.Conv2d(mid_channels, 64 * 4, 3, 1, 1, bias=True)
88
+
89
+ self.pixel_shuffle = nn.PixelShuffle(2)
90
+
91
+ self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
92
+ self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
93
+ self.img_upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
94
+
95
+ # activation function
96
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
97
+
98
+ # check if the sequence is augmented by flipping
99
+ self.is_mirror_extended = False
100
+
101
+ if len(self.deform_align) > 0:
102
+ self.is_with_alignment = True
103
+ else:
104
+ self.is_with_alignment = False
105
+ warnings.warn('Deformable alignment module is not added. '
106
+ 'Probably your CUDA is not configured correctly. DCN can only '
107
+ 'be used with CUDA enabled. Alignment is skipped now.')
108
+
109
+ def check_if_mirror_extended(self, lqs):
110
+ """Check whether the input is a mirror-extended sequence.
111
+
112
+ If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the (t-1-i)-th frame.
113
+
114
+ Args:
115
+ lqs (tensor): Input low quality (LQ) sequence with shape (n, t, c, h, w).
116
+ """
117
+
118
+ if lqs.size(1) % 2 == 0:
119
+ lqs_1, lqs_2 = torch.chunk(lqs, 2, dim=1)
120
+ if torch.norm(lqs_1 - lqs_2.flip(1)) == 0:
121
+ self.is_mirror_extended = True
122
+
123
+ def compute_flow(self, lqs):
124
+ """Compute optical flow using SPyNet for feature alignment.
125
+
126
+ Note that if the input is a mirror-extended sequence, 'flows_forward'
127
+ is not needed, since it is equal to 'flows_backward.flip(1)'.
128
+
129
+ Args:
130
+ lqs (tensor): Input low quality (LQ) sequence with
131
+ shape (n, t, c, h, w).
132
+
133
+ Return:
134
+ tuple(Tensor): Optical flow. 'flows_forward' corresponds to the flows used for forward-time propagation \
135
+ (current to previous). 'flows_backward' corresponds to the flows used for backward-time \
136
+ propagation (current to next).
137
+ """
138
+
139
+ n, t, c, h, w = lqs.size()
140
+ lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)
141
+ lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)
142
+
143
+ flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)
144
+
145
+ if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
146
+ flows_forward = flows_backward.flip(1)
147
+ else:
148
+ flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)
149
+
150
+ if self.cpu_cache:
151
+ flows_backward = flows_backward.cpu()
152
+ flows_forward = flows_forward.cpu()
153
+
154
+ return flows_forward, flows_backward
155
+
156
+ def propagate(self, feats, flows, module_name):
157
+ """Propagate the latent features throughout the sequence.
158
+
159
+ Args:
160
+ feats (dict[list[tensor]]): Features from previous branches. Each
161
+ component is a list of tensors with shape (n, c, h, w).
162
+ flows (tensor): Optical flows with shape (n, t - 1, 2, h, w).
163
+ module_name (str): The name of the propagation branch. Can be one of
164
+ 'backward_1', 'forward_1', 'backward_2', 'forward_2'.
165
+
166
+ Return:
167
+ dict(list[tensor]): A dictionary containing all the propagated \
168
+ features. Each key in the dictionary corresponds to a \
169
+ propagation branch, which is represented by a list of tensors.
170
+ """
171
+
172
+ n, t, _, h, w = flows.size()
173
+
174
+ frame_idx = range(0, t + 1)
175
+ flow_idx = range(-1, t)
176
+ mapping_idx = list(range(0, len(feats['spatial'])))
177
+ mapping_idx += mapping_idx[::-1]
178
+
179
+ if 'backward' in module_name:
180
+ frame_idx = frame_idx[::-1]
181
+ flow_idx = frame_idx
182
+
183
+ feat_prop = flows.new_zeros(n, self.mid_channels, h, w)
184
+ for i, idx in enumerate(frame_idx):
185
+ feat_current = feats['spatial'][mapping_idx[idx]]
186
+ if self.cpu_cache:
187
+ feat_current = feat_current.cuda()
188
+ feat_prop = feat_prop.cuda()
189
+ # second-order deformable alignment
190
+ if i > 0 and self.is_with_alignment:
191
+ flow_n1 = flows[:, flow_idx[i], :, :, :]
192
+ if self.cpu_cache:
193
+ flow_n1 = flow_n1.cuda()
194
+
195
+ cond_n1 = flow_warp(feat_prop, flow_n1.permute(0, 2, 3, 1))
196
+
197
+ # initialize second-order features
198
+ feat_n2 = torch.zeros_like(feat_prop)
199
+ flow_n2 = torch.zeros_like(flow_n1)
200
+ cond_n2 = torch.zeros_like(cond_n1)
201
+
202
+ if i > 1: # second-order features
203
+ feat_n2 = feats[module_name][-2]
204
+ if self.cpu_cache:
205
+ feat_n2 = feat_n2.cuda()
206
+
207
+ flow_n2 = flows[:, flow_idx[i - 1], :, :, :]
208
+ if self.cpu_cache:
209
+ flow_n2 = flow_n2.cuda()
210
+
211
+ flow_n2 = flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1))
212
+ cond_n2 = flow_warp(feat_n2, flow_n2.permute(0, 2, 3, 1))
213
+
214
+ # flow-guided deformable convolution
215
+ cond = torch.cat([cond_n1, feat_current, cond_n2], dim=1)
216
+ feat_prop = torch.cat([feat_prop, feat_n2], dim=1)
217
+ feat_prop = self.deform_align[module_name](feat_prop, cond, flow_n1, flow_n2)
218
+
219
+ # concatenate and residual blocks
220
+ feat = [feat_current] + [feats[k][idx] for k in feats if k not in ['spatial', module_name]] + [feat_prop]
221
+ if self.cpu_cache:
222
+ feat = [f.cuda() for f in feat]
223
+
224
+ feat = torch.cat(feat, dim=1)
225
+ feat_prop = feat_prop + self.backbone[module_name](feat)
226
+ feats[module_name].append(feat_prop)
227
+
228
+ if self.cpu_cache:
229
+ feats[module_name][-1] = feats[module_name][-1].cpu()
230
+ torch.cuda.empty_cache()
231
+
232
+ if 'backward' in module_name:
233
+ feats[module_name] = feats[module_name][::-1]
234
+
235
+ return feats
236
+
237
+ def upsample(self, lqs, feats):
238
+ """Compute the output image given the features.
239
+
240
+ Args:
241
+ lqs (tensor): Input low quality (LQ) sequence with
242
+ shape (n, t, c, h, w).
243
+ feats (dict): The features from the propagation branches.
244
+
245
+ Returns:
246
+ Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
247
+ """
248
+
249
+ outputs = []
250
+ num_outputs = len(feats['spatial'])
251
+
252
+ mapping_idx = list(range(0, num_outputs))
253
+ mapping_idx += mapping_idx[::-1]
254
+
255
+ for i in range(0, lqs.size(1)):
256
+ hr = [feats[k].pop(0) for k in feats if k != 'spatial']
257
+ hr.insert(0, feats['spatial'][mapping_idx[i]])
258
+ hr = torch.cat(hr, dim=1)
259
+ if self.cpu_cache:
260
+ hr = hr.cuda()
261
+
262
+ hr = self.reconstruction(hr)
263
+ hr = self.lrelu(self.pixel_shuffle(self.upconv1(hr)))
264
+ hr = self.lrelu(self.pixel_shuffle(self.upconv2(hr)))
265
+ hr = self.lrelu(self.conv_hr(hr))
266
+ hr = self.conv_last(hr)
267
+ if self.is_low_res_input:
268
+ hr += self.img_upsample(lqs[:, i, :, :, :])
269
+ else:
270
+ hr += lqs[:, i, :, :, :]
271
+
272
+ if self.cpu_cache:
273
+ hr = hr.cpu()
274
+ torch.cuda.empty_cache()
275
+
276
+ outputs.append(hr)
277
+
278
+ return torch.stack(outputs, dim=1)
279
+
280
+ def forward(self, lqs):
281
+ """Forward function for BasicVSR++.
282
+
283
+ Args:
284
+ lqs (tensor): Input low quality (LQ) sequence with
285
+ shape (n, t, c, h, w).
286
+
287
+ Returns:
288
+ Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
289
+ """
290
+
291
+ n, t, c, h, w = lqs.size()
292
+
293
+ # whether to cache the features in CPU
294
+ self.cpu_cache = True if t > self.cpu_cache_length else False
295
+
296
+ if self.is_low_res_input:
297
+ lqs_downsample = lqs.clone()
298
+ else:
299
+ lqs_downsample = F.interpolate(
300
+ lqs.view(-1, c, h, w), scale_factor=0.25, mode='bicubic').view(n, t, c, h // 4, w // 4)
301
+
302
+ # check whether the input is an extended sequence
303
+ self.check_if_mirror_extended(lqs)
304
+
305
+ feats = {}
306
+ # compute spatial features
307
+ if self.cpu_cache:
308
+ feats['spatial'] = []
309
+ for i in range(0, t):
310
+ feat = self.feat_extract(lqs[:, i, :, :, :]).cpu()
311
+ feats['spatial'].append(feat)
312
+ torch.cuda.empty_cache()
313
+ else:
314
+ feats_ = self.feat_extract(lqs.view(-1, c, h, w))
315
+ h, w = feats_.shape[2:]
316
+ feats_ = feats_.view(n, t, -1, h, w)
317
+ feats['spatial'] = [feats_[:, i, :, :, :] for i in range(0, t)]
318
+
319
+ # compute optical flow using the low-res inputs
320
+ assert lqs_downsample.size(3) >= 64 and lqs_downsample.size(4) >= 64, (
321
+ 'The height and width of low-res inputs must be at least 64, '
322
+ f'but got {h} and {w}.')
323
+ flows_forward, flows_backward = self.compute_flow(lqs_downsample)
324
+
325
+ # feature propagation
326
+ for iter_ in [1, 2]:
327
+ for direction in ['backward', 'forward']:
328
+ module = f'{direction}_{iter_}'
329
+
330
+ feats[module] = []
331
+
332
+ if direction == 'backward':
333
+ flows = flows_backward
334
+ elif flows_forward is not None:
335
+ flows = flows_forward
336
+ else:
337
+ flows = flows_backward.flip(1)
338
+
339
+ feats = self.propagate(feats, flows, module)
340
+ if self.cpu_cache:
341
+ del flows
342
+ torch.cuda.empty_cache()
343
+
344
+ return self.upsample(lqs, feats)
345
+
346
+
347
+ class SecondOrderDeformableAlignment(ModulatedDeformConvPack):
348
+ """Second-order deformable alignment module.
349
+
350
+ Args:
351
+ in_channels (int): Same as nn.Conv2d.
352
+ out_channels (int): Same as nn.Conv2d.
353
+ kernel_size (int or tuple[int]): Same as nn.Conv2d.
354
+ stride (int or tuple[int]): Same as nn.Conv2d.
355
+ padding (int or tuple[int]): Same as nn.Conv2d.
356
+ dilation (int or tuple[int]): Same as nn.Conv2d.
357
+ groups (int): Same as nn.Conv2d.
358
+ bias (bool or str): If specified as `auto`, it will be decided by the
359
+ norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
360
+ False.
361
+ max_residue_magnitude (int): The maximum magnitude of the offset
362
+ residue (Eq. 6 in paper). Default: 10.
363
+ """
364
+
365
+ def __init__(self, *args, **kwargs):
366
+ self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)
367
+
368
+ super(SecondOrderDeformableAlignment, self).__init__(*args, **kwargs)
369
+
370
+ self.conv_offset = nn.Sequential(
371
+ nn.Conv2d(3 * self.out_channels + 4, self.out_channels, 3, 1, 1),
372
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
373
+ nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
374
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
375
+ nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
376
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
377
+ nn.Conv2d(self.out_channels, 27 * self.deformable_groups, 3, 1, 1),
378
+ )
379
+
380
+ self.init_offset()
381
+
382
+ def init_offset(self):
383
+
384
+ def _constant_init(module, val, bias=0):
385
+ if hasattr(module, 'weight') and module.weight is not None:
386
+ nn.init.constant_(module.weight, val)
387
+ if hasattr(module, 'bias') and module.bias is not None:
388
+ nn.init.constant_(module.bias, bias)
389
+
390
+ _constant_init(self.conv_offset[-1], val=0, bias=0)
391
+
392
+ def forward(self, x, extra_feat, flow_1, flow_2):
393
+ extra_feat = torch.cat([extra_feat, flow_1, flow_2], dim=1)
394
+ out = self.conv_offset(extra_feat)
395
+ o1, o2, mask = torch.chunk(out, 3, dim=1)
396
+
397
+ # offset
398
+ offset = self.max_residue_magnitude * torch.tanh(torch.cat((o1, o2), dim=1))
399
+ offset_1, offset_2 = torch.chunk(offset, 2, dim=1)
400
+ offset_1 = offset_1 + flow_1.flip(1).repeat(1, offset_1.size(1) // 2, 1, 1)
401
+ offset_2 = offset_2 + flow_2.flip(1).repeat(1, offset_2.size(1) // 2, 1, 1)
402
+ offset = torch.cat([offset_1, offset_2], dim=1)
403
+
404
+ # mask
405
+ mask = torch.sigmoid(mask)
406
+
407
+ return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
408
+ self.dilation, mask)
409
+
410
+
411
+ # if __name__ == '__main__':
412
+ # spynet_path = 'experiments/pretrained_models/flownet/spynet_sintel_final-3d2a1287.pth'
413
+ # model = BasicVSRPlusPlus(spynet_path=spynet_path).cuda()
414
+ # input = torch.rand(1, 2, 3, 64, 64).cuda()
415
+ # output = model(input)
416
+ # print('===================')
417
+ # print(output.shape)
basicsr/archs/dfdnet_arch.py ADDED
@@ -0,0 +1,169 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from torch.nn.utils.spectral_norm import spectral_norm
6
+
7
+ from basicsr.utils.registry import ARCH_REGISTRY
8
+ from .dfdnet_util import AttentionBlock, Blur, MSDilationBlock, UpResBlock, adaptive_instance_normalization
9
+ from .vgg_arch import VGGFeatureExtractor
10
+
11
+
12
+ class SFTUpBlock(nn.Module):
13
+ """Spatial feature transform (SFT) with upsampling block.
14
+
15
+ Args:
16
+ in_channel (int): Number of input channels.
17
+ out_channel (int): Number of output channels.
18
+ kernel_size (int): Kernel size in convolutions. Default: 3.
19
+ padding (int): Padding in convolutions. Default: 1.
20
+ """
21
+
22
+ def __init__(self, in_channel, out_channel, kernel_size=3, padding=1):
23
+ super(SFTUpBlock, self).__init__()
24
+ self.conv1 = nn.Sequential(
25
+ Blur(in_channel),
26
+ spectral_norm(nn.Conv2d(in_channel, out_channel, kernel_size, padding=padding)),
27
+ nn.LeakyReLU(0.04, True),
28
+ # The official code applies LeakyReLU twice here, so a slope of 0.04 (= 0.2 * 0.2) is equivalent
29
+ )
30
+ self.convup = nn.Sequential(
31
+ nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
32
+ spectral_norm(nn.Conv2d(out_channel, out_channel, kernel_size, padding=padding)),
33
+ nn.LeakyReLU(0.2, True),
34
+ )
35
+
36
+ # for SFT scale and shift
37
+ self.scale_block = nn.Sequential(
38
+ spectral_norm(nn.Conv2d(in_channel, out_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
39
+ spectral_norm(nn.Conv2d(out_channel, out_channel, 3, 1, 1)))
40
+ self.shift_block = nn.Sequential(
41
+ spectral_norm(nn.Conv2d(in_channel, out_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
42
+ spectral_norm(nn.Conv2d(out_channel, out_channel, 3, 1, 1)), nn.Sigmoid())
43
+ # The official code uses a sigmoid for the shift block; the reason is unclear
44
+
45
+ def forward(self, x, updated_feat):
46
+ out = self.conv1(x)
47
+ # SFT
48
+ scale = self.scale_block(updated_feat)
49
+ shift = self.shift_block(updated_feat)
50
+ out = out * scale + shift
51
+ # upsample
52
+ out = self.convup(out)
53
+ return out
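A quick shape sketch (illustrative sizes, assuming the basicsr package added in this commit is importable) of how SFTUpBlock modulates a decoder feature with the dictionary-updated feature and upsamples it by 2x:

import torch
from basicsr.archs.dfdnet_arch import SFTUpBlock

block = SFTUpBlock(in_channel=512, out_channel=256)
x = torch.randn(1, 512, 16, 16)              # decoder feature to be modulated
updated_feat = torch.randn(1, 512, 16, 16)   # dictionary-updated VGG feature
out = block(x, updated_feat)                 # SFT scale/shift, then 2x bilinear upsample + conv
print(out.shape)                             # torch.Size([1, 256, 32, 32])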
54
+
55
+
56
+ @ARCH_REGISTRY.register()
57
+ class DFDNet(nn.Module):
58
+ """DFDNet: Deep Face Dictionary Network.
59
+
60
+ It only processes faces with 512x512 size.
61
+
62
+ Args:
63
+ num_feat (int): Number of feature channels.
64
+ dict_path (str): Path to the facial component dictionary.
65
+ """
66
+
67
+ def __init__(self, num_feat, dict_path):
68
+ super().__init__()
69
+ self.parts = ['left_eye', 'right_eye', 'nose', 'mouth']
70
+ # part_sizes: [80, 80, 50, 110]
71
+ channel_sizes = [128, 256, 512, 512]
72
+ self.feature_sizes = np.array([256, 128, 64, 32])
73
+ self.vgg_layers = ['relu2_2', 'relu3_4', 'relu4_4', 'conv5_4']
74
+ self.flag_dict_device = False
75
+
76
+ # dict
77
+ self.dict = torch.load(dict_path)
78
+
79
+ # vgg face extractor
80
+ self.vgg_extractor = VGGFeatureExtractor(
81
+ layer_name_list=self.vgg_layers,
82
+ vgg_type='vgg19',
83
+ use_input_norm=True,
84
+ range_norm=True,
85
+ requires_grad=False)
86
+
87
+ # attention block for fusing dictionary features and input features
88
+ self.attn_blocks = nn.ModuleDict()
89
+ for idx, feat_size in enumerate(self.feature_sizes):
90
+ for name in self.parts:
91
+ self.attn_blocks[f'{name}_{feat_size}'] = AttentionBlock(channel_sizes[idx])
92
+
93
+ # multi scale dilation block
94
+ self.multi_scale_dilation = MSDilationBlock(num_feat * 8, dilation=[4, 3, 2, 1])
95
+
96
+ # upsampling and reconstruction
97
+ self.upsample0 = SFTUpBlock(num_feat * 8, num_feat * 8)
98
+ self.upsample1 = SFTUpBlock(num_feat * 8, num_feat * 4)
99
+ self.upsample2 = SFTUpBlock(num_feat * 4, num_feat * 2)
100
+ self.upsample3 = SFTUpBlock(num_feat * 2, num_feat)
101
+ self.upsample4 = nn.Sequential(
102
+ spectral_norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1)), nn.LeakyReLU(0.2, True), UpResBlock(num_feat),
103
+ UpResBlock(num_feat), nn.Conv2d(num_feat, 3, kernel_size=3, stride=1, padding=1), nn.Tanh())
104
+
105
+ def swap_feat(self, vgg_feat, updated_feat, dict_feat, location, part_name, f_size):
106
+ """swap the features from the dictionary."""
107
+ # get the original vgg features
108
+ part_feat = vgg_feat[:, :, location[1]:location[3], location[0]:location[2]].clone()
109
+ # resize original vgg features
110
+ part_resize_feat = F.interpolate(part_feat, dict_feat.size()[2:4], mode='bilinear', align_corners=False)
111
+ # use adaptive instance normalization to adjust color and illuminations
112
+ dict_feat = adaptive_instance_normalization(dict_feat, part_resize_feat)
113
+ # get similarity scores
114
+ similarity_score = F.conv2d(part_resize_feat, dict_feat)
115
+ similarity_score = F.softmax(similarity_score.view(-1), dim=0)
116
+ # select the most similar features in the dict (after norm)
117
+ select_idx = torch.argmax(similarity_score)
118
+ swap_feat = F.interpolate(dict_feat[select_idx:select_idx + 1], part_feat.size()[2:4])
119
+ # attention
120
+ attn = self.attn_blocks[f'{part_name}_' + str(f_size)](swap_feat - part_feat)
121
+ attn_feat = attn * swap_feat
122
+ # update features
123
+ updated_feat[:, :, location[1]:location[3], location[0]:location[2]] = attn_feat + part_feat
124
+ return updated_feat
125
+
126
+ def put_dict_to_device(self, x):
127
+ if self.flag_dict_device is False:
128
+ for k, v in self.dict.items():
129
+ for kk, vv in v.items():
130
+ self.dict[k][kk] = vv.to(x)
131
+ self.flag_dict_device = True
132
+
133
+ def forward(self, x, part_locations):
134
+ """
135
+ Now only supports testing with batch size = 1.
136
+
137
+ Args:
138
+ x (Tensor): Input faces with shape (b, c, 512, 512).
139
+ part_locations (list[Tensor]): Part locations.
140
+ """
141
+ self.put_dict_to_device(x)
142
+ # extract vggface features
143
+ vgg_features = self.vgg_extractor(x)
144
+ # update vggface features using the dictionary for each part
145
+ updated_vgg_features = []
146
+ batch = 0 # index of the single sample; only supports testing with batch size = 1
147
+ for vgg_layer, f_size in zip(self.vgg_layers, self.feature_sizes):
148
+ dict_features = self.dict[f'{f_size}']
149
+ vgg_feat = vgg_features[vgg_layer]
150
+ updated_feat = vgg_feat.clone()
151
+
152
+ # swap features from dictionary
153
+ for part_idx, part_name in enumerate(self.parts):
154
+ location = (part_locations[part_idx][batch] // (512 / f_size)).int()
155
+ updated_feat = self.swap_feat(vgg_feat, updated_feat, dict_features[part_name], location, part_name,
156
+ f_size)
157
+
158
+ updated_vgg_features.append(updated_feat)
159
+
160
+ vgg_feat_dilation = self.multi_scale_dilation(vgg_features['conv5_4'])
161
+ # use updated vgg features to modulate the upsampled features with
162
+ # SFT (Spatial Feature Transform) scaling and shifting manner.
163
+ upsampled_feat = self.upsample0(vgg_feat_dilation, updated_vgg_features[3])
164
+ upsampled_feat = self.upsample1(upsampled_feat, updated_vgg_features[2])
165
+ upsampled_feat = self.upsample2(upsampled_feat, updated_vgg_features[1])
166
+ upsampled_feat = self.upsample3(upsampled_feat, updated_vgg_features[0])
167
+ out = self.upsample4(upsampled_feat)
168
+
169
+ return out
basicsr/archs/dfdnet_util.py ADDED
@@ -0,0 +1,162 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from torch.autograd import Function
5
+ from torch.nn.utils.spectral_norm import spectral_norm
6
+
7
+
8
+ class BlurFunctionBackward(Function):
9
+
10
+ @staticmethod
11
+ def forward(ctx, grad_output, kernel, kernel_flip):
12
+ ctx.save_for_backward(kernel, kernel_flip)
13
+ grad_input = F.conv2d(grad_output, kernel_flip, padding=1, groups=grad_output.shape[1])
14
+ return grad_input
15
+
16
+ @staticmethod
17
+ def backward(ctx, gradgrad_output):
18
+ kernel, _ = ctx.saved_tensors
19
+ grad_input = F.conv2d(gradgrad_output, kernel, padding=1, groups=gradgrad_output.shape[1])
20
+ return grad_input, None, None
21
+
22
+
23
+ class BlurFunction(Function):
24
+
25
+ @staticmethod
26
+ def forward(ctx, x, kernel, kernel_flip):
27
+ ctx.save_for_backward(kernel, kernel_flip)
28
+ output = F.conv2d(x, kernel, padding=1, groups=x.shape[1])
29
+ return output
30
+
31
+ @staticmethod
32
+ def backward(ctx, grad_output):
33
+ kernel, kernel_flip = ctx.saved_tensors
34
+ grad_input = BlurFunctionBackward.apply(grad_output, kernel, kernel_flip)
35
+ return grad_input, None, None
36
+
37
+
38
+ blur = BlurFunction.apply
39
+
40
+
41
+ class Blur(nn.Module):
42
+
43
+ def __init__(self, channel):
44
+ super().__init__()
45
+ kernel = torch.tensor([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=torch.float32)
46
+ kernel = kernel.view(1, 1, 3, 3)
47
+ kernel = kernel / kernel.sum()
48
+ kernel_flip = torch.flip(kernel, [2, 3])
49
+
50
+ self.kernel = kernel.repeat(channel, 1, 1, 1)
51
+ self.kernel_flip = kernel_flip.repeat(channel, 1, 1, 1)
52
+
53
+ def forward(self, x):
54
+ return blur(x, self.kernel.type_as(x), self.kernel_flip.type_as(x))
55
+
56
+
57
+ def calc_mean_std(feat, eps=1e-5):
58
+ """Calculate mean and std for adaptive_instance_normalization.
59
+
60
+ Args:
61
+ feat (Tensor): 4D tensor.
62
+ eps (float): A small value added to the variance to avoid
63
+ divide-by-zero. Default: 1e-5.
64
+ """
65
+ size = feat.size()
66
+ assert len(size) == 4, 'The input feature should be 4D tensor.'
67
+ n, c = size[:2]
68
+ feat_var = feat.view(n, c, -1).var(dim=2) + eps
69
+ feat_std = feat_var.sqrt().view(n, c, 1, 1)
70
+ feat_mean = feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1)
71
+ return feat_mean, feat_std
72
+
73
+
74
+ def adaptive_instance_normalization(content_feat, style_feat):
75
+ """Adaptive instance normalization.
76
+
77
+ Adjust the reference features to have the similar color and illuminations
78
+ as those in the degradate features.
79
+
80
+ Args:
81
+ content_feat (Tensor): The reference feature.
82
+ style_feat (Tensor): The degraded features.
83
+ """
84
+ size = content_feat.size()
85
+ style_mean, style_std = calc_mean_std(style_feat)
86
+ content_mean, content_std = calc_mean_std(content_feat)
87
+ normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
88
+ return normalized_feat * style_std.expand(size) + style_mean.expand(size)
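As a quick sanity check (illustrative; tensor sizes are arbitrary), the output of adaptive_instance_normalization carries the per-channel statistics of style_feat:

import torch
from basicsr.archs.dfdnet_util import adaptive_instance_normalization, calc_mean_std

content = torch.randn(2, 8, 16, 16)
style = torch.randn(2, 8, 16, 16) * 3.0 + 1.0
out = adaptive_instance_normalization(content, style)
out_mean, out_std = calc_mean_std(out)
style_mean, style_std = calc_mean_std(style)
print(torch.allclose(out_mean, style_mean, atol=1e-3))  # True: channel means follow the style feature
print(torch.allclose(out_std, style_std, atol=1e-3))    # True: channel stds follow the style feature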
89
+
90
+
91
+ def AttentionBlock(in_channel):
92
+ return nn.Sequential(
93
+ spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)), nn.LeakyReLU(0.2, True),
94
+ spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1)))
95
+
96
+
97
+ def conv_block(in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=True):
98
+ """Conv block used in MSDilationBlock."""
99
+
100
+ return nn.Sequential(
101
+ spectral_norm(
102
+ nn.Conv2d(
103
+ in_channels,
104
+ out_channels,
105
+ kernel_size=kernel_size,
106
+ stride=stride,
107
+ dilation=dilation,
108
+ padding=((kernel_size - 1) // 2) * dilation,
109
+ bias=bias)),
110
+ nn.LeakyReLU(0.2),
111
+ spectral_norm(
112
+ nn.Conv2d(
113
+ out_channels,
114
+ out_channels,
115
+ kernel_size=kernel_size,
116
+ stride=stride,
117
+ dilation=dilation,
118
+ padding=((kernel_size - 1) // 2) * dilation,
119
+ bias=bias)),
120
+ )
121
+
122
+
123
+ class MSDilationBlock(nn.Module):
124
+ """Multi-scale dilation block."""
125
+
126
+ def __init__(self, in_channels, kernel_size=3, dilation=(1, 1, 1, 1), bias=True):
127
+ super(MSDilationBlock, self).__init__()
128
+
129
+ self.conv_blocks = nn.ModuleList()
130
+ for i in range(4):
131
+ self.conv_blocks.append(conv_block(in_channels, in_channels, kernel_size, dilation=dilation[i], bias=bias))
132
+ self.conv_fusion = spectral_norm(
133
+ nn.Conv2d(
134
+ in_channels * 4,
135
+ in_channels,
136
+ kernel_size=kernel_size,
137
+ stride=1,
138
+ padding=(kernel_size - 1) // 2,
139
+ bias=bias))
140
+
141
+ def forward(self, x):
142
+ out = []
143
+ for i in range(4):
144
+ out.append(self.conv_blocks[i](x))
145
+ out = torch.cat(out, 1)
146
+ out = self.conv_fusion(out) + x
147
+ return out
148
+
149
+
150
+ class UpResBlock(nn.Module):
151
+
152
+ def __init__(self, in_channel):
153
+ super(UpResBlock, self).__init__()
154
+ self.body = nn.Sequential(
155
+ nn.Conv2d(in_channel, in_channel, 3, 1, 1),
156
+ nn.LeakyReLU(0.2, True),
157
+ nn.Conv2d(in_channel, in_channel, 3, 1, 1),
158
+ )
159
+
160
+ def forward(self, x):
161
+ out = x + self.body(x)
162
+ return out
basicsr/archs/discriminator_arch.py ADDED
@@ -0,0 +1,150 @@
1
+ from torch import nn as nn
2
+ from torch.nn import functional as F
3
+ from torch.nn.utils import spectral_norm
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+
7
+
8
+ @ARCH_REGISTRY.register()
9
+ class VGGStyleDiscriminator(nn.Module):
10
+ """VGG style discriminator with input size 128 x 128 or 256 x 256.
11
+
12
+ It is used to train SRGAN, ESRGAN, and VideoGAN.
13
+
14
+ Args:
15
+ num_in_ch (int): Channel number of inputs. Default: 3.
16
+ num_feat (int): Channel number of base intermediate features. Default: 64.
17
+ """
18
+
19
+ def __init__(self, num_in_ch, num_feat, input_size=128):
20
+ super(VGGStyleDiscriminator, self).__init__()
21
+ self.input_size = input_size
22
+ assert self.input_size == 128 or self.input_size == 256, (
23
+ f'input size must be 128 or 256, but received {input_size}')
24
+
25
+ self.conv0_0 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1, bias=True)
26
+ self.conv0_1 = nn.Conv2d(num_feat, num_feat, 4, 2, 1, bias=False)
27
+ self.bn0_1 = nn.BatchNorm2d(num_feat, affine=True)
28
+
29
+ self.conv1_0 = nn.Conv2d(num_feat, num_feat * 2, 3, 1, 1, bias=False)
30
+ self.bn1_0 = nn.BatchNorm2d(num_feat * 2, affine=True)
31
+ self.conv1_1 = nn.Conv2d(num_feat * 2, num_feat * 2, 4, 2, 1, bias=False)
32
+ self.bn1_1 = nn.BatchNorm2d(num_feat * 2, affine=True)
33
+
34
+ self.conv2_0 = nn.Conv2d(num_feat * 2, num_feat * 4, 3, 1, 1, bias=False)
35
+ self.bn2_0 = nn.BatchNorm2d(num_feat * 4, affine=True)
36
+ self.conv2_1 = nn.Conv2d(num_feat * 4, num_feat * 4, 4, 2, 1, bias=False)
37
+ self.bn2_1 = nn.BatchNorm2d(num_feat * 4, affine=True)
38
+
39
+ self.conv3_0 = nn.Conv2d(num_feat * 4, num_feat * 8, 3, 1, 1, bias=False)
40
+ self.bn3_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
41
+ self.conv3_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
42
+ self.bn3_1 = nn.BatchNorm2d(num_feat * 8, affine=True)
43
+
44
+ self.conv4_0 = nn.Conv2d(num_feat * 8, num_feat * 8, 3, 1, 1, bias=False)
45
+ self.bn4_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
46
+ self.conv4_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
47
+ self.bn4_1 = nn.BatchNorm2d(num_feat * 8, affine=True)
48
+
49
+ if self.input_size == 256:
50
+ self.conv5_0 = nn.Conv2d(num_feat * 8, num_feat * 8, 3, 1, 1, bias=False)
51
+ self.bn5_0 = nn.BatchNorm2d(num_feat * 8, affine=True)
52
+ self.conv5_1 = nn.Conv2d(num_feat * 8, num_feat * 8, 4, 2, 1, bias=False)
53
+ self.bn5_1 = nn.BatchNorm2d(num_feat * 8, affine=True)
54
+
55
+ self.linear1 = nn.Linear(num_feat * 8 * 4 * 4, 100)
56
+ self.linear2 = nn.Linear(100, 1)
57
+
58
+ # activation function
59
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
60
+
61
+ def forward(self, x):
62
+ assert x.size(2) == self.input_size, (f'Input size must be identical to input_size, but received {x.size()}.')
63
+
64
+ feat = self.lrelu(self.conv0_0(x))
65
+ feat = self.lrelu(self.bn0_1(self.conv0_1(feat))) # output spatial size: /2
66
+
67
+ feat = self.lrelu(self.bn1_0(self.conv1_0(feat)))
68
+ feat = self.lrelu(self.bn1_1(self.conv1_1(feat))) # output spatial size: /4
69
+
70
+ feat = self.lrelu(self.bn2_0(self.conv2_0(feat)))
71
+ feat = self.lrelu(self.bn2_1(self.conv2_1(feat))) # output spatial size: /8
72
+
73
+ feat = self.lrelu(self.bn3_0(self.conv3_0(feat)))
74
+ feat = self.lrelu(self.bn3_1(self.conv3_1(feat))) # output spatial size: /16
75
+
76
+ feat = self.lrelu(self.bn4_0(self.conv4_0(feat)))
77
+ feat = self.lrelu(self.bn4_1(self.conv4_1(feat))) # output spatial size: /32
78
+
79
+ if self.input_size == 256:
80
+ feat = self.lrelu(self.bn5_0(self.conv5_0(feat)))
81
+ feat = self.lrelu(self.bn5_1(self.conv5_1(feat))) # output spatial size: / 64
82
+
83
+ # spatial size: (4, 4)
84
+ feat = feat.view(feat.size(0), -1)
85
+ feat = self.lrelu(self.linear1(feat))
86
+ out = self.linear2(feat)
87
+ return out
88
+
89
+
90
+ @ARCH_REGISTRY.register(suffix='basicsr')
91
+ class UNetDiscriminatorSN(nn.Module):
92
+ """Defines a U-Net discriminator with spectral normalization (SN)
93
+
94
+ It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
95
+
96
+ Args:
97
+ num_in_ch (int): Channel number of inputs. Default: 3.
98
+ num_feat (int): Channel number of base intermediate features. Default: 64.
99
+ skip_connection (bool): Whether to use skip connections in the U-Net. Default: True.
100
+ """
101
+
102
+ def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
103
+ super(UNetDiscriminatorSN, self).__init__()
104
+ self.skip_connection = skip_connection
105
+ norm = spectral_norm
106
+ # the first convolution
107
+ self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
108
+ # downsample
109
+ self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
110
+ self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
111
+ self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
112
+ # upsample
113
+ self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
114
+ self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
115
+ self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
116
+ # extra convolutions
117
+ self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
118
+ self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
119
+ self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
120
+
121
+ def forward(self, x):
122
+ # downsample
123
+ x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
124
+ x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
125
+ x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
126
+ x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
127
+
128
+ # upsample
129
+ x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)
130
+ x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
131
+
132
+ if self.skip_connection:
133
+ x4 = x4 + x2
134
+ x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)
135
+ x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
136
+
137
+ if self.skip_connection:
138
+ x5 = x5 + x1
139
+ x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)
140
+ x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
141
+
142
+ if self.skip_connection:
143
+ x6 = x6 + x0
144
+
145
+ # extra convolutions
146
+ out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
147
+ out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
148
+ out = self.conv9(out)
149
+
150
+ return out
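Both discriminators can be smoke-tested with random weights on the CPU; a minimal sketch (shapes are illustrative) showing that the VGG-style model returns one score per image while the U-Net returns a per-pixel map:

import torch
from basicsr.archs.discriminator_arch import UNetDiscriminatorSN, VGGStyleDiscriminator

x = torch.randn(1, 3, 128, 128)
d_vgg = VGGStyleDiscriminator(num_in_ch=3, num_feat=64, input_size=128)
print(d_vgg(x).shape)    # torch.Size([1, 1]): one realness score per image
d_unet = UNetDiscriminatorSN(num_in_ch=3, num_feat=64)
print(d_unet(x).shape)   # torch.Size([1, 1, 128, 128]): per-pixel realness map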
basicsr/archs/duf_arch.py ADDED
@@ -0,0 +1,276 @@
1
+ import numpy as np
2
+ import torch
3
+ from torch import nn as nn
4
+ from torch.nn import functional as F
5
+
6
+ from basicsr.utils.registry import ARCH_REGISTRY
7
+
8
+
9
+ class DenseBlocksTemporalReduce(nn.Module):
10
+ """A concatenation of 3 dense blocks with reduction in temporal dimension.
11
+
12
+ Note that the output temporal dimension is 6 fewer than the input temporal dimension, since there are 3 blocks.
13
+
14
+ Args:
15
+ num_feat (int): Number of channels in the blocks. Default: 64.
16
+ num_grow_ch (int): Growing factor of the dense blocks. Default: 32
17
+ adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation.
18
+ Set to false if you want to train from scratch. Default: False.
19
+ """
20
+
21
+ def __init__(self, num_feat=64, num_grow_ch=32, adapt_official_weights=False):
22
+ super(DenseBlocksTemporalReduce, self).__init__()
23
+ if adapt_official_weights:
24
+ eps = 1e-3
25
+ momentum = 1e-3
26
+ else: # pytorch default values
27
+ eps = 1e-05
28
+ momentum = 0.1
29
+
30
+ self.temporal_reduce1 = nn.Sequential(
31
+ nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
32
+ nn.Conv3d(num_feat, num_feat, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True),
33
+ nn.BatchNorm3d(num_feat, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
34
+ nn.Conv3d(num_feat, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
35
+
36
+ self.temporal_reduce2 = nn.Sequential(
37
+ nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
38
+ nn.Conv3d(
39
+ num_feat + num_grow_ch,
40
+ num_feat + num_grow_ch, (1, 1, 1),
41
+ stride=(1, 1, 1),
42
+ padding=(0, 0, 0),
43
+ bias=True), nn.BatchNorm3d(num_feat + num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
44
+ nn.Conv3d(num_feat + num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
45
+
46
+ self.temporal_reduce3 = nn.Sequential(
47
+ nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
48
+ nn.Conv3d(
49
+ num_feat + 2 * num_grow_ch,
50
+ num_feat + 2 * num_grow_ch, (1, 1, 1),
51
+ stride=(1, 1, 1),
52
+ padding=(0, 0, 0),
53
+ bias=True), nn.BatchNorm3d(num_feat + 2 * num_grow_ch, eps=eps, momentum=momentum),
54
+ nn.ReLU(inplace=True),
55
+ nn.Conv3d(
56
+ num_feat + 2 * num_grow_ch, num_grow_ch, (3, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True))
57
+
58
+ def forward(self, x):
59
+ """
60
+ Args:
61
+ x (Tensor): Input tensor with shape (b, num_feat, t, h, w).
62
+
63
+ Returns:
64
+ Tensor: Output with shape (b, num_feat + num_grow_ch * 3, 1, h, w).
65
+ """
66
+ x1 = self.temporal_reduce1(x)
67
+ x1 = torch.cat((x[:, :, 1:-1, :, :], x1), 1)
68
+
69
+ x2 = self.temporal_reduce2(x1)
70
+ x2 = torch.cat((x1[:, :, 1:-1, :, :], x2), 1)
71
+
72
+ x3 = self.temporal_reduce3(x2)
73
+ x3 = torch.cat((x2[:, :, 1:-1, :, :], x3), 1)
74
+
75
+ return x3
76
+
77
+
78
+ class DenseBlocks(nn.Module):
79
+ """ A concatenation of N dense blocks.
80
+
81
+ Args:
82
+ num_feat (int): Number of channels in the blocks. Default: 64.
83
+ num_grow_ch (int): Growing factor of the dense blocks. Default: 32.
84
+ num_block (int): Number of dense blocks. The values are:
85
+ DUF-S (16 layers): 3
86
+ DUF-M (28 layers): 9
87
+ DUF-L (52 layers): 21
88
+ adapt_official_weights (bool): Whether to adapt the weights translated from the official implementation.
89
+ Set to false if you want to train from scratch. Default: False.
90
+ """
91
+
92
+ def __init__(self, num_block, num_feat=64, num_grow_ch=16, adapt_official_weights=False):
93
+ super(DenseBlocks, self).__init__()
94
+ if adapt_official_weights:
95
+ eps = 1e-3
96
+ momentum = 1e-3
97
+ else: # pytorch default values
98
+ eps = 1e-05
99
+ momentum = 0.1
100
+
101
+ self.dense_blocks = nn.ModuleList()
102
+ for i in range(0, num_block):
103
+ self.dense_blocks.append(
104
+ nn.Sequential(
105
+ nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum), nn.ReLU(inplace=True),
106
+ nn.Conv3d(
107
+ num_feat + i * num_grow_ch,
108
+ num_feat + i * num_grow_ch, (1, 1, 1),
109
+ stride=(1, 1, 1),
110
+ padding=(0, 0, 0),
111
+ bias=True), nn.BatchNorm3d(num_feat + i * num_grow_ch, eps=eps, momentum=momentum),
112
+ nn.ReLU(inplace=True),
113
+ nn.Conv3d(
114
+ num_feat + i * num_grow_ch,
115
+ num_grow_ch, (3, 3, 3),
116
+ stride=(1, 1, 1),
117
+ padding=(1, 1, 1),
118
+ bias=True)))
119
+
120
+ def forward(self, x):
121
+ """
122
+ Args:
123
+ x (Tensor): Input tensor with shape (b, num_feat, t, h, w).
124
+
125
+ Returns:
126
+ Tensor: Output with shape (b, num_feat + num_block * num_grow_ch, t, h, w).
127
+ """
128
+ for i in range(0, len(self.dense_blocks)):
129
+ y = self.dense_blocks[i](x)
130
+ x = torch.cat((x, y), 1)
131
+ return x
132
+
133
+
134
+ class DynamicUpsamplingFilter(nn.Module):
135
+ """Dynamic upsampling filter used in DUF.
136
+
137
+ Reference: https://github.com/yhjo09/VSR-DUF
138
+
139
+ It only supports inputs with 3 channels and applies the same filters to all 3 channels.
140
+
141
+ Args:
142
+ filter_size (tuple): Filter size of generated filters. The shape is (kh, kw). Default: (5, 5).
143
+ """
144
+
145
+ def __init__(self, filter_size=(5, 5)):
146
+ super(DynamicUpsamplingFilter, self).__init__()
147
+ if not isinstance(filter_size, tuple):
148
+ raise TypeError(f'The type of filter_size must be tuple, but got {type(filter_size)}')
149
+ if len(filter_size) != 2:
150
+ raise ValueError(f'The length of filter size must be 2, but got {len(filter_size)}.')
151
+ # generate a local expansion filter, similar to im2col
152
+ self.filter_size = filter_size
153
+ filter_prod = np.prod(filter_size)
154
+ expansion_filter = torch.eye(int(filter_prod)).view(filter_prod, 1, *filter_size) # (kh*kw, 1, kh, kw)
155
+ self.expansion_filter = expansion_filter.repeat(3, 1, 1, 1) # repeat for all the 3 channels
156
+
157
+ def forward(self, x, filters):
158
+ """Forward function for DynamicUpsamplingFilter.
159
+
160
+ Args:
161
+ x (Tensor): Input image with 3 channels. The shape is (n, 3, h, w).
162
+ filters (Tensor): Generated dynamic filters. The shape is (n, filter_prod, upsampling_square, h, w).
163
+ filter_prod: prod of filter kernel size, e.g., 1*5*5=25.
164
+ upsampling_square: similar to pixel shuffle, upsampling_square = upsampling * upsampling.
165
+ e.g., for x 4 upsampling, upsampling_square= 4*4 = 16
166
+
167
+ Returns:
168
+ Tensor: Filtered image with shape (n, 3*upsampling_square, h, w)
169
+ """
170
+ n, filter_prod, upsampling_square, h, w = filters.size()
171
+ kh, kw = self.filter_size
172
+ expanded_input = F.conv2d(
173
+ x, self.expansion_filter.to(x), padding=(kh // 2, kw // 2), groups=3) # (n, 3*filter_prod, h, w)
174
+ expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(0, 3, 4, 1,
175
+ 2) # (n, h, w, 3, filter_prod)
176
+ filters = filters.permute(0, 3, 4, 1, 2) # (n, h, w, filter_prod, upsampling_square)
177
+ out = torch.matmul(expanded_input, filters) # (n, h, w, 3, upsampling_square)
178
+ return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)
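A small shape sketch for DynamicUpsamplingFilter (illustrative values; in the full model the filters come from the DUF filter branch below, softmax-normalized over the 25 taps):

import torch
import torch.nn.functional as F
from basicsr.archs.duf_arch import DynamicUpsamplingFilter

duf_filter = DynamicUpsamplingFilter(filter_size=(5, 5))
x = torch.randn(2, 3, 32, 32)              # center frame with 3 channels
filters = torch.randn(2, 25, 16, 32, 32)   # 5*5 = 25 taps, 4*4 = 16 sub-pixel positions
filters = F.softmax(filters, dim=1)        # normalize over the 25 taps at each position
out = duf_filter(x, filters)
print(out.shape)                           # torch.Size([2, 48, 32, 32]), i.e. 3 * 16 channels
print(F.pixel_shuffle(out, 4).shape)       # torch.Size([2, 3, 128, 128])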
179
+
180
+
181
+ @ARCH_REGISTRY.register()
182
+ class DUF(nn.Module):
183
+ """Network architecture for DUF
184
+
185
+ ``Paper: Deep Video Super-Resolution Network Using Dynamic Upsampling Filters Without Explicit Motion Compensation``
186
+
187
+ Reference: https://github.com/yhjo09/VSR-DUF
188
+
189
+ For all the models below, 'adapt_official_weights' is only necessary when
190
+ loading the weights converted from the official TensorFlow weights.
191
+ Please set it to False if you are training the model from scratch.
192
+
193
+ There are three models with different model sizes: DUF16Layers, DUF28Layers,
194
+ and DUF52Layers. This class is the base class for these models.
195
+
196
+ Args:
197
+ scale (int): The upsampling factor. Default: 4.
198
+ num_layer (int): The number of layers. Default: 52.
199
+ adapt_official_weights (bool): Whether to adapt the weights
200
+ translated from the official implementation. Set to false if you
201
+ want to train from scratch. Default: False.
202
+ """
203
+
204
+ def __init__(self, scale=4, num_layer=52, adapt_official_weights=False):
205
+ super(DUF, self).__init__()
206
+ self.scale = scale
207
+ if adapt_official_weights:
208
+ eps = 1e-3
209
+ momentum = 1e-3
210
+ else: # pytorch default values
211
+ eps = 1e-05
212
+ momentum = 0.1
213
+
214
+ self.conv3d1 = nn.Conv3d(3, 64, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
215
+ self.dynamic_filter = DynamicUpsamplingFilter((5, 5))
216
+
217
+ if num_layer == 16:
218
+ num_block = 3
219
+ num_grow_ch = 32
220
+ elif num_layer == 28:
221
+ num_block = 9
222
+ num_grow_ch = 16
223
+ elif num_layer == 52:
224
+ num_block = 21
225
+ num_grow_ch = 16
226
+ else:
227
+ raise ValueError(f'Only supported (16, 28, 52) layers, but got {num_layer}.')
228
+
229
+ self.dense_block1 = DenseBlocks(
230
+ num_block=num_block, num_feat=64, num_grow_ch=num_grow_ch,
231
+ adapt_official_weights=adapt_official_weights) # T = 7
232
+ self.dense_block2 = DenseBlocksTemporalReduce(
233
+ 64 + num_grow_ch * num_block, num_grow_ch, adapt_official_weights=adapt_official_weights) # T = 1
234
+ channels = 64 + num_grow_ch * num_block + num_grow_ch * 3
235
+ self.bn3d2 = nn.BatchNorm3d(channels, eps=eps, momentum=momentum)
236
+ self.conv3d2 = nn.Conv3d(channels, 256, (1, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1), bias=True)
237
+
238
+ self.conv3d_r1 = nn.Conv3d(256, 256, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
239
+ self.conv3d_r2 = nn.Conv3d(256, 3 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
240
+
241
+ self.conv3d_f1 = nn.Conv3d(256, 512, (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
242
+ self.conv3d_f2 = nn.Conv3d(
243
+ 512, 1 * 5 * 5 * (scale**2), (1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=True)
244
+
245
+ def forward(self, x):
246
+ """
247
+ Args:
248
+ x (Tensor): Input with shape (b, 7, c, h, w)
249
+
250
+ Returns:
251
+ Tensor: Output with shape (b, c, h * scale, w * scale)
252
+ """
253
+ num_batches, num_imgs, _, h, w = x.size()
254
+
255
+ x = x.permute(0, 2, 1, 3, 4) # (b, c, 7, h, w) for Conv3D
256
+ x_center = x[:, :, num_imgs // 2, :, :]
257
+
258
+ x = self.conv3d1(x)
259
+ x = self.dense_block1(x)
260
+ x = self.dense_block2(x)
261
+ x = F.relu(self.bn3d2(x), inplace=True)
262
+ x = F.relu(self.conv3d2(x), inplace=True)
263
+
264
+ # residual image
265
+ res = self.conv3d_r2(F.relu(self.conv3d_r1(x), inplace=True))
266
+
267
+ # filter
268
+ filter_ = self.conv3d_f2(F.relu(self.conv3d_f1(x), inplace=True))
269
+ filter_ = F.softmax(filter_.view(num_batches, 25, self.scale**2, h, w), dim=1)
270
+
271
+ # dynamic filter
272
+ out = self.dynamic_filter(x_center, filter_)
273
+ out += res.squeeze_(2)
274
+ out = F.pixel_shuffle(out, self.scale)
275
+
276
+ return out
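An end-to-end CPU smoke test for the smallest DUF variant (illustrative sizes; no pretrained weights are needed just to check shapes):

import torch
from basicsr.archs.duf_arch import DUF

model = DUF(scale=4, num_layer=16, adapt_official_weights=False).eval()
x = torch.rand(1, 7, 3, 32, 32)   # 7 consecutive low-resolution frames
with torch.no_grad():
    out = model(x)                # upscaled center frame
print(out.shape)                  # torch.Size([1, 3, 128, 128])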
basicsr/archs/ecbsr_arch.py ADDED
@@ -0,0 +1,275 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+
7
+
8
+ class SeqConv3x3(nn.Module):
9
+ """The re-parameterizable block used in the ECBSR architecture.
10
+
11
+ ``Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices``
12
+
13
+ Reference: https://github.com/xindongzhang/ECBSR
14
+
15
+ Args:
16
+ seq_type (str): Sequence type, option: conv1x1-conv3x3 | conv1x1-sobelx | conv1x1-sobely | conv1x1-laplacian.
17
+ in_channels (int): Channel number of input.
18
+ out_channels (int): Channel number of output.
19
+ depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
20
+ """
21
+
22
+ def __init__(self, seq_type, in_channels, out_channels, depth_multiplier=1):
23
+ super(SeqConv3x3, self).__init__()
24
+ self.seq_type = seq_type
25
+ self.in_channels = in_channels
26
+ self.out_channels = out_channels
27
+
28
+ if self.seq_type == 'conv1x1-conv3x3':
29
+ self.mid_planes = int(out_channels * depth_multiplier)
30
+ conv0 = torch.nn.Conv2d(self.in_channels, self.mid_planes, kernel_size=1, padding=0)
31
+ self.k0 = conv0.weight
32
+ self.b0 = conv0.bias
33
+
34
+ conv1 = torch.nn.Conv2d(self.mid_planes, self.out_channels, kernel_size=3)
35
+ self.k1 = conv1.weight
36
+ self.b1 = conv1.bias
37
+
38
+ elif self.seq_type == 'conv1x1-sobelx':
39
+ conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
40
+ self.k0 = conv0.weight
41
+ self.b0 = conv0.bias
42
+
43
+ # init scale and bias
44
+ scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
45
+ self.scale = nn.Parameter(scale)
46
+ bias = torch.randn(self.out_channels) * 1e-3
47
+ bias = torch.reshape(bias, (self.out_channels, ))
48
+ self.bias = nn.Parameter(bias)
49
+ # init mask
50
+ self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
51
+ for i in range(self.out_channels):
52
+ self.mask[i, 0, 0, 0] = 1.0
53
+ self.mask[i, 0, 1, 0] = 2.0
54
+ self.mask[i, 0, 2, 0] = 1.0
55
+ self.mask[i, 0, 0, 2] = -1.0
56
+ self.mask[i, 0, 1, 2] = -2.0
57
+ self.mask[i, 0, 2, 2] = -1.0
58
+ self.mask = nn.Parameter(data=self.mask, requires_grad=False)
59
+
60
+ elif self.seq_type == 'conv1x1-sobely':
61
+ conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
62
+ self.k0 = conv0.weight
63
+ self.b0 = conv0.bias
64
+
65
+ # init scale and bias
66
+ scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
67
+ self.scale = nn.Parameter(torch.FloatTensor(scale))
68
+ bias = torch.randn(self.out_channels) * 1e-3
69
+ bias = torch.reshape(bias, (self.out_channels, ))
70
+ self.bias = nn.Parameter(torch.FloatTensor(bias))
71
+ # init mask
72
+ self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
73
+ for i in range(self.out_channels):
74
+ self.mask[i, 0, 0, 0] = 1.0
75
+ self.mask[i, 0, 0, 1] = 2.0
76
+ self.mask[i, 0, 0, 2] = 1.0
77
+ self.mask[i, 0, 2, 0] = -1.0
78
+ self.mask[i, 0, 2, 1] = -2.0
79
+ self.mask[i, 0, 2, 2] = -1.0
80
+ self.mask = nn.Parameter(data=self.mask, requires_grad=False)
81
+
82
+ elif self.seq_type == 'conv1x1-laplacian':
83
+ conv0 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1, padding=0)
84
+ self.k0 = conv0.weight
85
+ self.b0 = conv0.bias
86
+
87
+ # init scale and bias
88
+ scale = torch.randn(size=(self.out_channels, 1, 1, 1)) * 1e-3
89
+ self.scale = nn.Parameter(torch.FloatTensor(scale))
90
+ bias = torch.randn(self.out_channels) * 1e-3
91
+ bias = torch.reshape(bias, (self.out_channels, ))
92
+ self.bias = nn.Parameter(torch.FloatTensor(bias))
93
+ # init mask
94
+ self.mask = torch.zeros((self.out_channels, 1, 3, 3), dtype=torch.float32)
95
+ for i in range(self.out_channels):
96
+ self.mask[i, 0, 0, 1] = 1.0
97
+ self.mask[i, 0, 1, 0] = 1.0
98
+ self.mask[i, 0, 1, 2] = 1.0
99
+ self.mask[i, 0, 2, 1] = 1.0
100
+ self.mask[i, 0, 1, 1] = -4.0
101
+ self.mask = nn.Parameter(data=self.mask, requires_grad=False)
102
+ else:
103
+ raise ValueError('The type of seqconv is not supported!')
104
+
105
+ def forward(self, x):
106
+ if self.seq_type == 'conv1x1-conv3x3':
107
+ # conv-1x1
108
+ y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
109
+ # explicitly padding with bias
110
+ y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
111
+ b0_pad = self.b0.view(1, -1, 1, 1)
112
+ y0[:, :, 0:1, :] = b0_pad
113
+ y0[:, :, -1:, :] = b0_pad
114
+ y0[:, :, :, 0:1] = b0_pad
115
+ y0[:, :, :, -1:] = b0_pad
116
+ # conv-3x3
117
+ y1 = F.conv2d(input=y0, weight=self.k1, bias=self.b1, stride=1)
118
+ else:
119
+ y0 = F.conv2d(input=x, weight=self.k0, bias=self.b0, stride=1)
120
+ # explicitly padding with bias
121
+ y0 = F.pad(y0, (1, 1, 1, 1), 'constant', 0)
122
+ b0_pad = self.b0.view(1, -1, 1, 1)
123
+ y0[:, :, 0:1, :] = b0_pad
124
+ y0[:, :, -1:, :] = b0_pad
125
+ y0[:, :, :, 0:1] = b0_pad
126
+ y0[:, :, :, -1:] = b0_pad
127
+ # conv-3x3
128
+ y1 = F.conv2d(input=y0, weight=self.scale * self.mask, bias=self.bias, stride=1, groups=self.out_channels)
129
+ return y1
130
+
131
+ def rep_params(self):
132
+ device = self.k0.get_device()
133
+ if device < 0:
134
+ device = None
135
+
136
+ if self.seq_type == 'conv1x1-conv3x3':
137
+ # re-param conv kernel
138
+ rep_weight = F.conv2d(input=self.k1, weight=self.k0.permute(1, 0, 2, 3))
139
+ # re-param conv bias
140
+ rep_bias = torch.ones(1, self.mid_planes, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
141
+ rep_bias = F.conv2d(input=rep_bias, weight=self.k1).view(-1, ) + self.b1
142
+ else:
143
+ tmp = self.scale * self.mask
144
+ k1 = torch.zeros((self.out_channels, self.out_channels, 3, 3), device=device)
145
+ for i in range(self.out_channels):
146
+ k1[i, i, :, :] = tmp[i, 0, :, :]
147
+ b1 = self.bias
148
+ # re-param conv kernel
149
+ rep_weight = F.conv2d(input=k1, weight=self.k0.permute(1, 0, 2, 3))
150
+ # re-param conv bias
151
+ rep_bias = torch.ones(1, self.out_channels, 3, 3, device=device) * self.b0.view(1, -1, 1, 1)
152
+ rep_bias = F.conv2d(input=rep_bias, weight=k1).view(-1, ) + b1
153
+ return rep_weight, rep_bias
154
+
155
+
156
+ class ECB(nn.Module):
157
+ """The ECB block used in the ECBSR architecture.
158
+
159
+ Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
160
+ Ref git repo: https://github.com/xindongzhang/ECBSR
161
+
162
+ Args:
163
+ in_channels (int): Channel number of input.
164
+ out_channels (int): Channel number of output.
165
+ depth_multiplier (int): Width multiplier in the expand-and-squeeze conv. Default: 1.
166
+ act_type (str): Activation type. Option: prelu | relu | rrelu | softplus | linear. Default: prelu.
167
+ with_idt (bool): Whether to use identity connection. Default: False.
168
+ """
169
+
170
+ def __init__(self, in_channels, out_channels, depth_multiplier, act_type='prelu', with_idt=False):
171
+ super(ECB, self).__init__()
172
+
173
+ self.depth_multiplier = depth_multiplier
174
+ self.in_channels = in_channels
175
+ self.out_channels = out_channels
176
+ self.act_type = act_type
177
+
178
+ if with_idt and (self.in_channels == self.out_channels):
179
+ self.with_idt = True
180
+ else:
181
+ self.with_idt = False
182
+
183
+ self.conv3x3 = torch.nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, padding=1)
184
+ self.conv1x1_3x3 = SeqConv3x3('conv1x1-conv3x3', self.in_channels, self.out_channels, self.depth_multiplier)
185
+ self.conv1x1_sbx = SeqConv3x3('conv1x1-sobelx', self.in_channels, self.out_channels)
186
+ self.conv1x1_sby = SeqConv3x3('conv1x1-sobely', self.in_channels, self.out_channels)
187
+ self.conv1x1_lpl = SeqConv3x3('conv1x1-laplacian', self.in_channels, self.out_channels)
188
+
189
+ if self.act_type == 'prelu':
190
+ self.act = nn.PReLU(num_parameters=self.out_channels)
191
+ elif self.act_type == 'relu':
192
+ self.act = nn.ReLU(inplace=True)
193
+ elif self.act_type == 'rrelu':
194
+ self.act = nn.RReLU(lower=-0.05, upper=0.05)
195
+ elif self.act_type == 'softplus':
196
+ self.act = nn.Softplus()
197
+ elif self.act_type == 'linear':
198
+ pass
199
+ else:
200
+ raise ValueError('The type of activation if not support!')
201
+
202
+ def forward(self, x):
203
+ if self.training:
204
+ y = self.conv3x3(x) + self.conv1x1_3x3(x) + self.conv1x1_sbx(x) + self.conv1x1_sby(x) + self.conv1x1_lpl(x)
205
+ if self.with_idt:
206
+ y += x
207
+ else:
208
+ rep_weight, rep_bias = self.rep_params()
209
+ y = F.conv2d(input=x, weight=rep_weight, bias=rep_bias, stride=1, padding=1)
210
+ if self.act_type != 'linear':
211
+ y = self.act(y)
212
+ return y
213
+
214
+ def rep_params(self):
215
+ weight0, bias0 = self.conv3x3.weight, self.conv3x3.bias
216
+ weight1, bias1 = self.conv1x1_3x3.rep_params()
217
+ weight2, bias2 = self.conv1x1_sbx.rep_params()
218
+ weight3, bias3 = self.conv1x1_sby.rep_params()
219
+ weight4, bias4 = self.conv1x1_lpl.rep_params()
220
+ rep_weight, rep_bias = (weight0 + weight1 + weight2 + weight3 + weight4), (
221
+ bias0 + bias1 + bias2 + bias3 + bias4)
222
+
223
+ if self.with_idt:
224
+ device = rep_weight.get_device()
225
+ if device < 0:
226
+ device = None
227
+ weight_idt = torch.zeros(self.out_channels, self.out_channels, 3, 3, device=device)
228
+ for i in range(self.out_channels):
229
+ weight_idt[i, i, 1, 1] = 1.0
230
+ bias_idt = 0.0
231
+ rep_weight, rep_bias = rep_weight + weight_idt, rep_bias + bias_idt
232
+ return rep_weight, rep_bias
233
+
234
+
235
+ @ARCH_REGISTRY.register()
236
+ class ECBSR(nn.Module):
237
+ """ECBSR architecture.
238
+
239
+ Paper: Edge-oriented Convolution Block for Real-time Super Resolution on Mobile Devices
240
+ Ref git repo: https://github.com/xindongzhang/ECBSR
241
+
242
+ Args:
243
+ num_in_ch (int): Channel number of inputs.
244
+ num_out_ch (int): Channel number of outputs.
245
+ num_block (int): Block number in the trunk network.
246
+ num_channel (int): Channel number.
247
+ with_idt (bool): Whether use identity in convolution layers.
248
+ act_type (str): Activation type.
249
+ scale (int): Upsampling factor.
250
+ """
251
+
252
+ def __init__(self, num_in_ch, num_out_ch, num_block, num_channel, with_idt, act_type, scale):
253
+ super(ECBSR, self).__init__()
254
+ self.num_in_ch = num_in_ch
255
+ self.scale = scale
256
+
257
+ backbone = []
258
+ backbone += [ECB(num_in_ch, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
259
+ for _ in range(num_block):
260
+ backbone += [ECB(num_channel, num_channel, depth_multiplier=2.0, act_type=act_type, with_idt=with_idt)]
261
+ backbone += [
262
+ ECB(num_channel, num_out_ch * scale * scale, depth_multiplier=2.0, act_type='linear', with_idt=with_idt)
263
+ ]
264
+
265
+ self.backbone = nn.Sequential(*backbone)
266
+ self.upsampler = nn.PixelShuffle(scale)
267
+
268
+ def forward(self, x):
269
+ if self.num_in_ch > 1:
270
+ shortcut = torch.repeat_interleave(x, self.scale * self.scale, dim=1)
271
+ else:
272
+ shortcut = x # broadcasting will repeat the single input channel over the scale * scale output channels
273
+ y = self.backbone(x) + shortcut
274
+ y = self.upsampler(y)
275
+ return y
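
The training-time ECB sums five convolution branches (plus an optional identity), while rep_params() collapses them into a single 3x3 kernel and bias for inference. Below is a minimal sketch, not part of this commit, that checks the two paths agree; the import path is assumed from the repo layout.

import torch
from basicsr.archs.ecbsr_arch import ECB  # import path assumed from the repo layout

block = ECB(in_channels=8, out_channels=8, depth_multiplier=2.0, act_type='prelu', with_idt=True)
x = torch.randn(1, 8, 16, 16)

with torch.no_grad():
    block.train()
    y_branches = block(x)   # explicit multi-branch sum (training path)
    block.eval()
    y_fused = block(x)      # single conv built from rep_params() (inference path)

print(torch.allclose(y_branches, y_fused, atol=1e-5))  # expected: True, up to floating point error
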
basicsr/archs/edsr_arch.py ADDED
@@ -0,0 +1,61 @@
1
+ import torch
2
+ from torch import nn as nn
3
+
4
+ from basicsr.archs.arch_util import ResidualBlockNoBN, Upsample, make_layer
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+
7
+
8
+ @ARCH_REGISTRY.register()
9
+ class EDSR(nn.Module):
10
+ """EDSR network structure.
11
+
12
+ Paper: Enhanced Deep Residual Networks for Single Image Super-Resolution.
13
+ Ref git repo: https://github.com/thstkdgus35/EDSR-PyTorch
14
+
15
+ Args:
16
+ num_in_ch (int): Channel number of inputs.
17
+ num_out_ch (int): Channel number of outputs.
18
+ num_feat (int): Channel number of intermediate features.
19
+ Default: 64.
20
+ num_block (int): Block number in the trunk network. Default: 16.
21
+ upscale (int): Upsampling factor. Supports 2^n and 3.
22
+ Default: 4.
23
+ res_scale (float): Used to scale the residual in residual block.
24
+ Default: 1.
25
+ img_range (float): Image range. Default: 255.
26
+ rgb_mean (tuple[float]): Image mean in RGB orders.
27
+ Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
28
+ """
29
+
30
+ def __init__(self,
31
+ num_in_ch,
32
+ num_out_ch,
33
+ num_feat=64,
34
+ num_block=16,
35
+ upscale=4,
36
+ res_scale=1,
37
+ img_range=255.,
38
+ rgb_mean=(0.4488, 0.4371, 0.4040)):
39
+ super(EDSR, self).__init__()
40
+
41
+ self.img_range = img_range
42
+ self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
43
+
44
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
45
+ self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat, res_scale=res_scale, pytorch_init=True)
46
+ self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
47
+ self.upsample = Upsample(upscale, num_feat)
48
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
49
+
50
+ def forward(self, x):
51
+ self.mean = self.mean.type_as(x)
52
+
53
+ x = (x - self.mean) * self.img_range
54
+ x = self.conv_first(x)
55
+ res = self.conv_after_body(self.body(x))
56
+ res += x
57
+
58
+ x = self.conv_last(self.upsample(res))
59
+ x = x / self.img_range + self.mean
60
+
61
+ return x
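
A minimal usage sketch for the EDSR class above, not part of this commit; the import path is assumed from the repo layout, and inputs are expected in [0, 1] since img_range rescales them internally.

import torch
from basicsr.archs.edsr_arch import EDSR  # import path assumed

model = EDSR(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4).eval()
lr = torch.rand(1, 3, 48, 48)  # values in [0, 1]
with torch.no_grad():
    sr = model(lr)
print(sr.shape)  # torch.Size([1, 3, 192, 192])
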
basicsr/archs/edvr_arch.py ADDED
@@ -0,0 +1,382 @@
1
+ import torch
2
+ from torch import nn as nn
3
+ from torch.nn import functional as F
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+ from .arch_util import DCNv2Pack, ResidualBlockNoBN, make_layer
7
+
8
+
9
+ class PCDAlignment(nn.Module):
10
+ """Alignment module using Pyramid, Cascading and Deformable convolution
11
+ (PCD). It is used in EDVR.
12
+
13
+ ``Paper: EDVR: Video Restoration with Enhanced Deformable Convolutional Networks``
14
+
15
+ Args:
16
+ num_feat (int): Channel number of middle features. Default: 64.
17
+ deformable_groups (int): Deformable groups. Defaults: 8.
18
+ """
19
+
20
+ def __init__(self, num_feat=64, deformable_groups=8):
21
+ super(PCDAlignment, self).__init__()
22
+
23
+ # Pyramid has three levels:
24
+ # L3: level 3, 1/4 spatial size
25
+ # L2: level 2, 1/2 spatial size
26
+ # L1: level 1, original spatial size
27
+ self.offset_conv1 = nn.ModuleDict()
28
+ self.offset_conv2 = nn.ModuleDict()
29
+ self.offset_conv3 = nn.ModuleDict()
30
+ self.dcn_pack = nn.ModuleDict()
31
+ self.feat_conv = nn.ModuleDict()
32
+
33
+ # Pyramids
34
+ for i in range(3, 0, -1):
35
+ level = f'l{i}'
36
+ self.offset_conv1[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
37
+ if i == 3:
38
+ self.offset_conv2[level] = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
39
+ else:
40
+ self.offset_conv2[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
41
+ self.offset_conv3[level] = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
42
+ self.dcn_pack[level] = DCNv2Pack(num_feat, num_feat, 3, padding=1, deformable_groups=deformable_groups)
43
+
44
+ if i < 3:
45
+ self.feat_conv[level] = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
46
+
47
+ # Cascading dcn
48
+ self.cas_offset_conv1 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
49
+ self.cas_offset_conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
50
+ self.cas_dcnpack = DCNv2Pack(num_feat, num_feat, 3, padding=1, deformable_groups=deformable_groups)
51
+
52
+ self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
53
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
54
+
55
+ def forward(self, nbr_feat_l, ref_feat_l):
56
+ """Align neighboring frame features to the reference frame features.
57
+
58
+ Args:
59
+ nbr_feat_l (list[Tensor]): Neighboring feature list. It
60
+ contains three pyramid levels (L1, L2, L3),
61
+ each with shape (b, c, h, w).
62
+ ref_feat_l (list[Tensor]): Reference feature list. It
63
+ contains three pyramid levels (L1, L2, L3),
64
+ each with shape (b, c, h, w).
65
+
66
+ Returns:
67
+ Tensor: Aligned features.
68
+ """
69
+ # Pyramids
70
+ upsampled_offset, upsampled_feat = None, None
71
+ for i in range(3, 0, -1):
72
+ level = f'l{i}'
73
+ offset = torch.cat([nbr_feat_l[i - 1], ref_feat_l[i - 1]], dim=1)
74
+ offset = self.lrelu(self.offset_conv1[level](offset))
75
+ if i == 3:
76
+ offset = self.lrelu(self.offset_conv2[level](offset))
77
+ else:
78
+ offset = self.lrelu(self.offset_conv2[level](torch.cat([offset, upsampled_offset], dim=1)))
79
+ offset = self.lrelu(self.offset_conv3[level](offset))
80
+
81
+ feat = self.dcn_pack[level](nbr_feat_l[i - 1], offset)
82
+ if i < 3:
83
+ feat = self.feat_conv[level](torch.cat([feat, upsampled_feat], dim=1))
84
+ if i > 1:
85
+ feat = self.lrelu(feat)
86
+
87
+ if i > 1: # upsample offset and features
88
+ # x2: when we upsample the offset, we should also enlarge
89
+ # the magnitude.
90
+ upsampled_offset = self.upsample(offset) * 2
91
+ upsampled_feat = self.upsample(feat)
92
+
93
+ # Cascading
94
+ offset = torch.cat([feat, ref_feat_l[0]], dim=1)
95
+ offset = self.lrelu(self.cas_offset_conv2(self.lrelu(self.cas_offset_conv1(offset))))
96
+ feat = self.lrelu(self.cas_dcnpack(feat, offset))
97
+ return feat
98
+
99
+
100
+ class TSAFusion(nn.Module):
101
+ """Temporal Spatial Attention (TSA) fusion module.
102
+
103
+ Temporal: Calculate the correlation between center frame and
104
+ neighboring frames;
105
+ Spatial: It has 3 pyramid levels, the attention is similar to SFT.
106
+ (SFT: Recovering realistic texture in image super-resolution by deep
107
+ spatial feature transform.)
108
+
109
+ Args:
110
+ num_feat (int): Channel number of middle features. Default: 64.
111
+ num_frame (int): Number of frames. Default: 5.
112
+ center_frame_idx (int): The index of center frame. Default: 2.
113
+ """
114
+
115
+ def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2):
116
+ super(TSAFusion, self).__init__()
117
+ self.center_frame_idx = center_frame_idx
118
+ # temporal attention (before fusion conv)
119
+ self.temporal_attn1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
120
+ self.temporal_attn2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
121
+ self.feat_fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
122
+
123
+ # spatial attention (after fusion conv)
124
+ self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
125
+ self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
126
+ self.spatial_attn1 = nn.Conv2d(num_frame * num_feat, num_feat, 1)
127
+ self.spatial_attn2 = nn.Conv2d(num_feat * 2, num_feat, 1)
128
+ self.spatial_attn3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
129
+ self.spatial_attn4 = nn.Conv2d(num_feat, num_feat, 1)
130
+ self.spatial_attn5 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
131
+ self.spatial_attn_l1 = nn.Conv2d(num_feat, num_feat, 1)
132
+ self.spatial_attn_l2 = nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1)
133
+ self.spatial_attn_l3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
134
+ self.spatial_attn_add1 = nn.Conv2d(num_feat, num_feat, 1)
135
+ self.spatial_attn_add2 = nn.Conv2d(num_feat, num_feat, 1)
136
+
137
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
138
+ self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
139
+
140
+ def forward(self, aligned_feat):
141
+ """
142
+ Args:
143
+ aligned_feat (Tensor): Aligned features with shape (b, t, c, h, w).
144
+
145
+ Returns:
146
+ Tensor: Features after TSA with the shape (b, c, h, w).
147
+ """
148
+ b, t, c, h, w = aligned_feat.size()
149
+ # temporal attention
150
+ embedding_ref = self.temporal_attn1(aligned_feat[:, self.center_frame_idx, :, :, :].clone())
151
+ embedding = self.temporal_attn2(aligned_feat.view(-1, c, h, w))
152
+ embedding = embedding.view(b, t, -1, h, w) # (b, t, c, h, w)
153
+
154
+ corr_l = [] # correlation list
155
+ for i in range(t):
156
+ emb_neighbor = embedding[:, i, :, :, :]
157
+ corr = torch.sum(emb_neighbor * embedding_ref, 1) # (b, h, w)
158
+ corr_l.append(corr.unsqueeze(1)) # (b, 1, h, w)
159
+ corr_prob = torch.sigmoid(torch.cat(corr_l, dim=1)) # (b, t, h, w)
160
+ corr_prob = corr_prob.unsqueeze(2).expand(b, t, c, h, w)
161
+ corr_prob = corr_prob.contiguous().view(b, -1, h, w) # (b, t*c, h, w)
162
+ aligned_feat = aligned_feat.view(b, -1, h, w) * corr_prob
163
+
164
+ # fusion
165
+ feat = self.lrelu(self.feat_fusion(aligned_feat))
166
+
167
+ # spatial attention
168
+ attn = self.lrelu(self.spatial_attn1(aligned_feat))
169
+ attn_max = self.max_pool(attn)
170
+ attn_avg = self.avg_pool(attn)
171
+ attn = self.lrelu(self.spatial_attn2(torch.cat([attn_max, attn_avg], dim=1)))
172
+ # pyramid levels
173
+ attn_level = self.lrelu(self.spatial_attn_l1(attn))
174
+ attn_max = self.max_pool(attn_level)
175
+ attn_avg = self.avg_pool(attn_level)
176
+ attn_level = self.lrelu(self.spatial_attn_l2(torch.cat([attn_max, attn_avg], dim=1)))
177
+ attn_level = self.lrelu(self.spatial_attn_l3(attn_level))
178
+ attn_level = self.upsample(attn_level)
179
+
180
+ attn = self.lrelu(self.spatial_attn3(attn)) + attn_level
181
+ attn = self.lrelu(self.spatial_attn4(attn))
182
+ attn = self.upsample(attn)
183
+ attn = self.spatial_attn5(attn)
184
+ attn_add = self.spatial_attn_add2(self.lrelu(self.spatial_attn_add1(attn)))
185
+ attn = torch.sigmoid(attn)
186
+
187
+ # after initialization, * 2 makes (attn * 2) to be close to 1.
188
+ feat = feat * attn * 2 + attn_add
189
+ return feat
190
+
191
+
192
+ class PredeblurModule(nn.Module):
193
+ """Pre-deblur module.
194
+
195
+ Args:
196
+ num_in_ch (int): Channel number of input image. Default: 3.
197
+ num_feat (int): Channel number of intermediate features. Default: 64.
198
+ hr_in (bool): Whether the input has high resolution. Default: False.
199
+ """
200
+
201
+ def __init__(self, num_in_ch=3, num_feat=64, hr_in=False):
202
+ super(PredeblurModule, self).__init__()
203
+ self.hr_in = hr_in
204
+
205
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
206
+ if self.hr_in:
207
+ # downsample x4 by stride conv
208
+ self.stride_conv_hr1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
209
+ self.stride_conv_hr2 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
210
+
211
+ # generate feature pyramid
212
+ self.stride_conv_l2 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
213
+ self.stride_conv_l3 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
214
+
215
+ self.resblock_l3 = ResidualBlockNoBN(num_feat=num_feat)
216
+ self.resblock_l2_1 = ResidualBlockNoBN(num_feat=num_feat)
217
+ self.resblock_l2_2 = ResidualBlockNoBN(num_feat=num_feat)
218
+ self.resblock_l1 = nn.ModuleList([ResidualBlockNoBN(num_feat=num_feat) for i in range(5)])
219
+
220
+ self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
221
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
222
+
223
+ def forward(self, x):
224
+ feat_l1 = self.lrelu(self.conv_first(x))
225
+ if self.hr_in:
226
+ feat_l1 = self.lrelu(self.stride_conv_hr1(feat_l1))
227
+ feat_l1 = self.lrelu(self.stride_conv_hr2(feat_l1))
228
+
229
+ # generate feature pyramid
230
+ feat_l2 = self.lrelu(self.stride_conv_l2(feat_l1))
231
+ feat_l3 = self.lrelu(self.stride_conv_l3(feat_l2))
232
+
233
+ feat_l3 = self.upsample(self.resblock_l3(feat_l3))
234
+ feat_l2 = self.resblock_l2_1(feat_l2) + feat_l3
235
+ feat_l2 = self.upsample(self.resblock_l2_2(feat_l2))
236
+
237
+ for i in range(2):
238
+ feat_l1 = self.resblock_l1[i](feat_l1)
239
+ feat_l1 = feat_l1 + feat_l2
240
+ for i in range(2, 5):
241
+ feat_l1 = self.resblock_l1[i](feat_l1)
242
+ return feat_l1
243
+
244
+
245
+ @ARCH_REGISTRY.register()
246
+ class EDVR(nn.Module):
247
+ """EDVR network structure for video super-resolution.
248
+
249
+ Now only supports the x4 upsampling factor.
250
+
251
+ ``Paper: EDVR: Video Restoration with Enhanced Deformable Convolutional Networks``
252
+
253
+ Args:
254
+ num_in_ch (int): Channel number of input image. Default: 3.
255
+ num_out_ch (int): Channel number of output image. Default: 3.
256
+ num_feat (int): Channel number of intermediate features. Default: 64.
257
+ num_frame (int): Number of input frames. Default: 5.
258
+ deformable_groups (int): Deformable groups. Defaults: 8.
259
+ num_extract_block (int): Number of blocks for feature extraction.
260
+ Default: 5.
261
+ num_reconstruct_block (int): Number of blocks for reconstruction.
262
+ Default: 10.
263
+ center_frame_idx (int): The index of center frame. Frame counting from
264
+ 0. Default: Middle of input frames.
265
+ hr_in (bool): Whether the input has high resolution. Default: False.
266
+ with_predeblur (bool): Whether to use the predeblur module.
267
+ Default: False.
268
+ with_tsa (bool): Whether to use the TSA module. Default: True.
269
+ """
270
+
271
+ def __init__(self,
272
+ num_in_ch=3,
273
+ num_out_ch=3,
274
+ num_feat=64,
275
+ num_frame=5,
276
+ deformable_groups=8,
277
+ num_extract_block=5,
278
+ num_reconstruct_block=10,
279
+ center_frame_idx=None,
280
+ hr_in=False,
281
+ with_predeblur=False,
282
+ with_tsa=True):
283
+ super(EDVR, self).__init__()
284
+ if center_frame_idx is None:
285
+ self.center_frame_idx = num_frame // 2
286
+ else:
287
+ self.center_frame_idx = center_frame_idx
288
+ self.hr_in = hr_in
289
+ self.with_predeblur = with_predeblur
290
+ self.with_tsa = with_tsa
291
+
292
+ # extract features for each frame
293
+ if self.with_predeblur:
294
+ self.predeblur = PredeblurModule(num_feat=num_feat, hr_in=self.hr_in)
295
+ self.conv_1x1 = nn.Conv2d(num_feat, num_feat, 1, 1)
296
+ else:
297
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
298
+
299
+ # extract pyramid features
300
+ self.feature_extraction = make_layer(ResidualBlockNoBN, num_extract_block, num_feat=num_feat)
301
+ self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
302
+ self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
303
+ self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, 3, 2, 1)
304
+ self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
305
+
306
+ # pcd and tsa module
307
+ self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=deformable_groups)
308
+ if self.with_tsa:
309
+ self.fusion = TSAFusion(num_feat=num_feat, num_frame=num_frame, center_frame_idx=self.center_frame_idx)
310
+ else:
311
+ self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)
312
+
313
+ # reconstruction
314
+ self.reconstruction = make_layer(ResidualBlockNoBN, num_reconstruct_block, num_feat=num_feat)
315
+ # upsample
316
+ self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
317
+ self.upconv2 = nn.Conv2d(num_feat, 64 * 4, 3, 1, 1)
318
+ self.pixel_shuffle = nn.PixelShuffle(2)
319
+ self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
320
+ self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
321
+
322
+ # activation function
323
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
324
+
325
+ def forward(self, x):
326
+ b, t, c, h, w = x.size()
327
+ if self.hr_in:
328
+ assert h % 16 == 0 and w % 16 == 0, ('The height and width must be a multiple of 16.')
329
+ else:
330
+ assert h % 4 == 0 and w % 4 == 0, ('The height and width must be a multiple of 4.')
331
+
332
+ x_center = x[:, self.center_frame_idx, :, :, :].contiguous()
333
+
334
+ # extract features for each frame
335
+ # L1
336
+ if self.with_predeblur:
337
+ feat_l1 = self.conv_1x1(self.predeblur(x.view(-1, c, h, w)))
338
+ if self.hr_in:
339
+ h, w = h // 4, w // 4
340
+ else:
341
+ feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))
342
+
343
+ feat_l1 = self.feature_extraction(feat_l1)
344
+ # L2
345
+ feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))
346
+ feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))
347
+ # L3
348
+ feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))
349
+ feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))
350
+
351
+ feat_l1 = feat_l1.view(b, t, -1, h, w)
352
+ feat_l2 = feat_l2.view(b, t, -1, h // 2, w // 2)
353
+ feat_l3 = feat_l3.view(b, t, -1, h // 4, w // 4)
354
+
355
+ # PCD alignment
356
+ ref_feat_l = [ # reference feature list
357
+ feat_l1[:, self.center_frame_idx, :, :, :].clone(), feat_l2[:, self.center_frame_idx, :, :, :].clone(),
358
+ feat_l3[:, self.center_frame_idx, :, :, :].clone()
359
+ ]
360
+ aligned_feat = []
361
+ for i in range(t):
362
+ nbr_feat_l = [ # neighboring feature list
363
+ feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(), feat_l3[:, i, :, :, :].clone()
364
+ ]
365
+ aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))
366
+ aligned_feat = torch.stack(aligned_feat, dim=1) # (b, t, c, h, w)
367
+
368
+ if not self.with_tsa:
369
+ aligned_feat = aligned_feat.view(b, -1, h, w)
370
+ feat = self.fusion(aligned_feat)
371
+
372
+ out = self.reconstruction(feat)
373
+ out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
374
+ out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
375
+ out = self.lrelu(self.conv_hr(out))
376
+ out = self.conv_last(out)
377
+ if self.hr_in:
378
+ base = x_center
379
+ else:
380
+ base = F.interpolate(x_center, scale_factor=4, mode='bilinear', align_corners=False)
381
+ out += base
382
+ return out
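
A minimal usage sketch for EDVR, not part of this commit; the import path is assumed, and DCNv2Pack needs the compiled deformable-convolution ops, so this only runs where those ops are available. The input is a 5D clip, and height/width must be multiples of 4 (16 when hr_in=True).

import torch
from basicsr.archs.edvr_arch import EDVR  # import path assumed

model = EDVR(num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5, with_tsa=True).eval()
clip = torch.rand(1, 5, 3, 64, 64)  # (b, t, c, h, w)
with torch.no_grad():
    out = model(clip)
print(out.shape)  # torch.Size([1, 3, 256, 256]): x4 output of the center frame
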
basicsr/archs/hifacegan_arch.py ADDED
@@ -0,0 +1,260 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from basicsr.utils.registry import ARCH_REGISTRY
7
+ from .hifacegan_util import BaseNetwork, LIPEncoder, SPADEResnetBlock, get_nonspade_norm_layer
8
+
9
+
10
+ class SPADEGenerator(BaseNetwork):
11
+ """Generator with SPADEResBlock"""
12
+
13
+ def __init__(self,
14
+ num_in_ch=3,
15
+ num_feat=64,
16
+ use_vae=False,
17
+ z_dim=256,
18
+ crop_size=512,
19
+ norm_g='spectralspadesyncbatch3x3',
20
+ is_train=True,
21
+ init_train_phase=3): # progressive training disabled
22
+ super().__init__()
23
+ self.nf = num_feat
24
+ self.input_nc = num_in_ch
25
+ self.is_train = is_train
26
+ self.train_phase = init_train_phase
27
+
28
+ self.scale_ratio = 5 # hardcoded now
29
+ self.sw = crop_size // (2**self.scale_ratio)
30
+ self.sh = self.sw # 20210519: By default use square image, aspect_ratio = 1.0
31
+
32
+ if use_vae:
33
+ # In case of VAE, we will sample from random z vector
34
+ self.fc = nn.Linear(z_dim, 16 * self.nf * self.sw * self.sh)
35
+ else:
36
+ # Otherwise, we make the network deterministic by starting with
37
+ # downsampled segmentation map instead of random z
38
+ self.fc = nn.Conv2d(num_in_ch, 16 * self.nf, 3, padding=1)
39
+
40
+ self.head_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
41
+
42
+ self.g_middle_0 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
43
+ self.g_middle_1 = SPADEResnetBlock(16 * self.nf, 16 * self.nf, norm_g)
44
+
45
+ self.ups = nn.ModuleList([
46
+ SPADEResnetBlock(16 * self.nf, 8 * self.nf, norm_g),
47
+ SPADEResnetBlock(8 * self.nf, 4 * self.nf, norm_g),
48
+ SPADEResnetBlock(4 * self.nf, 2 * self.nf, norm_g),
49
+ SPADEResnetBlock(2 * self.nf, 1 * self.nf, norm_g)
50
+ ])
51
+
52
+ self.to_rgbs = nn.ModuleList([
53
+ nn.Conv2d(8 * self.nf, 3, 3, padding=1),
54
+ nn.Conv2d(4 * self.nf, 3, 3, padding=1),
55
+ nn.Conv2d(2 * self.nf, 3, 3, padding=1),
56
+ nn.Conv2d(1 * self.nf, 3, 3, padding=1)
57
+ ])
58
+
59
+ self.up = nn.Upsample(scale_factor=2)
60
+
61
+ def encode(self, input_tensor):
62
+ """
63
+ Encode input_tensor into feature maps, can be overridden in derived classes
64
+ Default: nearest downsampling of 2**5 = 32 times
65
+ """
66
+ h, w = input_tensor.size()[-2:]
67
+ sh, sw = h // 2**self.scale_ratio, w // 2**self.scale_ratio
68
+ x = F.interpolate(input_tensor, size=(sh, sw))
69
+ return self.fc(x)
70
+
71
+ def forward(self, x):
72
+ # In oroginal SPADE, seg means a segmentation map, but here we use x instead.
73
+ seg = x
74
+
75
+ x = self.encode(x)
76
+ x = self.head_0(x, seg)
77
+
78
+ x = self.up(x)
79
+ x = self.g_middle_0(x, seg)
80
+ x = self.g_middle_1(x, seg)
81
+
82
+ if self.is_train:
83
+ phase = self.train_phase + 1
84
+ else:
85
+ phase = len(self.to_rgbs)
86
+
87
+ for i in range(phase):
88
+ x = self.up(x)
89
+ x = self.ups[i](x, seg)
90
+
91
+ x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1))
92
+ x = torch.tanh(x)
93
+
94
+ return x
95
+
96
+ def mixed_guidance_forward(self, input_x, seg=None, n=0, mode='progressive'):
97
+ """
98
+ A helper class for subspace visualization. Input and seg are different images.
99
+ For the first n levels (including encoder) we use input, for the rest we use seg.
100
+
101
+ If mode = 'progressive', the output's like: AAABBB
102
+ If mode = 'one_plug', the output's like: AAABAA
103
+ If mode = 'one_ablate', the output's like: BBBABB
104
+ """
105
+
106
+ if seg is None:
107
+ return self.forward(input_x)
108
+
109
+ if self.is_train:
110
+ phase = self.train_phase + 1
111
+ else:
112
+ phase = len(self.to_rgbs)
113
+
114
+ if mode == 'progressive':
115
+ n = max(min(n, 4 + phase), 0)
116
+ guide_list = [input_x] * n + [seg] * (4 + phase - n)
117
+ elif mode == 'one_plug':
118
+ n = max(min(n, 4 + phase - 1), 0)
119
+ guide_list = [seg] * (4 + phase)
120
+ guide_list[n] = input_x
121
+ elif mode == 'one_ablate':
122
+ if n > 3 + phase:
123
+ return self.forward(input_x)
124
+ guide_list = [input_x] * (4 + phase)
125
+ guide_list[n] = seg
126
+
127
+ x = self.encode(guide_list[0])
128
+ x = self.head_0(x, guide_list[1])
129
+
130
+ x = self.up(x)
131
+ x = self.g_middle_0(x, guide_list[2])
132
+ x = self.g_middle_1(x, guide_list[3])
133
+
134
+ for i in range(phase):
135
+ x = self.up(x)
136
+ x = self.ups[i](x, guide_list[4 + i])
137
+
138
+ x = self.to_rgbs[phase - 1](F.leaky_relu(x, 2e-1))
139
+ x = torch.tanh(x)
140
+
141
+ return x
142
+
143
+
144
+ @ARCH_REGISTRY.register()
145
+ class HiFaceGAN(SPADEGenerator):
146
+ """
147
+ HiFaceGAN: SPADEGenerator with a learnable feature encoder
148
+ Current encoder design: LIPEncoder
149
+ """
150
+
151
+ def __init__(self,
152
+ num_in_ch=3,
153
+ num_feat=64,
154
+ use_vae=False,
155
+ z_dim=256,
156
+ crop_size=512,
157
+ norm_g='spectralspadesyncbatch3x3',
158
+ is_train=True,
159
+ init_train_phase=3):
160
+ super().__init__(num_in_ch, num_feat, use_vae, z_dim, crop_size, norm_g, is_train, init_train_phase)
161
+ self.lip_encoder = LIPEncoder(num_in_ch, num_feat, self.sw, self.sh, self.scale_ratio)
162
+
163
+ def encode(self, input_tensor):
164
+ return self.lip_encoder(input_tensor)
165
+
166
+
167
+ @ARCH_REGISTRY.register()
168
+ class HiFaceGANDiscriminator(BaseNetwork):
169
+ """
170
+ Inspired by pix2pixHD multiscale discriminator.
171
+
172
+ Args:
173
+ num_in_ch (int): Channel number of inputs. Default: 3.
174
+ num_out_ch (int): Channel number of outputs. Default: 3.
175
+ conditional_d (bool): Whether use conditional discriminator.
176
+ Default: True.
177
+ num_d (int): Number of Multiscale discriminators. Default: 3.
178
+ n_layers_d (int): Number of downsample layers in each D. Default: 4.
179
+ num_feat (int): Channel number of base intermediate features.
180
+ Default: 64.
181
+ norm_d (str): String to determine normalization layers in D.
182
+ Choices: [spectral][instance/batch/syncbatch]
183
+ Default: 'spectralinstance'.
184
+ keep_features (bool): Keep intermediate features for matching loss, etc.
185
+ Default: True.
186
+ """
187
+
188
+ def __init__(self,
189
+ num_in_ch=3,
190
+ num_out_ch=3,
191
+ conditional_d=True,
192
+ num_d=2,
193
+ n_layers_d=4,
194
+ num_feat=64,
195
+ norm_d='spectralinstance',
196
+ keep_features=True):
197
+ super().__init__()
198
+ self.num_d = num_d
199
+
200
+ input_nc = num_in_ch
201
+ if conditional_d:
202
+ input_nc += num_out_ch
203
+
204
+ for i in range(num_d):
205
+ subnet_d = NLayerDiscriminator(input_nc, n_layers_d, num_feat, norm_d, keep_features)
206
+ self.add_module(f'discriminator_{i}', subnet_d)
207
+
208
+ def downsample(self, x):
209
+ return F.avg_pool2d(x, kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)
210
+
211
+ # Returns list of lists of discriminator outputs.
212
+ # The final result is of size opt.num_d x opt.n_layers_D
213
+ def forward(self, x):
214
+ result = []
215
+ for _, _net_d in self.named_children():
216
+ out = _net_d(x)
217
+ result.append(out)
218
+ x = self.downsample(x)
219
+
220
+ return result
221
+
222
+
223
+ class NLayerDiscriminator(BaseNetwork):
224
+ """Defines the PatchGAN discriminator with the specified arguments."""
225
+
226
+ def __init__(self, input_nc, n_layers_d, num_feat, norm_d, keep_features):
227
+ super().__init__()
228
+ kw = 4
229
+ padw = int(np.ceil((kw - 1.0) / 2))
230
+ nf = num_feat
231
+ self.keep_features = keep_features
232
+
233
+ norm_layer = get_nonspade_norm_layer(norm_d)
234
+ sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, False)]]
235
+
236
+ for n in range(1, n_layers_d):
237
+ nf_prev = nf
238
+ nf = min(nf * 2, 512)
239
+ stride = 1 if n == n_layers_d - 1 else 2
240
+ sequence += [[
241
+ norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=stride, padding=padw)),
242
+ nn.LeakyReLU(0.2, False)
243
+ ]]
244
+
245
+ sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
246
+
247
+ # We divide the layers into groups to extract intermediate layer outputs
248
+ for n in range(len(sequence)):
249
+ self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
250
+
251
+ def forward(self, x):
252
+ results = [x]
253
+ for submodel in self.children():
254
+ intermediate_output = submodel(results[-1])
255
+ results.append(intermediate_output)
256
+
257
+ if self.keep_features:
258
+ return results[1:]
259
+ else:
260
+ return results[-1]
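
A minimal sketch, not part of this commit, wiring the generator and multiscale discriminator above together; the import paths are assumed, and the conditional discriminator expects the degraded input concatenated with the generated image.

import torch
from basicsr.archs.hifacegan_arch import HiFaceGAN, HiFaceGANDiscriminator  # paths assumed

net_g = HiFaceGAN(num_in_ch=3, num_feat=64, crop_size=512, is_train=False)
net_d = HiFaceGANDiscriminator(num_in_ch=3, num_out_ch=3, num_d=2)

lq = torch.rand(1, 3, 512, 512)  # degraded face, spatial size matching crop_size
with torch.no_grad():
    fake = net_g(lq)                             # (1, 3, 512, 512), tanh output in (-1, 1)
    feats = net_d(torch.cat([lq, fake], dim=1))  # conditional input: lq plus prediction
print(fake.shape, len(feats))                    # len(feats) == num_d discriminator scales
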
basicsr/archs/hifacegan_util.py ADDED
@@ -0,0 +1,255 @@
1
+ import re
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from torch.nn import init
6
+ # Warning: spectral norm could be buggy
7
+ # under eval mode and multi-GPU inference
8
+ # A workaround is sticking to single-GPU inference and train mode
9
+ from torch.nn.utils import spectral_norm
10
+
11
+
12
+ class SPADE(nn.Module):
13
+
14
+ def __init__(self, config_text, norm_nc, label_nc):
15
+ super().__init__()
16
+
17
+ assert config_text.startswith('spade')
18
+ parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)
19
+ param_free_norm_type = str(parsed.group(1))
20
+ ks = int(parsed.group(2))
21
+
22
+ if param_free_norm_type == 'instance':
23
+ self.param_free_norm = nn.InstanceNorm2d(norm_nc)
24
+ elif param_free_norm_type == 'syncbatch':
25
+ print('SyncBatchNorm is currently not supported under single-GPU mode, switch to "instance" instead')
26
+ self.param_free_norm = nn.InstanceNorm2d(norm_nc)
27
+ elif param_free_norm_type == 'batch':
28
+ self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
29
+ else:
30
+ raise ValueError(f'{param_free_norm_type} is not a recognized param-free norm type in SPADE')
31
+
32
+ # The dimension of the intermediate embedding space. Yes, hardcoded.
33
+ nhidden = 128 if norm_nc > 128 else norm_nc
34
+
35
+ pw = ks // 2
36
+ self.mlp_shared = nn.Sequential(nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw), nn.ReLU())
37
+ self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, bias=False)
38
+ self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw, bias=False)
39
+
40
+ def forward(self, x, segmap):
41
+
42
+ # Part 1. generate parameter-free normalized activations
43
+ normalized = self.param_free_norm(x)
44
+
45
+ # Part 2. produce scaling and bias conditioned on semantic map
46
+ segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
47
+ actv = self.mlp_shared(segmap)
48
+ gamma = self.mlp_gamma(actv)
49
+ beta = self.mlp_beta(actv)
50
+
51
+ # apply scale and bias
52
+ out = normalized * gamma + beta
53
+
54
+ return out
55
+
56
+
57
+ class SPADEResnetBlock(nn.Module):
58
+ """
59
+ ResNet block that uses SPADE. It differs from the ResNet block of pix2pixHD in that
60
+ it takes in the segmentation map as input, learns the skip connection if necessary,
61
+ and applies normalization first and then convolution.
62
+ This architecture seemed like a standard architecture for unconditional or
63
+ class-conditional GAN architecture using residual block.
64
+ The code was inspired from https://github.com/LMescheder/GAN_stability.
65
+ """
66
+
67
+ def __init__(self, fin, fout, norm_g='spectralspadesyncbatch3x3', semantic_nc=3):
68
+ super().__init__()
69
+ # Attributes
70
+ self.learned_shortcut = (fin != fout)
71
+ fmiddle = min(fin, fout)
72
+
73
+ # create conv layers
74
+ self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
75
+ self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
76
+ if self.learned_shortcut:
77
+ self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
78
+
79
+ # apply spectral norm if specified
80
+ if 'spectral' in norm_g:
81
+ self.conv_0 = spectral_norm(self.conv_0)
82
+ self.conv_1 = spectral_norm(self.conv_1)
83
+ if self.learned_shortcut:
84
+ self.conv_s = spectral_norm(self.conv_s)
85
+
86
+ # define normalization layers
87
+ spade_config_str = norm_g.replace('spectral', '')
88
+ self.norm_0 = SPADE(spade_config_str, fin, semantic_nc)
89
+ self.norm_1 = SPADE(spade_config_str, fmiddle, semantic_nc)
90
+ if self.learned_shortcut:
91
+ self.norm_s = SPADE(spade_config_str, fin, semantic_nc)
92
+
93
+ # note the resnet block with SPADE also takes in |seg|,
94
+ # the semantic segmentation map as input
95
+ def forward(self, x, seg):
96
+ x_s = self.shortcut(x, seg)
97
+ dx = self.conv_0(self.act(self.norm_0(x, seg)))
98
+ dx = self.conv_1(self.act(self.norm_1(dx, seg)))
99
+ out = x_s + dx
100
+ return out
101
+
102
+ def shortcut(self, x, seg):
103
+ if self.learned_shortcut:
104
+ x_s = self.conv_s(self.norm_s(x, seg))
105
+ else:
106
+ x_s = x
107
+ return x_s
108
+
109
+ def act(self, x):
110
+ return F.leaky_relu(x, 2e-1)
111
+
112
+
113
+ class BaseNetwork(nn.Module):
114
+ """ A basis for hifacegan archs with custom initialization """
115
+
116
+ def init_weights(self, init_type='normal', gain=0.02):
117
+
118
+ def init_func(m):
119
+ classname = m.__class__.__name__
120
+ if classname.find('BatchNorm2d') != -1:
121
+ if hasattr(m, 'weight') and m.weight is not None:
122
+ init.normal_(m.weight.data, 1.0, gain)
123
+ if hasattr(m, 'bias') and m.bias is not None:
124
+ init.constant_(m.bias.data, 0.0)
125
+ elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
126
+ if init_type == 'normal':
127
+ init.normal_(m.weight.data, 0.0, gain)
128
+ elif init_type == 'xavier':
129
+ init.xavier_normal_(m.weight.data, gain=gain)
130
+ elif init_type == 'xavier_uniform':
131
+ init.xavier_uniform_(m.weight.data, gain=1.0)
132
+ elif init_type == 'kaiming':
133
+ init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
134
+ elif init_type == 'orthogonal':
135
+ init.orthogonal_(m.weight.data, gain=gain)
136
+ elif init_type == 'none': # uses pytorch's default init method
137
+ m.reset_parameters()
138
+ else:
139
+ raise NotImplementedError(f'initialization method [{init_type}] is not implemented')
140
+ if hasattr(m, 'bias') and m.bias is not None:
141
+ init.constant_(m.bias.data, 0.0)
142
+
143
+ self.apply(init_func)
144
+
145
+ # propagate to children
146
+ for m in self.children():
147
+ if hasattr(m, 'init_weights'):
148
+ m.init_weights(init_type, gain)
149
+
150
+ def forward(self, x):
151
+ pass
152
+
153
+
154
+ def lip2d(x, logit, kernel=3, stride=2, padding=1):
155
+ weight = logit.exp()
156
+ return F.avg_pool2d(x * weight, kernel, stride, padding) / F.avg_pool2d(weight, kernel, stride, padding)
157
+
158
+
159
+ class SoftGate(nn.Module):
160
+ COEFF = 12.0
161
+
162
+ def forward(self, x):
163
+ return torch.sigmoid(x).mul(self.COEFF)
164
+
165
+
166
+ class SimplifiedLIP(nn.Module):
167
+
168
+ def __init__(self, channels):
169
+ super(SimplifiedLIP, self).__init__()
170
+ self.logit = nn.Sequential(
171
+ nn.Conv2d(channels, channels, 3, padding=1, bias=False), nn.InstanceNorm2d(channels, affine=True),
172
+ SoftGate())
173
+
174
+ def init_layer(self):
175
+ self.logit[0].weight.data.fill_(0.0)
176
+
177
+ def forward(self, x):
178
+ frac = lip2d(x, self.logit(x))
179
+ return frac
180
+
181
+
182
+ class LIPEncoder(BaseNetwork):
183
+ """Local Importance-based Pooling (Ziteng Gao et al., ICCV 2019)"""
184
+
185
+ def __init__(self, input_nc, ngf, sw, sh, n_2xdown, norm_layer=nn.InstanceNorm2d):
186
+ super().__init__()
187
+ self.sw = sw
188
+ self.sh = sh
189
+ self.max_ratio = 16
190
+ # 20200310: Several Convolution (stride 1) + LIP blocks, 4 fold
191
+ kw = 3
192
+ pw = (kw - 1) // 2
193
+
194
+ model = [
195
+ nn.Conv2d(input_nc, ngf, kw, stride=1, padding=pw, bias=False),
196
+ norm_layer(ngf),
197
+ nn.ReLU(),
198
+ ]
199
+ cur_ratio = 1
200
+ for i in range(n_2xdown):
201
+ next_ratio = min(cur_ratio * 2, self.max_ratio)
202
+ model += [
203
+ SimplifiedLIP(ngf * cur_ratio),
204
+ nn.Conv2d(ngf * cur_ratio, ngf * next_ratio, kw, stride=1, padding=pw),
205
+ norm_layer(ngf * next_ratio),
206
+ ]
207
+ cur_ratio = next_ratio
208
+ if i < n_2xdown - 1:
209
+ model += [nn.ReLU(inplace=True)]
210
+
211
+ self.model = nn.Sequential(*model)
212
+
213
+ def forward(self, x):
214
+ return self.model(x)
215
+
216
+
217
+ def get_nonspade_norm_layer(norm_type='instance'):
218
+ # helper function to get # output channels of the previous layer
219
+ def get_out_channel(layer):
220
+ if hasattr(layer, 'out_channels'):
221
+ return getattr(layer, 'out_channels')
222
+ return layer.weight.size(0)
223
+
224
+ # this function will be returned
225
+ def add_norm_layer(layer):
226
+ nonlocal norm_type
+ subnorm_type = norm_type  # fallback so non-spectral norm types (e.g. 'instance') also work
227
+ if norm_type.startswith('spectral'):
228
+ layer = spectral_norm(layer)
229
+ subnorm_type = norm_type[len('spectral'):]
230
+
231
+ if subnorm_type == 'none' or len(subnorm_type) == 0:
232
+ return layer
233
+
234
+ # remove bias in the previous layer, which is meaningless
235
+ # since it has no effect after normalization
236
+ if getattr(layer, 'bias', None) is not None:
237
+ delattr(layer, 'bias')
238
+ layer.register_parameter('bias', None)
239
+
240
+ if subnorm_type == 'batch':
241
+ norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
242
+ elif subnorm_type == 'sync_batch':
243
+ print('SyncBatchNorm is currently not supported under single-GPU mode, switch to "instance" instead')
244
+ # norm_layer = SynchronizedBatchNorm2d(
245
+ # get_out_channel(layer), affine=True)
246
+ norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
247
+ elif subnorm_type == 'instance':
248
+ norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
249
+ else:
250
+ raise ValueError(f'normalization layer {subnorm_type} is not recognized')
251
+
252
+ return nn.Sequential(layer, norm_layer)
253
+
254
+ print('This is a legacy from nvlabs/SPADE, and will be removed in future versions.')
255
+ return add_norm_layer
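
A small sketch, not part of this commit, showing how get_nonspade_norm_layer is meant to be used: the returned closure wraps a convolution with spectral norm and the requested parameter-free norm, dropping the now-redundant bias. The import path is assumed.

import torch.nn as nn
from basicsr.archs.hifacegan_util import get_nonspade_norm_layer  # path assumed

norm_layer = get_nonspade_norm_layer('spectralinstance')
block = norm_layer(nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1))
# block is Sequential(spectral_norm(Conv2d), InstanceNorm2d(128, affine=False));
# the conv bias has been removed because the normalization cancels it.
print(block)
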
basicsr/archs/inception.py ADDED
@@ -0,0 +1,307 @@
1
+ # Modified from https://github.com/mseitzer/pytorch-fid/blob/master/pytorch_fid/inception.py # noqa: E501
2
+ # For FID metric
3
+
4
+ import os
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from torch.utils.model_zoo import load_url
9
+ from torchvision import models
10
+
11
+ # Inception weights ported to Pytorch from
12
+ # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
13
+ FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501
14
+ LOCAL_FID_WEIGHTS = 'experiments/pretrained_models/pt_inception-2015-12-05-6726825d.pth' # noqa: E501
15
+
16
+
17
+ class InceptionV3(nn.Module):
18
+ """Pretrained InceptionV3 network returning feature maps"""
19
+
20
+ # Index of default block of inception to return,
21
+ # corresponds to output of final average pooling
22
+ DEFAULT_BLOCK_INDEX = 3
23
+
24
+ # Maps feature dimensionality to their output blocks indices
25
+ BLOCK_INDEX_BY_DIM = {
26
+ 64: 0, # First max pooling features
27
+ 192: 1, # Second max pooling features
28
+ 768: 2, # Pre-aux classifier features
29
+ 2048: 3 # Final average pooling features
30
+ }
31
+
32
+ def __init__(self,
33
+ output_blocks=(DEFAULT_BLOCK_INDEX, ),
34
+ resize_input=True,
35
+ normalize_input=True,
36
+ requires_grad=False,
37
+ use_fid_inception=True):
38
+ """Build pretrained InceptionV3.
39
+
40
+ Args:
41
+ output_blocks (list[int]): Indices of blocks to return features of.
42
+ Possible values are:
43
+ - 0: corresponds to output of first max pooling
44
+ - 1: corresponds to output of second max pooling
45
+ - 2: corresponds to output which is fed to aux classifier
46
+ - 3: corresponds to output of final average pooling
47
+ resize_input (bool): If true, bilinearly resizes input to width and
48
+ height 299 before feeding input to model. As the network
49
+ without fully connected layers is fully convolutional, it
50
+ should be able to handle inputs of arbitrary size, so resizing
51
+ might not be strictly needed. Default: True.
52
+ normalize_input (bool): If true, scales the input from range (0, 1)
53
+ to the range the pretrained Inception network expects,
54
+ namely (-1, 1). Default: True.
55
+ requires_grad (bool): If true, parameters of the model require
56
+ gradients. Possibly useful for finetuning the network.
57
+ Default: False.
58
+ use_fid_inception (bool): If true, uses the pretrained Inception
59
+ model used in Tensorflow's FID implementation.
60
+ If false, uses the pretrained Inception model available in
61
+ torchvision. The FID Inception model has different weights
62
+ and a slightly different structure from torchvision's
63
+ Inception model. If you want to compute FID scores, you are
64
+ strongly advised to set this parameter to true to get
65
+ comparable results. Default: True.
66
+ """
67
+ super(InceptionV3, self).__init__()
68
+
69
+ self.resize_input = resize_input
70
+ self.normalize_input = normalize_input
71
+ self.output_blocks = sorted(output_blocks)
72
+ self.last_needed_block = max(output_blocks)
73
+
74
+ assert self.last_needed_block <= 3, ('Last possible output block index is 3')
75
+
76
+ self.blocks = nn.ModuleList()
77
+
78
+ if use_fid_inception:
79
+ inception = fid_inception_v3()
80
+ else:
81
+ try:
82
+ inception = models.inception_v3(pretrained=True, init_weights=False)
83
+ except TypeError:
84
+ # pytorch < 1.5 does not have init_weights for inception_v3
85
+ inception = models.inception_v3(pretrained=True)
86
+
87
+ # Block 0: input to maxpool1
88
+ block0 = [
89
+ inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3,
90
+ nn.MaxPool2d(kernel_size=3, stride=2)
91
+ ]
92
+ self.blocks.append(nn.Sequential(*block0))
93
+
94
+ # Block 1: maxpool1 to maxpool2
95
+ if self.last_needed_block >= 1:
96
+ block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
97
+ self.blocks.append(nn.Sequential(*block1))
98
+
99
+ # Block 2: maxpool2 to aux classifier
100
+ if self.last_needed_block >= 2:
101
+ block2 = [
102
+ inception.Mixed_5b,
103
+ inception.Mixed_5c,
104
+ inception.Mixed_5d,
105
+ inception.Mixed_6a,
106
+ inception.Mixed_6b,
107
+ inception.Mixed_6c,
108
+ inception.Mixed_6d,
109
+ inception.Mixed_6e,
110
+ ]
111
+ self.blocks.append(nn.Sequential(*block2))
112
+
113
+ # Block 3: aux classifier to final avgpool
114
+ if self.last_needed_block >= 3:
115
+ block3 = [
116
+ inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
117
+ nn.AdaptiveAvgPool2d(output_size=(1, 1))
118
+ ]
119
+ self.blocks.append(nn.Sequential(*block3))
120
+
121
+ for param in self.parameters():
122
+ param.requires_grad = requires_grad
123
+
124
+ def forward(self, x):
125
+ """Get Inception feature maps.
126
+
127
+ Args:
128
+ x (Tensor): Input tensor of shape (b, 3, h, w).
129
+ Values are expected to be in range (-1, 1). You can also input
130
+ (0, 1) with setting normalize_input = True.
131
+
132
+ Returns:
133
+ list[Tensor]: Corresponding to the selected output block, sorted
134
+ ascending by index.
135
+ """
136
+ output = []
137
+
138
+ if self.resize_input:
139
+ x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
140
+
141
+ if self.normalize_input:
142
+ x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
143
+
144
+ for idx, block in enumerate(self.blocks):
145
+ x = block(x)
146
+ if idx in self.output_blocks:
147
+ output.append(x)
148
+
149
+ if idx == self.last_needed_block:
150
+ break
151
+
152
+ return output
153
+
154
+
155
+ def fid_inception_v3():
156
+ """Build pretrained Inception model for FID computation.
157
+
158
+ The Inception model for FID computation uses a different set of weights
159
+ and has a slightly different structure than torchvision's Inception.
160
+
161
+ This method first constructs torchvision's Inception and then patches the
162
+ necessary parts that are different in the FID Inception model.
163
+ """
164
+ try:
165
+ inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False, init_weights=False)
166
+ except TypeError:
167
+ # pytorch < 1.5 does not have init_weights for inception_v3
168
+ inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
169
+
170
+ inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
171
+ inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
172
+ inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
173
+ inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
174
+ inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
175
+ inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
176
+ inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
177
+ inception.Mixed_7b = FIDInceptionE_1(1280)
178
+ inception.Mixed_7c = FIDInceptionE_2(2048)
179
+
180
+ if os.path.exists(LOCAL_FID_WEIGHTS):
181
+ state_dict = torch.load(LOCAL_FID_WEIGHTS, map_location=lambda storage, loc: storage)
182
+ else:
183
+ state_dict = load_url(FID_WEIGHTS_URL, progress=True)
184
+
185
+ inception.load_state_dict(state_dict)
186
+ return inception
187
+
188
+
189
+ class FIDInceptionA(models.inception.InceptionA):
190
+ """InceptionA block patched for FID computation"""
191
+
192
+ def __init__(self, in_channels, pool_features):
193
+ super(FIDInceptionA, self).__init__(in_channels, pool_features)
194
+
195
+ def forward(self, x):
196
+ branch1x1 = self.branch1x1(x)
197
+
198
+ branch5x5 = self.branch5x5_1(x)
199
+ branch5x5 = self.branch5x5_2(branch5x5)
200
+
201
+ branch3x3dbl = self.branch3x3dbl_1(x)
202
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
203
+ branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
204
+
205
+ # Patch: Tensorflow's average pool does not use the padded zeros in
206
+ # its average calculation
207
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
208
+ branch_pool = self.branch_pool(branch_pool)
209
+
210
+ outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
211
+ return torch.cat(outputs, 1)
212
+
213
+
214
+ class FIDInceptionC(models.inception.InceptionC):
215
+ """InceptionC block patched for FID computation"""
216
+
217
+ def __init__(self, in_channels, channels_7x7):
218
+ super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
219
+
220
+ def forward(self, x):
221
+ branch1x1 = self.branch1x1(x)
222
+
223
+ branch7x7 = self.branch7x7_1(x)
224
+ branch7x7 = self.branch7x7_2(branch7x7)
225
+ branch7x7 = self.branch7x7_3(branch7x7)
226
+
227
+ branch7x7dbl = self.branch7x7dbl_1(x)
228
+ branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
229
+ branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
230
+ branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
231
+ branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
232
+
233
+ # Patch: Tensorflow's average pool does not use the padded zeros in
234
+ # its average calculation
235
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
236
+ branch_pool = self.branch_pool(branch_pool)
237
+
238
+ outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
239
+ return torch.cat(outputs, 1)
240
+
241
+
242
+ class FIDInceptionE_1(models.inception.InceptionE):
243
+ """First InceptionE block patched for FID computation"""
244
+
245
+ def __init__(self, in_channels):
246
+ super(FIDInceptionE_1, self).__init__(in_channels)
247
+
248
+ def forward(self, x):
249
+ branch1x1 = self.branch1x1(x)
250
+
251
+ branch3x3 = self.branch3x3_1(x)
252
+ branch3x3 = [
253
+ self.branch3x3_2a(branch3x3),
254
+ self.branch3x3_2b(branch3x3),
255
+ ]
256
+ branch3x3 = torch.cat(branch3x3, 1)
257
+
258
+ branch3x3dbl = self.branch3x3dbl_1(x)
259
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
260
+ branch3x3dbl = [
261
+ self.branch3x3dbl_3a(branch3x3dbl),
262
+ self.branch3x3dbl_3b(branch3x3dbl),
263
+ ]
264
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
265
+
266
+ # Patch: Tensorflow's average pool does not use the padded zeros in
267
+ # its average calculation
268
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
269
+ branch_pool = self.branch_pool(branch_pool)
270
+
271
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
272
+ return torch.cat(outputs, 1)
273
+
274
+
275
+ class FIDInceptionE_2(models.inception.InceptionE):
276
+ """Second InceptionE block patched for FID computation"""
277
+
278
+ def __init__(self, in_channels):
279
+ super(FIDInceptionE_2, self).__init__(in_channels)
280
+
281
+ def forward(self, x):
282
+ branch1x1 = self.branch1x1(x)
283
+
284
+ branch3x3 = self.branch3x3_1(x)
285
+ branch3x3 = [
286
+ self.branch3x3_2a(branch3x3),
287
+ self.branch3x3_2b(branch3x3),
288
+ ]
289
+ branch3x3 = torch.cat(branch3x3, 1)
290
+
291
+ branch3x3dbl = self.branch3x3dbl_1(x)
292
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
293
+ branch3x3dbl = [
294
+ self.branch3x3dbl_3a(branch3x3dbl),
295
+ self.branch3x3dbl_3b(branch3x3dbl),
296
+ ]
297
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
298
+
299
+ # Patch: The FID Inception model uses max pooling instead of average
300
+ # pooling. This is likely an error in this specific Inception
301
+ # implementation, as other Inception models use average pooling here
302
+ # (which matches the description in the paper).
303
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
304
+ branch_pool = self.branch_pool(branch_pool)
305
+
306
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
307
+ return torch.cat(outputs, 1)
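
A minimal sketch, not part of this commit, of how this patched InceptionV3 is typically used to collect pooled features for FID statistics; the import path is assumed, and the FID weights are loaded from the local path above or downloaded on first use.

import torch
from basicsr.archs.inception import InceptionV3  # path assumed

model = InceptionV3(output_blocks=[3]).eval()  # block 3: final average pooling, 2048-d
imgs = torch.rand(4, 3, 256, 256)              # values in (0, 1); normalize_input maps them to (-1, 1)
with torch.no_grad():
    feat = model(imgs)[0]                      # only the requested block is returned
feat = feat.squeeze(-1).squeeze(-1)            # (4, 2048) features for the FID mean/covariance
print(feat.shape)
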
basicsr/archs/rcan_arch.py ADDED
@@ -0,0 +1,135 @@
1
+ import torch
2
+ from torch import nn as nn
3
+
4
+ from basicsr.utils.registry import ARCH_REGISTRY
5
+ from .arch_util import Upsample, make_layer
6
+
7
+
8
+ class ChannelAttention(nn.Module):
9
+ """Channel attention used in RCAN.
10
+
11
+ Args:
12
+ num_feat (int): Channel number of intermediate features.
13
+ squeeze_factor (int): Channel squeeze factor. Default: 16.
14
+ """
15
+
16
+ def __init__(self, num_feat, squeeze_factor=16):
17
+ super(ChannelAttention, self).__init__()
18
+ self.attention = nn.Sequential(
19
+ nn.AdaptiveAvgPool2d(1), nn.Conv2d(num_feat, num_feat // squeeze_factor, 1, padding=0),
20
+ nn.ReLU(inplace=True), nn.Conv2d(num_feat // squeeze_factor, num_feat, 1, padding=0), nn.Sigmoid())
21
+
22
+ def forward(self, x):
23
+ y = self.attention(x)
24
+ return x * y
25
+
26
+
27
+ class RCAB(nn.Module):
28
+ """Residual Channel Attention Block (RCAB) used in RCAN.
29
+
30
+ Args:
31
+ num_feat (int): Channel number of intermediate features.
32
+ squeeze_factor (int): Channel squeeze factor. Default: 16.
33
+ res_scale (float): Scale the residual. Default: 1.
34
+ """
35
+
36
+ def __init__(self, num_feat, squeeze_factor=16, res_scale=1):
37
+ super(RCAB, self).__init__()
38
+ self.res_scale = res_scale
39
+
40
+ self.rcab = nn.Sequential(
41
+ nn.Conv2d(num_feat, num_feat, 3, 1, 1), nn.ReLU(True), nn.Conv2d(num_feat, num_feat, 3, 1, 1),
42
+ ChannelAttention(num_feat, squeeze_factor))
43
+
44
+ def forward(self, x):
45
+ res = self.rcab(x) * self.res_scale
46
+ return res + x
47
+
48
+
49
+ class ResidualGroup(nn.Module):
50
+ """Residual Group of RCAB.
51
+
52
+ Args:
53
+ num_feat (int): Channel number of intermediate features.
54
+ num_block (int): Block number in the body network.
55
+ squeeze_factor (int): Channel squeeze factor. Default: 16.
56
+ res_scale (float): Scale the residual. Default: 1.
57
+ """
58
+
59
+ def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1):
60
+ super(ResidualGroup, self).__init__()
61
+
62
+ self.residual_group = make_layer(
63
+ RCAB, num_block, num_feat=num_feat, squeeze_factor=squeeze_factor, res_scale=res_scale)
64
+ self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
65
+
66
+ def forward(self, x):
67
+ res = self.conv(self.residual_group(x))
68
+ return res + x
69
+
70
+
71
+ @ARCH_REGISTRY.register()
72
+ class RCAN(nn.Module):
73
+ """Residual Channel Attention Networks.
74
+
75
+ ``Paper: Image Super-Resolution Using Very Deep Residual Channel Attention Networks``
76
+
77
+ Reference: https://github.com/yulunzhang/RCAN
78
+
79
+ Args:
80
+ num_in_ch (int): Channel number of inputs.
81
+ num_out_ch (int): Channel number of outputs.
82
+ num_feat (int): Channel number of intermediate features.
83
+ Default: 64.
84
+ num_group (int): Number of ResidualGroup. Default: 10.
85
+ num_block (int): Number of RCAB in ResidualGroup. Default: 16.
86
+ squeeze_factor (int): Channel squeeze factor. Default: 16.
87
+ upscale (int): Upsampling factor. Supports 2^n and 3.
88
+ Default: 4.
89
+ res_scale (float): Used to scale the residual in residual block.
90
+ Default: 1.
91
+ img_range (float): Image range. Default: 255.
92
+ rgb_mean (tuple[float]): Image mean in RGB orders.
93
+ Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
94
+ """
95
+
96
+ def __init__(self,
97
+ num_in_ch,
98
+ num_out_ch,
99
+ num_feat=64,
100
+ num_group=10,
101
+ num_block=16,
102
+ squeeze_factor=16,
103
+ upscale=4,
104
+ res_scale=1,
105
+ img_range=255.,
106
+ rgb_mean=(0.4488, 0.4371, 0.4040)):
107
+ super(RCAN, self).__init__()
108
+
109
+ self.img_range = img_range
110
+ self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
111
+
112
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
113
+ self.body = make_layer(
114
+ ResidualGroup,
115
+ num_group,
116
+ num_feat=num_feat,
117
+ num_block=num_block,
118
+ squeeze_factor=squeeze_factor,
119
+ res_scale=res_scale)
120
+ self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
121
+ self.upsample = Upsample(upscale, num_feat)
122
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
123
+
124
+ def forward(self, x):
125
+ self.mean = self.mean.type_as(x)
126
+
127
+ x = (x - self.mean) * self.img_range
128
+ x = self.conv_first(x)
129
+ res = self.conv_after_body(self.body(x))
130
+ res += x
131
+
132
+ x = self.conv_last(self.upsample(res))
133
+ x = x / self.img_range + self.mean
134
+
135
+ return x
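
A minimal usage sketch for RCAN, analogous to the EDSR example and not part of this commit; the import path is assumed.

import torch
from basicsr.archs.rcan_arch import RCAN  # path assumed

model = RCAN(num_in_ch=3, num_out_ch=3, num_feat=64, num_group=10, num_block=16, upscale=2).eval()
lr = torch.rand(1, 3, 32, 32)
with torch.no_grad():
    sr = model(lr)
print(sr.shape)  # torch.Size([1, 3, 64, 64])
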
basicsr/archs/ridnet_arch.py ADDED
@@ -0,0 +1,180 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from basicsr.utils.registry import ARCH_REGISTRY
5
+ from .arch_util import ResidualBlockNoBN, make_layer
6
+
7
+
8
+ class MeanShift(nn.Conv2d):
9
+ """ Data normalization with mean and std.
10
+
11
+ Args:
12
+ rgb_range (int): Maximum value of RGB.
13
+ rgb_mean (list[float]): Mean for RGB channels.
14
+ rgb_std (list[float]): Std for RGB channels.
15
+ sign (int): For subtraction, sign is -1, for addition, sign is 1.
16
+ Default: -1.
17
+ requires_grad (bool): Whether to update the self.weight and self.bias.
18
+ Default: True.
19
+ """
20
+
21
+ def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1, requires_grad=True):
22
+ super(MeanShift, self).__init__(3, 3, kernel_size=1)
23
+ std = torch.Tensor(rgb_std)
24
+ self.weight.data = torch.eye(3).view(3, 3, 1, 1)
25
+ self.weight.data.div_(std.view(3, 1, 1, 1))
26
+ self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
27
+ self.bias.data.div_(std)
28
+ self.requires_grad = requires_grad
29
+
30
+
31
+ class EResidualBlockNoBN(nn.Module):
32
+ """Enhanced Residual block without BN.
33
+
34
+ There are three convolution layers in residual branch.
35
+ """
36
+
37
+ def __init__(self, in_channels, out_channels):
38
+ super(EResidualBlockNoBN, self).__init__()
39
+
40
+ self.body = nn.Sequential(
41
+ nn.Conv2d(in_channels, out_channels, 3, 1, 1),
42
+ nn.ReLU(inplace=True),
43
+ nn.Conv2d(out_channels, out_channels, 3, 1, 1),
44
+ nn.ReLU(inplace=True),
45
+ nn.Conv2d(out_channels, out_channels, 1, 1, 0),
46
+ )
47
+ self.relu = nn.ReLU(inplace=True)
48
+
49
+ def forward(self, x):
50
+ out = self.body(x)
51
+ out = self.relu(out + x)
52
+ return out
53
+
54
+
55
+ class MergeRun(nn.Module):
56
+ """ Merge-and-run unit.
57
+
58
+ This unit contains two branches with different dilated convolutions,
59
+ followed by a convolution to process the concatenated features.
60
+
61
+ Paper: Real Image Denoising with Feature Attention
62
+ Ref git repo: https://github.com/saeed-anwar/RIDNet
63
+ """
64
+
65
+ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
66
+ super(MergeRun, self).__init__()
67
+
68
+ self.dilation1 = nn.Sequential(
69
+ nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True),
70
+ nn.Conv2d(out_channels, out_channels, kernel_size, stride, 2, 2), nn.ReLU(inplace=True))
71
+ self.dilation2 = nn.Sequential(
72
+ nn.Conv2d(in_channels, out_channels, kernel_size, stride, 3, 3), nn.ReLU(inplace=True),
73
+ nn.Conv2d(out_channels, out_channels, kernel_size, stride, 4, 4), nn.ReLU(inplace=True))
74
+
75
+ self.aggregation = nn.Sequential(
76
+ nn.Conv2d(out_channels * 2, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True))
77
+
78
+ def forward(self, x):
79
+ dilation1 = self.dilation1(x)
80
+ dilation2 = self.dilation2(x)
81
+ out = torch.cat([dilation1, dilation2], dim=1)
82
+ out = self.aggregation(out)
83
+ out = out + x
84
+ return out
85
+
86
+
87
+ class ChannelAttention(nn.Module):
88
+ """Channel attention.
89
+
90
+ Args:
91
+ mid_channels (int): Channel number of intermediate features.
92
+ squeeze_factor (int): Channel squeeze factor. Default: 16.
93
+ """
94
+
95
+ def __init__(self, mid_channels, squeeze_factor=16):
96
+ super(ChannelAttention, self).__init__()
97
+ self.attention = nn.Sequential(
98
+ nn.AdaptiveAvgPool2d(1), nn.Conv2d(mid_channels, mid_channels // squeeze_factor, 1, padding=0),
99
+ nn.ReLU(inplace=True), nn.Conv2d(mid_channels // squeeze_factor, mid_channels, 1, padding=0), nn.Sigmoid())
100
+
101
+ def forward(self, x):
102
+ y = self.attention(x)
103
+ return x * y
104
+
105
+
106
+ class EAM(nn.Module):
107
+ """Enhancement attention modules (EAM) in RIDNet.
108
+
109
+ This module contains a merge-and-run unit, a residual block,
110
+ an enhanced residual block and a feature attention unit.
111
+
112
+ Attributes:
113
+ merge: The merge-and-run unit.
114
+ block1: The residual block.
115
+ block2: The enhanced residual block.
116
+ ca: The feature/channel attention unit.
117
+ """
118
+
119
+ def __init__(self, in_channels, mid_channels, out_channels):
120
+ super(EAM, self).__init__()
121
+
122
+ self.merge = MergeRun(in_channels, mid_channels)
123
+ self.block1 = ResidualBlockNoBN(mid_channels)
124
+ self.block2 = EResidualBlockNoBN(mid_channels, out_channels)
125
+ self.ca = ChannelAttention(out_channels)
126
+ # The residual block in the paper contains a relu after addition.
127
+ self.relu = nn.ReLU(inplace=True)
128
+
129
+ def forward(self, x):
130
+ out = self.merge(x)
131
+ out = self.relu(self.block1(out))
132
+ out = self.block2(out)
133
+ out = self.ca(out)
134
+ return out
135
+
136
+
137
+ @ARCH_REGISTRY.register()
138
+ class RIDNet(nn.Module):
139
+ """RIDNet: Real Image Denoising with Feature Attention.
140
+
141
+ Ref git repo: https://github.com/saeed-anwar/RIDNet
142
+
143
+ Args:
144
+ in_channels (int): Channel number of inputs.
145
+ mid_channels (int): Channel number of EAM modules.
146
+ Default: 64.
147
+ out_channels (int): Channel number of outputs.
148
+ num_block (int): Number of EAM. Default: 4.
149
+ img_range (float): Image range. Default: 255.
150
+ rgb_mean (tuple[float]): Image mean in RGB order.
151
+ Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
152
+ """
153
+
154
+ def __init__(self,
155
+ in_channels,
156
+ mid_channels,
157
+ out_channels,
158
+ num_block=4,
159
+ img_range=255.,
160
+ rgb_mean=(0.4488, 0.4371, 0.4040),
161
+ rgb_std=(1.0, 1.0, 1.0)):
162
+ super(RIDNet, self).__init__()
163
+
164
+ self.sub_mean = MeanShift(img_range, rgb_mean, rgb_std)
165
+ self.add_mean = MeanShift(img_range, rgb_mean, rgb_std, 1)
166
+
167
+ self.head = nn.Conv2d(in_channels, mid_channels, 3, 1, 1)
168
+ self.body = make_layer(
169
+ EAM, num_block, in_channels=mid_channels, mid_channels=mid_channels, out_channels=mid_channels)
170
+ self.tail = nn.Conv2d(mid_channels, out_channels, 3, 1, 1)
171
+
172
+ self.relu = nn.ReLU(inplace=True)
173
+
174
+ def forward(self, x):
175
+ res = self.sub_mean(x)
176
+ res = self.tail(self.body(self.relu(self.head(res))))
177
+ res = self.add_mean(res)
178
+
179
+ out = x + res
180
+ return out
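A minimal usage sketch of RIDNet as defined above (assumption: basicsr is importable; weights are random, so the output is not a meaningful denoising result):

import torch
from basicsr.archs.ridnet_arch import RIDNet

model = RIDNet(in_channels=3, mid_channels=64, out_channels=3, num_block=4).eval()
with torch.no_grad():
    noisy = torch.rand(1, 3, 128, 128)  # synthetic noisy image in [0, 1]
    denoised = model(noisy)             # same shape as the input: (1, 3, 128, 128)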
basicsr/archs/rrdbnet_arch.py ADDED
@@ -0,0 +1,119 @@
1
+ import torch
2
+ from torch import nn as nn
3
+ from torch.nn import functional as F
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+ from .arch_util import default_init_weights, make_layer, pixel_unshuffle
7
+
8
+
9
+ class ResidualDenseBlock(nn.Module):
10
+ """Residual Dense Block.
11
+
12
+ Used in RRDB block in ESRGAN.
13
+
14
+ Args:
15
+ num_feat (int): Channel number of intermediate features.
16
+ num_grow_ch (int): Channels for each growth.
17
+ """
18
+
19
+ def __init__(self, num_feat=64, num_grow_ch=32):
20
+ super(ResidualDenseBlock, self).__init__()
21
+ self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
22
+ self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
23
+ self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
24
+ self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
25
+ self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
26
+
27
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
28
+
29
+ # initialization
30
+ default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
31
+
32
+ def forward(self, x):
33
+ x1 = self.lrelu(self.conv1(x))
34
+ x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
35
+ x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
36
+ x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
37
+ x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
38
+ # Empirically, we use 0.2 to scale the residual for better performance
39
+ return x5 * 0.2 + x
40
+
41
+
42
+ class RRDB(nn.Module):
43
+ """Residual in Residual Dense Block.
44
+
45
+ Used in RRDB-Net in ESRGAN.
46
+
47
+ Args:
48
+ num_feat (int): Channel number of intermediate features.
49
+ num_grow_ch (int): Channels for each growth.
50
+ """
51
+
52
+ def __init__(self, num_feat, num_grow_ch=32):
53
+ super(RRDB, self).__init__()
54
+ self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
55
+ self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
56
+ self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)
57
+
58
+ def forward(self, x):
59
+ out = self.rdb1(x)
60
+ out = self.rdb2(out)
61
+ out = self.rdb3(out)
62
+ # Empirically, we use 0.2 to scale the residual for better performance
63
+ return out * 0.2 + x
64
+
65
+
66
+ @ARCH_REGISTRY.register()
67
+ class RRDBNet(nn.Module):
68
+ """Networks consisting of Residual in Residual Dense Block, which is used
69
+ in ESRGAN.
70
+
71
+ ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
72
+
73
+ We extend ESRGAN for scale x2 and scale x1.
74
+ Note: This is one option for scale 1, scale 2 in RRDBNet.
75
+ We first employ the pixel-unshuffle (an inverse operation of pixelshuffle) to reduce the spatial size
76
+ and enlarge the channel size before feeding inputs into the main ESRGAN architecture.
77
+
78
+ Args:
79
+ num_in_ch (int): Channel number of inputs.
80
+ num_out_ch (int): Channel number of outputs.
81
+ num_feat (int): Channel number of intermediate features.
82
+ Default: 64.
83
+ num_block (int): Block number in the trunk network. Default: 23.
84
+ num_grow_ch (int): Channels for each growth. Default: 32.
85
+ """
86
+
87
+ def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
88
+ super(RRDBNet, self).__init__()
89
+ self.scale = scale
90
+ if scale == 2:
91
+ num_in_ch = num_in_ch * 4
92
+ elif scale == 1:
93
+ num_in_ch = num_in_ch * 16
94
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
95
+ self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
96
+ self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
97
+ # upsample
98
+ self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
99
+ self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
100
+ self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
101
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
102
+
103
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
104
+
105
+ def forward(self, x):
106
+ if self.scale == 2:
107
+ feat = pixel_unshuffle(x, scale=2)
108
+ elif self.scale == 1:
109
+ feat = pixel_unshuffle(x, scale=4)
110
+ else:
111
+ feat = x
112
+ feat = self.conv_first(feat)
113
+ body_feat = self.conv_body(self.body(feat))
114
+ feat = feat + body_feat
115
+ # upsample
116
+ feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
117
+ feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
118
+ out = self.conv_last(self.lrelu(self.conv_hr(feat)))
119
+ return out
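A minimal usage sketch for RRDBNet (assumptions: basicsr importable, random weights). As the docstring above notes, scale=2 and scale=1 first pixel-unshuffle the input before the trunk:

import torch
from basicsr.archs.rrdbnet_arch import RRDBNet

model = RRDBNet(num_in_ch=3, num_out_ch=3, scale=4, num_feat=64,
                num_block=23, num_grow_ch=32).eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 64, 64))  # -> (1, 3, 256, 256) for scale=4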
basicsr/archs/spynet_arch.py ADDED
@@ -0,0 +1,96 @@
1
+ import math
2
+ import torch
3
+ from torch import nn as nn
4
+ from torch.nn import functional as F
5
+
6
+ from basicsr.utils.registry import ARCH_REGISTRY
7
+ from .arch_util import flow_warp
8
+
9
+
10
+ class BasicModule(nn.Module):
11
+ """Basic Module for SpyNet.
12
+ """
13
+
14
+ def __init__(self):
15
+ super(BasicModule, self).__init__()
16
+
17
+ self.basic_module = nn.Sequential(
18
+ nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
19
+ nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
20
+ nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
21
+ nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3), nn.ReLU(inplace=False),
22
+ nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3))
23
+
24
+ def forward(self, tensor_input):
25
+ return self.basic_module(tensor_input)
26
+
27
+
28
+ @ARCH_REGISTRY.register()
29
+ class SpyNet(nn.Module):
30
+ """SpyNet architecture.
31
+
32
+ Args:
33
+ load_path (str): path for pretrained SpyNet. Default: None.
34
+ """
35
+
36
+ def __init__(self, load_path=None):
37
+ super(SpyNet, self).__init__()
38
+ self.basic_module = nn.ModuleList([BasicModule() for _ in range(6)])
39
+ if load_path:
40
+ self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])
41
+
42
+ self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
43
+ self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
44
+
45
+ def preprocess(self, tensor_input):
46
+ tensor_output = (tensor_input - self.mean) / self.std
47
+ return tensor_output
48
+
49
+ def process(self, ref, supp):
50
+ flow = []
51
+
52
+ ref = [self.preprocess(ref)]
53
+ supp = [self.preprocess(supp)]
54
+
55
+ for level in range(5):
56
+ ref.insert(0, F.avg_pool2d(input=ref[0], kernel_size=2, stride=2, count_include_pad=False))
57
+ supp.insert(0, F.avg_pool2d(input=supp[0], kernel_size=2, stride=2, count_include_pad=False))
58
+
59
+ flow = ref[0].new_zeros(
60
+ [ref[0].size(0), 2,
61
+ int(math.floor(ref[0].size(2) / 2.0)),
62
+ int(math.floor(ref[0].size(3) / 2.0))])
63
+
64
+ for level in range(len(ref)):
65
+ upsampled_flow = F.interpolate(input=flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
66
+
67
+ if upsampled_flow.size(2) != ref[level].size(2):
68
+ upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 0, 0, 1], mode='replicate')
69
+ if upsampled_flow.size(3) != ref[level].size(3):
70
+ upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 1, 0, 0], mode='replicate')
71
+
72
+ flow = self.basic_module[level](torch.cat([
73
+ ref[level],
74
+ flow_warp(
75
+ supp[level], upsampled_flow.permute(0, 2, 3, 1), interp_mode='bilinear', padding_mode='border'),
76
+ upsampled_flow
77
+ ], 1)) + upsampled_flow
78
+
79
+ return flow
80
+
81
+ def forward(self, ref, supp):
82
+ assert ref.size() == supp.size()
83
+
84
+ h, w = ref.size(2), ref.size(3)
85
+ w_floor = math.floor(math.ceil(w / 32.0) * 32.0)
86
+ h_floor = math.floor(math.ceil(h / 32.0) * 32.0)
87
+
88
+ ref = F.interpolate(input=ref, size=(h_floor, w_floor), mode='bilinear', align_corners=False)
89
+ supp = F.interpolate(input=supp, size=(h_floor, w_floor), mode='bilinear', align_corners=False)
90
+
91
+ flow = F.interpolate(input=self.process(ref, supp), size=(h, w), mode='bilinear', align_corners=False)
92
+
93
+ flow[:, 0, :, :] *= float(w) / float(w_floor)
94
+ flow[:, 1, :, :] *= float(h) / float(h_floor)
95
+
96
+ return flow
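A minimal usage sketch for SpyNet (assumption: basicsr is importable; load_path=None keeps random weights, so pass a pretrained .pth checkpoint to get meaningful optical flow):

import torch
from basicsr.archs.spynet_arch import SpyNet

spynet = SpyNet(load_path=None).eval()
with torch.no_grad():
    ref = torch.rand(1, 3, 128, 128)    # reference frame
    supp = torch.rand(1, 3, 128, 128)   # supporting frame
    flow = spynet(ref, supp)            # (1, 2, 128, 128): per-pixel (dx, dy) flow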
basicsr/archs/srresnet_arch.py ADDED
@@ -0,0 +1,65 @@
1
+ from torch import nn as nn
2
+ from torch.nn import functional as F
3
+
4
+ from basicsr.utils.registry import ARCH_REGISTRY
5
+ from .arch_util import ResidualBlockNoBN, default_init_weights, make_layer
6
+
7
+
8
+ @ARCH_REGISTRY.register()
9
+ class MSRResNet(nn.Module):
10
+ """Modified SRResNet.
11
+
12
+ A compact version modified from SRResNet in
13
+ "Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network".
14
+ It uses residual blocks without BN, similar to EDSR.
15
+ Currently, it supports x2, x3 and x4 upsampling scale factors.
16
+
17
+ Args:
18
+ num_in_ch (int): Channel number of inputs. Default: 3.
19
+ num_out_ch (int): Channel number of outputs. Default: 3.
20
+ num_feat (int): Channel number of intermediate features. Default: 64.
21
+ num_block (int): Block number in the body network. Default: 16.
22
+ upscale (int): Upsampling factor. Support x2, x3 and x4. Default: 4.
23
+ """
24
+
25
+ def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4):
26
+ super(MSRResNet, self).__init__()
27
+ self.upscale = upscale
28
+
29
+ self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
30
+ self.body = make_layer(ResidualBlockNoBN, num_block, num_feat=num_feat)
31
+
32
+ # upsampling
33
+ if self.upscale in [2, 3]:
34
+ self.upconv1 = nn.Conv2d(num_feat, num_feat * self.upscale * self.upscale, 3, 1, 1)
35
+ self.pixel_shuffle = nn.PixelShuffle(self.upscale)
36
+ elif self.upscale == 4:
37
+ self.upconv1 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
38
+ self.upconv2 = nn.Conv2d(num_feat, num_feat * 4, 3, 1, 1)
39
+ self.pixel_shuffle = nn.PixelShuffle(2)
40
+
41
+ self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
42
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
43
+
44
+ # activation function
45
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
46
+
47
+ # initialization
48
+ default_init_weights([self.conv_first, self.upconv1, self.conv_hr, self.conv_last], 0.1)
49
+ if self.upscale == 4:
50
+ default_init_weights(self.upconv2, 0.1)
51
+
52
+ def forward(self, x):
53
+ feat = self.lrelu(self.conv_first(x))
54
+ out = self.body(feat)
55
+
56
+ if self.upscale == 4:
57
+ out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
58
+ out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
59
+ elif self.upscale in [2, 3]:
60
+ out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
61
+
62
+ out = self.conv_last(self.lrelu(self.conv_hr(out)))
63
+ base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
64
+ out += base
65
+ return out
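A minimal usage sketch for MSRResNet (assumption: basicsr importable; random weights, synthetic input):

import torch
from basicsr.archs.srresnet_arch import MSRResNet

model = MSRResNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=4).eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 64, 64))  # -> (1, 3, 256, 256)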
basicsr/archs/srvgg_arch.py ADDED
@@ -0,0 +1,70 @@
1
+ from torch import nn as nn
2
+ from torch.nn import functional as F
3
+
4
+ from basicsr.utils.registry import ARCH_REGISTRY
5
+
6
+
7
+ @ARCH_REGISTRY.register(suffix='basicsr')
8
+ class SRVGGNetCompact(nn.Module):
9
+ """A compact VGG-style network structure for super-resolution.
10
+
11
+ It is a compact network structure that performs upsampling only in the last layer, so no convolution is
12
+ conducted on the HR feature space.
13
+
14
+ Args:
15
+ num_in_ch (int): Channel number of inputs. Default: 3.
16
+ num_out_ch (int): Channel number of outputs. Default: 3.
17
+ num_feat (int): Channel number of intermediate features. Default: 64.
18
+ num_conv (int): Number of convolution layers in the body network. Default: 16.
19
+ upscale (int): Upsampling factor. Default: 4.
20
+ act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
21
+ """
22
+
23
+ def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
24
+ super(SRVGGNetCompact, self).__init__()
25
+ self.num_in_ch = num_in_ch
26
+ self.num_out_ch = num_out_ch
27
+ self.num_feat = num_feat
28
+ self.num_conv = num_conv
29
+ self.upscale = upscale
30
+ self.act_type = act_type
31
+
32
+ self.body = nn.ModuleList()
33
+ # the first conv
34
+ self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
35
+ # the first activation
36
+ if act_type == 'relu':
37
+ activation = nn.ReLU(inplace=True)
38
+ elif act_type == 'prelu':
39
+ activation = nn.PReLU(num_parameters=num_feat)
40
+ elif act_type == 'leakyrelu':
41
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
42
+ self.body.append(activation)
43
+
44
+ # the body structure
45
+ for _ in range(num_conv):
46
+ self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
47
+ # activation
48
+ if act_type == 'relu':
49
+ activation = nn.ReLU(inplace=True)
50
+ elif act_type == 'prelu':
51
+ activation = nn.PReLU(num_parameters=num_feat)
52
+ elif act_type == 'leakyrelu':
53
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
54
+ self.body.append(activation)
55
+
56
+ # the last conv
57
+ self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
58
+ # upsample
59
+ self.upsampler = nn.PixelShuffle(upscale)
60
+
61
+ def forward(self, x):
62
+ out = x
63
+ for i in range(0, len(self.body)):
64
+ out = self.body[i](out)
65
+
66
+ out = self.upsampler(out)
67
+ # add the nearest upsampled image, so that the network learns the residual
68
+ base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
69
+ out += base
70
+ return out
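A minimal usage sketch for SRVGGNetCompact (assumption: basicsr importable; random weights, synthetic input):

import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact

model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,
                        upscale=4, act_type='prelu').eval()
with torch.no_grad():
    out = model(torch.rand(1, 3, 64, 64))  # -> (1, 3, 256, 256)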
basicsr/archs/stylegan2_arch.py ADDED
@@ -0,0 +1,799 @@
1
+ import math
2
+ import random
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu
8
+ from basicsr.ops.upfirdn2d import upfirdn2d
9
+ from basicsr.utils.registry import ARCH_REGISTRY
10
+
11
+
12
+ class NormStyleCode(nn.Module):
13
+
14
+ def forward(self, x):
15
+ """Normalize the style codes.
16
+
17
+ Args:
18
+ x (Tensor): Style codes with shape (b, c).
19
+
20
+ Returns:
21
+ Tensor: Normalized tensor.
22
+ """
23
+ return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
24
+
25
+
26
+ def make_resample_kernel(k):
27
+ """Make resampling kernel for UpFirDn.
28
+
29
+ Args:
30
+ k (list[int]): A list indicating the 1D resample kernel magnitude.
31
+
32
+ Returns:
33
+ Tensor: 2D resampled kernel.
34
+ """
35
+ k = torch.tensor(k, dtype=torch.float32)
36
+ if k.ndim == 1:
37
+ k = k[None, :] * k[:, None] # to 2D kernel, outer product
38
+ # normalize
39
+ k /= k.sum()
40
+ return k
41
+
42
+
43
+ class UpFirDnUpsample(nn.Module):
44
+ """Upsample, FIR filter, and downsample (upsample version).
45
+
46
+ References:
47
+ 1. https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.upfirdn.html # noqa: E501
48
+ 2. http://www.ece.northwestern.edu/local-apps/matlabhelp/toolbox/signal/upfirdn.html # noqa: E501
49
+
50
+ Args:
51
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
52
+ magnitude.
53
+ factor (int): Upsampling scale factor. Default: 2.
54
+ """
55
+
56
+ def __init__(self, resample_kernel, factor=2):
57
+ super(UpFirDnUpsample, self).__init__()
58
+ self.kernel = make_resample_kernel(resample_kernel) * (factor**2)
59
+ self.factor = factor
60
+
61
+ pad = self.kernel.shape[0] - factor
62
+ self.pad = ((pad + 1) // 2 + factor - 1, pad // 2)
63
+
64
+ def forward(self, x):
65
+ out = upfirdn2d(x, self.kernel.type_as(x), up=self.factor, down=1, pad=self.pad)
66
+ return out
67
+
68
+ def __repr__(self):
69
+ return (f'{self.__class__.__name__}(factor={self.factor})')
70
+
71
+
72
+ class UpFirDnDownsample(nn.Module):
73
+ """Upsample, FIR filter, and downsample (downsample version).
74
+
75
+ Args:
76
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
77
+ magnitude.
78
+ factor (int): Downsampling scale factor. Default: 2.
79
+ """
80
+
81
+ def __init__(self, resample_kernel, factor=2):
82
+ super(UpFirDnDownsample, self).__init__()
83
+ self.kernel = make_resample_kernel(resample_kernel)
84
+ self.factor = factor
85
+
86
+ pad = self.kernel.shape[0] - factor
87
+ self.pad = ((pad + 1) // 2, pad // 2)
88
+
89
+ def forward(self, x):
90
+ out = upfirdn2d(x, self.kernel.type_as(x), up=1, down=self.factor, pad=self.pad)
91
+ return out
92
+
93
+ def __repr__(self):
94
+ return (f'{self.__class__.__name__}(factor={self.factor})')
95
+
96
+
97
+ class UpFirDnSmooth(nn.Module):
98
+ """Upsample, FIR filter, and downsample (smooth version).
99
+
100
+ Args:
101
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
102
+ magnitude.
103
+ upsample_factor (int): Upsampling scale factor. Default: 1.
104
+ downsample_factor (int): Downsampling scale factor. Default: 1.
105
+ kernel_size (int): Kernel size: Default: 1.
106
+ """
107
+
108
+ def __init__(self, resample_kernel, upsample_factor=1, downsample_factor=1, kernel_size=1):
109
+ super(UpFirDnSmooth, self).__init__()
110
+ self.upsample_factor = upsample_factor
111
+ self.downsample_factor = downsample_factor
112
+ self.kernel = make_resample_kernel(resample_kernel)
113
+ if upsample_factor > 1:
114
+ self.kernel = self.kernel * (upsample_factor**2)
115
+
116
+ if upsample_factor > 1:
117
+ pad = (self.kernel.shape[0] - upsample_factor) - (kernel_size - 1)
118
+ self.pad = ((pad + 1) // 2 + upsample_factor - 1, pad // 2 + 1)
119
+ elif downsample_factor > 1:
120
+ pad = (self.kernel.shape[0] - downsample_factor) + (kernel_size - 1)
121
+ self.pad = ((pad + 1) // 2, pad // 2)
122
+ else:
123
+ raise NotImplementedError
124
+
125
+ def forward(self, x):
126
+ out = upfirdn2d(x, self.kernel.type_as(x), up=1, down=1, pad=self.pad)
127
+ return out
128
+
129
+ def __repr__(self):
130
+ return (f'{self.__class__.__name__}(upsample_factor={self.upsample_factor}'
131
+ f', downsample_factor={self.downsample_factor})')
132
+
133
+
134
+ class EqualLinear(nn.Module):
135
+ """Equalized Linear as StyleGAN2.
136
+
137
+ Args:
138
+ in_channels (int): Size of each sample.
139
+ out_channels (int): Size of each output sample.
140
+ bias (bool): If set to ``False``, the layer will not learn an additive
141
+ bias. Default: ``True``.
142
+ bias_init_val (float): Bias initialized value. Default: 0.
143
+ lr_mul (float): Learning rate multiplier. Default: 1.
144
+ activation (None | str): The activation after ``linear`` operation.
145
+ Supported: 'fused_lrelu', None. Default: None.
146
+ """
147
+
148
+ def __init__(self, in_channels, out_channels, bias=True, bias_init_val=0, lr_mul=1, activation=None):
149
+ super(EqualLinear, self).__init__()
150
+ self.in_channels = in_channels
151
+ self.out_channels = out_channels
152
+ self.lr_mul = lr_mul
153
+ self.activation = activation
154
+ if self.activation not in ['fused_lrelu', None]:
155
+ raise ValueError(f'Wrong activation value in EqualLinear: {activation}. '
156
+ "Supported ones are: ['fused_lrelu', None].")
157
+ self.scale = (1 / math.sqrt(in_channels)) * lr_mul
158
+
159
+ self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul))
160
+ if bias:
161
+ self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
162
+ else:
163
+ self.register_parameter('bias', None)
164
+
165
+ def forward(self, x):
166
+ if self.bias is None:
167
+ bias = None
168
+ else:
169
+ bias = self.bias * self.lr_mul
170
+ if self.activation == 'fused_lrelu':
171
+ out = F.linear(x, self.weight * self.scale)
172
+ out = fused_leaky_relu(out, bias)
173
+ else:
174
+ out = F.linear(x, self.weight * self.scale, bias=bias)
175
+ return out
176
+
177
+ def __repr__(self):
178
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
179
+ f'out_channels={self.out_channels}, bias={self.bias is not None})')
180
+
181
+
182
+ class ModulatedConv2d(nn.Module):
183
+ """Modulated Conv2d used in StyleGAN2.
184
+
185
+ There is no bias in ModulatedConv2d.
186
+
187
+ Args:
188
+ in_channels (int): Channel number of the input.
189
+ out_channels (int): Channel number of the output.
190
+ kernel_size (int): Size of the convolving kernel.
191
+ num_style_feat (int): Channel number of style features.
192
+ demodulate (bool): Whether to demodulate in the conv layer.
193
+ Default: True.
194
+ sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
195
+ Default: None.
196
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
197
+ magnitude. Default: (1, 3, 3, 1).
198
+ eps (float): A value added to the denominator for numerical stability.
199
+ Default: 1e-8.
200
+ """
201
+
202
+ def __init__(self,
203
+ in_channels,
204
+ out_channels,
205
+ kernel_size,
206
+ num_style_feat,
207
+ demodulate=True,
208
+ sample_mode=None,
209
+ resample_kernel=(1, 3, 3, 1),
210
+ eps=1e-8):
211
+ super(ModulatedConv2d, self).__init__()
212
+ self.in_channels = in_channels
213
+ self.out_channels = out_channels
214
+ self.kernel_size = kernel_size
215
+ self.demodulate = demodulate
216
+ self.sample_mode = sample_mode
217
+ self.eps = eps
218
+
219
+ if self.sample_mode == 'upsample':
220
+ self.smooth = UpFirDnSmooth(
221
+ resample_kernel, upsample_factor=2, downsample_factor=1, kernel_size=kernel_size)
222
+ elif self.sample_mode == 'downsample':
223
+ self.smooth = UpFirDnSmooth(
224
+ resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size)
225
+ elif self.sample_mode is None:
226
+ pass
227
+ else:
228
+ raise ValueError(f'Wrong sample mode {self.sample_mode}, '
229
+ "supported ones are ['upsample', 'downsample', None].")
230
+
231
+ self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
232
+ # modulation inside each modulated conv
233
+ self.modulation = EqualLinear(
234
+ num_style_feat, in_channels, bias=True, bias_init_val=1, lr_mul=1, activation=None)
235
+
236
+ self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size))
237
+ self.padding = kernel_size // 2
238
+
239
+ def forward(self, x, style):
240
+ """Forward function.
241
+
242
+ Args:
243
+ x (Tensor): Tensor with shape (b, c, h, w).
244
+ style (Tensor): Tensor with shape (b, num_style_feat).
245
+
246
+ Returns:
247
+ Tensor: Modulated tensor after convolution.
248
+ """
249
+ b, c, h, w = x.shape # c = c_in
250
+ # weight modulation
251
+ style = self.modulation(style).view(b, 1, c, 1, 1)
252
+ # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
253
+ weight = self.scale * self.weight * style # (b, c_out, c_in, k, k)
254
+
255
+ if self.demodulate:
256
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
257
+ weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
258
+
259
+ weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
260
+
261
+ if self.sample_mode == 'upsample':
262
+ x = x.view(1, b * c, h, w)
263
+ weight = weight.view(b, self.out_channels, c, self.kernel_size, self.kernel_size)
264
+ weight = weight.transpose(1, 2).reshape(b * c, self.out_channels, self.kernel_size, self.kernel_size)
265
+ out = F.conv_transpose2d(x, weight, padding=0, stride=2, groups=b)
266
+ out = out.view(b, self.out_channels, *out.shape[2:4])
267
+ out = self.smooth(out)
268
+ elif self.sample_mode == 'downsample':
269
+ x = self.smooth(x)
270
+ x = x.view(1, b * c, *x.shape[2:4])
271
+ out = F.conv2d(x, weight, padding=0, stride=2, groups=b)
272
+ out = out.view(b, self.out_channels, *out.shape[2:4])
273
+ else:
274
+ x = x.view(1, b * c, h, w)
275
+ # weight: (b*c_out, c_in, k, k), groups=b
276
+ out = F.conv2d(x, weight, padding=self.padding, groups=b)
277
+ out = out.view(b, self.out_channels, *out.shape[2:4])
278
+
279
+ return out
280
+
281
+ def __repr__(self):
282
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
283
+ f'out_channels={self.out_channels}, '
284
+ f'kernel_size={self.kernel_size}, '
285
+ f'demodulate={self.demodulate}, sample_mode={self.sample_mode})')
286
+
287
+
288
+ class StyleConv(nn.Module):
289
+ """Style conv.
290
+
291
+ Args:
292
+ in_channels (int): Channel number of the input.
293
+ out_channels (int): Channel number of the output.
294
+ kernel_size (int): Size of the convolving kernel.
295
+ num_style_feat (int): Channel number of style features.
296
+ demodulate (bool): Whether demodulate in the conv layer. Default: True.
297
+ sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
298
+ Default: None.
299
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
300
+ magnitude. Default: (1, 3, 3, 1).
301
+ """
302
+
303
+ def __init__(self,
304
+ in_channels,
305
+ out_channels,
306
+ kernel_size,
307
+ num_style_feat,
308
+ demodulate=True,
309
+ sample_mode=None,
310
+ resample_kernel=(1, 3, 3, 1)):
311
+ super(StyleConv, self).__init__()
312
+ self.modulated_conv = ModulatedConv2d(
313
+ in_channels,
314
+ out_channels,
315
+ kernel_size,
316
+ num_style_feat,
317
+ demodulate=demodulate,
318
+ sample_mode=sample_mode,
319
+ resample_kernel=resample_kernel)
320
+ self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
321
+ self.activate = FusedLeakyReLU(out_channels)
322
+
323
+ def forward(self, x, style, noise=None):
324
+ # modulate
325
+ out = self.modulated_conv(x, style)
326
+ # noise injection
327
+ if noise is None:
328
+ b, _, h, w = out.shape
329
+ noise = out.new_empty(b, 1, h, w).normal_()
330
+ out = out + self.weight * noise
331
+ # activation (with bias)
332
+ out = self.activate(out)
333
+ return out
334
+
335
+
336
+ class ToRGB(nn.Module):
337
+ """To RGB from features.
338
+
339
+ Args:
340
+ in_channels (int): Channel number of input.
341
+ num_style_feat (int): Channel number of style features.
342
+ upsample (bool): Whether to upsample. Default: True.
343
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
344
+ magnitude. Default: (1, 3, 3, 1).
345
+ """
346
+
347
+ def __init__(self, in_channels, num_style_feat, upsample=True, resample_kernel=(1, 3, 3, 1)):
348
+ super(ToRGB, self).__init__()
349
+ if upsample:
350
+ self.upsample = UpFirDnUpsample(resample_kernel, factor=2)
351
+ else:
352
+ self.upsample = None
353
+ self.modulated_conv = ModulatedConv2d(
354
+ in_channels, 3, kernel_size=1, num_style_feat=num_style_feat, demodulate=False, sample_mode=None)
355
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
356
+
357
+ def forward(self, x, style, skip=None):
358
+ """Forward function.
359
+
360
+ Args:
361
+ x (Tensor): Feature tensor with shape (b, c, h, w).
362
+ style (Tensor): Tensor with shape (b, num_style_feat).
363
+ skip (Tensor): Base/skip tensor. Default: None.
364
+
365
+ Returns:
366
+ Tensor: RGB images.
367
+ """
368
+ out = self.modulated_conv(x, style)
369
+ out = out + self.bias
370
+ if skip is not None:
371
+ if self.upsample:
372
+ skip = self.upsample(skip)
373
+ out = out + skip
374
+ return out
375
+
376
+
377
+ class ConstantInput(nn.Module):
378
+ """Constant input.
379
+
380
+ Args:
381
+ num_channel (int): Channel number of constant input.
382
+ size (int): Spatial size of constant input.
383
+ """
384
+
385
+ def __init__(self, num_channel, size):
386
+ super(ConstantInput, self).__init__()
387
+ self.weight = nn.Parameter(torch.randn(1, num_channel, size, size))
388
+
389
+ def forward(self, batch):
390
+ out = self.weight.repeat(batch, 1, 1, 1)
391
+ return out
392
+
393
+
394
+ @ARCH_REGISTRY.register()
395
+ class StyleGAN2Generator(nn.Module):
396
+ """StyleGAN2 Generator.
397
+
398
+ Args:
399
+ out_size (int): The spatial size of outputs.
400
+ num_style_feat (int): Channel number of style features. Default: 512.
401
+ num_mlp (int): Layer number of MLP style layers. Default: 8.
402
+ channel_multiplier (int): Channel multiplier for large networks of
403
+ StyleGAN2. Default: 2.
404
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
405
+ magnitude. An outer product will be applied to extend the 1D resample
406
+ kernel to a 2D resample kernel. Default: (1, 3, 3, 1).
407
+ lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
408
+ narrow (float): Narrow ratio for channels. Default: 1.0.
409
+ """
410
+
411
+ def __init__(self,
412
+ out_size,
413
+ num_style_feat=512,
414
+ num_mlp=8,
415
+ channel_multiplier=2,
416
+ resample_kernel=(1, 3, 3, 1),
417
+ lr_mlp=0.01,
418
+ narrow=1):
419
+ super(StyleGAN2Generator, self).__init__()
420
+ # Style MLP layers
421
+ self.num_style_feat = num_style_feat
422
+ style_mlp_layers = [NormStyleCode()]
423
+ for i in range(num_mlp):
424
+ style_mlp_layers.append(
425
+ EqualLinear(
426
+ num_style_feat, num_style_feat, bias=True, bias_init_val=0, lr_mul=lr_mlp,
427
+ activation='fused_lrelu'))
428
+ self.style_mlp = nn.Sequential(*style_mlp_layers)
429
+
430
+ channels = {
431
+ '4': int(512 * narrow),
432
+ '8': int(512 * narrow),
433
+ '16': int(512 * narrow),
434
+ '32': int(512 * narrow),
435
+ '64': int(256 * channel_multiplier * narrow),
436
+ '128': int(128 * channel_multiplier * narrow),
437
+ '256': int(64 * channel_multiplier * narrow),
438
+ '512': int(32 * channel_multiplier * narrow),
439
+ '1024': int(16 * channel_multiplier * narrow)
440
+ }
441
+ self.channels = channels
442
+
443
+ self.constant_input = ConstantInput(channels['4'], size=4)
444
+ self.style_conv1 = StyleConv(
445
+ channels['4'],
446
+ channels['4'],
447
+ kernel_size=3,
448
+ num_style_feat=num_style_feat,
449
+ demodulate=True,
450
+ sample_mode=None,
451
+ resample_kernel=resample_kernel)
452
+ self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False, resample_kernel=resample_kernel)
453
+
454
+ self.log_size = int(math.log(out_size, 2))
455
+ self.num_layers = (self.log_size - 2) * 2 + 1
456
+ self.num_latent = self.log_size * 2 - 2
457
+
458
+ self.style_convs = nn.ModuleList()
459
+ self.to_rgbs = nn.ModuleList()
460
+ self.noises = nn.Module()
461
+
462
+ in_channels = channels['4']
463
+ # noise
464
+ for layer_idx in range(self.num_layers):
465
+ resolution = 2**((layer_idx + 5) // 2)
466
+ shape = [1, 1, resolution, resolution]
467
+ self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape))
468
+ # style convs and to_rgbs
469
+ for i in range(3, self.log_size + 1):
470
+ out_channels = channels[f'{2**i}']
471
+ self.style_convs.append(
472
+ StyleConv(
473
+ in_channels,
474
+ out_channels,
475
+ kernel_size=3,
476
+ num_style_feat=num_style_feat,
477
+ demodulate=True,
478
+ sample_mode='upsample',
479
+ resample_kernel=resample_kernel,
480
+ ))
481
+ self.style_convs.append(
482
+ StyleConv(
483
+ out_channels,
484
+ out_channels,
485
+ kernel_size=3,
486
+ num_style_feat=num_style_feat,
487
+ demodulate=True,
488
+ sample_mode=None,
489
+ resample_kernel=resample_kernel))
490
+ self.to_rgbs.append(ToRGB(out_channels, num_style_feat, upsample=True, resample_kernel=resample_kernel))
491
+ in_channels = out_channels
492
+
493
+ def make_noise(self):
494
+ """Make noise for noise injection."""
495
+ device = self.constant_input.weight.device
496
+ noises = [torch.randn(1, 1, 4, 4, device=device)]
497
+
498
+ for i in range(3, self.log_size + 1):
499
+ for _ in range(2):
500
+ noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))
501
+
502
+ return noises
503
+
504
+ def get_latent(self, x):
505
+ return self.style_mlp(x)
506
+
507
+ def mean_latent(self, num_latent):
508
+ latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device)
509
+ latent = self.style_mlp(latent_in).mean(0, keepdim=True)
510
+ return latent
511
+
512
+ def forward(self,
513
+ styles,
514
+ input_is_latent=False,
515
+ noise=None,
516
+ randomize_noise=True,
517
+ truncation=1,
518
+ truncation_latent=None,
519
+ inject_index=None,
520
+ return_latents=False):
521
+ """Forward function for StyleGAN2Generator.
522
+
523
+ Args:
524
+ styles (list[Tensor]): Sample codes of styles.
525
+ input_is_latent (bool): Whether input is latent style.
526
+ Default: False.
527
+ noise (Tensor | None): Input noise or None. Default: None.
528
+ randomize_noise (bool): Randomize noise, used when 'noise' is
529
+ None. Default: True.
530
+ truncation (float): Truncation ratio for the truncation trick. Default: 1.
531
+ truncation_latent (Tensor | None): Mean latent used as the truncation target. Default: None.
532
+ inject_index (int | None): The injection index for mixing noise.
533
+ Default: None.
534
+ return_latents (bool): Whether to return style latents.
535
+ Default: False.
536
+ """
537
+ # style codes -> latents with Style MLP layer
538
+ if not input_is_latent:
539
+ styles = [self.style_mlp(s) for s in styles]
540
+ # noises
541
+ if noise is None:
542
+ if randomize_noise:
543
+ noise = [None] * self.num_layers # for each style conv layer
544
+ else: # use the stored noise
545
+ noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
546
+ # style truncation
547
+ if truncation < 1:
548
+ style_truncation = []
549
+ for style in styles:
550
+ style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
551
+ styles = style_truncation
552
+ # get style latent with injection
553
+ if len(styles) == 1:
554
+ inject_index = self.num_latent
555
+
556
+ if styles[0].ndim < 3:
557
+ # repeat latent code for all the layers
558
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
559
+ else: # used for encoder with different latent code for each layer
560
+ latent = styles[0]
561
+ elif len(styles) == 2: # mixing noises
562
+ if inject_index is None:
563
+ inject_index = random.randint(1, self.num_latent - 1)
564
+ latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
565
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
566
+ latent = torch.cat([latent1, latent2], 1)
567
+
568
+ # main generation
569
+ out = self.constant_input(latent.shape[0])
570
+ out = self.style_conv1(out, latent[:, 0], noise=noise[0])
571
+ skip = self.to_rgb1(out, latent[:, 1])
572
+
573
+ i = 1
574
+ for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
575
+ noise[2::2], self.to_rgbs):
576
+ out = conv1(out, latent[:, i], noise=noise1)
577
+ out = conv2(out, latent[:, i + 1], noise=noise2)
578
+ skip = to_rgb(out, latent[:, i + 2], skip)
579
+ i += 2
580
+
581
+ image = skip
582
+
583
+ if return_latents:
584
+ return image, latent
585
+ else:
586
+ return image, None
587
+
588
+
589
+ class ScaledLeakyReLU(nn.Module):
590
+ """Scaled LeakyReLU.
591
+
592
+ Args:
593
+ negative_slope (float): Negative slope. Default: 0.2.
594
+ """
595
+
596
+ def __init__(self, negative_slope=0.2):
597
+ super(ScaledLeakyReLU, self).__init__()
598
+ self.negative_slope = negative_slope
599
+
600
+ def forward(self, x):
601
+ out = F.leaky_relu(x, negative_slope=self.negative_slope)
602
+ return out * math.sqrt(2)
603
+
604
+
605
+ class EqualConv2d(nn.Module):
606
+ """Equalized Conv2d as used in StyleGAN2.
607
+
608
+ Args:
609
+ in_channels (int): Channel number of the input.
610
+ out_channels (int): Channel number of the output.
611
+ kernel_size (int): Size of the convolving kernel.
612
+ stride (int): Stride of the convolution. Default: 1
613
+ padding (int): Zero-padding added to both sides of the input.
614
+ Default: 0.
615
+ bias (bool): If ``True``, adds a learnable bias to the output.
616
+ Default: ``True``.
617
+ bias_init_val (float): Bias initialized value. Default: 0.
618
+ """
619
+
620
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0):
621
+ super(EqualConv2d, self).__init__()
622
+ self.in_channels = in_channels
623
+ self.out_channels = out_channels
624
+ self.kernel_size = kernel_size
625
+ self.stride = stride
626
+ self.padding = padding
627
+ self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
628
+
629
+ self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
630
+ if bias:
631
+ self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
632
+ else:
633
+ self.register_parameter('bias', None)
634
+
635
+ def forward(self, x):
636
+ out = F.conv2d(
637
+ x,
638
+ self.weight * self.scale,
639
+ bias=self.bias,
640
+ stride=self.stride,
641
+ padding=self.padding,
642
+ )
643
+
644
+ return out
645
+
646
+ def __repr__(self):
647
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
648
+ f'out_channels={self.out_channels}, '
649
+ f'kernel_size={self.kernel_size},'
650
+ f' stride={self.stride}, padding={self.padding}, '
651
+ f'bias={self.bias is not None})')
652
+
653
+
654
+ class ConvLayer(nn.Sequential):
655
+ """Conv Layer used in StyleGAN2 Discriminator.
656
+
657
+ Args:
658
+ in_channels (int): Channel number of the input.
659
+ out_channels (int): Channel number of the output.
660
+ kernel_size (int): Kernel size.
661
+ downsample (bool): Whether downsample by a factor of 2.
662
+ Default: False.
663
+ resample_kernel (list[int]): A list indicating the 1D resample
664
+ kernel magnitude. An outer product will be applied to
665
+ extend the 1D resample kernel to a 2D resample kernel.
666
+ Default: (1, 3, 3, 1).
667
+ bias (bool): Whether to use bias. Default: True.
668
+ activate (bool): Whether to use activation. Default: True.
669
+ """
670
+
671
+ def __init__(self,
672
+ in_channels,
673
+ out_channels,
674
+ kernel_size,
675
+ downsample=False,
676
+ resample_kernel=(1, 3, 3, 1),
677
+ bias=True,
678
+ activate=True):
679
+ layers = []
680
+ # downsample
681
+ if downsample:
682
+ layers.append(
683
+ UpFirDnSmooth(resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size))
684
+ stride = 2
685
+ self.padding = 0
686
+ else:
687
+ stride = 1
688
+ self.padding = kernel_size // 2
689
+ # conv
690
+ layers.append(
691
+ EqualConv2d(
692
+ in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias
693
+ and not activate))
694
+ # activation
695
+ if activate:
696
+ if bias:
697
+ layers.append(FusedLeakyReLU(out_channels))
698
+ else:
699
+ layers.append(ScaledLeakyReLU(0.2))
700
+
701
+ super(ConvLayer, self).__init__(*layers)
702
+
703
+
704
+ class ResBlock(nn.Module):
705
+ """Residual block used in StyleGAN2 Discriminator.
706
+
707
+ Args:
708
+ in_channels (int): Channel number of the input.
709
+ out_channels (int): Channel number of the output.
710
+ resample_kernel (list[int]): A list indicating the 1D resample
711
+ kernel magnitude. An outer product will be applied to
712
+ extend the 1D resample kernel to a 2D resample kernel.
713
+ Default: (1, 3, 3, 1).
714
+ """
715
+
716
+ def __init__(self, in_channels, out_channels, resample_kernel=(1, 3, 3, 1)):
717
+ super(ResBlock, self).__init__()
718
+
719
+ self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)
720
+ self.conv2 = ConvLayer(
721
+ in_channels, out_channels, 3, downsample=True, resample_kernel=resample_kernel, bias=True, activate=True)
722
+ self.skip = ConvLayer(
723
+ in_channels, out_channels, 1, downsample=True, resample_kernel=resample_kernel, bias=False, activate=False)
724
+
725
+ def forward(self, x):
726
+ out = self.conv1(x)
727
+ out = self.conv2(out)
728
+ skip = self.skip(x)
729
+ out = (out + skip) / math.sqrt(2)
730
+ return out
731
+
732
+
733
+ @ARCH_REGISTRY.register()
734
+ class StyleGAN2Discriminator(nn.Module):
735
+ """StyleGAN2 Discriminator.
736
+
737
+ Args:
738
+ out_size (int): The spatial size of outputs.
739
+ channel_multiplier (int): Channel multiplier for large networks of
740
+ StyleGAN2. Default: 2.
741
+ resample_kernel (list[int]): A list indicating the 1D resample kernel
742
+ magnitude. An outer product will be applied to extend the 1D resample
743
+ kernel to a 2D resample kernel. Default: (1, 3, 3, 1).
744
+ stddev_group (int): For group stddev statistics. Default: 4.
745
+ narrow (float): Narrow ratio for channels. Default: 1.0.
746
+ """
747
+
748
+ def __init__(self, out_size, channel_multiplier=2, resample_kernel=(1, 3, 3, 1), stddev_group=4, narrow=1):
749
+ super(StyleGAN2Discriminator, self).__init__()
750
+
751
+ channels = {
752
+ '4': int(512 * narrow),
753
+ '8': int(512 * narrow),
754
+ '16': int(512 * narrow),
755
+ '32': int(512 * narrow),
756
+ '64': int(256 * channel_multiplier * narrow),
757
+ '128': int(128 * channel_multiplier * narrow),
758
+ '256': int(64 * channel_multiplier * narrow),
759
+ '512': int(32 * channel_multiplier * narrow),
760
+ '1024': int(16 * channel_multiplier * narrow)
761
+ }
762
+
763
+ log_size = int(math.log(out_size, 2))
764
+
765
+ conv_body = [ConvLayer(3, channels[f'{out_size}'], 1, bias=True, activate=True)]
766
+
767
+ in_channels = channels[f'{out_size}']
768
+ for i in range(log_size, 2, -1):
769
+ out_channels = channels[f'{2**(i - 1)}']
770
+ conv_body.append(ResBlock(in_channels, out_channels, resample_kernel))
771
+ in_channels = out_channels
772
+ self.conv_body = nn.Sequential(*conv_body)
773
+
774
+ self.final_conv = ConvLayer(in_channels + 1, channels['4'], 3, bias=True, activate=True)
775
+ self.final_linear = nn.Sequential(
776
+ EqualLinear(
777
+ channels['4'] * 4 * 4, channels['4'], bias=True, bias_init_val=0, lr_mul=1, activation='fused_lrelu'),
778
+ EqualLinear(channels['4'], 1, bias=True, bias_init_val=0, lr_mul=1, activation=None),
779
+ )
780
+ self.stddev_group = stddev_group
781
+ self.stddev_feat = 1
782
+
783
+ def forward(self, x):
784
+ out = self.conv_body(x)
785
+
786
+ b, c, h, w = out.shape
787
+ # concatenate a group stddev statistics to out
788
+ group = min(b, self.stddev_group) # Minibatch must be divisible by (or smaller than) group_size
789
+ stddev = out.view(group, -1, self.stddev_feat, c // self.stddev_feat, h, w)
790
+ stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
791
+ stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
792
+ stddev = stddev.repeat(group, 1, h, w)
793
+ out = torch.cat([out, stddev], 1)
794
+
795
+ out = self.final_conv(out)
796
+ out = out.view(b, -1)
797
+ out = self.final_linear(out)
798
+
799
+ return out
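A minimal usage sketch for the StyleGAN2 generator and discriminator above. Assumptions: basicsr is importable, its custom CUDA ops (basicsr.ops.fused_act, basicsr.ops.upfirdn2d) are built so a CUDA device is required, and weights are random:

import torch
from basicsr.archs.stylegan2_arch import StyleGAN2Generator, StyleGAN2Discriminator

g = StyleGAN2Generator(out_size=256, num_style_feat=512, num_mlp=8).cuda().eval()
d = StyleGAN2Discriminator(out_size=256).cuda().eval()
with torch.no_grad():
    z = torch.randn(4, 512, device='cuda')  # one style code per sample
    fake, _ = g([z])                        # (4, 3, 256, 256) RGB images
    logits = d(fake)                        # (4, 1) realness scores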
basicsr/archs/stylegan2_bilinear_arch.py ADDED
@@ -0,0 +1,614 @@
1
+ import math
2
+ import random
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from basicsr.ops.fused_act import FusedLeakyReLU, fused_leaky_relu
8
+ from basicsr.utils.registry import ARCH_REGISTRY
9
+
10
+
11
+ class NormStyleCode(nn.Module):
12
+
13
+ def forward(self, x):
14
+ """Normalize the style codes.
15
+
16
+ Args:
17
+ x (Tensor): Style codes with shape (b, c).
18
+
19
+ Returns:
20
+ Tensor: Normalized tensor.
21
+ """
22
+ return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
23
+
24
+
25
+ class EqualLinear(nn.Module):
26
+ """Equalized Linear as StyleGAN2.
27
+
28
+ Args:
29
+ in_channels (int): Size of each sample.
30
+ out_channels (int): Size of each output sample.
31
+ bias (bool): If set to ``False``, the layer will not learn an additive
32
+ bias. Default: ``True``.
33
+ bias_init_val (float): Bias initialized value. Default: 0.
34
+ lr_mul (float): Learning rate multiplier. Default: 1.
35
+ activation (None | str): The activation after ``linear`` operation.
36
+ Supported: 'fused_lrelu', None. Default: None.
37
+ """
38
+
39
+ def __init__(self, in_channels, out_channels, bias=True, bias_init_val=0, lr_mul=1, activation=None):
40
+ super(EqualLinear, self).__init__()
41
+ self.in_channels = in_channels
42
+ self.out_channels = out_channels
43
+ self.lr_mul = lr_mul
44
+ self.activation = activation
45
+ if self.activation not in ['fused_lrelu', None]:
46
+ raise ValueError(f'Wrong activation value in EqualLinear: {activation}. '
47
+ "Supported ones are: ['fused_lrelu', None].")
48
+ self.scale = (1 / math.sqrt(in_channels)) * lr_mul
49
+
50
+ self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul))
51
+ if bias:
52
+ self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
53
+ else:
54
+ self.register_parameter('bias', None)
55
+
56
+ def forward(self, x):
57
+ if self.bias is None:
58
+ bias = None
59
+ else:
60
+ bias = self.bias * self.lr_mul
61
+ if self.activation == 'fused_lrelu':
62
+ out = F.linear(x, self.weight * self.scale)
63
+ out = fused_leaky_relu(out, bias)
64
+ else:
65
+ out = F.linear(x, self.weight * self.scale, bias=bias)
66
+ return out
67
+
68
+ def __repr__(self):
69
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
70
+ f'out_channels={self.out_channels}, bias={self.bias is not None})')
71
+
72
+
73
+ class ModulatedConv2d(nn.Module):
74
+ """Modulated Conv2d used in StyleGAN2.
75
+
76
+ There is no bias in ModulatedConv2d.
77
+
78
+ Args:
79
+ in_channels (int): Channel number of the input.
80
+ out_channels (int): Channel number of the output.
81
+ kernel_size (int): Size of the convolving kernel.
82
+ num_style_feat (int): Channel number of style features.
83
+ demodulate (bool): Whether to demodulate in the conv layer.
84
+ Default: True.
85
+ sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
86
+ Default: None.
87
+ eps (float): A value added to the denominator for numerical stability.
88
+ Default: 1e-8.
89
+ """
90
+
91
+ def __init__(self,
92
+ in_channels,
93
+ out_channels,
94
+ kernel_size,
95
+ num_style_feat,
96
+ demodulate=True,
97
+ sample_mode=None,
98
+ eps=1e-8,
99
+ interpolation_mode='bilinear'):
100
+ super(ModulatedConv2d, self).__init__()
101
+ self.in_channels = in_channels
102
+ self.out_channels = out_channels
103
+ self.kernel_size = kernel_size
104
+ self.demodulate = demodulate
105
+ self.sample_mode = sample_mode
106
+ self.eps = eps
107
+ self.interpolation_mode = interpolation_mode
108
+ if self.interpolation_mode == 'nearest':
109
+ self.align_corners = None
110
+ else:
111
+ self.align_corners = False
112
+
113
+ self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
114
+ # modulation inside each modulated conv
115
+ self.modulation = EqualLinear(
116
+ num_style_feat, in_channels, bias=True, bias_init_val=1, lr_mul=1, activation=None)
117
+
118
+ self.weight = nn.Parameter(torch.randn(1, out_channels, in_channels, kernel_size, kernel_size))
119
+ self.padding = kernel_size // 2
120
+
121
+ def forward(self, x, style):
122
+ """Forward function.
123
+
124
+ Args:
125
+ x (Tensor): Tensor with shape (b, c, h, w).
126
+ style (Tensor): Tensor with shape (b, num_style_feat).
127
+
128
+ Returns:
129
+ Tensor: Modulated tensor after convolution.
130
+ """
131
+ b, c, h, w = x.shape # c = c_in
132
+ # weight modulation
133
+ style = self.modulation(style).view(b, 1, c, 1, 1)
134
+ # self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
135
+ weight = self.scale * self.weight * style # (b, c_out, c_in, k, k)
136
+
137
+ if self.demodulate:
138
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
139
+ weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
140
+
141
+ weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
142
+
143
+ if self.sample_mode == 'upsample':
144
+ x = F.interpolate(x, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners)
145
+ elif self.sample_mode == 'downsample':
146
+ x = F.interpolate(x, scale_factor=0.5, mode=self.interpolation_mode, align_corners=self.align_corners)
147
+
148
+ b, c, h, w = x.shape
149
+ x = x.view(1, b * c, h, w)
150
+ # weight: (b*c_out, c_in, k, k), groups=b
151
+ out = F.conv2d(x, weight, padding=self.padding, groups=b)
152
+ out = out.view(b, self.out_channels, *out.shape[2:4])
153
+
154
+ return out
155
+
156
+ def __repr__(self):
157
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
158
+ f'out_channels={self.out_channels}, '
159
+ f'kernel_size={self.kernel_size}, '
160
+ f'demodulate={self.demodulate}, sample_mode={self.sample_mode})')
161
+
162
+
163
+ class StyleConv(nn.Module):
164
+ """Style conv.
165
+
166
+ Args:
167
+ in_channels (int): Channel number of the input.
168
+ out_channels (int): Channel number of the output.
169
+ kernel_size (int): Size of the convolving kernel.
170
+ num_style_feat (int): Channel number of style features.
171
+ demodulate (bool): Whether to demodulate in the conv layer. Default: True.
172
+ sample_mode (str | None): Indicating 'upsample', 'downsample' or None.
173
+ Default: None.
174
+ """
175
+
176
+ def __init__(self,
177
+ in_channels,
178
+ out_channels,
179
+ kernel_size,
180
+ num_style_feat,
181
+ demodulate=True,
182
+ sample_mode=None,
183
+ interpolation_mode='bilinear'):
184
+ super(StyleConv, self).__init__()
185
+ self.modulated_conv = ModulatedConv2d(
186
+ in_channels,
187
+ out_channels,
188
+ kernel_size,
189
+ num_style_feat,
190
+ demodulate=demodulate,
191
+ sample_mode=sample_mode,
192
+ interpolation_mode=interpolation_mode)
193
+ self.weight = nn.Parameter(torch.zeros(1)) # for noise injection
194
+ self.activate = FusedLeakyReLU(out_channels)
195
+
196
+ def forward(self, x, style, noise=None):
197
+ # modulate
198
+ out = self.modulated_conv(x, style)
199
+ # noise injection
200
+ if noise is None:
201
+ b, _, h, w = out.shape
202
+ noise = out.new_empty(b, 1, h, w).normal_()
203
+ out = out + self.weight * noise
204
+ # activation (with bias)
205
+ out = self.activate(out)
206
+ return out
207
+
208
+
209
+ class ToRGB(nn.Module):
210
+ """To RGB from features.
211
+
212
+ Args:
213
+ in_channels (int): Channel number of input.
214
+ num_style_feat (int): Channel number of style features.
215
+ upsample (bool): Whether to upsample. Default: True.
216
+ """
217
+
218
+ def __init__(self, in_channels, num_style_feat, upsample=True, interpolation_mode='bilinear'):
219
+ super(ToRGB, self).__init__()
220
+ self.upsample = upsample
221
+ self.interpolation_mode = interpolation_mode
222
+ if self.interpolation_mode == 'nearest':
223
+ self.align_corners = None
224
+ else:
225
+ self.align_corners = False
226
+ self.modulated_conv = ModulatedConv2d(
227
+ in_channels,
228
+ 3,
229
+ kernel_size=1,
230
+ num_style_feat=num_style_feat,
231
+ demodulate=False,
232
+ sample_mode=None,
233
+ interpolation_mode=interpolation_mode)
234
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
235
+
236
+ def forward(self, x, style, skip=None):
237
+ """Forward function.
238
+
239
+ Args:
240
+ x (Tensor): Feature tensor with shape (b, c, h, w).
241
+ style (Tensor): Tensor with shape (b, num_style_feat).
242
+ skip (Tensor): Base/skip tensor. Default: None.
243
+
244
+ Returns:
245
+ Tensor: RGB images.
246
+ """
247
+ out = self.modulated_conv(x, style)
248
+ out = out + self.bias
249
+ if skip is not None:
250
+ if self.upsample:
251
+ skip = F.interpolate(
252
+ skip, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners)
253
+ out = out + skip
254
+ return out
255
+
256
+
257
+ class ConstantInput(nn.Module):
258
+ """Constant input.
259
+
260
+ Args:
261
+ num_channel (int): Channel number of constant input.
262
+ size (int): Spatial size of constant input.
263
+ """
264
+
265
+ def __init__(self, num_channel, size):
266
+ super(ConstantInput, self).__init__()
267
+ self.weight = nn.Parameter(torch.randn(1, num_channel, size, size))
268
+
269
+ def forward(self, batch):
270
+ out = self.weight.repeat(batch, 1, 1, 1)
271
+ return out
272
+
273
+
274
+ @ARCH_REGISTRY.register(suffix='basicsr')
275
+ class StyleGAN2GeneratorBilinear(nn.Module):
276
+ """StyleGAN2 Generator.
277
+
278
+ Args:
279
+ out_size (int): The spatial size of outputs.
280
+ num_style_feat (int): Channel number of style features. Default: 512.
281
+ num_mlp (int): Layer number of MLP style layers. Default: 8.
282
+ channel_multiplier (int): Channel multiplier for large networks of
283
+ StyleGAN2. Default: 2.
284
+ lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
285
+ narrow (float): Narrow ratio for channels. Default: 1.0.
286
+ """
287
+
288
+ def __init__(self,
289
+ out_size,
290
+ num_style_feat=512,
291
+ num_mlp=8,
292
+ channel_multiplier=2,
293
+ lr_mlp=0.01,
294
+ narrow=1,
295
+ interpolation_mode='bilinear'):
296
+ super(StyleGAN2GeneratorBilinear, self).__init__()
297
+ # Style MLP layers
298
+ self.num_style_feat = num_style_feat
299
+ style_mlp_layers = [NormStyleCode()]
300
+ for i in range(num_mlp):
301
+ style_mlp_layers.append(
302
+ EqualLinear(
303
+ num_style_feat, num_style_feat, bias=True, bias_init_val=0, lr_mul=lr_mlp,
304
+ activation='fused_lrelu'))
305
+ self.style_mlp = nn.Sequential(*style_mlp_layers)
306
+
307
+ channels = {
308
+ '4': int(512 * narrow),
309
+ '8': int(512 * narrow),
310
+ '16': int(512 * narrow),
311
+ '32': int(512 * narrow),
312
+ '64': int(256 * channel_multiplier * narrow),
313
+ '128': int(128 * channel_multiplier * narrow),
314
+ '256': int(64 * channel_multiplier * narrow),
315
+ '512': int(32 * channel_multiplier * narrow),
316
+ '1024': int(16 * channel_multiplier * narrow)
317
+ }
318
+ self.channels = channels
319
+
320
+ self.constant_input = ConstantInput(channels['4'], size=4)
321
+ self.style_conv1 = StyleConv(
322
+ channels['4'],
323
+ channels['4'],
324
+ kernel_size=3,
325
+ num_style_feat=num_style_feat,
326
+ demodulate=True,
327
+ sample_mode=None,
328
+ interpolation_mode=interpolation_mode)
329
+ self.to_rgb1 = ToRGB(channels['4'], num_style_feat, upsample=False, interpolation_mode=interpolation_mode)
330
+
331
+ self.log_size = int(math.log(out_size, 2))
332
+ self.num_layers = (self.log_size - 2) * 2 + 1
333
+ self.num_latent = self.log_size * 2 - 2
334
+
335
+ self.style_convs = nn.ModuleList()
336
+ self.to_rgbs = nn.ModuleList()
337
+ self.noises = nn.Module()
338
+
339
+ in_channels = channels['4']
340
+ # noise
341
+ for layer_idx in range(self.num_layers):
342
+ resolution = 2**((layer_idx + 5) // 2)
343
+ shape = [1, 1, resolution, resolution]
344
+ self.noises.register_buffer(f'noise{layer_idx}', torch.randn(*shape))
345
+ # style convs and to_rgbs
346
+ for i in range(3, self.log_size + 1):
347
+ out_channels = channels[f'{2**i}']
348
+ self.style_convs.append(
349
+ StyleConv(
350
+ in_channels,
351
+ out_channels,
352
+ kernel_size=3,
353
+ num_style_feat=num_style_feat,
354
+ demodulate=True,
355
+ sample_mode='upsample',
356
+ interpolation_mode=interpolation_mode))
357
+ self.style_convs.append(
358
+ StyleConv(
359
+ out_channels,
360
+ out_channels,
361
+ kernel_size=3,
362
+ num_style_feat=num_style_feat,
363
+ demodulate=True,
364
+ sample_mode=None,
365
+ interpolation_mode=interpolation_mode))
366
+ self.to_rgbs.append(
367
+ ToRGB(out_channels, num_style_feat, upsample=True, interpolation_mode=interpolation_mode))
368
+ in_channels = out_channels
369
+
370
+ def make_noise(self):
371
+ """Make noise for noise injection."""
372
+ device = self.constant_input.weight.device
373
+ noises = [torch.randn(1, 1, 4, 4, device=device)]
374
+
375
+ for i in range(3, self.log_size + 1):
376
+ for _ in range(2):
377
+ noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))
378
+
379
+ return noises
380
+
381
+ def get_latent(self, x):
382
+ return self.style_mlp(x)
383
+
384
+ def mean_latent(self, num_latent):
385
+ latent_in = torch.randn(num_latent, self.num_style_feat, device=self.constant_input.weight.device)
386
+ latent = self.style_mlp(latent_in).mean(0, keepdim=True)
387
+ return latent
388
+
389
+ def forward(self,
390
+ styles,
391
+ input_is_latent=False,
392
+ noise=None,
393
+ randomize_noise=True,
394
+ truncation=1,
395
+ truncation_latent=None,
396
+ inject_index=None,
397
+ return_latents=False):
398
+ """Forward function for StyleGAN2Generator.
399
+
400
+ Args:
401
+ styles (list[Tensor]): Sample codes of styles.
402
+ input_is_latent (bool): Whether input is latent style.
403
+ Default: False.
404
+ noise (Tensor | None): Input noise or None. Default: None.
405
+ randomize_noise (bool): Whether to randomize noise, used when 'noise'
406
+ is None. Default: True.
407
+ truncation (float): Truncation ratio for style truncation; values below 1 pull styles towards the mean latent. Default: 1.
408
+ truncation_latent (Tensor | None): The mean latent used as the truncation target. Required when truncation < 1. Default: None.
409
+ inject_index (int | None): The injection index for mixing noise.
410
+ Default: None.
411
+ return_latents (bool): Whether to return style latents.
412
+ Default: False.
413
+ """
414
+ # style codes -> latents with Style MLP layer
415
+ if not input_is_latent:
416
+ styles = [self.style_mlp(s) for s in styles]
417
+ # noises
418
+ if noise is None:
419
+ if randomize_noise:
420
+ noise = [None] * self.num_layers # for each style conv layer
421
+ else: # use the stored noise
422
+ noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
423
+ # style truncation
424
+ if truncation < 1:
425
+ style_truncation = []
426
+ for style in styles:
427
+ style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
428
+ styles = style_truncation
429
+ # get style latent with injection
430
+ if len(styles) == 1:
431
+ inject_index = self.num_latent
432
+
433
+ if styles[0].ndim < 3:
434
+ # repeat latent code for all the layers
435
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
436
+ else: # used for encoder with different latent code for each layer
437
+ latent = styles[0]
438
+ elif len(styles) == 2: # mixing noises
439
+ if inject_index is None:
440
+ inject_index = random.randint(1, self.num_latent - 1)
441
+ latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
442
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
443
+ latent = torch.cat([latent1, latent2], 1)
444
+
445
+ # main generation
446
+ out = self.constant_input(latent.shape[0])
447
+ out = self.style_conv1(out, latent[:, 0], noise=noise[0])
448
+ skip = self.to_rgb1(out, latent[:, 1])
449
+
450
+ i = 1
451
+ for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
452
+ noise[2::2], self.to_rgbs):
453
+ out = conv1(out, latent[:, i], noise=noise1)
454
+ out = conv2(out, latent[:, i + 1], noise=noise2)
455
+ skip = to_rgb(out, latent[:, i + 2], skip)
456
+ i += 2
457
+
458
+ image = skip
459
+
460
+ if return_latents:
461
+ return image, latent
462
+ else:
463
+ return image, None
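A hedged usage sketch of the generator forward pass (shape check only, with random weights; out_size and batch size are hypothetical, and it assumes the fused activation ops from basicsr.ops are available in the environment, since StyleConv and the style MLP rely on FusedLeakyReLU):
import torch

g = StyleGAN2GeneratorBilinear(out_size=256, num_style_feat=512)
z = torch.randn(4, 512)
img, _ = g([z])                      # styles are passed as a list of style codes
print(img.shape)                     # torch.Size([4, 3, 256, 256])

# style mixing: two codes with a random injection index
img_mix, _ = g([torch.randn(4, 512), torch.randn(4, 512)])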
464
+
465
+
466
+ class ScaledLeakyReLU(nn.Module):
467
+ """Scaled LeakyReLU.
468
+
469
+ Args:
470
+ negative_slope (float): Negative slope. Default: 0.2.
471
+ """
472
+
473
+ def __init__(self, negative_slope=0.2):
474
+ super(ScaledLeakyReLU, self).__init__()
475
+ self.negative_slope = negative_slope
476
+
477
+ def forward(self, x):
478
+ out = F.leaky_relu(x, negative_slope=self.negative_slope)
479
+ return out * math.sqrt(2)
480
+
481
+
482
+ class EqualConv2d(nn.Module):
483
+ """Equalized Linear as StyleGAN2.
484
+
485
+ Args:
486
+ in_channels (int): Channel number of the input.
487
+ out_channels (int): Channel number of the output.
488
+ kernel_size (int): Size of the convolving kernel.
489
+ stride (int): Stride of the convolution. Default: 1
490
+ padding (int): Zero-padding added to both sides of the input.
491
+ Default: 0.
492
+ bias (bool): If ``True``, adds a learnable bias to the output.
493
+ Default: ``True``.
494
+ bias_init_val (float): Bias initialized value. Default: 0.
495
+ """
496
+
497
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0):
498
+ super(EqualConv2d, self).__init__()
499
+ self.in_channels = in_channels
500
+ self.out_channels = out_channels
501
+ self.kernel_size = kernel_size
502
+ self.stride = stride
503
+ self.padding = padding
504
+ self.scale = 1 / math.sqrt(in_channels * kernel_size**2)
505
+
506
+ self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
507
+ if bias:
508
+ self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))
509
+ else:
510
+ self.register_parameter('bias', None)
511
+
512
+ def forward(self, x):
513
+ out = F.conv2d(
514
+ x,
515
+ self.weight * self.scale,
516
+ bias=self.bias,
517
+ stride=self.stride,
518
+ padding=self.padding,
519
+ )
520
+
521
+ return out
522
+
523
+ def __repr__(self):
524
+ return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '
525
+ f'out_channels={self.out_channels}, '
526
+ f'kernel_size={self.kernel_size},'
527
+ f' stride={self.stride}, padding={self.padding}, '
528
+ f'bias={self.bias is not None})')
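A small sketch of the equalized convolution: the weights are drawn from N(0, 1) and rescaled by 1/sqrt(in_channels * k^2) at every forward call (the sizes below are hypothetical):
import torch

conv = EqualConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
x = torch.randn(1, 3, 16, 16)
print(conv(x).shape)                 # torch.Size([1, 8, 16, 16])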
529
+
530
+
531
+ class ConvLayer(nn.Sequential):
532
+ """Conv Layer used in StyleGAN2 Discriminator.
533
+
534
+ Args:
535
+ in_channels (int): Channel number of the input.
536
+ out_channels (int): Channel number of the output.
537
+ kernel_size (int): Kernel size.
538
+ downsample (bool): Whether to downsample by a factor of 2.
539
+ Default: False.
540
+ bias (bool): Whether to use bias. Default: True.
541
+ activate (bool): Whether to use activation. Default: True.
542
+ """
543
+
544
+ def __init__(self,
545
+ in_channels,
546
+ out_channels,
547
+ kernel_size,
548
+ downsample=False,
549
+ bias=True,
550
+ activate=True,
551
+ interpolation_mode='bilinear'):
552
+ layers = []
553
+ self.interpolation_mode = interpolation_mode
554
+ # downsample
555
+ if downsample:
556
+ if self.interpolation_mode == 'nearest':
557
+ self.align_corners = None
558
+ else:
559
+ self.align_corners = False
560
+
561
+ layers.append(
562
+ torch.nn.Upsample(scale_factor=0.5, mode=interpolation_mode, align_corners=self.align_corners))
563
+ stride = 1
564
+ self.padding = kernel_size // 2
565
+ # conv
566
+ layers.append(
567
+ EqualConv2d(
568
+ in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias
569
+ and not activate))
570
+ # activation
571
+ if activate:
572
+ if bias:
573
+ layers.append(FusedLeakyReLU(out_channels))
574
+ else:
575
+ layers.append(ScaledLeakyReLU(0.2))
576
+
577
+ super(ConvLayer, self).__init__(*layers)
578
+
579
+
580
+ class ResBlock(nn.Module):
581
+ """Residual block used in StyleGAN2 Discriminator.
582
+
583
+ Args:
584
+ in_channels (int): Channel number of the input.
585
+ out_channels (int): Channel number of the output.
586
+ """
587
+
588
+ def __init__(self, in_channels, out_channels, interpolation_mode='bilinear'):
589
+ super(ResBlock, self).__init__()
590
+
591
+ self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)
592
+ self.conv2 = ConvLayer(
593
+ in_channels,
594
+ out_channels,
595
+ 3,
596
+ downsample=True,
597
+ interpolation_mode=interpolation_mode,
598
+ bias=True,
599
+ activate=True)
600
+ self.skip = ConvLayer(
601
+ in_channels,
602
+ out_channels,
603
+ 1,
604
+ downsample=True,
605
+ interpolation_mode=interpolation_mode,
606
+ bias=False,
607
+ activate=False)
608
+
609
+ def forward(self, x):
610
+ out = self.conv1(x)
611
+ out = self.conv2(out)
612
+ skip = self.skip(x)
613
+ out = (out + skip) / math.sqrt(2)
614
+ return out
basicsr/archs/swinir_arch.py ADDED
@@ -0,0 +1,956 @@
1
+ # Modified from https://github.com/JingyunLiang/SwinIR
2
+ # SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
3
+ # Originally Written by Ze Liu, Modified by Jingyun Liang.
4
+
5
+ import math
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.utils.checkpoint as checkpoint
9
+
10
+ from basicsr.utils.registry import ARCH_REGISTRY
11
+ from .arch_util import to_2tuple, trunc_normal_
12
+
13
+
14
+ def drop_path(x, drop_prob: float = 0., training: bool = False):
15
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
16
+
17
+ From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
18
+ """
19
+ if drop_prob == 0. or not training:
20
+ return x
21
+ keep_prob = 1 - drop_prob
22
+ shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
23
+ random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
24
+ random_tensor.floor_() # binarize
25
+ output = x.div(keep_prob) * random_tensor
26
+ return output
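A quick sketch of the stochastic-depth behaviour: during training roughly drop_prob of the samples in a batch are zeroed out and the survivors are rescaled by 1/keep_prob, so the expected output matches the input (the numbers are illustrative):
import torch

x = torch.ones(1000, 16)
y = drop_path(x, drop_prob=0.2, training=True)
print((y == 0).all(dim=1).float().mean())   # ~0.2 of the samples are dropped
print(y.mean())                             # ~1.0, the expectation is preserved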
27
+
28
+
29
+ class DropPath(nn.Module):
30
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
31
+
32
+ From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
33
+ """
34
+
35
+ def __init__(self, drop_prob=None):
36
+ super(DropPath, self).__init__()
37
+ self.drop_prob = drop_prob
38
+
39
+ def forward(self, x):
40
+ return drop_path(x, self.drop_prob, self.training)
41
+
42
+
43
+ class Mlp(nn.Module):
44
+
45
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
46
+ super().__init__()
47
+ out_features = out_features or in_features
48
+ hidden_features = hidden_features or in_features
49
+ self.fc1 = nn.Linear(in_features, hidden_features)
50
+ self.act = act_layer()
51
+ self.fc2 = nn.Linear(hidden_features, out_features)
52
+ self.drop = nn.Dropout(drop)
53
+
54
+ def forward(self, x):
55
+ x = self.fc1(x)
56
+ x = self.act(x)
57
+ x = self.drop(x)
58
+ x = self.fc2(x)
59
+ x = self.drop(x)
60
+ return x
61
+
62
+
63
+ def window_partition(x, window_size):
64
+ """
65
+ Args:
66
+ x: (b, h, w, c)
67
+ window_size (int): window size
68
+
69
+ Returns:
70
+ windows: (num_windows*b, window_size, window_size, c)
71
+ """
72
+ b, h, w, c = x.shape
73
+ x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
74
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)
75
+ return windows
76
+
77
+
78
+ def window_reverse(windows, window_size, h, w):
79
+ """
80
+ Args:
81
+ windows: (num_windows*b, window_size, window_size, c)
82
+ window_size (int): Window size
83
+ h (int): Height of image
84
+ w (int): Width of image
85
+
86
+ Returns:
87
+ x: (b, h, w, c)
88
+ """
89
+ b = int(windows.shape[0] / (h * w / window_size / window_size))
90
+ x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
91
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
92
+ return x
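A round-trip sketch showing that window_partition and window_reverse are exact inverses (the sizes are hypothetical; h and w must be multiples of the window size):
import torch

x = torch.randn(1, 8, 8, 3)                       # (b, h, w, c)
windows = window_partition(x, window_size=4)      # (4, 4, 4, 3): four 4x4 windows
y = window_reverse(windows, window_size=4, h=8, w=8)
assert torch.equal(x, y)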
93
+
94
+
95
+ class WindowAttention(nn.Module):
96
+ r""" Window based multi-head self attention (W-MSA) module with relative position bias.
97
+ It supports both of shifted and non-shifted window.
98
+
99
+ Args:
100
+ dim (int): Number of input channels.
101
+ window_size (tuple[int]): The height and width of the window.
102
+ num_heads (int): Number of attention heads.
103
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
104
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
105
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
106
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
107
+ """
108
+
109
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
110
+
111
+ super().__init__()
112
+ self.dim = dim
113
+ self.window_size = window_size # Wh, Ww
114
+ self.num_heads = num_heads
115
+ head_dim = dim // num_heads
116
+ self.scale = qk_scale or head_dim**-0.5
117
+
118
+ # define a parameter table of relative position bias
119
+ self.relative_position_bias_table = nn.Parameter(
120
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
121
+
122
+ # get pair-wise relative position index for each token inside the window
123
+ coords_h = torch.arange(self.window_size[0])
124
+ coords_w = torch.arange(self.window_size[1])
125
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
126
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
127
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
128
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
129
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
130
+ relative_coords[:, :, 1] += self.window_size[1] - 1
131
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
132
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
133
+ self.register_buffer('relative_position_index', relative_position_index)
134
+
135
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
136
+ self.attn_drop = nn.Dropout(attn_drop)
137
+ self.proj = nn.Linear(dim, dim)
138
+
139
+ self.proj_drop = nn.Dropout(proj_drop)
140
+
141
+ trunc_normal_(self.relative_position_bias_table, std=.02)
142
+ self.softmax = nn.Softmax(dim=-1)
143
+
144
+ def forward(self, x, mask=None):
145
+ """
146
+ Args:
147
+ x: input features with shape of (num_windows*b, n, c)
148
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
149
+ """
150
+ b_, n, c = x.shape
151
+ qkv = self.qkv(x).reshape(b_, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
152
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
153
+
154
+ q = q * self.scale
155
+ attn = (q @ k.transpose(-2, -1))
156
+
157
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
158
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
159
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
160
+ attn = attn + relative_position_bias.unsqueeze(0)
161
+
162
+ if mask is not None:
163
+ nw = mask.shape[0]
164
+ attn = attn.view(b_ // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)
165
+ attn = attn.view(-1, self.num_heads, n, n)
166
+ attn = self.softmax(attn)
167
+ else:
168
+ attn = self.softmax(attn)
169
+
170
+ attn = self.attn_drop(attn)
171
+
172
+ x = (attn @ v).transpose(1, 2).reshape(b_, n, c)
173
+ x = self.proj(x)
174
+ x = self.proj_drop(x)
175
+ return x
176
+
177
+ def extra_repr(self) -> str:
178
+ return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
179
+
180
+ def flops(self, n):
181
+ # calculate flops for 1 window with token length of n
182
+ flops = 0
183
+ # qkv = self.qkv(x)
184
+ flops += n * self.dim * 3 * self.dim
185
+ # attn = (q @ k.transpose(-2, -1))
186
+ flops += self.num_heads * n * (self.dim // self.num_heads) * n
187
+ # x = (attn @ v)
188
+ flops += self.num_heads * n * n * (self.dim // self.num_heads)
189
+ # x = self.proj(x)
190
+ flops += n * self.dim * self.dim
191
+ return flops
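A shape-only sketch of the window attention module (random weights; the values are hypothetical, and the token count must equal window_size[0] * window_size[1]):
import torch

attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=6)
x = torch.randn(8, 49, 96)           # (num_windows*b, n, c) with n = 7*7 tokens per window
out = attn(x)                        # mask=None: plain W-MSA
print(out.shape)                     # torch.Size([8, 49, 96])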
192
+
193
+
194
+ class SwinTransformerBlock(nn.Module):
195
+ r""" Swin Transformer Block.
196
+
197
+ Args:
198
+ dim (int): Number of input channels.
199
+ input_resolution (tuple[int]): Input resolution.
200
+ num_heads (int): Number of attention heads.
201
+ window_size (int): Window size.
202
+ shift_size (int): Shift size for SW-MSA.
203
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
204
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
205
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
206
+ drop (float, optional): Dropout rate. Default: 0.0
207
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
208
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
209
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
210
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
211
+ """
212
+
213
+ def __init__(self,
214
+ dim,
215
+ input_resolution,
216
+ num_heads,
217
+ window_size=7,
218
+ shift_size=0,
219
+ mlp_ratio=4.,
220
+ qkv_bias=True,
221
+ qk_scale=None,
222
+ drop=0.,
223
+ attn_drop=0.,
224
+ drop_path=0.,
225
+ act_layer=nn.GELU,
226
+ norm_layer=nn.LayerNorm):
227
+ super().__init__()
228
+ self.dim = dim
229
+ self.input_resolution = input_resolution
230
+ self.num_heads = num_heads
231
+ self.window_size = window_size
232
+ self.shift_size = shift_size
233
+ self.mlp_ratio = mlp_ratio
234
+ if min(self.input_resolution) <= self.window_size:
235
+ # if window size is larger than input resolution, we don't partition windows
236
+ self.shift_size = 0
237
+ self.window_size = min(self.input_resolution)
238
+ assert 0 <= self.shift_size < self.window_size, 'shift_size must be in the range [0, window_size)'
239
+
240
+ self.norm1 = norm_layer(dim)
241
+ self.attn = WindowAttention(
242
+ dim,
243
+ window_size=to_2tuple(self.window_size),
244
+ num_heads=num_heads,
245
+ qkv_bias=qkv_bias,
246
+ qk_scale=qk_scale,
247
+ attn_drop=attn_drop,
248
+ proj_drop=drop)
249
+
250
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
251
+ self.norm2 = norm_layer(dim)
252
+ mlp_hidden_dim = int(dim * mlp_ratio)
253
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
254
+
255
+ if self.shift_size > 0:
256
+ attn_mask = self.calculate_mask(self.input_resolution)
257
+ else:
258
+ attn_mask = None
259
+
260
+ self.register_buffer('attn_mask', attn_mask)
261
+
262
+ def calculate_mask(self, x_size):
263
+ # calculate attention mask for SW-MSA
264
+ h, w = x_size
265
+ img_mask = torch.zeros((1, h, w, 1)) # 1 h w 1
266
+ h_slices = (slice(0, -self.window_size), slice(-self.window_size,
267
+ -self.shift_size), slice(-self.shift_size, None))
268
+ w_slices = (slice(0, -self.window_size), slice(-self.window_size,
269
+ -self.shift_size), slice(-self.shift_size, None))
270
+ cnt = 0
271
+ for h in h_slices:
272
+ for w in w_slices:
273
+ img_mask[:, h, w, :] = cnt
274
+ cnt += 1
275
+
276
+ mask_windows = window_partition(img_mask, self.window_size) # nw, window_size, window_size, 1
277
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
278
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
279
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
280
+
281
+ return attn_mask
282
+
283
+ def forward(self, x, x_size):
284
+ h, w = x_size
285
+ b, _, c = x.shape
286
+ # assert seq_len == h * w, "input feature has wrong size"
287
+
288
+ shortcut = x
289
+ x = self.norm1(x)
290
+ x = x.view(b, h, w, c)
291
+
292
+ # cyclic shift
293
+ if self.shift_size > 0:
294
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
295
+ else:
296
+ shifted_x = x
297
+
298
+ # partition windows
299
+ x_windows = window_partition(shifted_x, self.window_size) # nw*b, window_size, window_size, c
300
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, c) # nw*b, window_size*window_size, c
301
+
302
+ # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are multiples of the window size)
303
+ if self.input_resolution == x_size:
304
+ attn_windows = self.attn(x_windows, mask=self.attn_mask) # nw*b, window_size*window_size, c
305
+ else:
306
+ attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
307
+
308
+ # merge windows
309
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c)
310
+ shifted_x = window_reverse(attn_windows, self.window_size, h, w) # b h' w' c
311
+
312
+ # reverse cyclic shift
313
+ if self.shift_size > 0:
314
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
315
+ else:
316
+ x = shifted_x
317
+ x = x.view(b, h * w, c)
318
+
319
+ # FFN
320
+ x = shortcut + self.drop_path(x)
321
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
322
+
323
+ return x
324
+
325
+ def extra_repr(self) -> str:
326
+ return (f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, '
327
+ f'window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}')
328
+
329
+ def flops(self):
330
+ flops = 0
331
+ h, w = self.input_resolution
332
+ # norm1
333
+ flops += self.dim * h * w
334
+ # W-MSA/SW-MSA
335
+ nw = h * w / self.window_size / self.window_size
336
+ flops += nw * self.attn.flops(self.window_size * self.window_size)
337
+ # mlp
338
+ flops += 2 * h * w * self.dim * self.dim * self.mlp_ratio
339
+ # norm2
340
+ flops += self.dim * h * w
341
+ return flops
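A minimal sketch of a single shifted Swin block on a 56x56 token grid (hypothetical sizes, random weights):
import torch

blk = SwinTransformerBlock(dim=96, input_resolution=(56, 56), num_heads=3, window_size=7, shift_size=3)
x = torch.randn(1, 56 * 56, 96)      # (b, h*w, c)
out = blk(x, x_size=(56, 56))
print(out.shape)                     # torch.Size([1, 3136, 96])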
342
+
343
+
344
+ class PatchMerging(nn.Module):
345
+ r""" Patch Merging Layer.
346
+
347
+ Args:
348
+ input_resolution (tuple[int]): Resolution of input feature.
349
+ dim (int): Number of input channels.
350
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
351
+ """
352
+
353
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
354
+ super().__init__()
355
+ self.input_resolution = input_resolution
356
+ self.dim = dim
357
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
358
+ self.norm = norm_layer(4 * dim)
359
+
360
+ def forward(self, x):
361
+ """
362
+ x: b, h*w, c
363
+ """
364
+ h, w = self.input_resolution
365
+ b, seq_len, c = x.shape
366
+ assert seq_len == h * w, 'input feature has wrong size'
367
+ assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) are not even.'
368
+
369
+ x = x.view(b, h, w, c)
370
+
371
+ x0 = x[:, 0::2, 0::2, :] # b h/2 w/2 c
372
+ x1 = x[:, 1::2, 0::2, :] # b h/2 w/2 c
373
+ x2 = x[:, 0::2, 1::2, :] # b h/2 w/2 c
374
+ x3 = x[:, 1::2, 1::2, :] # b h/2 w/2 c
375
+ x = torch.cat([x0, x1, x2, x3], -1) # b h/2 w/2 4*c
376
+ x = x.view(b, -1, 4 * c) # b h/2*w/2 4*c
377
+
378
+ x = self.norm(x)
379
+ x = self.reduction(x)
380
+
381
+ return x
382
+
383
+ def extra_repr(self) -> str:
384
+ return f'input_resolution={self.input_resolution}, dim={self.dim}'
385
+
386
+ def flops(self):
387
+ h, w = self.input_resolution
388
+ flops = h * w * self.dim
389
+ flops += (h // 2) * (w // 2) * 4 * self.dim * 2 * self.dim
390
+ return flops
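A shape sketch of patch merging: each 2x2 neighbourhood of tokens is concatenated and linearly projected, so the token count drops by 4x while the channel count doubles (the values are illustrative):
import torch

pm = PatchMerging(input_resolution=(8, 8), dim=32)
x = torch.randn(2, 64, 32)           # (b, h*w, c)
print(pm(x).shape)                   # torch.Size([2, 16, 64])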
391
+
392
+
393
+ class BasicLayer(nn.Module):
394
+ """ A basic Swin Transformer layer for one stage.
395
+
396
+ Args:
397
+ dim (int): Number of input channels.
398
+ input_resolution (tuple[int]): Input resolution.
399
+ depth (int): Number of blocks.
400
+ num_heads (int): Number of attention heads.
401
+ window_size (int): Local window size.
402
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
403
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
404
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
405
+ drop (float, optional): Dropout rate. Default: 0.0
406
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
407
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
408
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
409
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
410
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
411
+ """
412
+
413
+ def __init__(self,
414
+ dim,
415
+ input_resolution,
416
+ depth,
417
+ num_heads,
418
+ window_size,
419
+ mlp_ratio=4.,
420
+ qkv_bias=True,
421
+ qk_scale=None,
422
+ drop=0.,
423
+ attn_drop=0.,
424
+ drop_path=0.,
425
+ norm_layer=nn.LayerNorm,
426
+ downsample=None,
427
+ use_checkpoint=False):
428
+
429
+ super().__init__()
430
+ self.dim = dim
431
+ self.input_resolution = input_resolution
432
+ self.depth = depth
433
+ self.use_checkpoint = use_checkpoint
434
+
435
+ # build blocks
436
+ self.blocks = nn.ModuleList([
437
+ SwinTransformerBlock(
438
+ dim=dim,
439
+ input_resolution=input_resolution,
440
+ num_heads=num_heads,
441
+ window_size=window_size,
442
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
443
+ mlp_ratio=mlp_ratio,
444
+ qkv_bias=qkv_bias,
445
+ qk_scale=qk_scale,
446
+ drop=drop,
447
+ attn_drop=attn_drop,
448
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
449
+ norm_layer=norm_layer) for i in range(depth)
450
+ ])
451
+
452
+ # patch merging layer
453
+ if downsample is not None:
454
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
455
+ else:
456
+ self.downsample = None
457
+
458
+ def forward(self, x, x_size):
459
+ for blk in self.blocks:
460
+ if self.use_checkpoint:
461
+ x = checkpoint.checkpoint(blk, x)
462
+ else:
463
+ x = blk(x, x_size)
464
+ if self.downsample is not None:
465
+ x = self.downsample(x)
466
+ return x
467
+
468
+ def extra_repr(self) -> str:
469
+ return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
470
+
471
+ def flops(self):
472
+ flops = 0
473
+ for blk in self.blocks:
474
+ flops += blk.flops()
475
+ if self.downsample is not None:
476
+ flops += self.downsample.flops()
477
+ return flops
478
+
479
+
480
+ class RSTB(nn.Module):
481
+ """Residual Swin Transformer Block (RSTB).
482
+
483
+ Args:
484
+ dim (int): Number of input channels.
485
+ input_resolution (tuple[int]): Input resolution.
486
+ depth (int): Number of blocks.
487
+ num_heads (int): Number of attention heads.
488
+ window_size (int): Local window size.
489
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
490
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
491
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
492
+ drop (float, optional): Dropout rate. Default: 0.0
493
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
494
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
495
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
496
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
497
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
498
+ img_size: Input image size.
499
+ patch_size: Patch size.
500
+ resi_connection: The convolutional block before residual connection.
501
+ """
502
+
503
+ def __init__(self,
504
+ dim,
505
+ input_resolution,
506
+ depth,
507
+ num_heads,
508
+ window_size,
509
+ mlp_ratio=4.,
510
+ qkv_bias=True,
511
+ qk_scale=None,
512
+ drop=0.,
513
+ attn_drop=0.,
514
+ drop_path=0.,
515
+ norm_layer=nn.LayerNorm,
516
+ downsample=None,
517
+ use_checkpoint=False,
518
+ img_size=224,
519
+ patch_size=4,
520
+ resi_connection='1conv'):
521
+ super(RSTB, self).__init__()
522
+
523
+ self.dim = dim
524
+ self.input_resolution = input_resolution
525
+
526
+ self.residual_group = BasicLayer(
527
+ dim=dim,
528
+ input_resolution=input_resolution,
529
+ depth=depth,
530
+ num_heads=num_heads,
531
+ window_size=window_size,
532
+ mlp_ratio=mlp_ratio,
533
+ qkv_bias=qkv_bias,
534
+ qk_scale=qk_scale,
535
+ drop=drop,
536
+ attn_drop=attn_drop,
537
+ drop_path=drop_path,
538
+ norm_layer=norm_layer,
539
+ downsample=downsample,
540
+ use_checkpoint=use_checkpoint)
541
+
542
+ if resi_connection == '1conv':
543
+ self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
544
+ elif resi_connection == '3conv':
545
+ # to save parameters and memory
546
+ self.conv = nn.Sequential(
547
+ nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
548
+ nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
549
+ nn.Conv2d(dim // 4, dim, 3, 1, 1))
550
+
551
+ self.patch_embed = PatchEmbed(
552
+ img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
553
+
554
+ self.patch_unembed = PatchUnEmbed(
555
+ img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
556
+
557
+ def forward(self, x, x_size):
558
+ return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
559
+
560
+ def flops(self):
561
+ flops = 0
562
+ flops += self.residual_group.flops()
563
+ h, w = self.input_resolution
564
+ flops += h * w * self.dim * self.dim * 9
565
+ flops += self.patch_embed.flops()
566
+ flops += self.patch_unembed.flops()
567
+
568
+ return flops
569
+
570
+
571
+ class PatchEmbed(nn.Module):
572
+ r""" Image to Patch Embedding
573
+
574
+ Args:
575
+ img_size (int): Image size. Default: 224.
576
+ patch_size (int): Patch token size. Default: 4.
577
+ in_chans (int): Number of input image channels. Default: 3.
578
+ embed_dim (int): Number of linear projection output channels. Default: 96.
579
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
580
+ """
581
+
582
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
583
+ super().__init__()
584
+ img_size = to_2tuple(img_size)
585
+ patch_size = to_2tuple(patch_size)
586
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
587
+ self.img_size = img_size
588
+ self.patch_size = patch_size
589
+ self.patches_resolution = patches_resolution
590
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
591
+
592
+ self.in_chans = in_chans
593
+ self.embed_dim = embed_dim
594
+
595
+ if norm_layer is not None:
596
+ self.norm = norm_layer(embed_dim)
597
+ else:
598
+ self.norm = None
599
+
600
+ def forward(self, x):
601
+ x = x.flatten(2).transpose(1, 2) # b Ph*Pw c
602
+ if self.norm is not None:
603
+ x = self.norm(x)
604
+ return x
605
+
606
+ def flops(self):
607
+ flops = 0
608
+ h, w = self.img_size
609
+ if self.norm is not None:
610
+ flops += h * w * self.embed_dim
611
+ return flops
612
+
613
+
614
+ class PatchUnEmbed(nn.Module):
615
+ r""" Image to Patch Unembedding
616
+
617
+ Args:
618
+ img_size (int): Image size. Default: 224.
619
+ patch_size (int): Patch token size. Default: 4.
620
+ in_chans (int): Number of input image channels. Default: 3.
621
+ embed_dim (int): Number of linear projection output channels. Default: 96.
622
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
623
+ """
624
+
625
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
626
+ super().__init__()
627
+ img_size = to_2tuple(img_size)
628
+ patch_size = to_2tuple(patch_size)
629
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
630
+ self.img_size = img_size
631
+ self.patch_size = patch_size
632
+ self.patches_resolution = patches_resolution
633
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
634
+
635
+ self.in_chans = in_chans
636
+ self.embed_dim = embed_dim
637
+
638
+ def forward(self, x, x_size):
639
+ x = x.transpose(1, 2).view(x.shape[0], self.embed_dim, x_size[0], x_size[1]) # b Ph*Pw c
640
+ return x
641
+
642
+ def flops(self):
643
+ flops = 0
644
+ return flops
645
+
646
+
647
+ class Upsample(nn.Sequential):
648
+ """Upsample module.
649
+
650
+ Args:
651
+ scale (int): Scale factor. Supported scales: 2^n and 3.
652
+ num_feat (int): Channel number of intermediate features.
653
+ """
654
+
655
+ def __init__(self, scale, num_feat):
656
+ m = []
657
+ if (scale & (scale - 1)) == 0: # scale = 2^n
658
+ for _ in range(int(math.log(scale, 2))):
659
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
660
+ m.append(nn.PixelShuffle(2))
661
+ elif scale == 3:
662
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
663
+ m.append(nn.PixelShuffle(3))
664
+ else:
665
+ raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
666
+ super(Upsample, self).__init__(*m)
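A sketch of the pixel-shuffle upsampler for a power-of-two scale (hypothetical feature size):
import torch

up = Upsample(scale=4, num_feat=64)  # two (conv, PixelShuffle(2)) stages
x = torch.randn(1, 64, 16, 16)
print(up(x).shape)                   # torch.Size([1, 64, 64, 64])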
667
+
668
+
669
+ class UpsampleOneStep(nn.Sequential):
670
+ """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
671
+ Used in lightweight SR to save parameters.
672
+
673
+ Args:
674
+ scale (int): Scale factor. Supported scales: 2^n and 3.
675
+ num_feat (int): Channel number of intermediate features.
676
+
677
+ """
678
+
679
+ def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
680
+ self.num_feat = num_feat
681
+ self.input_resolution = input_resolution
682
+ m = []
683
+ m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
684
+ m.append(nn.PixelShuffle(scale))
685
+ super(UpsampleOneStep, self).__init__(*m)
686
+
687
+ def flops(self):
688
+ h, w = self.input_resolution
689
+ flops = h * w * self.num_feat * 3 * 9
690
+ return flops
691
+
692
+
693
+ @ARCH_REGISTRY.register()
694
+ class SwinIR(nn.Module):
695
+ r""" SwinIR
696
+ A PyTorch implementation of `SwinIR: Image Restoration Using Swin Transformer`, based on the Swin Transformer.
697
+
698
+ Args:
699
+ img_size (int | tuple(int)): Input image size. Default 64
700
+ patch_size (int | tuple(int)): Patch size. Default: 1
701
+ in_chans (int): Number of input image channels. Default: 3
702
+ embed_dim (int): Patch embedding dimension. Default: 96
703
+ depths (tuple(int)): Depth of each Swin Transformer layer.
704
+ num_heads (tuple(int)): Number of attention heads in different layers.
705
+ window_size (int): Window size. Default: 7
706
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
707
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
708
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
709
+ drop_rate (float): Dropout rate. Default: 0
710
+ attn_drop_rate (float): Attention dropout rate. Default: 0
711
+ drop_path_rate (float): Stochastic depth rate. Default: 0.1
712
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
713
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
714
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True
715
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
716
+ upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
717
+ img_range: Image range. 1. or 255.
718
+ upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
719
+ resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
720
+ """
721
+
722
+ def __init__(self,
723
+ img_size=64,
724
+ patch_size=1,
725
+ in_chans=3,
726
+ embed_dim=96,
727
+ depths=(6, 6, 6, 6),
728
+ num_heads=(6, 6, 6, 6),
729
+ window_size=7,
730
+ mlp_ratio=4.,
731
+ qkv_bias=True,
732
+ qk_scale=None,
733
+ drop_rate=0.,
734
+ attn_drop_rate=0.,
735
+ drop_path_rate=0.1,
736
+ norm_layer=nn.LayerNorm,
737
+ ape=False,
738
+ patch_norm=True,
739
+ use_checkpoint=False,
740
+ upscale=2,
741
+ img_range=1.,
742
+ upsampler='',
743
+ resi_connection='1conv',
744
+ **kwargs):
745
+ super(SwinIR, self).__init__()
746
+ num_in_ch = in_chans
747
+ num_out_ch = in_chans
748
+ num_feat = 64
749
+ self.img_range = img_range
750
+ if in_chans == 3:
751
+ rgb_mean = (0.4488, 0.4371, 0.4040)
752
+ self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
753
+ else:
754
+ self.mean = torch.zeros(1, 1, 1, 1)
755
+ self.upscale = upscale
756
+ self.upsampler = upsampler
757
+
758
+ # ------------------------- 1, shallow feature extraction ------------------------- #
759
+ self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
760
+
761
+ # ------------------------- 2, deep feature extraction ------------------------- #
762
+ self.num_layers = len(depths)
763
+ self.embed_dim = embed_dim
764
+ self.ape = ape
765
+ self.patch_norm = patch_norm
766
+ self.num_features = embed_dim
767
+ self.mlp_ratio = mlp_ratio
768
+
769
+ # split image into non-overlapping patches
770
+ self.patch_embed = PatchEmbed(
771
+ img_size=img_size,
772
+ patch_size=patch_size,
773
+ in_chans=embed_dim,
774
+ embed_dim=embed_dim,
775
+ norm_layer=norm_layer if self.patch_norm else None)
776
+ num_patches = self.patch_embed.num_patches
777
+ patches_resolution = self.patch_embed.patches_resolution
778
+ self.patches_resolution = patches_resolution
779
+
780
+ # merge non-overlapping patches into image
781
+ self.patch_unembed = PatchUnEmbed(
782
+ img_size=img_size,
783
+ patch_size=patch_size,
784
+ in_chans=embed_dim,
785
+ embed_dim=embed_dim,
786
+ norm_layer=norm_layer if self.patch_norm else None)
787
+
788
+ # absolute position embedding
789
+ if self.ape:
790
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
791
+ trunc_normal_(self.absolute_pos_embed, std=.02)
792
+
793
+ self.pos_drop = nn.Dropout(p=drop_rate)
794
+
795
+ # stochastic depth
796
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
797
+
798
+ # build Residual Swin Transformer blocks (RSTB)
799
+ self.layers = nn.ModuleList()
800
+ for i_layer in range(self.num_layers):
801
+ layer = RSTB(
802
+ dim=embed_dim,
803
+ input_resolution=(patches_resolution[0], patches_resolution[1]),
804
+ depth=depths[i_layer],
805
+ num_heads=num_heads[i_layer],
806
+ window_size=window_size,
807
+ mlp_ratio=self.mlp_ratio,
808
+ qkv_bias=qkv_bias,
809
+ qk_scale=qk_scale,
810
+ drop=drop_rate,
811
+ attn_drop=attn_drop_rate,
812
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
813
+ norm_layer=norm_layer,
814
+ downsample=None,
815
+ use_checkpoint=use_checkpoint,
816
+ img_size=img_size,
817
+ patch_size=patch_size,
818
+ resi_connection=resi_connection)
819
+ self.layers.append(layer)
820
+ self.norm = norm_layer(self.num_features)
821
+
822
+ # build the last conv layer in deep feature extraction
823
+ if resi_connection == '1conv':
824
+ self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
825
+ elif resi_connection == '3conv':
826
+ # to save parameters and memory
827
+ self.conv_after_body = nn.Sequential(
828
+ nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
829
+ nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
830
+ nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
831
+
832
+ # ------------------------- 3, high quality image reconstruction ------------------------- #
833
+ if self.upsampler == 'pixelshuffle':
834
+ # for classical SR
835
+ self.conv_before_upsample = nn.Sequential(
836
+ nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
837
+ self.upsample = Upsample(upscale, num_feat)
838
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
839
+ elif self.upsampler == 'pixelshuffledirect':
840
+ # for lightweight SR (to save parameters)
841
+ self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
842
+ (patches_resolution[0], patches_resolution[1]))
843
+ elif self.upsampler == 'nearest+conv':
844
+ # for real-world SR (less artifacts)
845
+ assert self.upscale == 4, 'only support x4 now.'
846
+ self.conv_before_upsample = nn.Sequential(
847
+ nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
848
+ self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
849
+ self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
850
+ self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
851
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
852
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
853
+ else:
854
+ # for image denoising and JPEG compression artifact reduction
855
+ self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
856
+
857
+ self.apply(self._init_weights)
858
+
859
+ def _init_weights(self, m):
860
+ if isinstance(m, nn.Linear):
861
+ trunc_normal_(m.weight, std=.02)
862
+ if isinstance(m, nn.Linear) and m.bias is not None:
863
+ nn.init.constant_(m.bias, 0)
864
+ elif isinstance(m, nn.LayerNorm):
865
+ nn.init.constant_(m.bias, 0)
866
+ nn.init.constant_(m.weight, 1.0)
867
+
868
+ @torch.jit.ignore
869
+ def no_weight_decay(self):
870
+ return {'absolute_pos_embed'}
871
+
872
+ @torch.jit.ignore
873
+ def no_weight_decay_keywords(self):
874
+ return {'relative_position_bias_table'}
875
+
876
+ def forward_features(self, x):
877
+ x_size = (x.shape[2], x.shape[3])
878
+ x = self.patch_embed(x)
879
+ if self.ape:
880
+ x = x + self.absolute_pos_embed
881
+ x = self.pos_drop(x)
882
+
883
+ for layer in self.layers:
884
+ x = layer(x, x_size)
885
+
886
+ x = self.norm(x) # b seq_len c
887
+ x = self.patch_unembed(x, x_size)
888
+
889
+ return x
890
+
891
+ def forward(self, x):
892
+ self.mean = self.mean.type_as(x)
893
+ x = (x - self.mean) * self.img_range
894
+
895
+ if self.upsampler == 'pixelshuffle':
896
+ # for classical SR
897
+ x = self.conv_first(x)
898
+ x = self.conv_after_body(self.forward_features(x)) + x
899
+ x = self.conv_before_upsample(x)
900
+ x = self.conv_last(self.upsample(x))
901
+ elif self.upsampler == 'pixelshuffledirect':
902
+ # for lightweight SR
903
+ x = self.conv_first(x)
904
+ x = self.conv_after_body(self.forward_features(x)) + x
905
+ x = self.upsample(x)
906
+ elif self.upsampler == 'nearest+conv':
907
+ # for real-world SR
908
+ x = self.conv_first(x)
909
+ x = self.conv_after_body(self.forward_features(x)) + x
910
+ x = self.conv_before_upsample(x)
911
+ x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
912
+ x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
913
+ x = self.conv_last(self.lrelu(self.conv_hr(x)))
914
+ else:
915
+ # for image denoising and JPEG compression artifact reduction
916
+ x_first = self.conv_first(x)
917
+ res = self.conv_after_body(self.forward_features(x_first)) + x_first
918
+ x = x + self.conv_last(res)
919
+
920
+ x = x / self.img_range + self.mean
921
+
922
+ return x
923
+
924
+ def flops(self):
925
+ flops = 0
926
+ h, w = self.patches_resolution
927
+ flops += h * w * 3 * self.embed_dim * 9
928
+ flops += self.patch_embed.flops()
929
+ for layer in self.layers:
930
+ flops += layer.flops()
931
+ flops += h * w * 3 * self.embed_dim * self.embed_dim
932
+ flops += self.upsample.flops()
933
+ return flops
934
+
935
+
936
+ if __name__ == '__main__':
937
+ upscale = 4
938
+ window_size = 8
939
+ height = (1024 // upscale // window_size + 1) * window_size
940
+ width = (720 // upscale // window_size + 1) * window_size
941
+ model = SwinIR(
942
+ upscale=2,
943
+ img_size=(height, width),
944
+ window_size=window_size,
945
+ img_range=1.,
946
+ depths=[6, 6, 6, 6],
947
+ embed_dim=60,
948
+ num_heads=[6, 6, 6, 6],
949
+ mlp_ratio=2,
950
+ upsampler='pixelshuffledirect')
951
+ print(model)
952
+ print(height, width, model.flops() / 1e9)
953
+
954
+ x = torch.randn((1, 3, height, width))
955
+ x = model(x)
956
+ print(x.shape)
basicsr/archs/tof_arch.py ADDED
@@ -0,0 +1,172 @@
1
+ import torch
2
+ from torch import nn as nn
3
+ from torch.nn import functional as F
4
+
5
+ from basicsr.utils.registry import ARCH_REGISTRY
6
+ from .arch_util import flow_warp
7
+
8
+
9
+ class BasicModule(nn.Module):
10
+ """Basic module of SPyNet.
11
+
12
+ Note that unlike the architecture in spynet_arch.py, the basic module
13
+ here contains batch normalization.
14
+ """
15
+
16
+ def __init__(self):
17
+ super(BasicModule, self).__init__()
18
+ self.basic_module = nn.Sequential(
19
+ nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3, bias=False),
20
+ nn.BatchNorm2d(32), nn.ReLU(inplace=True),
21
+ nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3, bias=False),
22
+ nn.BatchNorm2d(64), nn.ReLU(inplace=True),
23
+ nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3, bias=False),
24
+ nn.BatchNorm2d(32), nn.ReLU(inplace=True),
25
+ nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3, bias=False),
26
+ nn.BatchNorm2d(16), nn.ReLU(inplace=True),
27
+ nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3))
28
+
29
+ def forward(self, tensor_input):
30
+ """
31
+ Args:
32
+ tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
33
+ 8 channels contain:
34
+ [reference image (3), neighbor image (3), initial flow (2)].
35
+
36
+ Returns:
37
+ Tensor: Estimated flow with shape (b, 2, h, w)
38
+ """
39
+ return self.basic_module(tensor_input)
40
+
41
+
42
+ class SPyNetTOF(nn.Module):
43
+ """SPyNet architecture for TOF.
44
+
45
+ Note that this implementation is specifically for TOFlow. Please use :file:`spynet_arch.py` for general use.
46
+ They differ in the following aspects:
47
+
48
+ 1. The basic modules here contain BatchNorm.
49
+ 2. Normalization and denormalization are not done here, as they are done in TOFlow.
50
+
51
+ ``Paper: Optical Flow Estimation using a Spatial Pyramid Network``
52
+
53
+ Reference: https://github.com/Coldog2333/pytoflow
54
+
55
+ Args:
56
+ load_path (str): Path for pretrained SPyNet. Default: None.
57
+ """
58
+
59
+ def __init__(self, load_path=None):
60
+ super(SPyNetTOF, self).__init__()
61
+
62
+ self.basic_module = nn.ModuleList([BasicModule() for _ in range(4)])
63
+ if load_path:
64
+ self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])
65
+
66
+ def forward(self, ref, supp):
67
+ """
68
+ Args:
69
+ ref (Tensor): Reference image with shape of (b, 3, h, w).
70
+ supp: The supporting image to be warped: (b, 3, h, w).
71
+
72
+ Returns:
73
+ Tensor: Estimated optical flow: (b, 2, h, w).
74
+ """
75
+ num_batches, _, h, w = ref.size()
76
+ ref = [ref]
77
+ supp = [supp]
78
+
79
+ # generate downsampled frames
80
+ for _ in range(3):
81
+ ref.insert(0, F.avg_pool2d(input=ref[0], kernel_size=2, stride=2, count_include_pad=False))
82
+ supp.insert(0, F.avg_pool2d(input=supp[0], kernel_size=2, stride=2, count_include_pad=False))
83
+
84
+ # flow computation
85
+ flow = ref[0].new_zeros(num_batches, 2, h // 16, w // 16)
86
+ for i in range(4):
87
+ flow_up = F.interpolate(input=flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
88
+ flow = flow_up + self.basic_module[i](
89
+ torch.cat([ref[i], flow_warp(supp[i], flow_up.permute(0, 2, 3, 1)), flow_up], 1))
90
+ return flow
91
+
92
+
93
+ @ARCH_REGISTRY.register()
94
+ class TOFlow(nn.Module):
95
+ """PyTorch implementation of TOFlow.
96
+
97
+ In TOFlow, the LR frames are pre-upsampled and have the same size with the GT frames.
98
+
99
+ ``Paper: Video Enhancement with Task-Oriented Flow``
100
+
101
+ Reference: https://github.com/anchen1011/toflow
102
+
103
+ Reference: https://github.com/Coldog2333/pytoflow
104
+
105
+ Args:
106
+ adapt_official_weights (bool): Whether to adapt the weights translated
107
+ from the official implementation. Set to false if you want to
108
+ train from scratch. Default: False
109
+ """
110
+
111
+ def __init__(self, adapt_official_weights=False):
112
+ super(TOFlow, self).__init__()
113
+ self.adapt_official_weights = adapt_official_weights
114
+ self.ref_idx = 0 if adapt_official_weights else 3
115
+
116
+ self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
117
+ self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
118
+
119
+ # flow estimation module
120
+ self.spynet = SPyNetTOF()
121
+
122
+ # reconstruction module
123
+ self.conv_1 = nn.Conv2d(3 * 7, 64, 9, 1, 4)
124
+ self.conv_2 = nn.Conv2d(64, 64, 9, 1, 4)
125
+ self.conv_3 = nn.Conv2d(64, 64, 1)
126
+ self.conv_4 = nn.Conv2d(64, 3, 1)
127
+
128
+ # activation function
129
+ self.relu = nn.ReLU(inplace=True)
130
+
131
+ def normalize(self, img):
132
+ return (img - self.mean) / self.std
133
+
134
+ def denormalize(self, img):
135
+ return img * self.std + self.mean
136
+
137
+ def forward(self, lrs):
138
+ """
139
+ Args:
140
+ lrs (Tensor): Input LR frames with shape (b, 7, 3, h, w).
141
+
142
+ Returns:
143
+ Tensor: SR frame: (b, 3, h, w).
144
+ """
145
+ # In the official implementation, the 0-th frame is the reference frame
146
+ if self.adapt_official_weights:
147
+ lrs = lrs[:, [3, 0, 1, 2, 4, 5, 6], :, :, :]
148
+
149
+ num_batches, num_lrs, _, h, w = lrs.size()
150
+
151
+ lrs = self.normalize(lrs.view(-1, 3, h, w))
152
+ lrs = lrs.view(num_batches, num_lrs, 3, h, w)
153
+
154
+ lr_ref = lrs[:, self.ref_idx, :, :, :]
155
+ lr_aligned = []
156
+ for i in range(7): # 7 frames
157
+ if i == self.ref_idx:
158
+ lr_aligned.append(lr_ref)
159
+ else:
160
+ lr_supp = lrs[:, i, :, :, :]
161
+ flow = self.spynet(lr_ref, lr_supp)
162
+ lr_aligned.append(flow_warp(lr_supp, flow.permute(0, 2, 3, 1)))
163
+
164
+ # reconstruction
165
+ hr = torch.stack(lr_aligned, dim=1)
166
+ hr = hr.view(num_batches, -1, h, w)
167
+ hr = self.relu(self.conv_1(hr))
168
+ hr = self.relu(self.conv_2(hr))
169
+ hr = self.relu(self.conv_3(hr))
170
+ hr = self.conv_4(hr) + lr_ref
171
+
172
+ return self.denormalize(hr)
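
A minimal usage sketch for the TOFlow architecture above (illustrative only: it assumes the basicsr package from this upload is importable, and feeds a small random clip instead of real pre-upsampled LR frames):

    import torch
    from basicsr.archs.tof_arch import TOFlow

    # a batch of one 7-frame clip, already upsampled to GT resolution (toy 64x64 size)
    lrs = torch.rand(1, 7, 3, 64, 64)

    model = TOFlow(adapt_official_weights=False).eval()
    with torch.no_grad():
        sr = model(lrs)          # SR estimate of the reference frame
    print(sr.shape)              # torch.Size([1, 3, 64, 64])
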
basicsr/archs/vgg_arch.py ADDED
@@ -0,0 +1,161 @@
1
+ import os
2
+ import torch
3
+ from collections import OrderedDict
4
+ from torch import nn as nn
5
+ from torchvision.models import vgg as vgg
6
+
7
+ from basicsr.utils.registry import ARCH_REGISTRY
8
+
9
+ VGG_PRETRAIN_PATH = 'experiments/pretrained_models/vgg19-dcbb9e9d.pth'
10
+ NAMES = {
11
+ 'vgg11': [
12
+ 'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2',
13
+ 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2',
14
+ 'pool5'
15
+ ],
16
+ 'vgg13': [
17
+ 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
18
+ 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4',
19
+ 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5'
20
+ ],
21
+ 'vgg16': [
22
+ 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
23
+ 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2',
24
+ 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
25
+ 'pool5'
26
+ ],
27
+ 'vgg19': [
28
+ 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
29
+ 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
30
+ 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
31
+ 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
32
+ ]
33
+ }
34
+
35
+
36
+ def insert_bn(names):
37
+ """Insert bn layer after each conv.
38
+
39
+ Args:
40
+ names (list): The list of layer names.
41
+
42
+ Returns:
43
+ list: The list of layer names with bn layers.
44
+ """
45
+ names_bn = []
46
+ for name in names:
47
+ names_bn.append(name)
48
+ if 'conv' in name:
49
+ position = name.replace('conv', '')
50
+ names_bn.append('bn' + position)
51
+ return names_bn
52
+
53
+
54
+ @ARCH_REGISTRY.register()
55
+ class VGGFeatureExtractor(nn.Module):
56
+ """VGG network for feature extraction.
57
+
58
+ In this implementation, we allow users to choose whether to use normalization
59
+ on the input feature and the type of VGG network. Note that the pretrained
60
+ path must match the vgg type.
61
+
62
+ Args:
63
+ layer_name_list (list[str]): Forward function returns the corresponding
64
+ features according to the layer_name_list.
65
+ Example: {'relu1_1', 'relu2_1', 'relu3_1'}.
66
+ vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
67
+ use_input_norm (bool): If True, normalize the input image. Importantly,
68
+ the input feature must be in the range [0, 1]. Default: True.
69
+ range_norm (bool): If True, normalize images from range [-1, 1] to [0, 1].
70
+ Default: False.
71
+ requires_grad (bool): If true, the parameters of VGG network will be
72
+ optimized. Default: False.
73
+ remove_pooling (bool): If true, the max pooling operations in VGG net
74
+ will be removed. Default: False.
75
+ pooling_stride (int): The stride of max pooling operation. Default: 2.
76
+ """
77
+
78
+ def __init__(self,
79
+ layer_name_list,
80
+ vgg_type='vgg19',
81
+ use_input_norm=True,
82
+ range_norm=False,
83
+ requires_grad=False,
84
+ remove_pooling=False,
85
+ pooling_stride=2):
86
+ super(VGGFeatureExtractor, self).__init__()
87
+
88
+ self.layer_name_list = layer_name_list
89
+ self.use_input_norm = use_input_norm
90
+ self.range_norm = range_norm
91
+
92
+ self.names = NAMES[vgg_type.replace('_bn', '')]
93
+ if 'bn' in vgg_type:
94
+ self.names = insert_bn(self.names)
95
+
96
+ # only borrow layers that will be used to avoid unused params
97
+ max_idx = 0
98
+ for v in layer_name_list:
99
+ idx = self.names.index(v)
100
+ if idx > max_idx:
101
+ max_idx = idx
102
+
103
+ if os.path.exists(VGG_PRETRAIN_PATH):
104
+ vgg_net = getattr(vgg, vgg_type)(pretrained=False)
105
+ state_dict = torch.load(VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage)
106
+ vgg_net.load_state_dict(state_dict)
107
+ else:
108
+ vgg_net = getattr(vgg, vgg_type)(pretrained=True)
109
+
110
+ features = vgg_net.features[:max_idx + 1]
111
+
112
+ modified_net = OrderedDict()
113
+ for k, v in zip(self.names, features):
114
+ if 'pool' in k:
115
+ # if remove_pooling is true, pooling operation will be removed
116
+ if remove_pooling:
117
+ continue
118
+ else:
119
+ # in some cases, we may want to change the default stride
120
+ modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)
121
+ else:
122
+ modified_net[k] = v
123
+
124
+ self.vgg_net = nn.Sequential(modified_net)
125
+
126
+ if not requires_grad:
127
+ self.vgg_net.eval()
128
+ for param in self.parameters():
129
+ param.requires_grad = False
130
+ else:
131
+ self.vgg_net.train()
132
+ for param in self.parameters():
133
+ param.requires_grad = True
134
+
135
+ if self.use_input_norm:
136
+ # the mean is for image with range [0, 1]
137
+ self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
138
+ # the std is for image with range [0, 1]
139
+ self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
140
+
141
+ def forward(self, x):
142
+ """Forward function.
143
+
144
+ Args:
145
+ x (Tensor): Input tensor with shape (n, c, h, w).
146
+
147
+ Returns:
148
+ Tensor: Forward results.
149
+ """
150
+ if self.range_norm:
151
+ x = (x + 1) / 2
152
+ if self.use_input_norm:
153
+ x = (x - self.mean) / self.std
154
+
155
+ output = {}
156
+ for key, layer in self.vgg_net._modules.items():
157
+ x = layer(x)
158
+ if key in self.layer_name_list:
159
+ output[key] = x.clone()
160
+
161
+ return output
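
A brief usage sketch for VGGFeatureExtractor (illustrative; it downloads the torchvision VGG19 weights unless the local pretrained path above exists):

    import torch
    from basicsr.archs.vgg_arch import VGGFeatureExtractor

    extractor = VGGFeatureExtractor(
        layer_name_list=['relu1_1', 'relu2_1', 'relu3_1'],
        vgg_type='vgg19',
        use_input_norm=True)
    x = torch.rand(1, 3, 128, 128)   # RGB image in [0, 1]
    feats = extractor(x)             # dict: layer name -> feature map
    print({k: v.shape for k, v in feats.items()})
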
basicsr/data/__init__.py ADDED
@@ -0,0 +1,101 @@
1
+ import importlib
2
+ import numpy as np
3
+ import random
4
+ import torch
5
+ import torch.utils.data
6
+ from copy import deepcopy
7
+ from functools import partial
8
+ from os import path as osp
9
+
10
+ from basicsr.data.prefetch_dataloader import PrefetchDataLoader
11
+ from basicsr.utils import get_root_logger, scandir
12
+ from basicsr.utils.dist_util import get_dist_info
13
+ from basicsr.utils.registry import DATASET_REGISTRY
14
+
15
+ __all__ = ['build_dataset', 'build_dataloader']
16
+
17
+ # automatically scan and import dataset modules for registry
18
+ # scan all the files under the data folder with '_dataset' in file names
19
+ data_folder = osp.dirname(osp.abspath(__file__))
20
+ dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
21
+ # import all the dataset modules
22
+ _dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
23
+
24
+
25
+ def build_dataset(dataset_opt):
26
+ """Build dataset from options.
27
+
28
+ Args:
29
+ dataset_opt (dict): Configuration for dataset. It must contain:
30
+ name (str): Dataset name.
31
+ type (str): Dataset type.
32
+ """
33
+ dataset_opt = deepcopy(dataset_opt)
34
+ dataset = DATASET_REGISTRY.get(dataset_opt['type'])(dataset_opt)
35
+ logger = get_root_logger()
36
+ logger.info(f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} is built.')
37
+ return dataset
38
+
39
+
40
+ def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
41
+ """Build dataloader.
42
+
43
+ Args:
44
+ dataset (torch.utils.data.Dataset): Dataset.
45
+ dataset_opt (dict): Dataset options. It contains the following keys:
46
+ phase (str): 'train' or 'val'.
47
+ num_worker_per_gpu (int): Number of workers for each GPU.
48
+ batch_size_per_gpu (int): Training batch size for each GPU.
49
+ num_gpu (int): Number of GPUs. Used only in the train phase.
50
+ Default: 1.
51
+ dist (bool): Whether in distributed training. Used only in the train
52
+ phase. Default: False.
53
+ sampler (torch.utils.data.sampler): Data sampler. Default: None.
54
+ seed (int | None): Seed. Default: None
55
+ """
56
+ phase = dataset_opt['phase']
57
+ rank, _ = get_dist_info()
58
+ if phase == 'train':
59
+ if dist: # distributed training
60
+ batch_size = dataset_opt['batch_size_per_gpu']
61
+ num_workers = dataset_opt['num_worker_per_gpu']
62
+ else: # non-distributed training
63
+ multiplier = 1 if num_gpu == 0 else num_gpu
64
+ batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
65
+ num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
66
+ dataloader_args = dict(
67
+ dataset=dataset,
68
+ batch_size=batch_size,
69
+ shuffle=False,
70
+ num_workers=num_workers,
71
+ sampler=sampler,
72
+ drop_last=True)
73
+ if sampler is None:
74
+ dataloader_args['shuffle'] = True
75
+ dataloader_args['worker_init_fn'] = partial(
76
+ worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None
77
+ elif phase in ['val', 'test']: # validation
78
+ dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
79
+ else:
80
+ raise ValueError(f"Wrong dataset phase: {phase}. Supported ones are 'train', 'val' and 'test'.")
81
+
82
+ dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
83
+ dataloader_args['persistent_workers'] = dataset_opt.get('persistent_workers', False)
84
+
85
+ prefetch_mode = dataset_opt.get('prefetch_mode')
86
+ if prefetch_mode == 'cpu': # CPUPrefetcher
87
+ num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
88
+ logger = get_root_logger()
89
+ logger.info(f'Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}')
90
+ return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
91
+ else:
92
+ # prefetch_mode=None: Normal dataloader
93
+ # prefetch_mode='cuda': dataloader for CUDAPrefetcher
94
+ return torch.utils.data.DataLoader(**dataloader_args)
95
+
96
+
97
+ def worker_init_fn(worker_id, num_workers, rank, seed):
98
+ # Set the worker seed to num_workers * rank + worker_id + seed
99
+ worker_seed = num_workers * rank + worker_id + seed
100
+ np.random.seed(worker_seed)
101
+ random.seed(worker_seed)
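
A hypothetical configuration showing how the two builders above are typically combined. The dataset type, option keys, and data paths below are placeholders (they assume DIV2K-style folders on disk); see the dataset classes under basicsr/data for the exact keys each type expects:

    from basicsr.data import build_dataset, build_dataloader

    dataset_opt = {
        'name': 'DIV2K', 'type': 'PairedImageDataset', 'phase': 'train',
        'dataroot_gt': 'datasets/DIV2K/GT', 'dataroot_lq': 'datasets/DIV2K/LQ',  # placeholder paths
        'io_backend': {'type': 'disk'}, 'filename_tmpl': '{}',
        'gt_size': 128, 'scale': 4, 'use_hflip': True, 'use_rot': True,
        'num_worker_per_gpu': 4, 'batch_size_per_gpu': 16, 'pin_memory': True,
    }
    dataset = build_dataset(dataset_opt)
    loader = build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=0)
    # each batch from the loader is a dict containing 'lq', 'gt' and the corresponding paths
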
basicsr/data/data_sampler.py ADDED
@@ -0,0 +1,48 @@
1
+ import math
2
+ import torch
3
+ from torch.utils.data.sampler import Sampler
4
+
5
+
6
+ class EnlargedSampler(Sampler):
7
+ """Sampler that restricts data loading to a subset of the dataset.
8
+
9
+ Modified from torch.utils.data.distributed.DistributedSampler
10
+ Support enlarging the dataset for iteration-based training, saving
11
+ time when restarting the dataloader after each epoch.
12
+
13
+ Args:
14
+ dataset (torch.utils.data.Dataset): Dataset used for sampling.
15
+ num_replicas (int | None): Number of processes participating in
16
+ the training. It is usually the world_size.
17
+ rank (int | None): Rank of the current process within num_replicas.
18
+ ratio (int): Enlarging ratio. Default: 1.
19
+ """
20
+
21
+ def __init__(self, dataset, num_replicas, rank, ratio=1):
22
+ self.dataset = dataset
23
+ self.num_replicas = num_replicas
24
+ self.rank = rank
25
+ self.epoch = 0
26
+ self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
27
+ self.total_size = self.num_samples * self.num_replicas
28
+
29
+ def __iter__(self):
30
+ # deterministically shuffle based on epoch
31
+ g = torch.Generator()
32
+ g.manual_seed(self.epoch)
33
+ indices = torch.randperm(self.total_size, generator=g).tolist()
34
+
35
+ dataset_size = len(self.dataset)
36
+ indices = [v % dataset_size for v in indices]
37
+
38
+ # subsample
39
+ indices = indices[self.rank:self.total_size:self.num_replicas]
40
+ assert len(indices) == self.num_samples
41
+
42
+ return iter(indices)
43
+
44
+ def __len__(self):
45
+ return self.num_samples
46
+
47
+ def set_epoch(self, epoch):
48
+ self.epoch = epoch
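
A small illustration of EnlargedSampler on a toy in-memory dataset: with ratio=100 the 10-sample dataset is virtually enlarged to 1000 indices, split evenly across two replicas (sizes here are illustrative):

    import torch
    from torch.utils.data import TensorDataset
    from basicsr.data.data_sampler import EnlargedSampler

    dataset = TensorDataset(torch.arange(10))                    # 10 real samples
    sampler = EnlargedSampler(dataset, num_replicas=2, rank=0, ratio=100)
    sampler.set_epoch(0)                                         # reshuffle deterministically per epoch
    indices = list(iter(sampler))
    print(len(indices), min(indices), max(indices))              # 500 indices, values in [0, 9]
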
basicsr/data/data_util.py ADDED
@@ -0,0 +1,315 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ from os import path as osp
5
+ from torch.nn import functional as F
6
+
7
+ from basicsr.data.transforms import mod_crop
8
+ from basicsr.utils import img2tensor, scandir
9
+
10
+
11
+ def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False):
12
+ """Read a sequence of images from a given folder path.
13
+
14
+ Args:
15
+ path (list[str] | str): List of image paths or image folder path.
16
+ require_mod_crop (bool): Require mod crop for each image.
17
+ Default: False.
18
+ scale (int): Scale factor for mod_crop. Default: 1.
19
+ return_imgname (bool): Whether to return image names. Default: False.
20
+
21
+ Returns:
22
+ Tensor: size (t, c, h, w), RGB, [0, 1].
23
+ list[str]: Returned image name list.
24
+ """
25
+ if isinstance(path, list):
26
+ img_paths = path
27
+ else:
28
+ img_paths = sorted(list(scandir(path, full_path=True)))
29
+ imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths]
30
+
31
+ if require_mod_crop:
32
+ imgs = [mod_crop(img, scale) for img in imgs]
33
+ imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
34
+ imgs = torch.stack(imgs, dim=0)
35
+
36
+ if return_imgname:
37
+ imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths]
38
+ return imgs, imgnames
39
+ else:
40
+ return imgs
41
+
42
+
43
+ def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection'):
44
+ """Generate an index list for reading `num_frames` frames from a sequence
45
+ of images.
46
+
47
+ Args:
48
+ crt_idx (int): Current center index.
49
+ max_frame_num (int): Maximum frame number of the image sequence (counting from 1).
50
+ num_frames (int): Reading num_frames frames.
51
+ padding (str): Padding mode, one of
52
+ 'replicate' | 'reflection' | 'reflection_circle' | 'circle'
53
+ Examples: current_idx = 0, num_frames = 5
54
+ The generated frame indices under different padding mode:
55
+ replicate: [0, 0, 0, 1, 2]
56
+ reflection: [2, 1, 0, 1, 2]
57
+ reflection_circle: [4, 3, 0, 1, 2]
58
+ circle: [3, 4, 0, 1, 2]
59
+
60
+ Returns:
61
+ list[int]: A list of indices.
62
+ """
63
+ assert num_frames % 2 == 1, 'num_frames should be an odd number.'
64
+ assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.'
65
+
66
+ max_frame_num = max_frame_num - 1 # start from 0
67
+ num_pad = num_frames // 2
68
+
69
+ indices = []
70
+ for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
71
+ if i < 0:
72
+ if padding == 'replicate':
73
+ pad_idx = 0
74
+ elif padding == 'reflection':
75
+ pad_idx = -i
76
+ elif padding == 'reflection_circle':
77
+ pad_idx = crt_idx + num_pad - i
78
+ else:
79
+ pad_idx = num_frames + i
80
+ elif i > max_frame_num:
81
+ if padding == 'replicate':
82
+ pad_idx = max_frame_num
83
+ elif padding == 'reflection':
84
+ pad_idx = max_frame_num * 2 - i
85
+ elif padding == 'reflection_circle':
86
+ pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
87
+ else:
88
+ pad_idx = i - num_frames
89
+ else:
90
+ pad_idx = i
91
+ indices.append(pad_idx)
92
+ return indices
93
+
94
+
95
+ def paired_paths_from_lmdb(folders, keys):
96
+ """Generate paired paths from lmdb files.
97
+
98
+ Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
99
+
100
+ ::
101
+
102
+ lq.lmdb
103
+ ├── data.mdb
104
+ ├── lock.mdb
105
+ ├── meta_info.txt
106
+
107
+ The data.mdb and lock.mdb are standard lmdb files and you can refer to
108
+ https://lmdb.readthedocs.io/en/release/ for more details.
109
+
110
+ The meta_info.txt is a specified txt file to record the meta information
111
+ of our datasets. It will be automatically created when preparing
112
+ datasets by our provided dataset tools.
113
+ Each line in the txt file records
114
+ 1)image name (with extension),
115
+ 2)image shape,
116
+ 3)compression level, separated by a white space.
117
+ Example: `baboon.png (120,125,3) 1`
118
+
119
+ We use the image name without extension as the lmdb key.
120
+ Note that we use the same key for the corresponding lq and gt images.
121
+
122
+ Args:
123
+ folders (list[str]): A list of folder paths. The order of the list should
124
+ be [input_folder, gt_folder].
125
+ keys (list[str]): A list of keys identifying folders. The order should
126
+ be consistent with folders, e.g., ['lq', 'gt'].
127
+ Note that this key is different from lmdb keys.
128
+
129
+ Returns:
130
+ list[str]: Returned path list.
131
+ """
132
+ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
133
+ f'But got {len(folders)}')
134
+ assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
135
+ input_folder, gt_folder = folders
136
+ input_key, gt_key = keys
137
+
138
+ if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')):
139
+ raise ValueError(f'{input_key} folder and {gt_key} folder should both in lmdb '
140
+ f'formats. But received {input_key}: {input_folder}; '
141
+ f'{gt_key}: {gt_folder}')
142
+ # ensure that the two meta_info files are the same
143
+ with open(osp.join(input_folder, 'meta_info.txt')) as fin:
144
+ input_lmdb_keys = [line.split('.')[0] for line in fin]
145
+ with open(osp.join(gt_folder, 'meta_info.txt')) as fin:
146
+ gt_lmdb_keys = [line.split('.')[0] for line in fin]
147
+ if set(input_lmdb_keys) != set(gt_lmdb_keys):
148
+ raise ValueError(f'Keys in {input_key}_folder and {gt_key}_folder are different.')
149
+ else:
150
+ paths = []
151
+ for lmdb_key in sorted(input_lmdb_keys):
152
+ paths.append(dict([(f'{input_key}_path', lmdb_key), (f'{gt_key}_path', lmdb_key)]))
153
+ return paths
154
+
155
+
156
+ def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl):
157
+ """Generate paired paths from an meta information file.
158
+
159
+ Each line in the meta information file contains the image names and
160
+ image shape (usually for gt), separated by a white space.
161
+
162
+ Example of an meta information file:
163
+ ```
164
+ 0001_s001.png (480,480,3)
165
+ 0001_s002.png (480,480,3)
166
+ ```
167
+
168
+ Args:
169
+ folders (list[str]): A list of folder paths. The order of the list should
170
+ be [input_folder, gt_folder].
171
+ keys (list[str]): A list of keys identifying folders. The order should
172
+ be consistent with folders, e.g., ['lq', 'gt'].
173
+ meta_info_file (str): Path to the meta information file.
174
+ filename_tmpl (str): Template for each filename. Note that the
175
+ template excludes the file extension. Usually the filename_tmpl is
176
+ for files in the input folder.
177
+
178
+ Returns:
179
+ list[str]: Returned path list.
180
+ """
181
+ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
182
+ f'But got {len(folders)}')
183
+ assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
184
+ input_folder, gt_folder = folders
185
+ input_key, gt_key = keys
186
+
187
+ with open(meta_info_file, 'r') as fin:
188
+ gt_names = [line.strip().split(' ')[0] for line in fin]
189
+
190
+ paths = []
191
+ for gt_name in gt_names:
192
+ basename, ext = osp.splitext(osp.basename(gt_name))
193
+ input_name = f'{filename_tmpl.format(basename)}{ext}'
194
+ input_path = osp.join(input_folder, input_name)
195
+ gt_path = osp.join(gt_folder, gt_name)
196
+ paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)]))
197
+ return paths
198
+
199
+
200
+ def paired_paths_from_folder(folders, keys, filename_tmpl):
201
+ """Generate paired paths from folders.
202
+
203
+ Args:
204
+ folders (list[str]): A list of folder paths. The order of the list should
205
+ be [input_folder, gt_folder].
206
+ keys (list[str]): A list of keys identifying folders. The order should
207
+ be consistent with folders, e.g., ['lq', 'gt'].
208
+ filename_tmpl (str): Template for each filename. Note that the
209
+ template excludes the file extension. Usually the filename_tmpl is
210
+ for files in the input folder.
211
+
212
+ Returns:
213
+ list[str]: Returned path list.
214
+ """
215
+ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
216
+ f'But got {len(folders)}')
217
+ assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
218
+ input_folder, gt_folder = folders
219
+ input_key, gt_key = keys
220
+
221
+ input_paths = list(scandir(input_folder))
222
+ gt_paths = list(scandir(gt_folder))
223
+ assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: '
224
+ f'{len(input_paths)}, {len(gt_paths)}.')
225
+ paths = []
226
+ for gt_path in gt_paths:
227
+ basename, ext = osp.splitext(osp.basename(gt_path))
228
+ input_name = f'{filename_tmpl.format(basename)}{ext}'
229
+ input_path = osp.join(input_folder, input_name)
230
+ assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.'
231
+ gt_path = osp.join(gt_folder, gt_path)
232
+ paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)]))
233
+ return paths
234
+
235
+
236
+ def paths_from_folder(folder):
237
+ """Generate paths from folder.
238
+
239
+ Args:
240
+ folder (str): Folder path.
241
+
242
+ Returns:
243
+ list[str]: Returned path list.
244
+ """
245
+
246
+ paths = list(scandir(folder))
247
+ paths = [osp.join(folder, path) for path in paths]
248
+ return paths
249
+
250
+
251
+ def paths_from_lmdb(folder):
252
+ """Generate paths from lmdb.
253
+
254
+ Args:
255
+ folder (str): Folder path.
256
+
257
+ Returns:
258
+ list[str]: Returned path list.
259
+ """
260
+ if not folder.endswith('.lmdb'):
261
+ raise ValueError(f'Folder {folder} should be in lmdb format.')
262
+ with open(osp.join(folder, 'meta_info.txt')) as fin:
263
+ paths = [line.split('.')[0] for line in fin]
264
+ return paths
265
+
266
+
267
+ def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
268
+ """Generate Gaussian kernel used in `duf_downsample`.
269
+
270
+ Args:
271
+ kernel_size (int): Kernel size. Default: 13.
272
+ sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
273
+
274
+ Returns:
275
+ np.array: The Gaussian kernel.
276
+ """
277
+ from scipy.ndimage import filters as filters
278
+ kernel = np.zeros((kernel_size, kernel_size))
279
+ # set element at the middle to one, a dirac delta
280
+ kernel[kernel_size // 2, kernel_size // 2] = 1
281
+ # gaussian-smooth the dirac, resulting in a gaussian filter
282
+ return filters.gaussian_filter(kernel, sigma)
283
+
284
+
285
+ def duf_downsample(x, kernel_size=13, scale=4):
286
+ """Downsamping with Gaussian kernel used in the DUF official code.
287
+
288
+ Args:
289
+ x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
290
+ kernel_size (int): Kernel size. Default: 13.
291
+ scale (int): Downsampling factor. Supported scale: (2, 3, 4).
292
+ Default: 4.
293
+
294
+ Returns:
295
+ Tensor: DUF downsampled frames.
296
+ """
297
+ assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.'
298
+
299
+ squeeze_flag = False
300
+ if x.ndim == 4:
301
+ squeeze_flag = True
302
+ x = x.unsqueeze(0)
303
+ b, t, c, h, w = x.size()
304
+ x = x.view(-1, 1, h, w)
305
+ pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
306
+ x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')
307
+
308
+ gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
309
+ gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0)
310
+ x = F.conv2d(x, gaussian_filter, stride=scale)
311
+ x = x[:, :, 2:-2, 2:-2]
312
+ x = x.view(b, t, c, x.size(2), x.size(3))
313
+ if squeeze_flag:
314
+ x = x.squeeze(0)
315
+ return x
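
Two quick, self-contained examples of the helpers above, generate_frame_indices and duf_downsample (tensor sizes are illustrative; duf_downsample additionally needs SciPy for the Gaussian kernel):

    import torch
    from basicsr.data.data_util import generate_frame_indices, duf_downsample

    # indices of a 5-frame window centered at frame 0 of a 100-frame clip
    print(generate_frame_indices(crt_idx=0, max_frame_num=100, num_frames=5,
                                 padding='reflection'))          # [2, 1, 0, 1, 2]

    # DUF-style x4 downsampling of a 5-frame clip
    frames = torch.rand(5, 3, 64, 64)                            # (t, c, h, w)
    lr = duf_downsample(frames, kernel_size=13, scale=4)
    print(lr.shape)                                              # torch.Size([5, 3, 16, 16])
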
basicsr/data/degradations.py ADDED
@@ -0,0 +1,764 @@
1
+ import cv2
2
+ import math
3
+ import numpy as np
4
+ import random
5
+ import torch
6
+ from scipy import special
7
+ from scipy.stats import multivariate_normal
8
+ from torchvision.transforms.functional_tensor import rgb_to_grayscale
9
+
10
+ # -------------------------------------------------------------------- #
11
+ # --------------------------- blur kernels --------------------------- #
12
+ # -------------------------------------------------------------------- #
13
+
14
+
15
+ # --------------------------- util functions --------------------------- #
16
+ def sigma_matrix2(sig_x, sig_y, theta):
17
+ """Calculate the rotated sigma matrix (two dimensional matrix).
18
+
19
+ Args:
20
+ sig_x (float):
21
+ sig_y (float):
22
+ theta (float): Radian measurement.
23
+
24
+ Returns:
25
+ ndarray: Rotated sigma matrix.
26
+ """
27
+ d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
28
+ u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
29
+ return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
30
+
31
+
32
+ def mesh_grid(kernel_size):
33
+ """Generate the mesh grid, centering at zero.
34
+
35
+ Args:
36
+ kernel_size (int):
37
+
38
+ Returns:
39
+ xy (ndarray): with the shape (kernel_size, kernel_size, 2)
40
+ xx (ndarray): with the shape (kernel_size, kernel_size)
41
+ yy (ndarray): with the shape (kernel_size, kernel_size)
42
+ """
43
+ ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
44
+ xx, yy = np.meshgrid(ax, ax)
45
+ xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
46
+ 1))).reshape(kernel_size, kernel_size, 2)
47
+ return xy, xx, yy
48
+
49
+
50
+ def pdf2(sigma_matrix, grid):
51
+ """Calculate PDF of the bivariate Gaussian distribution.
52
+
53
+ Args:
54
+ sigma_matrix (ndarray): with the shape (2, 2)
55
+ grid (ndarray): generated by :func:`mesh_grid`,
56
+ with the shape (K, K, 2), K is the kernel size.
57
+
58
+ Returns:
59
+ kernel (ndarray): un-normalized kernel.
60
+ """
61
+ inverse_sigma = np.linalg.inv(sigma_matrix)
62
+ kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
63
+ return kernel
64
+
65
+
66
+ def cdf2(d_matrix, grid):
67
+ """Calculate the CDF of the standard bivariate Gaussian distribution.
68
+ Used in skewed Gaussian distribution.
69
+
70
+ Args:
71
+ d_matrix (ndarray): skew matrix.
72
+ grid (ndarray): generated by :func:`mesh_grid`,
73
+ with the shape (K, K, 2), K is the kernel size.
74
+
75
+ Returns:
76
+ cdf (ndarray): skewed cdf.
77
+ """
78
+ rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
79
+ grid = np.dot(grid, d_matrix)
80
+ cdf = rv.cdf(grid)
81
+ return cdf
82
+
83
+
84
+ def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
85
+ """Generate a bivariate isotropic or anisotropic Gaussian kernel.
86
+
87
+ In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
88
+
89
+ Args:
90
+ kernel_size (int):
91
+ sig_x (float):
92
+ sig_y (float):
93
+ theta (float): Radian measurement.
94
+ grid (ndarray, optional): generated by :func:`mesh_grid`,
95
+ with the shape (K, K, 2), K is the kernel size. Default: None
96
+ isotropic (bool):
97
+
98
+ Returns:
99
+ kernel (ndarray): normalized kernel.
100
+ """
101
+ if grid is None:
102
+ grid, _, _ = mesh_grid(kernel_size)
103
+ if isotropic:
104
+ sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
105
+ else:
106
+ sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
107
+ kernel = pdf2(sigma_matrix, grid)
108
+ kernel = kernel / np.sum(kernel)
109
+ return kernel
110
+
111
+
112
+ def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
113
+ """Generate a bivariate generalized Gaussian kernel.
114
+
115
+ ``Paper: Parameter Estimation For Multivariate Generalized Gaussian Distributions``
116
+
117
+ In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
118
+
119
+ Args:
120
+ kernel_size (int):
121
+ sig_x (float):
122
+ sig_y (float):
123
+ theta (float): Radian measurement.
124
+ beta (float): shape parameter, beta = 1 is the normal distribution.
125
+ grid (ndarray, optional): generated by :func:`mesh_grid`,
126
+ with the shape (K, K, 2), K is the kernel size. Default: None
127
+
128
+ Returns:
129
+ kernel (ndarray): normalized kernel.
130
+ """
131
+ if grid is None:
132
+ grid, _, _ = mesh_grid(kernel_size)
133
+ if isotropic:
134
+ sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
135
+ else:
136
+ sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
137
+ inverse_sigma = np.linalg.inv(sigma_matrix)
138
+ kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
139
+ kernel = kernel / np.sum(kernel)
140
+ return kernel
141
+
142
+
143
+ def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
144
+ """Generate a plateau-like anisotropic kernel.
145
+
146
+ 1 / (1+x^(beta))
147
+
148
+ Reference: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
149
+
150
+ In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
151
+
152
+ Args:
153
+ kernel_size (int):
154
+ sig_x (float):
155
+ sig_y (float):
156
+ theta (float): Radian measurement.
157
+ beta (float): shape parameter, beta = 1 is the normal distribution.
158
+ grid (ndarray, optional): generated by :func:`mesh_grid`,
159
+ with the shape (K, K, 2), K is the kernel size. Default: None
160
+
161
+ Returns:
162
+ kernel (ndarray): normalized kernel.
163
+ """
164
+ if grid is None:
165
+ grid, _, _ = mesh_grid(kernel_size)
166
+ if isotropic:
167
+ sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
168
+ else:
169
+ sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
170
+ inverse_sigma = np.linalg.inv(sigma_matrix)
171
+ kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
172
+ kernel = kernel / np.sum(kernel)
173
+ return kernel
174
+
175
+
176
+ def random_bivariate_Gaussian(kernel_size,
177
+ sigma_x_range,
178
+ sigma_y_range,
179
+ rotation_range,
180
+ noise_range=None,
181
+ isotropic=True):
182
+ """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
183
+
184
+ In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
185
+
186
+ Args:
187
+ kernel_size (int):
188
+ sigma_x_range (tuple): [0.6, 5]
189
+ sigma_y_range (tuple): [0.6, 5]
190
+ rotation range (tuple): [-math.pi, math.pi]
191
+ noise_range(tuple, optional): multiplicative kernel noise,
192
+ [0.75, 1.25]. Default: None
193
+
194
+ Returns:
195
+ kernel (ndarray):
196
+ """
197
+ assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
198
+ assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
199
+ sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
200
+ if isotropic is False:
201
+ assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
202
+ assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
203
+ sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
204
+ rotation = np.random.uniform(rotation_range[0], rotation_range[1])
205
+ else:
206
+ sigma_y = sigma_x
207
+ rotation = 0
208
+
209
+ kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
210
+
211
+ # add multiplicative noise
212
+ if noise_range is not None:
213
+ assert noise_range[0] < noise_range[1], 'Wrong noise range.'
214
+ noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
215
+ kernel = kernel * noise
216
+ kernel = kernel / np.sum(kernel)
217
+ return kernel
218
+
219
+
220
+ def random_bivariate_generalized_Gaussian(kernel_size,
221
+ sigma_x_range,
222
+ sigma_y_range,
223
+ rotation_range,
224
+ beta_range,
225
+ noise_range=None,
226
+ isotropic=True):
227
+ """Randomly generate bivariate generalized Gaussian kernels.
228
+
229
+ In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
230
+
231
+ Args:
232
+ kernel_size (int):
233
+ sigma_x_range (tuple): [0.6, 5]
234
+ sigma_y_range (tuple): [0.6, 5]
235
+ rotation range (tuple): [-math.pi, math.pi]
236
+ beta_range (tuple): [0.5, 8]
237
+ noise_range(tuple, optional): multiplicative kernel noise,
238
+ [0.75, 1.25]. Default: None
239
+
240
+ Returns:
241
+ kernel (ndarray):
242
+ """
243
+ assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
244
+ assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
245
+ sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
246
+ if isotropic is False:
247
+ assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
248
+ assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
249
+ sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
250
+ rotation = np.random.uniform(rotation_range[0], rotation_range[1])
251
+ else:
252
+ sigma_y = sigma_x
253
+ rotation = 0
254
+
255
+ # assume beta_range[0] < 1 < beta_range[1]
256
+ if np.random.uniform() < 0.5:
257
+ beta = np.random.uniform(beta_range[0], 1)
258
+ else:
259
+ beta = np.random.uniform(1, beta_range[1])
260
+
261
+ kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
262
+
263
+ # add multiplicative noise
264
+ if noise_range is not None:
265
+ assert noise_range[0] < noise_range[1], 'Wrong noise range.'
266
+ noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
267
+ kernel = kernel * noise
268
+ kernel = kernel / np.sum(kernel)
269
+ return kernel
270
+
271
+
272
+ def random_bivariate_plateau(kernel_size,
273
+ sigma_x_range,
274
+ sigma_y_range,
275
+ rotation_range,
276
+ beta_range,
277
+ noise_range=None,
278
+ isotropic=True):
279
+ """Randomly generate bivariate plateau kernels.
280
+
281
+ In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
282
+
283
+ Args:
284
+ kernel_size (int):
285
+ sigma_x_range (tuple): [0.6, 5]
286
+ sigma_y_range (tuple): [0.6, 5]
287
+ rotation range (tuple): [-math.pi/2, math.pi/2]
288
+ beta_range (tuple): [1, 4]
289
+ noise_range(tuple, optional): multiplicative kernel noise,
290
+ [0.75, 1.25]. Default: None
291
+
292
+ Returns:
293
+ kernel (ndarray):
294
+ """
295
+ assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
296
+ assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
297
+ sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
298
+ if isotropic is False:
299
+ assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
300
+ assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
301
+ sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
302
+ rotation = np.random.uniform(rotation_range[0], rotation_range[1])
303
+ else:
304
+ sigma_y = sigma_x
305
+ rotation = 0
306
+
307
+ # TODO: this may be not proper
308
+ if np.random.uniform() < 0.5:
309
+ beta = np.random.uniform(beta_range[0], 1)
310
+ else:
311
+ beta = np.random.uniform(1, beta_range[1])
312
+
313
+ kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
314
+ # add multiplicative noise
315
+ if noise_range is not None:
316
+ assert noise_range[0] < noise_range[1], 'Wrong noise range.'
317
+ noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
318
+ kernel = kernel * noise
319
+ kernel = kernel / np.sum(kernel)
320
+
321
+ return kernel
322
+
323
+
324
+ def random_mixed_kernels(kernel_list,
325
+ kernel_prob,
326
+ kernel_size=21,
327
+ sigma_x_range=(0.6, 5),
328
+ sigma_y_range=(0.6, 5),
329
+ rotation_range=(-math.pi, math.pi),
330
+ betag_range=(0.5, 8),
331
+ betap_range=(0.5, 8),
332
+ noise_range=None):
333
+ """Randomly generate mixed kernels.
334
+
335
+ Args:
336
+ kernel_list (tuple): a list of kernel type names; supported types are
337
+ ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso',
338
+ 'plateau_aniso']
339
+ kernel_prob (tuple): corresponding kernel probability for each
340
+ kernel type
341
+ kernel_size (int):
342
+ sigma_x_range (tuple): [0.6, 5]
343
+ sigma_y_range (tuple): [0.6, 5]
344
+ rotation range (tuple): [-math.pi, math.pi]
345
+ beta_range (tuple): [0.5, 8]
346
+ noise_range(tuple, optional): multiplicative kernel noise,
347
+ [0.75, 1.25]. Default: None
348
+
349
+ Returns:
350
+ kernel (ndarray):
351
+ """
352
+ kernel_type = random.choices(kernel_list, kernel_prob)[0]
353
+ if kernel_type == 'iso':
354
+ kernel = random_bivariate_Gaussian(
355
+ kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
356
+ elif kernel_type == 'aniso':
357
+ kernel = random_bivariate_Gaussian(
358
+ kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
359
+ elif kernel_type == 'generalized_iso':
360
+ kernel = random_bivariate_generalized_Gaussian(
361
+ kernel_size,
362
+ sigma_x_range,
363
+ sigma_y_range,
364
+ rotation_range,
365
+ betag_range,
366
+ noise_range=noise_range,
367
+ isotropic=True)
368
+ elif kernel_type == 'generalized_aniso':
369
+ kernel = random_bivariate_generalized_Gaussian(
370
+ kernel_size,
371
+ sigma_x_range,
372
+ sigma_y_range,
373
+ rotation_range,
374
+ betag_range,
375
+ noise_range=noise_range,
376
+ isotropic=False)
377
+ elif kernel_type == 'plateau_iso':
378
+ kernel = random_bivariate_plateau(
379
+ kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
380
+ elif kernel_type == 'plateau_aniso':
381
+ kernel = random_bivariate_plateau(
382
+ kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
383
+ return kernel
384
+
385
+
386
+ np.seterr(divide='ignore', invalid='ignore')
387
+
388
+
389
+ def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
390
+ """2D sinc filter
391
+
392
+ Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
393
+
394
+ Args:
395
+ cutoff (float): cutoff frequency in radians (pi is max)
396
+ kernel_size (int): horizontal and vertical size, must be odd.
397
+ pad_to (int): pad kernel size to desired size, must be odd or zero.
398
+ """
399
+ assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
400
+ kernel = np.fromfunction(
401
+ lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
402
+ (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
403
+ (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
404
+ kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
405
+ kernel = kernel / np.sum(kernel)
406
+ if pad_to > kernel_size:
407
+ pad_size = (pad_to - kernel_size) // 2
408
+ kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
409
+ return kernel
410
+
411
+
412
+ # ------------------------------------------------------------- #
413
+ # --------------------------- noise --------------------------- #
414
+ # ------------------------------------------------------------- #
415
+
416
+ # ----------------------- Gaussian Noise ----------------------- #
417
+
418
+
419
+ def generate_gaussian_noise(img, sigma=10, gray_noise=False):
420
+ """Generate Gaussian noise.
421
+
422
+ Args:
423
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
424
+ sigma (float): Noise scale (measured in range 255). Default: 10.
425
+
426
+ Returns:
427
+ (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
428
+ float32.
429
+ """
430
+ if gray_noise:
431
+ noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
432
+ noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
433
+ else:
434
+ noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
435
+ return noise
436
+
437
+
438
+ def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
439
+ """Add Gaussian noise.
440
+
441
+ Args:
442
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
443
+ sigma (float): Noise scale (measured in range 255). Default: 10.
444
+
445
+ Returns:
446
+ (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
447
+ float32.
448
+ """
449
+ noise = generate_gaussian_noise(img, sigma, gray_noise)
450
+ out = img + noise
451
+ if clip and rounds:
452
+ out = np.clip((out * 255.0).round(), 0, 255) / 255.
453
+ elif clip:
454
+ out = np.clip(out, 0, 1)
455
+ elif rounds:
456
+ out = (out * 255.0).round() / 255.
457
+ return out
458
+
459
+
460
+ def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
461
+ """Add Gaussian noise (PyTorch version).
462
+
463
+ Args:
464
+ img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
465
+ sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.
466
+
467
+ Returns:
468
+ (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
469
+ float32.
470
+ """
471
+ b, _, h, w = img.size()
472
+ if not isinstance(sigma, (float, int)):
473
+ sigma = sigma.view(img.size(0), 1, 1, 1)
474
+ if isinstance(gray_noise, (float, int)):
475
+ cal_gray_noise = gray_noise > 0
476
+ else:
477
+ gray_noise = gray_noise.view(b, 1, 1, 1)
478
+ cal_gray_noise = torch.sum(gray_noise) > 0
479
+
480
+ if cal_gray_noise:
481
+ noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
482
+ noise_gray = noise_gray.view(b, 1, h, w)
483
+
484
+ # always calculate color noise
485
+ noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
486
+
487
+ if cal_gray_noise:
488
+ noise = noise * (1 - gray_noise) + noise_gray * gray_noise
489
+ return noise
490
+
491
+
492
+ def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
493
+ """Add Gaussian noise (PyTorch version).
494
+
495
+ Args:
496
+ img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
497
+ sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.
498
+
499
+ Returns:
500
+ (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
501
+ float32.
502
+ """
503
+ noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
504
+ out = img + noise
505
+ if clip and rounds:
506
+ out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
507
+ elif clip:
508
+ out = torch.clamp(out, 0, 1)
509
+ elif rounds:
510
+ out = (out * 255.0).round() / 255.
511
+ return out
512
+
513
+
514
+ # ----------------------- Random Gaussian Noise ----------------------- #
515
+ def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
516
+ sigma = np.random.uniform(sigma_range[0], sigma_range[1])
517
+ if np.random.uniform() < gray_prob:
518
+ gray_noise = True
519
+ else:
520
+ gray_noise = False
521
+ return generate_gaussian_noise(img, sigma, gray_noise)
522
+
523
+
524
+ def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
525
+ noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
526
+ out = img + noise
527
+ if clip and rounds:
528
+ out = np.clip((out * 255.0).round(), 0, 255) / 255.
529
+ elif clip:
530
+ out = np.clip(out, 0, 1)
531
+ elif rounds:
532
+ out = (out * 255.0).round() / 255.
533
+ return out
534
+
535
+
536
+ def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
537
+ sigma = torch.rand(
538
+ img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
539
+ gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
540
+ gray_noise = (gray_noise < gray_prob).float()
541
+ return generate_gaussian_noise_pt(img, sigma, gray_noise)
542
+
543
+
544
+ def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
545
+ noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
546
+ out = img + noise
547
+ if clip and rounds:
548
+ out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
549
+ elif clip:
550
+ out = torch.clamp(out, 0, 1)
551
+ elif rounds:
552
+ out = (out * 255.0).round() / 255.
553
+ return out
554
+
555
+
556
+ # ----------------------- Poisson (Shot) Noise ----------------------- #
557
+
558
+
559
+ def generate_poisson_noise(img, scale=1.0, gray_noise=False):
560
+ """Generate poisson noise.
561
+
562
+ Reference: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219
563
+
564
+ Args:
565
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
566
+ scale (float): Noise scale. Default: 1.0.
567
+ gray_noise (bool): Whether to generate gray noise. Default: False.
568
+
569
+ Returns:
570
+ (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
571
+ float32.
572
+ """
573
+ if gray_noise:
574
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
575
+ # round and clip image for counting vals correctly
576
+ img = np.clip((img * 255.0).round(), 0, 255) / 255.
577
+ vals = len(np.unique(img))
578
+ vals = 2**np.ceil(np.log2(vals))
579
+ out = np.float32(np.random.poisson(img * vals) / float(vals))
580
+ noise = out - img
581
+ if gray_noise:
582
+ noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
583
+ return noise * scale
584
+
585
+
586
+ def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
587
+ """Add poisson noise.
588
+
589
+ Args:
590
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
591
+ scale (float): Noise scale. Default: 1.0.
592
+ gray_noise (bool): Whether to generate gray noise. Default: False.
593
+
594
+ Returns:
595
+ (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
596
+ float32.
597
+ """
598
+ noise = generate_poisson_noise(img, scale, gray_noise)
599
+ out = img + noise
600
+ if clip and rounds:
601
+ out = np.clip((out * 255.0).round(), 0, 255) / 255.
602
+ elif clip:
603
+ out = np.clip(out, 0, 1)
604
+ elif rounds:
605
+ out = (out * 255.0).round() / 255.
606
+ return out
607
+
608
+
609
+ def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
610
+ """Generate a batch of poisson noise (PyTorch version)
611
+
612
+ Args:
613
+ img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
614
+ scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
615
+ Default: 1.0.
616
+ gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
617
+ 0 for False, 1 for True. Default: 0.
618
+
619
+ Returns:
620
+ (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
621
+ float32.
622
+ """
623
+ b, _, h, w = img.size()
624
+ if isinstance(gray_noise, (float, int)):
625
+ cal_gray_noise = gray_noise > 0
626
+ else:
627
+ gray_noise = gray_noise.view(b, 1, 1, 1)
628
+ cal_gray_noise = torch.sum(gray_noise) > 0
629
+ if cal_gray_noise:
630
+ img_gray = rgb_to_grayscale(img, num_output_channels=1)
631
+ # round and clip image for counting vals correctly
632
+ img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
633
+ # use for-loop to get the unique values for each sample
634
+ vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
635
+ vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
636
+ vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
637
+ out = torch.poisson(img_gray * vals) / vals
638
+ noise_gray = out - img_gray
639
+ noise_gray = noise_gray.expand(b, 3, h, w)
640
+
641
+ # always calculate color noise
642
+ # round and clip image for counting vals correctly
643
+ img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
644
+ # use for-loop to get the unique values for each sample
645
+ vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
646
+ vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
647
+ vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
648
+ out = torch.poisson(img * vals) / vals
649
+ noise = out - img
650
+ if cal_gray_noise:
651
+ noise = noise * (1 - gray_noise) + noise_gray * gray_noise
652
+ if not isinstance(scale, (float, int)):
653
+ scale = scale.view(b, 1, 1, 1)
654
+ return noise * scale
655
+
656
+
657
+ def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
658
+ """Add poisson noise to a batch of images (PyTorch version).
659
+
660
+ Args:
661
+ img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
662
+ scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
663
+ Default: 1.0.
664
+ gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
665
+ 0 for False, 1 for True. Default: 0.
666
+
667
+ Returns:
668
+ (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
669
+ float32.
670
+ """
671
+ noise = generate_poisson_noise_pt(img, scale, gray_noise)
672
+ out = img + noise
673
+ if clip and rounds:
674
+ out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
675
+ elif clip:
676
+ out = torch.clamp(out, 0, 1)
677
+ elif rounds:
678
+ out = (out * 255.0).round() / 255.
679
+ return out
680
+
681
+
682
+ # ----------------------- Random Poisson (Shot) Noise ----------------------- #
683
+
684
+
685
+ def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
686
+ scale = np.random.uniform(scale_range[0], scale_range[1])
687
+ if np.random.uniform() < gray_prob:
688
+ gray_noise = True
689
+ else:
690
+ gray_noise = False
691
+ return generate_poisson_noise(img, scale, gray_noise)
692
+
693
+
694
+ def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
695
+ noise = random_generate_poisson_noise(img, scale_range, gray_prob)
696
+ out = img + noise
697
+ if clip and rounds:
698
+ out = np.clip((out * 255.0).round(), 0, 255) / 255.
699
+ elif clip:
700
+ out = np.clip(out, 0, 1)
701
+ elif rounds:
702
+ out = (out * 255.0).round() / 255.
703
+ return out
704
+
705
+
706
+ def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
707
+ scale = torch.rand(
708
+ img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
709
+ gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
710
+ gray_noise = (gray_noise < gray_prob).float()
711
+ return generate_poisson_noise_pt(img, scale, gray_noise)
712
+
713
+
714
+ def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
715
+ noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
716
+ out = img + noise
717
+ if clip and rounds:
718
+ out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
719
+ elif clip:
720
+ out = torch.clamp(out, 0, 1)
721
+ elif rounds:
722
+ out = (out * 255.0).round() / 255.
723
+ return out
724
+
725
+
726
+ # ------------------------------------------------------------------------ #
727
+ # --------------------------- JPEG compression --------------------------- #
728
+ # ------------------------------------------------------------------------ #
729
+
730
+
731
+ def add_jpg_compression(img, quality=90):
732
+ """Add JPG compression artifacts.
733
+
734
+ Args:
735
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
736
+ quality (float): JPG compression quality. 0 for lowest quality, 100 for
737
+ best quality. Default: 90.
738
+
739
+ Returns:
740
+ (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
741
+ float32.
742
+ """
743
+ img = np.clip(img, 0, 1)
744
+ encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
745
+ _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
746
+ img = np.float32(cv2.imdecode(encimg, 1)) / 255.
747
+ return img
748
+
749
+
750
+ def random_add_jpg_compression(img, quality_range=(90, 100)):
751
+ """Randomly add JPG compression artifacts.
752
+
753
+ Args:
754
+ img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
755
+ quality_range (tuple[float] | list[float]): JPG compression quality
756
+ range. 0 for lowest quality, 100 for best quality.
757
+ Default: (90, 100).
758
+
759
+ Returns:
760
+ (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
761
+ float32.
762
+ """
763
+ quality = np.random.uniform(quality_range[0], quality_range[1])
764
+ return add_jpg_compression(img, quality)
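
A short sketch combining a few of the degradation utilities above into a simple blur-then-noise pipeline. The image size, kernel settings, and probabilities are placeholders chosen for illustration, not a specific training recipe:

    import math
    import cv2
    import numpy as np
    from basicsr.data.degradations import add_gaussian_noise, random_mixed_kernels

    img = np.random.rand(64, 64, 3).astype(np.float32)           # toy image in [0, 1]

    # sample a 21x21 kernel from a mixture of isotropic/anisotropic Gaussians
    kernel = random_mixed_kernels(
        kernel_list=['iso', 'aniso'], kernel_prob=[0.7, 0.3], kernel_size=21,
        sigma_x_range=(0.6, 5), sigma_y_range=(0.6, 5),
        rotation_range=(-math.pi, math.pi))

    blurred = cv2.filter2D(img, -1, kernel)                      # apply the sampled blur
    degraded = add_gaussian_noise(blurred, sigma=10, gray_noise=False)
    print(kernel.shape, degraded.shape, degraded.dtype)
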
basicsr/data/ffhq_dataset.py ADDED
@@ -0,0 +1,80 @@
1
+ import random
2
+ import time
3
+ from os import path as osp
4
+ from torch.utils import data as data
5
+ from torchvision.transforms.functional import normalize
6
+
7
+ from basicsr.data.transforms import augment
8
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
9
+ from basicsr.utils.registry import DATASET_REGISTRY
10
+
11
+
12
+ @DATASET_REGISTRY.register()
13
+ class FFHQDataset(data.Dataset):
14
+ """FFHQ dataset for StyleGAN.
15
+
16
+ Args:
17
+ opt (dict): Config for train datasets. It contains the following keys:
18
+ dataroot_gt (str): Data root path for gt.
19
+ io_backend (dict): IO backend type and other kwargs.
20
+ mean (list | tuple): Image mean.
21
+ std (list | tuple): Image std.
22
+ use_hflip (bool): Whether to horizontally flip.
23
+
24
+ """
25
+
26
+ def __init__(self, opt):
27
+ super(FFHQDataset, self).__init__()
28
+ self.opt = opt
29
+ # file client (io backend)
30
+ self.file_client = None
31
+ self.io_backend_opt = opt['io_backend']
32
+
33
+ self.gt_folder = opt['dataroot_gt']
34
+ self.mean = opt['mean']
35
+ self.std = opt['std']
36
+
37
+ if self.io_backend_opt['type'] == 'lmdb':
38
+ self.io_backend_opt['db_paths'] = self.gt_folder
39
+ if not self.gt_folder.endswith('.lmdb'):
40
+ raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
41
+ with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
42
+ self.paths = [line.split('.')[0] for line in fin]
43
+ else:
44
+ # FFHQ has 70000 images in total
45
+ self.paths = [osp.join(self.gt_folder, f'{v:08d}.png') for v in range(70000)]
46
+
47
+ def __getitem__(self, index):
48
+ if self.file_client is None:
49
+ self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
50
+
51
+ # load gt image
52
+ gt_path = self.paths[index]
53
+ # avoid errors caused by high latency in reading files
54
+ retry = 3
55
+ while retry > 0:
56
+ try:
57
+ img_bytes = self.file_client.get(gt_path)
58
+ except Exception as e:
59
+ logger = get_root_logger()
60
+ logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
61
+ # switch to another file and retry
62
+ index = random.randint(0, self.__len__() - 1)  # randint is inclusive, so keep the index in range
63
+ gt_path = self.paths[index]
64
+ time.sleep(1) # sleep 1s for occasional server congestion
65
+ else:
66
+ break
67
+ finally:
68
+ retry -= 1
69
+ img_gt = imfrombytes(img_bytes, float32=True)
70
+
71
+ # random horizontal flip
72
+ img_gt = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False)
73
+ # BGR to RGB, HWC to CHW, numpy to tensor
74
+ img_gt = img2tensor(img_gt, bgr2rgb=True, float32=True)
75
+ # normalize
76
+ normalize(img_gt, self.mean, self.std, inplace=True)
77
+ return {'gt': img_gt, 'gt_path': gt_path}
78
+
79
+ def __len__(self):
80
+ return len(self.paths)
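As a sanity check, the dataset can be driven directly with a plain dict, outside the repo's usual yml-driven dataset construction. A minimal sketch with a disk backend (the dataroot path is a placeholder; the folder is assumed to hold FFHQ images named 00000000.png .. 00069999.png):

from torch.utils.data import DataLoader
from basicsr.data.ffhq_dataset import FFHQDataset

opt = {
    'dataroot_gt': 'datasets/ffhq/images',  # hypothetical folder of FFHQ PNGs
    'io_backend': {'type': 'disk'},
    'mean': [0.5, 0.5, 0.5],
    'std': [0.5, 0.5, 0.5],
    'use_hflip': True,
}
dataset = FFHQDataset(opt)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
batch = next(iter(loader))
print(batch['gt'].shape)  # (4, 3, 1024, 1024) if the folder holds the original 1024x1024 PNGs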
basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt ADDED
The diff for this file is too large to render. See raw diff
basicsr/data/meta_info/meta_info_REDS4_test_GT.txt ADDED
@@ -0,0 +1,4 @@
1
+ 000 100 (720,1280,3)
2
+ 011 100 (720,1280,3)
3
+ 015 100 (720,1280,3)
4
+ 020 100 (720,1280,3)
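Each meta_info file lists one clip per line as "<clip name> <number of frames> (<h>,<w>,<c>)". A minimal parser sketch (the helper name is ours, not part of basicsr):

def parse_meta_info_line(line):
    name, num_frames, shape = line.split(' ')
    h, w, c = (int(v) for v in shape.strip('()').split(','))
    return name, int(num_frames), (h, w, c)

with open('basicsr/data/meta_info/meta_info_REDS4_test_GT.txt') as fin:
    clips = [parse_meta_info_line(line.strip()) for line in fin if line.strip()]
print(clips[0])  # ('000', 100, (720, 1280, 3))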
basicsr/data/meta_info/meta_info_REDS_GT.txt ADDED
@@ -0,0 +1,270 @@
1
+ 000 100 (720,1280,3)
2
+ 001 100 (720,1280,3)
3
+ 002 100 (720,1280,3)
4
+ 003 100 (720,1280,3)
5
+ 004 100 (720,1280,3)
6
+ 005 100 (720,1280,3)
7
+ 006 100 (720,1280,3)
8
+ 007 100 (720,1280,3)
9
+ 008 100 (720,1280,3)
10
+ 009 100 (720,1280,3)
11
+ 010 100 (720,1280,3)
12
+ 011 100 (720,1280,3)
13
+ 012 100 (720,1280,3)
14
+ 013 100 (720,1280,3)
15
+ 014 100 (720,1280,3)
16
+ 015 100 (720,1280,3)
17
+ 016 100 (720,1280,3)
18
+ 017 100 (720,1280,3)
19
+ 018 100 (720,1280,3)
20
+ 019 100 (720,1280,3)
21
+ 020 100 (720,1280,3)
22
+ 021 100 (720,1280,3)
23
+ 022 100 (720,1280,3)
24
+ 023 100 (720,1280,3)
25
+ 024 100 (720,1280,3)
26
+ 025 100 (720,1280,3)
27
+ 026 100 (720,1280,3)
28
+ 027 100 (720,1280,3)
29
+ 028 100 (720,1280,3)
30
+ 029 100 (720,1280,3)
31
+ 030 100 (720,1280,3)
32
+ 031 100 (720,1280,3)
33
+ 032 100 (720,1280,3)
34
+ 033 100 (720,1280,3)
35
+ 034 100 (720,1280,3)
36
+ 035 100 (720,1280,3)
37
+ 036 100 (720,1280,3)
38
+ 037 100 (720,1280,3)
39
+ 038 100 (720,1280,3)
40
+ 039 100 (720,1280,3)
41
+ 040 100 (720,1280,3)
42
+ 041 100 (720,1280,3)
43
+ 042 100 (720,1280,3)
44
+ 043 100 (720,1280,3)
45
+ 044 100 (720,1280,3)
46
+ 045 100 (720,1280,3)
47
+ 046 100 (720,1280,3)
48
+ 047 100 (720,1280,3)
49
+ 048 100 (720,1280,3)
50
+ 049 100 (720,1280,3)
51
+ 050 100 (720,1280,3)
52
+ 051 100 (720,1280,3)
53
+ 052 100 (720,1280,3)
54
+ 053 100 (720,1280,3)
55
+ 054 100 (720,1280,3)
56
+ 055 100 (720,1280,3)
57
+ 056 100 (720,1280,3)
58
+ 057 100 (720,1280,3)
59
+ 058 100 (720,1280,3)
60
+ 059 100 (720,1280,3)
61
+ 060 100 (720,1280,3)
62
+ 061 100 (720,1280,3)
63
+ 062 100 (720,1280,3)
64
+ 063 100 (720,1280,3)
65
+ 064 100 (720,1280,3)
66
+ 065 100 (720,1280,3)
67
+ 066 100 (720,1280,3)
68
+ 067 100 (720,1280,3)
69
+ 068 100 (720,1280,3)
70
+ 069 100 (720,1280,3)
71
+ 070 100 (720,1280,3)
72
+ 071 100 (720,1280,3)
73
+ 072 100 (720,1280,3)
74
+ 073 100 (720,1280,3)
75
+ 074 100 (720,1280,3)
76
+ 075 100 (720,1280,3)
77
+ 076 100 (720,1280,3)
78
+ 077 100 (720,1280,3)
79
+ 078 100 (720,1280,3)
80
+ 079 100 (720,1280,3)
81
+ 080 100 (720,1280,3)
82
+ 081 100 (720,1280,3)
83
+ 082 100 (720,1280,3)
84
+ 083 100 (720,1280,3)
85
+ 084 100 (720,1280,3)
86
+ 085 100 (720,1280,3)
87
+ 086 100 (720,1280,3)
88
+ 087 100 (720,1280,3)
89
+ 088 100 (720,1280,3)
90
+ 089 100 (720,1280,3)
91
+ 090 100 (720,1280,3)
92
+ 091 100 (720,1280,3)
93
+ 092 100 (720,1280,3)
94
+ 093 100 (720,1280,3)
95
+ 094 100 (720,1280,3)
96
+ 095 100 (720,1280,3)
97
+ 096 100 (720,1280,3)
98
+ 097 100 (720,1280,3)
99
+ 098 100 (720,1280,3)
100
+ 099 100 (720,1280,3)
101
+ 100 100 (720,1280,3)
102
+ 101 100 (720,1280,3)
103
+ 102 100 (720,1280,3)
104
+ 103 100 (720,1280,3)
105
+ 104 100 (720,1280,3)
106
+ 105 100 (720,1280,3)
107
+ 106 100 (720,1280,3)
108
+ 107 100 (720,1280,3)
109
+ 108 100 (720,1280,3)
110
+ 109 100 (720,1280,3)
111
+ 110 100 (720,1280,3)
112
+ 111 100 (720,1280,3)
113
+ 112 100 (720,1280,3)
114
+ 113 100 (720,1280,3)
115
+ 114 100 (720,1280,3)
116
+ 115 100 (720,1280,3)
117
+ 116 100 (720,1280,3)
118
+ 117 100 (720,1280,3)
119
+ 118 100 (720,1280,3)
120
+ 119 100 (720,1280,3)
121
+ 120 100 (720,1280,3)
122
+ 121 100 (720,1280,3)
123
+ 122 100 (720,1280,3)
124
+ 123 100 (720,1280,3)
125
+ 124 100 (720,1280,3)
126
+ 125 100 (720,1280,3)
127
+ 126 100 (720,1280,3)
128
+ 127 100 (720,1280,3)
129
+ 128 100 (720,1280,3)
130
+ 129 100 (720,1280,3)
131
+ 130 100 (720,1280,3)
132
+ 131 100 (720,1280,3)
133
+ 132 100 (720,1280,3)
134
+ 133 100 (720,1280,3)
135
+ 134 100 (720,1280,3)
136
+ 135 100 (720,1280,3)
137
+ 136 100 (720,1280,3)
138
+ 137 100 (720,1280,3)
139
+ 138 100 (720,1280,3)
140
+ 139 100 (720,1280,3)
141
+ 140 100 (720,1280,3)
142
+ 141 100 (720,1280,3)
143
+ 142 100 (720,1280,3)
144
+ 143 100 (720,1280,3)
145
+ 144 100 (720,1280,3)
146
+ 145 100 (720,1280,3)
147
+ 146 100 (720,1280,3)
148
+ 147 100 (720,1280,3)
149
+ 148 100 (720,1280,3)
150
+ 149 100 (720,1280,3)
151
+ 150 100 (720,1280,3)
152
+ 151 100 (720,1280,3)
153
+ 152 100 (720,1280,3)
154
+ 153 100 (720,1280,3)
155
+ 154 100 (720,1280,3)
156
+ 155 100 (720,1280,3)
157
+ 156 100 (720,1280,3)
158
+ 157 100 (720,1280,3)
159
+ 158 100 (720,1280,3)
160
+ 159 100 (720,1280,3)
161
+ 160 100 (720,1280,3)
162
+ 161 100 (720,1280,3)
163
+ 162 100 (720,1280,3)
164
+ 163 100 (720,1280,3)
165
+ 164 100 (720,1280,3)
166
+ 165 100 (720,1280,3)
167
+ 166 100 (720,1280,3)
168
+ 167 100 (720,1280,3)
169
+ 168 100 (720,1280,3)
170
+ 169 100 (720,1280,3)
171
+ 170 100 (720,1280,3)
172
+ 171 100 (720,1280,3)
173
+ 172 100 (720,1280,3)
174
+ 173 100 (720,1280,3)
175
+ 174 100 (720,1280,3)
176
+ 175 100 (720,1280,3)
177
+ 176 100 (720,1280,3)
178
+ 177 100 (720,1280,3)
179
+ 178 100 (720,1280,3)
180
+ 179 100 (720,1280,3)
181
+ 180 100 (720,1280,3)
182
+ 181 100 (720,1280,3)
183
+ 182 100 (720,1280,3)
184
+ 183 100 (720,1280,3)
185
+ 184 100 (720,1280,3)
186
+ 185 100 (720,1280,3)
187
+ 186 100 (720,1280,3)
188
+ 187 100 (720,1280,3)
189
+ 188 100 (720,1280,3)
190
+ 189 100 (720,1280,3)
191
+ 190 100 (720,1280,3)
192
+ 191 100 (720,1280,3)
193
+ 192 100 (720,1280,3)
194
+ 193 100 (720,1280,3)
195
+ 194 100 (720,1280,3)
196
+ 195 100 (720,1280,3)
197
+ 196 100 (720,1280,3)
198
+ 197 100 (720,1280,3)
199
+ 198 100 (720,1280,3)
200
+ 199 100 (720,1280,3)
201
+ 200 100 (720,1280,3)
202
+ 201 100 (720,1280,3)
203
+ 202 100 (720,1280,3)
204
+ 203 100 (720,1280,3)
205
+ 204 100 (720,1280,3)
206
+ 205 100 (720,1280,3)
207
+ 206 100 (720,1280,3)
208
+ 207 100 (720,1280,3)
209
+ 208 100 (720,1280,3)
210
+ 209 100 (720,1280,3)
211
+ 210 100 (720,1280,3)
212
+ 211 100 (720,1280,3)
213
+ 212 100 (720,1280,3)
214
+ 213 100 (720,1280,3)
215
+ 214 100 (720,1280,3)
216
+ 215 100 (720,1280,3)
217
+ 216 100 (720,1280,3)
218
+ 217 100 (720,1280,3)
219
+ 218 100 (720,1280,3)
220
+ 219 100 (720,1280,3)
221
+ 220 100 (720,1280,3)
222
+ 221 100 (720,1280,3)
223
+ 222 100 (720,1280,3)
224
+ 223 100 (720,1280,3)
225
+ 224 100 (720,1280,3)
226
+ 225 100 (720,1280,3)
227
+ 226 100 (720,1280,3)
228
+ 227 100 (720,1280,3)
229
+ 228 100 (720,1280,3)
230
+ 229 100 (720,1280,3)
231
+ 230 100 (720,1280,3)
232
+ 231 100 (720,1280,3)
233
+ 232 100 (720,1280,3)
234
+ 233 100 (720,1280,3)
235
+ 234 100 (720,1280,3)
236
+ 235 100 (720,1280,3)
237
+ 236 100 (720,1280,3)
238
+ 237 100 (720,1280,3)
239
+ 238 100 (720,1280,3)
240
+ 239 100 (720,1280,3)
241
+ 240 100 (720,1280,3)
242
+ 241 100 (720,1280,3)
243
+ 242 100 (720,1280,3)
244
+ 243 100 (720,1280,3)
245
+ 244 100 (720,1280,3)
246
+ 245 100 (720,1280,3)
247
+ 246 100 (720,1280,3)
248
+ 247 100 (720,1280,3)
249
+ 248 100 (720,1280,3)
250
+ 249 100 (720,1280,3)
251
+ 250 100 (720,1280,3)
252
+ 251 100 (720,1280,3)
253
+ 252 100 (720,1280,3)
254
+ 253 100 (720,1280,3)
255
+ 254 100 (720,1280,3)
256
+ 255 100 (720,1280,3)
257
+ 256 100 (720,1280,3)
258
+ 257 100 (720,1280,3)
259
+ 258 100 (720,1280,3)
260
+ 259 100 (720,1280,3)
261
+ 260 100 (720,1280,3)
262
+ 261 100 (720,1280,3)
263
+ 262 100 (720,1280,3)
264
+ 263 100 (720,1280,3)
265
+ 264 100 (720,1280,3)
266
+ 265 100 (720,1280,3)
267
+ 266 100 (720,1280,3)
268
+ 267 100 (720,1280,3)
269
+ 268 100 (720,1280,3)
270
+ 269 100 (720,1280,3)
basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt ADDED
@@ -0,0 +1,4 @@
1
+ 240 100 (720,1280,3)
2
+ 241 100 (720,1280,3)
3
+ 246 100 (720,1280,3)
4
+ 257 100 (720,1280,3)
basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt ADDED
@@ -0,0 +1,30 @@
1
+ 240 100 (720,1280,3)
2
+ 241 100 (720,1280,3)
3
+ 242 100 (720,1280,3)
4
+ 243 100 (720,1280,3)
5
+ 244 100 (720,1280,3)
6
+ 245 100 (720,1280,3)
7
+ 246 100 (720,1280,3)
8
+ 247 100 (720,1280,3)
9
+ 248 100 (720,1280,3)
10
+ 249 100 (720,1280,3)
11
+ 250 100 (720,1280,3)
12
+ 251 100 (720,1280,3)
13
+ 252 100 (720,1280,3)
14
+ 253 100 (720,1280,3)
15
+ 254 100 (720,1280,3)
16
+ 255 100 (720,1280,3)
17
+ 256 100 (720,1280,3)
18
+ 257 100 (720,1280,3)
19
+ 258 100 (720,1280,3)
20
+ 259 100 (720,1280,3)
21
+ 260 100 (720,1280,3)
22
+ 261 100 (720,1280,3)
23
+ 262 100 (720,1280,3)
24
+ 263 100 (720,1280,3)
25
+ 264 100 (720,1280,3)
26
+ 265 100 (720,1280,3)
27
+ 266 100 (720,1280,3)
28
+ 267 100 (720,1280,3)
29
+ 268 100 (720,1280,3)
30
+ 269 100 (720,1280,3)
basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt ADDED
The diff for this file is too large to render. See raw diff
basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt ADDED
@@ -0,0 +1,1225 @@
1
+ 00001/0625 7 (256,448,3)
2
+ 00001/0632 7 (256,448,3)
3
+ 00001/0807 7 (256,448,3)
4
+ 00001/0832 7 (256,448,3)
5
+ 00001/0834 7 (256,448,3)
6
+ 00001/0836 7 (256,448,3)
7
+ 00002/0004 7 (256,448,3)
8
+ 00002/0112 7 (256,448,3)
9
+ 00002/0116 7 (256,448,3)
10
+ 00002/0123 7 (256,448,3)
11
+ 00002/0455 7 (256,448,3)
12
+ 00002/0602 7 (256,448,3)
13
+ 00002/0976 7 (256,448,3)
14
+ 00002/0980 7 (256,448,3)
15
+ 00002/0983 7 (256,448,3)
16
+ 00002/1000 7 (256,448,3)
17
+ 00003/0022 7 (256,448,3)
18
+ 00003/0031 7 (256,448,3)
19
+ 00003/0035 7 (256,448,3)
20
+ 00003/0041 7 (256,448,3)
21
+ 00003/0073 7 (256,448,3)
22
+ 00003/0107 7 (256,448,3)
23
+ 00003/0111 7 (256,448,3)
24
+ 00003/0114 7 (256,448,3)
25
+ 00003/0117 7 (256,448,3)
26
+ 00003/0121 7 (256,448,3)
27
+ 00003/0499 7 (256,448,3)
28
+ 00003/0501 7 (256,448,3)
29
+ 00003/0507 7 (256,448,3)
30
+ 00003/0510 7 (256,448,3)
31
+ 00003/0517 7 (256,448,3)
32
+ 00003/0522 7 (256,448,3)
33
+ 00003/0531 7 (256,448,3)
34
+ 00003/0533 7 (256,448,3)
35
+ 00003/0534 7 (256,448,3)
36
+ 00003/0682 7 (256,448,3)
37
+ 00003/0687 7 (256,448,3)
38
+ 00003/0715 7 (256,448,3)
39
+ 00003/0742 7 (256,448,3)
40
+ 00003/0751 7 (256,448,3)
41
+ 00003/0984 7 (256,448,3)
42
+ 00004/0042 7 (256,448,3)
43
+ 00004/0165 7 (256,448,3)
44
+ 00004/0321 7 (256,448,3)
45
+ 00004/0569 7 (256,448,3)
46
+ 00004/0572 7 (256,448,3)
47
+ 00004/0619 7 (256,448,3)
48
+ 00004/0776 7 (256,448,3)
49
+ 00004/0780 7 (256,448,3)
50
+ 00004/0825 7 (256,448,3)
51
+ 00004/0832 7 (256,448,3)
52
+ 00004/0853 7 (256,448,3)
53
+ 00004/0876 7 (256,448,3)
54
+ 00004/0888 7 (256,448,3)
55
+ 00005/0015 7 (256,448,3)
56
+ 00005/0021 7 (256,448,3)
57
+ 00005/0022 7 (256,448,3)
58
+ 00005/0024 7 (256,448,3)
59
+ 00005/0026 7 (256,448,3)
60
+ 00005/0394 7 (256,448,3)
61
+ 00005/0403 7 (256,448,3)
62
+ 00005/0531 7 (256,448,3)
63
+ 00005/0546 7 (256,448,3)
64
+ 00005/0554 7 (256,448,3)
65
+ 00005/0694 7 (256,448,3)
66
+ 00005/0700 7 (256,448,3)
67
+ 00005/0740 7 (256,448,3)
68
+ 00005/0826 7 (256,448,3)
69
+ 00005/0832 7 (256,448,3)
70
+ 00005/0834 7 (256,448,3)
71
+ 00005/0943 7 (256,448,3)
72
+ 00006/0184 7 (256,448,3)
73
+ 00006/0205 7 (256,448,3)
74
+ 00006/0206 7 (256,448,3)
75
+ 00006/0211 7 (256,448,3)
76
+ 00006/0271 7 (256,448,3)
77
+ 00006/0273 7 (256,448,3)
78
+ 00006/0277 7 (256,448,3)
79
+ 00006/0283 7 (256,448,3)
80
+ 00006/0287 7 (256,448,3)
81
+ 00006/0298 7 (256,448,3)
82
+ 00006/0310 7 (256,448,3)
83
+ 00006/0356 7 (256,448,3)
84
+ 00006/0357 7 (256,448,3)
85
+ 00006/0544 7 (256,448,3)
86
+ 00006/0565 7 (256,448,3)
87
+ 00006/0569 7 (256,448,3)
88
+ 00006/0573 7 (256,448,3)
89
+ 00006/0592 7 (256,448,3)
90
+ 00006/0613 7 (256,448,3)
91
+ 00006/0633 7 (256,448,3)
92
+ 00006/0637 7 (256,448,3)
93
+ 00006/0646 7 (256,448,3)
94
+ 00006/0649 7 (256,448,3)
95
+ 00006/0655 7 (256,448,3)
96
+ 00006/0658 7 (256,448,3)
97
+ 00006/0662 7 (256,448,3)
98
+ 00006/0666 7 (256,448,3)
99
+ 00006/0673 7 (256,448,3)
100
+ 00007/0248 7 (256,448,3)
101
+ 00007/0253 7 (256,448,3)
102
+ 00007/0430 7 (256,448,3)
103
+ 00007/0434 7 (256,448,3)
104
+ 00007/0436 7 (256,448,3)
105
+ 00007/0452 7 (256,448,3)
106
+ 00007/0464 7 (256,448,3)
107
+ 00007/0470 7 (256,448,3)
108
+ 00007/0472 7 (256,448,3)
109
+ 00007/0483 7 (256,448,3)
110
+ 00007/0484 7 (256,448,3)
111
+ 00007/0493 7 (256,448,3)
112
+ 00007/0508 7 (256,448,3)
113
+ 00007/0514 7 (256,448,3)
114
+ 00007/0697 7 (256,448,3)
115
+ 00007/0698 7 (256,448,3)
116
+ 00007/0744 7 (256,448,3)
117
+ 00007/0775 7 (256,448,3)
118
+ 00007/0786 7 (256,448,3)
119
+ 00007/0790 7 (256,448,3)
120
+ 00007/0800 7 (256,448,3)
121
+ 00007/0833 7 (256,448,3)
122
+ 00007/0867 7 (256,448,3)
123
+ 00007/0879 7 (256,448,3)
124
+ 00007/0899 7 (256,448,3)
125
+ 00008/0251 7 (256,448,3)
126
+ 00008/0322 7 (256,448,3)
127
+ 00008/0971 7 (256,448,3)
128
+ 00008/0976 7 (256,448,3)
129
+ 00009/0016 7 (256,448,3)
130
+ 00009/0036 7 (256,448,3)
131
+ 00009/0037 7 (256,448,3)
132
+ 00009/0609 7 (256,448,3)
133
+ 00009/0812 7 (256,448,3)
134
+ 00009/0821 7 (256,448,3)
135
+ 00009/0947 7 (256,448,3)
136
+ 00009/0952 7 (256,448,3)
137
+ 00009/0955 7 (256,448,3)
138
+ 00009/0970 7 (256,448,3)
139
+ 00010/0072 7 (256,448,3)
140
+ 00010/0074 7 (256,448,3)
141
+ 00010/0079 7 (256,448,3)
142
+ 00010/0085 7 (256,448,3)
143
+ 00010/0139 7 (256,448,3)
144
+ 00010/0140 7 (256,448,3)
145
+ 00010/0183 7 (256,448,3)
146
+ 00010/0200 7 (256,448,3)
147
+ 00010/0223 7 (256,448,3)
148
+ 00010/0305 7 (256,448,3)
149
+ 00010/0323 7 (256,448,3)
150
+ 00010/0338 7 (256,448,3)
151
+ 00010/0342 7 (256,448,3)
152
+ 00010/0350 7 (256,448,3)
153
+ 00010/0356 7 (256,448,3)
154
+ 00010/0362 7 (256,448,3)
155
+ 00010/0366 7 (256,448,3)
156
+ 00010/0375 7 (256,448,3)
157
+ 00010/0404 7 (256,448,3)
158
+ 00010/0407 7 (256,448,3)
159
+ 00010/0414 7 (256,448,3)
160
+ 00010/0418 7 (256,448,3)
161
+ 00010/0429 7 (256,448,3)
162
+ 00010/0557 7 (256,448,3)
163
+ 00010/0564 7 (256,448,3)
164
+ 00010/0733 7 (256,448,3)
165
+ 00010/0935 7 (256,448,3)
166
+ 00010/0939 7 (256,448,3)
167
+ 00010/0943 7 (256,448,3)
168
+ 00011/0242 7 (256,448,3)
169
+ 00011/0259 7 (256,448,3)
170
+ 00011/0263 7 (256,448,3)
171
+ 00011/0266 7 (256,448,3)
172
+ 00011/0278 7 (256,448,3)
173
+ 00011/0890 7 (256,448,3)
174
+ 00011/0894 7 (256,448,3)
175
+ 00011/0903 7 (256,448,3)
176
+ 00011/0906 7 (256,448,3)
177
+ 00011/0913 7 (256,448,3)
178
+ 00012/0011 7 (256,448,3)
179
+ 00012/0014 7 (256,448,3)
180
+ 00012/0126 7 (256,448,3)
181
+ 00012/0127 7 (256,448,3)
182
+ 00012/0526 7 (256,448,3)
183
+ 00012/0551 7 (256,448,3)
184
+ 00012/0896 7 (256,448,3)
185
+ 00012/0910 7 (256,448,3)
186
+ 00012/0915 7 (256,448,3)
187
+ 00013/0167 7 (256,448,3)
188
+ 00013/0794 7 (256,448,3)
189
+ 00013/0807 7 (256,448,3)
190
+ 00013/0846 7 (256,448,3)
191
+ 00013/0882 7 (256,448,3)
192
+ 00013/0889 7 (256,448,3)
193
+ 00013/0910 7 (256,448,3)
194
+ 00013/0913 7 (256,448,3)
195
+ 00013/0924 7 (256,448,3)
196
+ 00013/0931 7 (256,448,3)
197
+ 00013/0944 7 (256,448,3)
198
+ 00013/0955 7 (256,448,3)
199
+ 00013/0962 7 (256,448,3)
200
+ 00013/0969 7 (256,448,3)
201
+ 00014/0012 7 (256,448,3)
202
+ 00014/0025 7 (256,448,3)
203
+ 00014/0473 7 (256,448,3)
204
+ 00014/0499 7 (256,448,3)
205
+ 00014/0524 7 (256,448,3)
206
+ 00014/0739 7 (256,448,3)
207
+ 00014/0753 7 (256,448,3)
208
+ 00014/0771 7 (256,448,3)
209
+ 00014/0832 7 (256,448,3)
210
+ 00014/0836 7 (256,448,3)
211
+ 00014/0838 7 (256,448,3)
212
+ 00014/0839 7 (256,448,3)
213
+ 00014/0843 7 (256,448,3)
214
+ 00014/0846 7 (256,448,3)
215
+ 00014/0849 7 (256,448,3)
216
+ 00014/0859 7 (256,448,3)
217
+ 00014/0880 7 (256,448,3)
218
+ 00014/0906 7 (256,448,3)
219
+ 00015/0030 7 (256,448,3)
220
+ 00015/0067 7 (256,448,3)
221
+ 00015/0084 7 (256,448,3)
222
+ 00015/0190 7 (256,448,3)
223
+ 00015/0575 7 (256,448,3)
224
+ 00015/0784 7 (256,448,3)
225
+ 00015/0855 7 (256,448,3)
226
+ 00015/0904 7 (256,448,3)
227
+ 00015/0914 7 (256,448,3)
228
+ 00015/0936 7 (256,448,3)
229
+ 00015/0939 7 (256,448,3)
230
+ 00015/0943 7 (256,448,3)
231
+ 00015/0957 7 (256,448,3)
232
+ 00016/0131 7 (256,448,3)
233
+ 00016/0173 7 (256,448,3)
234
+ 00016/0320 7 (256,448,3)
235
+ 00016/0328 7 (256,448,3)
236
+ 00016/0334 7 (256,448,3)
237
+ 00016/0338 7 (256,448,3)
238
+ 00016/0339 7 (256,448,3)
239
+ 00016/0345 7 (256,448,3)
240
+ 00016/0365 7 (256,448,3)
241
+ 00016/0584 7 (256,448,3)
242
+ 00016/0634 7 (256,448,3)
243
+ 00017/0342 7 (256,448,3)
244
+ 00017/0346 7 (256,448,3)
245
+ 00017/0350 7 (256,448,3)
246
+ 00017/0766 7 (256,448,3)
247
+ 00017/0786 7 (256,448,3)
248
+ 00017/0911 7 (256,448,3)
249
+ 00017/0914 7 (256,448,3)
250
+ 00018/0217 7 (256,448,3)
251
+ 00018/0258 7 (256,448,3)
252
+ 00018/0307 7 (256,448,3)
253
+ 00018/0480 7 (256,448,3)
254
+ 00018/0491 7 (256,448,3)
255
+ 00018/0994 7 (256,448,3)
256
+ 00018/0995 7 (256,448,3)
257
+ 00018/0997 7 (256,448,3)
258
+ 00018/1000 7 (256,448,3)
259
+ 00019/0007 7 (256,448,3)
260
+ 00019/0016 7 (256,448,3)
261
+ 00019/0026 7 (256,448,3)
262
+ 00019/0030 7 (256,448,3)
263
+ 00019/0086 7 (256,448,3)
264
+ 00019/0089 7 (256,448,3)
265
+ 00019/0111 7 (256,448,3)
266
+ 00019/0285 7 (256,448,3)
267
+ 00019/0415 7 (256,448,3)
268
+ 00019/0434 7 (256,448,3)
269
+ 00019/0437 7 (256,448,3)
270
+ 00019/0568 7 (256,448,3)
271
+ 00019/0570 7 (256,448,3)
272
+ 00019/0591 7 (256,448,3)
273
+ 00019/0596 7 (256,448,3)
274
+ 00019/0603 7 (256,448,3)
275
+ 00019/0607 7 (256,448,3)
276
+ 00019/0637 7 (256,448,3)
277
+ 00019/0644 7 (256,448,3)
278
+ 00019/0647 7 (256,448,3)
279
+ 00019/0787 7 (256,448,3)
280
+ 00019/0993 7 (256,448,3)
281
+ 00019/0998 7 (256,448,3)
282
+ 00021/0232 7 (256,448,3)
283
+ 00021/0255 7 (256,448,3)
284
+ 00021/0646 7 (256,448,3)
285
+ 00021/0653 7 (256,448,3)
286
+ 00021/0657 7 (256,448,3)
287
+ 00021/0668 7 (256,448,3)
288
+ 00021/0672 7 (256,448,3)
289
+ 00021/0725 7 (256,448,3)
290
+ 00021/0750 7 (256,448,3)
291
+ 00021/0764 7 (256,448,3)
292
+ 00021/0821 7 (256,448,3)
293
+ 00022/0192 7 (256,448,3)
294
+ 00022/0391 7 (256,448,3)
295
+ 00022/0514 7 (256,448,3)
296
+ 00022/0567 7 (256,448,3)
297
+ 00022/0674 7 (256,448,3)
298
+ 00022/0686 7 (256,448,3)
299
+ 00022/0700 7 (256,448,3)
300
+ 00023/0020 7 (256,448,3)
301
+ 00023/0024 7 (256,448,3)
302
+ 00023/0025 7 (256,448,3)
303
+ 00023/0042 7 (256,448,3)
304
+ 00023/0050 7 (256,448,3)
305
+ 00023/0094 7 (256,448,3)
306
+ 00023/0107 7 (256,448,3)
307
+ 00023/0635 7 (256,448,3)
308
+ 00023/0698 7 (256,448,3)
309
+ 00023/0774 7 (256,448,3)
310
+ 00023/0795 7 (256,448,3)
311
+ 00023/0821 7 (256,448,3)
312
+ 00023/0839 7 (256,448,3)
313
+ 00023/0846 7 (256,448,3)
314
+ 00023/0869 7 (256,448,3)
315
+ 00023/0879 7 (256,448,3)
316
+ 00023/0887 7 (256,448,3)
317
+ 00023/0899 7 (256,448,3)
318
+ 00023/0910 7 (256,448,3)
319
+ 00023/0920 7 (256,448,3)
320
+ 00023/0929 7 (256,448,3)
321
+ 00023/0941 7 (256,448,3)
322
+ 00023/0942 7 (256,448,3)
323
+ 00023/0952 7 (256,448,3)
324
+ 00024/0066 7 (256,448,3)
325
+ 00024/0072 7 (256,448,3)
326
+ 00024/0080 7 (256,448,3)
327
+ 00024/0093 7 (256,448,3)
328
+ 00024/0107 7 (256,448,3)
329
+ 00024/0262 7 (256,448,3)
330
+ 00024/0283 7 (256,448,3)
331
+ 00024/0294 7 (256,448,3)
332
+ 00024/0296 7 (256,448,3)
333
+ 00024/0304 7 (256,448,3)
334
+ 00024/0315 7 (256,448,3)
335
+ 00024/0322 7 (256,448,3)
336
+ 00024/0648 7 (256,448,3)
337
+ 00024/0738 7 (256,448,3)
338
+ 00024/0743 7 (256,448,3)
339
+ 00025/0542 7 (256,448,3)
340
+ 00025/0769 7 (256,448,3)
341
+ 00025/0984 7 (256,448,3)
342
+ 00025/0985 7 (256,448,3)
343
+ 00025/0989 7 (256,448,3)
344
+ 00025/0991 7 (256,448,3)
345
+ 00026/0009 7 (256,448,3)
346
+ 00026/0013 7 (256,448,3)
347
+ 00026/0020 7 (256,448,3)
348
+ 00026/0021 7 (256,448,3)
349
+ 00026/0025 7 (256,448,3)
350
+ 00026/0135 7 (256,448,3)
351
+ 00026/0200 7 (256,448,3)
352
+ 00026/0297 7 (256,448,3)
353
+ 00026/0306 7 (256,448,3)
354
+ 00026/0444 7 (256,448,3)
355
+ 00026/0450 7 (256,448,3)
356
+ 00026/0453 7 (256,448,3)
357
+ 00026/0464 7 (256,448,3)
358
+ 00026/0486 7 (256,448,3)
359
+ 00026/0773 7 (256,448,3)
360
+ 00026/0785 7 (256,448,3)
361
+ 00026/0836 7 (256,448,3)
362
+ 00026/0838 7 (256,448,3)
363
+ 00026/0848 7 (256,448,3)
364
+ 00026/0885 7 (256,448,3)
365
+ 00026/0893 7 (256,448,3)
366
+ 00026/0939 7 (256,448,3)
367
+ 00026/0942 7 (256,448,3)
368
+ 00027/0092 7 (256,448,3)
369
+ 00027/0112 7 (256,448,3)
370
+ 00027/0115 7 (256,448,3)
371
+ 00027/0143 7 (256,448,3)
372
+ 00027/0175 7 (256,448,3)
373
+ 00027/0179 7 (256,448,3)
374
+ 00027/0183 7 (256,448,3)
375
+ 00027/0197 7 (256,448,3)
376
+ 00027/0199 7 (256,448,3)
377
+ 00027/0300 7 (256,448,3)
378
+ 00028/0015 7 (256,448,3)
379
+ 00028/0032 7 (256,448,3)
380
+ 00028/0048 7 (256,448,3)
381
+ 00028/0068 7 (256,448,3)
382
+ 00028/0219 7 (256,448,3)
383
+ 00028/0606 7 (256,448,3)
384
+ 00028/0626 7 (256,448,3)
385
+ 00028/0748 7 (256,448,3)
386
+ 00028/0764 7 (256,448,3)
387
+ 00028/0772 7 (256,448,3)
388
+ 00028/0780 7 (256,448,3)
389
+ 00028/0926 7 (256,448,3)
390
+ 00028/0947 7 (256,448,3)
391
+ 00028/0962 7 (256,448,3)
392
+ 00029/0085 7 (256,448,3)
393
+ 00029/0281 7 (256,448,3)
394
+ 00029/0284 7 (256,448,3)
395
+ 00029/0288 7 (256,448,3)
396
+ 00029/0294 7 (256,448,3)
397
+ 00029/0364 7 (256,448,3)
398
+ 00029/0369 7 (256,448,3)
399
+ 00029/0421 7 (256,448,3)
400
+ 00029/0425 7 (256,448,3)
401
+ 00029/0550 7 (256,448,3)
402
+ 00030/0014 7 (256,448,3)
403
+ 00030/0101 7 (256,448,3)
404
+ 00030/0143 7 (256,448,3)
405
+ 00030/0351 7 (256,448,3)
406
+ 00030/0356 7 (256,448,3)
407
+ 00030/0371 7 (256,448,3)
408
+ 00030/0484 7 (256,448,3)
409
+ 00030/0492 7 (256,448,3)
410
+ 00030/0503 7 (256,448,3)
411
+ 00030/0682 7 (256,448,3)
412
+ 00030/0696 7 (256,448,3)
413
+ 00030/0735 7 (256,448,3)
414
+ 00030/0737 7 (256,448,3)
415
+ 00030/0868 7 (256,448,3)
416
+ 00031/0161 7 (256,448,3)
417
+ 00031/0180 7 (256,448,3)
418
+ 00031/0194 7 (256,448,3)
419
+ 00031/0253 7 (256,448,3)
420
+ 00031/0293 7 (256,448,3)
421
+ 00031/0466 7 (256,448,3)
422
+ 00031/0477 7 (256,448,3)
423
+ 00031/0549 7 (256,448,3)
424
+ 00031/0600 7 (256,448,3)
425
+ 00031/0617 7 (256,448,3)
426
+ 00031/0649 7 (256,448,3)
427
+ 00032/0015 7 (256,448,3)
428
+ 00032/0020 7 (256,448,3)
429
+ 00032/0023 7 (256,448,3)
430
+ 00032/0048 7 (256,448,3)
431
+ 00032/0056 7 (256,448,3)
432
+ 00032/0872 7 (256,448,3)
433
+ 00033/0069 7 (256,448,3)
434
+ 00033/0073 7 (256,448,3)
435
+ 00033/0078 7 (256,448,3)
436
+ 00033/0079 7 (256,448,3)
437
+ 00033/0086 7 (256,448,3)
438
+ 00033/0088 7 (256,448,3)
439
+ 00033/0091 7 (256,448,3)
440
+ 00033/0096 7 (256,448,3)
441
+ 00033/0607 7 (256,448,3)
442
+ 00033/0613 7 (256,448,3)
443
+ 00033/0616 7 (256,448,3)
444
+ 00033/0619 7 (256,448,3)
445
+ 00033/0626 7 (256,448,3)
446
+ 00033/0628 7 (256,448,3)
447
+ 00033/0637 7 (256,448,3)
448
+ 00033/0686 7 (256,448,3)
449
+ 00033/0842 7 (256,448,3)
450
+ 00034/0261 7 (256,448,3)
451
+ 00034/0265 7 (256,448,3)
452
+ 00034/0269 7 (256,448,3)
453
+ 00034/0275 7 (256,448,3)
454
+ 00034/0286 7 (256,448,3)
455
+ 00034/0294 7 (256,448,3)
456
+ 00034/0431 7 (256,448,3)
457
+ 00034/0577 7 (256,448,3)
458
+ 00034/0685 7 (256,448,3)
459
+ 00034/0687 7 (256,448,3)
460
+ 00034/0703 7 (256,448,3)
461
+ 00034/0715 7 (256,448,3)
462
+ 00034/0935 7 (256,448,3)
463
+ 00034/0943 7 (256,448,3)
464
+ 00034/0963 7 (256,448,3)
465
+ 00034/0979 7 (256,448,3)
466
+ 00034/0990 7 (256,448,3)
467
+ 00035/0129 7 (256,448,3)
468
+ 00035/0153 7 (256,448,3)
469
+ 00035/0156 7 (256,448,3)
470
+ 00035/0474 7 (256,448,3)
471
+ 00035/0507 7 (256,448,3)
472
+ 00035/0532 7 (256,448,3)
473
+ 00035/0560 7 (256,448,3)
474
+ 00035/0572 7 (256,448,3)
475
+ 00035/0587 7 (256,448,3)
476
+ 00035/0588 7 (256,448,3)
477
+ 00035/0640 7 (256,448,3)
478
+ 00035/0654 7 (256,448,3)
479
+ 00035/0655 7 (256,448,3)
480
+ 00035/0737 7 (256,448,3)
481
+ 00035/0843 7 (256,448,3)
482
+ 00035/0932 7 (256,448,3)
483
+ 00035/0957 7 (256,448,3)
484
+ 00036/0029 7 (256,448,3)
485
+ 00036/0266 7 (256,448,3)
486
+ 00036/0276 7 (256,448,3)
487
+ 00036/0310 7 (256,448,3)
488
+ 00036/0314 7 (256,448,3)
489
+ 00036/0320 7 (256,448,3)
490
+ 00036/0333 7 (256,448,3)
491
+ 00036/0348 7 (256,448,3)
492
+ 00036/0357 7 (256,448,3)
493
+ 00036/0360 7 (256,448,3)
494
+ 00036/0368 7 (256,448,3)
495
+ 00036/0371 7 (256,448,3)
496
+ 00036/0378 7 (256,448,3)
497
+ 00036/0391 7 (256,448,3)
498
+ 00036/0440 7 (256,448,3)
499
+ 00036/0731 7 (256,448,3)
500
+ 00036/0733 7 (256,448,3)
501
+ 00036/0741 7 (256,448,3)
502
+ 00036/0743 7 (256,448,3)
503
+ 00036/0927 7 (256,448,3)
504
+ 00036/0931 7 (256,448,3)
505
+ 00036/0933 7 (256,448,3)
506
+ 00036/0938 7 (256,448,3)
507
+ 00036/0944 7 (256,448,3)
508
+ 00036/0946 7 (256,448,3)
509
+ 00036/0951 7 (256,448,3)
510
+ 00036/0953 7 (256,448,3)
511
+ 00036/0963 7 (256,448,3)
512
+ 00036/0964 7 (256,448,3)
513
+ 00036/0981 7 (256,448,3)
514
+ 00036/0991 7 (256,448,3)
515
+ 00037/0072 7 (256,448,3)
516
+ 00037/0079 7 (256,448,3)
517
+ 00037/0132 7 (256,448,3)
518
+ 00037/0135 7 (256,448,3)
519
+ 00037/0137 7 (256,448,3)
520
+ 00037/0141 7 (256,448,3)
521
+ 00037/0229 7 (256,448,3)
522
+ 00037/0234 7 (256,448,3)
523
+ 00037/0239 7 (256,448,3)
524
+ 00037/0242 7 (256,448,3)
525
+ 00037/0254 7 (256,448,3)
526
+ 00037/0269 7 (256,448,3)
527
+ 00037/0276 7 (256,448,3)
528
+ 00037/0279 7 (256,448,3)
529
+ 00037/0286 7 (256,448,3)
530
+ 00037/0345 7 (256,448,3)
531
+ 00037/0449 7 (256,448,3)
532
+ 00037/0450 7 (256,448,3)
533
+ 00037/0820 7 (256,448,3)
534
+ 00037/0824 7 (256,448,3)
535
+ 00037/0859 7 (256,448,3)
536
+ 00037/0899 7 (256,448,3)
537
+ 00037/0906 7 (256,448,3)
538
+ 00038/0535 7 (256,448,3)
539
+ 00038/0572 7 (256,448,3)
540
+ 00038/0675 7 (256,448,3)
541
+ 00038/0731 7 (256,448,3)
542
+ 00038/0732 7 (256,448,3)
543
+ 00038/0744 7 (256,448,3)
544
+ 00038/0755 7 (256,448,3)
545
+ 00039/0002 7 (256,448,3)
546
+ 00039/0013 7 (256,448,3)
547
+ 00039/0247 7 (256,448,3)
548
+ 00039/0489 7 (256,448,3)
549
+ 00039/0504 7 (256,448,3)
550
+ 00039/0558 7 (256,448,3)
551
+ 00039/0686 7 (256,448,3)
552
+ 00039/0727 7 (256,448,3)
553
+ 00039/0769 7 (256,448,3)
554
+ 00040/0081 7 (256,448,3)
555
+ 00040/0082 7 (256,448,3)
556
+ 00040/0402 7 (256,448,3)
557
+ 00040/0407 7 (256,448,3)
558
+ 00040/0408 7 (256,448,3)
559
+ 00040/0410 7 (256,448,3)
560
+ 00040/0411 7 (256,448,3)
561
+ 00040/0412 7 (256,448,3)
562
+ 00040/0413 7 (256,448,3)
563
+ 00040/0415 7 (256,448,3)
564
+ 00040/0421 7 (256,448,3)
565
+ 00040/0422 7 (256,448,3)
566
+ 00040/0426 7 (256,448,3)
567
+ 00040/0438 7 (256,448,3)
568
+ 00040/0439 7 (256,448,3)
569
+ 00040/0440 7 (256,448,3)
570
+ 00040/0443 7 (256,448,3)
571
+ 00040/0457 7 (256,448,3)
572
+ 00040/0459 7 (256,448,3)
573
+ 00040/0725 7 (256,448,3)
574
+ 00040/0727 7 (256,448,3)
575
+ 00040/0936 7 (256,448,3)
576
+ 00040/0959 7 (256,448,3)
577
+ 00040/0964 7 (256,448,3)
578
+ 00040/0968 7 (256,448,3)
579
+ 00040/0974 7 (256,448,3)
580
+ 00040/0978 7 (256,448,3)
581
+ 00040/0979 7 (256,448,3)
582
+ 00040/0989 7 (256,448,3)
583
+ 00040/0993 7 (256,448,3)
584
+ 00040/0994 7 (256,448,3)
585
+ 00040/0997 7 (256,448,3)
586
+ 00041/0001 7 (256,448,3)
587
+ 00041/0007 7 (256,448,3)
588
+ 00041/0019 7 (256,448,3)
589
+ 00041/0040 7 (256,448,3)
590
+ 00041/0350 7 (256,448,3)
591
+ 00041/0357 7 (256,448,3)
592
+ 00041/0393 7 (256,448,3)
593
+ 00041/0890 7 (256,448,3)
594
+ 00041/0909 7 (256,448,3)
595
+ 00041/0915 7 (256,448,3)
596
+ 00041/0933 7 (256,448,3)
597
+ 00042/0017 7 (256,448,3)
598
+ 00042/0332 7 (256,448,3)
599
+ 00042/0346 7 (256,448,3)
600
+ 00042/0350 7 (256,448,3)
601
+ 00042/0356 7 (256,448,3)
602
+ 00042/0382 7 (256,448,3)
603
+ 00042/0389 7 (256,448,3)
604
+ 00042/0539 7 (256,448,3)
605
+ 00042/0546 7 (256,448,3)
606
+ 00042/0550 7 (256,448,3)
607
+ 00042/0553 7 (256,448,3)
608
+ 00042/0555 7 (256,448,3)
609
+ 00042/0560 7 (256,448,3)
610
+ 00042/0570 7 (256,448,3)
611
+ 00043/0119 7 (256,448,3)
612
+ 00043/0122 7 (256,448,3)
613
+ 00043/0168 7 (256,448,3)
614
+ 00043/0274 7 (256,448,3)
615
+ 00043/0304 7 (256,448,3)
616
+ 00043/0731 7 (256,448,3)
617
+ 00043/0735 7 (256,448,3)
618
+ 00043/0739 7 (256,448,3)
619
+ 00043/0740 7 (256,448,3)
620
+ 00044/0212 7 (256,448,3)
621
+ 00044/0432 7 (256,448,3)
622
+ 00044/0934 7 (256,448,3)
623
+ 00044/0940 7 (256,448,3)
624
+ 00044/0987 7 (256,448,3)
625
+ 00045/0004 7 (256,448,3)
626
+ 00045/0009 7 (256,448,3)
627
+ 00045/0011 7 (256,448,3)
628
+ 00045/0019 7 (256,448,3)
629
+ 00045/0023 7 (256,448,3)
630
+ 00045/0289 7 (256,448,3)
631
+ 00045/0760 7 (256,448,3)
632
+ 00045/0779 7 (256,448,3)
633
+ 00045/0816 7 (256,448,3)
634
+ 00045/0820 7 (256,448,3)
635
+ 00046/0132 7 (256,448,3)
636
+ 00046/0350 7 (256,448,3)
637
+ 00046/0356 7 (256,448,3)
638
+ 00046/0357 7 (256,448,3)
639
+ 00046/0379 7 (256,448,3)
640
+ 00046/0410 7 (256,448,3)
641
+ 00046/0412 7 (256,448,3)
642
+ 00046/0481 7 (256,448,3)
643
+ 00046/0497 7 (256,448,3)
644
+ 00046/0510 7 (256,448,3)
645
+ 00046/0515 7 (256,448,3)
646
+ 00046/0529 7 (256,448,3)
647
+ 00046/0544 7 (256,448,3)
648
+ 00046/0545 7 (256,448,3)
649
+ 00046/0552 7 (256,448,3)
650
+ 00046/0559 7 (256,448,3)
651
+ 00046/0589 7 (256,448,3)
652
+ 00046/0642 7 (256,448,3)
653
+ 00046/0724 7 (256,448,3)
654
+ 00046/0758 7 (256,448,3)
655
+ 00046/0930 7 (256,448,3)
656
+ 00046/0953 7 (256,448,3)
657
+ 00047/0013 7 (256,448,3)
658
+ 00047/0014 7 (256,448,3)
659
+ 00047/0017 7 (256,448,3)
660
+ 00047/0076 7 (256,448,3)
661
+ 00047/0151 7 (256,448,3)
662
+ 00047/0797 7 (256,448,3)
663
+ 00048/0014 7 (256,448,3)
664
+ 00048/0021 7 (256,448,3)
665
+ 00048/0026 7 (256,448,3)
666
+ 00048/0030 7 (256,448,3)
667
+ 00048/0039 7 (256,448,3)
668
+ 00048/0045 7 (256,448,3)
669
+ 00048/0049 7 (256,448,3)
670
+ 00048/0145 7 (256,448,3)
671
+ 00048/0188 7 (256,448,3)
672
+ 00048/0302 7 (256,448,3)
673
+ 00048/0361 7 (256,448,3)
674
+ 00048/0664 7 (256,448,3)
675
+ 00048/0672 7 (256,448,3)
676
+ 00048/0681 7 (256,448,3)
677
+ 00048/0689 7 (256,448,3)
678
+ 00048/0690 7 (256,448,3)
679
+ 00048/0691 7 (256,448,3)
680
+ 00048/0711 7 (256,448,3)
681
+ 00049/0085 7 (256,448,3)
682
+ 00049/0810 7 (256,448,3)
683
+ 00049/0858 7 (256,448,3)
684
+ 00049/0865 7 (256,448,3)
685
+ 00049/0871 7 (256,448,3)
686
+ 00049/0903 7 (256,448,3)
687
+ 00049/0928 7 (256,448,3)
688
+ 00050/0092 7 (256,448,3)
689
+ 00050/0101 7 (256,448,3)
690
+ 00050/0108 7 (256,448,3)
691
+ 00050/0112 7 (256,448,3)
692
+ 00050/0120 7 (256,448,3)
693
+ 00050/0128 7 (256,448,3)
694
+ 00050/0383 7 (256,448,3)
695
+ 00050/0395 7 (256,448,3)
696
+ 00050/0405 7 (256,448,3)
697
+ 00050/0632 7 (256,448,3)
698
+ 00050/0648 7 (256,448,3)
699
+ 00050/0649 7 (256,448,3)
700
+ 00050/0659 7 (256,448,3)
701
+ 00050/0699 7 (256,448,3)
702
+ 00050/0708 7 (256,448,3)
703
+ 00050/0716 7 (256,448,3)
704
+ 00050/0758 7 (256,448,3)
705
+ 00050/0761 7 (256,448,3)
706
+ 00051/0572 7 (256,448,3)
707
+ 00052/0163 7 (256,448,3)
708
+ 00052/0242 7 (256,448,3)
709
+ 00052/0260 7 (256,448,3)
710
+ 00052/0322 7 (256,448,3)
711
+ 00052/0333 7 (256,448,3)
712
+ 00052/0806 7 (256,448,3)
713
+ 00052/0813 7 (256,448,3)
714
+ 00052/0821 7 (256,448,3)
715
+ 00052/0830 7 (256,448,3)
716
+ 00052/0914 7 (256,448,3)
717
+ 00052/0923 7 (256,448,3)
718
+ 00052/0959 7 (256,448,3)
719
+ 00053/0288 7 (256,448,3)
720
+ 00053/0290 7 (256,448,3)
721
+ 00053/0323 7 (256,448,3)
722
+ 00053/0337 7 (256,448,3)
723
+ 00053/0340 7 (256,448,3)
724
+ 00053/0437 7 (256,448,3)
725
+ 00053/0595 7 (256,448,3)
726
+ 00053/0739 7 (256,448,3)
727
+ 00053/0761 7 (256,448,3)
728
+ 00054/0014 7 (256,448,3)
729
+ 00054/0017 7 (256,448,3)
730
+ 00054/0178 7 (256,448,3)
731
+ 00054/0183 7 (256,448,3)
732
+ 00054/0196 7 (256,448,3)
733
+ 00054/0205 7 (256,448,3)
734
+ 00054/0214 7 (256,448,3)
735
+ 00054/0289 7 (256,448,3)
736
+ 00054/0453 7 (256,448,3)
737
+ 00054/0498 7 (256,448,3)
738
+ 00054/0502 7 (256,448,3)
739
+ 00054/0514 7 (256,448,3)
740
+ 00054/0773 7 (256,448,3)
741
+ 00055/0001 7 (256,448,3)
742
+ 00055/0115 7 (256,448,3)
743
+ 00055/0118 7 (256,448,3)
744
+ 00055/0171 7 (256,448,3)
745
+ 00055/0214 7 (256,448,3)
746
+ 00055/0354 7 (256,448,3)
747
+ 00055/0449 7 (256,448,3)
748
+ 00055/0473 7 (256,448,3)
749
+ 00055/0649 7 (256,448,3)
750
+ 00055/0800 7 (256,448,3)
751
+ 00055/0803 7 (256,448,3)
752
+ 00055/0990 7 (256,448,3)
753
+ 00056/0041 7 (256,448,3)
754
+ 00056/0120 7 (256,448,3)
755
+ 00056/0293 7 (256,448,3)
756
+ 00056/0357 7 (256,448,3)
757
+ 00056/0506 7 (256,448,3)
758
+ 00056/0561 7 (256,448,3)
759
+ 00056/0567 7 (256,448,3)
760
+ 00056/0575 7 (256,448,3)
761
+ 00057/0175 7 (256,448,3)
762
+ 00057/0495 7 (256,448,3)
763
+ 00057/0498 7 (256,448,3)
764
+ 00057/0506 7 (256,448,3)
765
+ 00057/0612 7 (256,448,3)
766
+ 00057/0620 7 (256,448,3)
767
+ 00057/0623 7 (256,448,3)
768
+ 00057/0635 7 (256,448,3)
769
+ 00057/0773 7 (256,448,3)
770
+ 00057/0778 7 (256,448,3)
771
+ 00057/0867 7 (256,448,3)
772
+ 00057/0976 7 (256,448,3)
773
+ 00057/0980 7 (256,448,3)
774
+ 00057/0985 7 (256,448,3)
775
+ 00057/0992 7 (256,448,3)
776
+ 00058/0009 7 (256,448,3)
777
+ 00058/0076 7 (256,448,3)
778
+ 00058/0078 7 (256,448,3)
779
+ 00058/0279 7 (256,448,3)
780
+ 00058/0283 7 (256,448,3)
781
+ 00058/0286 7 (256,448,3)
782
+ 00058/0350 7 (256,448,3)
783
+ 00058/0380 7 (256,448,3)
784
+ 00061/0132 7 (256,448,3)
785
+ 00061/0141 7 (256,448,3)
786
+ 00061/0156 7 (256,448,3)
787
+ 00061/0159 7 (256,448,3)
788
+ 00061/0168 7 (256,448,3)
789
+ 00061/0170 7 (256,448,3)
790
+ 00061/0186 7 (256,448,3)
791
+ 00061/0219 7 (256,448,3)
792
+ 00061/0227 7 (256,448,3)
793
+ 00061/0238 7 (256,448,3)
794
+ 00061/0256 7 (256,448,3)
795
+ 00061/0303 7 (256,448,3)
796
+ 00061/0312 7 (256,448,3)
797
+ 00061/0313 7 (256,448,3)
798
+ 00061/0325 7 (256,448,3)
799
+ 00061/0367 7 (256,448,3)
800
+ 00061/0369 7 (256,448,3)
801
+ 00061/0387 7 (256,448,3)
802
+ 00061/0396 7 (256,448,3)
803
+ 00061/0486 7 (256,448,3)
804
+ 00061/0895 7 (256,448,3)
805
+ 00061/0897 7 (256,448,3)
806
+ 00062/0846 7 (256,448,3)
807
+ 00063/0156 7 (256,448,3)
808
+ 00063/0184 7 (256,448,3)
809
+ 00063/0191 7 (256,448,3)
810
+ 00063/0334 7 (256,448,3)
811
+ 00063/0350 7 (256,448,3)
812
+ 00063/0499 7 (256,448,3)
813
+ 00063/0878 7 (256,448,3)
814
+ 00064/0004 7 (256,448,3)
815
+ 00064/0264 7 (256,448,3)
816
+ 00064/0735 7 (256,448,3)
817
+ 00064/0738 7 (256,448,3)
818
+ 00065/0105 7 (256,448,3)
819
+ 00065/0169 7 (256,448,3)
820
+ 00065/0305 7 (256,448,3)
821
+ 00065/0324 7 (256,448,3)
822
+ 00065/0353 7 (256,448,3)
823
+ 00065/0520 7 (256,448,3)
824
+ 00065/0533 7 (256,448,3)
825
+ 00065/0545 7 (256,448,3)
826
+ 00065/0551 7 (256,448,3)
827
+ 00065/0568 7 (256,448,3)
828
+ 00065/0603 7 (256,448,3)
829
+ 00065/0884 7 (256,448,3)
830
+ 00065/0988 7 (256,448,3)
831
+ 00066/0002 7 (256,448,3)
832
+ 00066/0011 7 (256,448,3)
833
+ 00066/0031 7 (256,448,3)
834
+ 00066/0037 7 (256,448,3)
835
+ 00066/0136 7 (256,448,3)
836
+ 00066/0137 7 (256,448,3)
837
+ 00066/0150 7 (256,448,3)
838
+ 00066/0166 7 (256,448,3)
839
+ 00066/0178 7 (256,448,3)
840
+ 00066/0357 7 (256,448,3)
841
+ 00066/0428 7 (256,448,3)
842
+ 00066/0483 7 (256,448,3)
843
+ 00066/0600 7 (256,448,3)
844
+ 00066/0863 7 (256,448,3)
845
+ 00066/0873 7 (256,448,3)
846
+ 00066/0875 7 (256,448,3)
847
+ 00066/0899 7 (256,448,3)
848
+ 00067/0020 7 (256,448,3)
849
+ 00067/0025 7 (256,448,3)
850
+ 00067/0132 7 (256,448,3)
851
+ 00067/0492 7 (256,448,3)
852
+ 00067/0726 7 (256,448,3)
853
+ 00067/0734 7 (256,448,3)
854
+ 00067/0744 7 (256,448,3)
855
+ 00067/0754 7 (256,448,3)
856
+ 00067/0779 7 (256,448,3)
857
+ 00068/0078 7 (256,448,3)
858
+ 00068/0083 7 (256,448,3)
859
+ 00068/0113 7 (256,448,3)
860
+ 00068/0117 7 (256,448,3)
861
+ 00068/0121 7 (256,448,3)
862
+ 00068/0206 7 (256,448,3)
863
+ 00068/0261 7 (256,448,3)
864
+ 00068/0321 7 (256,448,3)
865
+ 00068/0354 7 (256,448,3)
866
+ 00068/0380 7 (256,448,3)
867
+ 00068/0419 7 (256,448,3)
868
+ 00068/0547 7 (256,448,3)
869
+ 00068/0561 7 (256,448,3)
870
+ 00068/0565 7 (256,448,3)
871
+ 00068/0583 7 (256,448,3)
872
+ 00068/0599 7 (256,448,3)
873
+ 00068/0739 7 (256,448,3)
874
+ 00068/0743 7 (256,448,3)
875
+ 00068/0754 7 (256,448,3)
876
+ 00068/0812 7 (256,448,3)
877
+ 00069/0178 7 (256,448,3)
878
+ 00070/0025 7 (256,448,3)
879
+ 00070/0030 7 (256,448,3)
880
+ 00070/0036 7 (256,448,3)
881
+ 00070/0042 7 (256,448,3)
882
+ 00070/0078 7 (256,448,3)
883
+ 00070/0079 7 (256,448,3)
884
+ 00070/0362 7 (256,448,3)
885
+ 00071/0195 7 (256,448,3)
886
+ 00071/0210 7 (256,448,3)
887
+ 00071/0211 7 (256,448,3)
888
+ 00071/0221 7 (256,448,3)
889
+ 00071/0352 7 (256,448,3)
890
+ 00071/0354 7 (256,448,3)
891
+ 00071/0366 7 (256,448,3)
892
+ 00071/0454 7 (256,448,3)
893
+ 00071/0464 7 (256,448,3)
894
+ 00071/0487 7 (256,448,3)
895
+ 00071/0502 7 (256,448,3)
896
+ 00071/0561 7 (256,448,3)
897
+ 00071/0676 7 (256,448,3)
898
+ 00071/0808 7 (256,448,3)
899
+ 00071/0813 7 (256,448,3)
900
+ 00071/0836 7 (256,448,3)
901
+ 00072/0286 7 (256,448,3)
902
+ 00072/0290 7 (256,448,3)
903
+ 00072/0298 7 (256,448,3)
904
+ 00072/0302 7 (256,448,3)
905
+ 00072/0333 7 (256,448,3)
906
+ 00072/0590 7 (256,448,3)
907
+ 00072/0793 7 (256,448,3)
908
+ 00072/0803 7 (256,448,3)
909
+ 00072/0833 7 (256,448,3)
910
+ 00073/0049 7 (256,448,3)
911
+ 00073/0050 7 (256,448,3)
912
+ 00073/0388 7 (256,448,3)
913
+ 00073/0480 7 (256,448,3)
914
+ 00073/0485 7 (256,448,3)
915
+ 00073/0611 7 (256,448,3)
916
+ 00073/0616 7 (256,448,3)
917
+ 00073/0714 7 (256,448,3)
918
+ 00073/0724 7 (256,448,3)
919
+ 00073/0730 7 (256,448,3)
920
+ 00074/0034 7 (256,448,3)
921
+ 00074/0228 7 (256,448,3)
922
+ 00074/0239 7 (256,448,3)
923
+ 00074/0275 7 (256,448,3)
924
+ 00074/0527 7 (256,448,3)
925
+ 00074/0620 7 (256,448,3)
926
+ 00074/0764 7 (256,448,3)
927
+ 00074/0849 7 (256,448,3)
928
+ 00074/0893 7 (256,448,3)
929
+ 00075/0333 7 (256,448,3)
930
+ 00075/0339 7 (256,448,3)
931
+ 00075/0347 7 (256,448,3)
932
+ 00075/0399 7 (256,448,3)
933
+ 00075/0478 7 (256,448,3)
934
+ 00075/0494 7 (256,448,3)
935
+ 00075/0678 7 (256,448,3)
936
+ 00075/0688 7 (256,448,3)
937
+ 00075/0706 7 (256,448,3)
938
+ 00075/0709 7 (256,448,3)
939
+ 00075/0748 7 (256,448,3)
940
+ 00075/0769 7 (256,448,3)
941
+ 00075/0777 7 (256,448,3)
942
+ 00075/0781 7 (256,448,3)
943
+ 00076/0151 7 (256,448,3)
944
+ 00076/0159 7 (256,448,3)
945
+ 00076/0164 7 (256,448,3)
946
+ 00076/0265 7 (256,448,3)
947
+ 00076/0269 7 (256,448,3)
948
+ 00076/0433 7 (256,448,3)
949
+ 00076/0813 7 (256,448,3)
950
+ 00076/0817 7 (256,448,3)
951
+ 00076/0818 7 (256,448,3)
952
+ 00076/0827 7 (256,448,3)
953
+ 00076/0874 7 (256,448,3)
954
+ 00076/0880 7 (256,448,3)
955
+ 00076/0891 7 (256,448,3)
956
+ 00076/0894 7 (256,448,3)
957
+ 00076/0909 7 (256,448,3)
958
+ 00076/0913 7 (256,448,3)
959
+ 00076/0926 7 (256,448,3)
960
+ 00076/0962 7 (256,448,3)
961
+ 00076/0973 7 (256,448,3)
962
+ 00076/0986 7 (256,448,3)
963
+ 00077/0617 7 (256,448,3)
964
+ 00077/0623 7 (256,448,3)
965
+ 00077/0628 7 (256,448,3)
966
+ 00077/0629 7 (256,448,3)
967
+ 00077/0631 7 (256,448,3)
968
+ 00077/0639 7 (256,448,3)
969
+ 00077/0982 7 (256,448,3)
970
+ 00077/0984 7 (256,448,3)
971
+ 00077/0995 7 (256,448,3)
972
+ 00077/0998 7 (256,448,3)
973
+ 00078/0001 7 (256,448,3)
974
+ 00078/0015 7 (256,448,3)
975
+ 00078/0157 7 (256,448,3)
976
+ 00078/0161 7 (256,448,3)
977
+ 00078/0175 7 (256,448,3)
978
+ 00078/0178 7 (256,448,3)
979
+ 00078/0189 7 (256,448,3)
980
+ 00078/0192 7 (256,448,3)
981
+ 00078/0229 7 (256,448,3)
982
+ 00078/0237 7 (256,448,3)
983
+ 00078/0241 7 (256,448,3)
984
+ 00078/0249 7 (256,448,3)
985
+ 00078/0251 7 (256,448,3)
986
+ 00078/0254 7 (256,448,3)
987
+ 00078/0258 7 (256,448,3)
988
+ 00078/0311 7 (256,448,3)
989
+ 00078/0603 7 (256,448,3)
990
+ 00078/0607 7 (256,448,3)
991
+ 00078/0824 7 (256,448,3)
992
+ 00079/0045 7 (256,448,3)
993
+ 00079/0048 7 (256,448,3)
994
+ 00079/0054 7 (256,448,3)
995
+ 00080/0050 7 (256,448,3)
996
+ 00080/0488 7 (256,448,3)
997
+ 00080/0494 7 (256,448,3)
998
+ 00080/0496 7 (256,448,3)
999
+ 00080/0499 7 (256,448,3)
1000
+ 00080/0502 7 (256,448,3)
1001
+ 00080/0510 7 (256,448,3)
1002
+ 00080/0534 7 (256,448,3)
1003
+ 00080/0558 7 (256,448,3)
1004
+ 00080/0571 7 (256,448,3)
1005
+ 00080/0709 7 (256,448,3)
1006
+ 00080/0882 7 (256,448,3)
1007
+ 00081/0322 7 (256,448,3)
1008
+ 00081/0428 7 (256,448,3)
1009
+ 00081/0700 7 (256,448,3)
1010
+ 00081/0706 7 (256,448,3)
1011
+ 00081/0707 7 (256,448,3)
1012
+ 00081/0937 7 (256,448,3)
1013
+ 00082/0021 7 (256,448,3)
1014
+ 00082/0424 7 (256,448,3)
1015
+ 00082/0794 7 (256,448,3)
1016
+ 00082/0807 7 (256,448,3)
1017
+ 00082/0810 7 (256,448,3)
1018
+ 00082/0824 7 (256,448,3)
1019
+ 00083/0129 7 (256,448,3)
1020
+ 00083/0131 7 (256,448,3)
1021
+ 00083/0249 7 (256,448,3)
1022
+ 00083/0250 7 (256,448,3)
1023
+ 00083/0656 7 (256,448,3)
1024
+ 00083/0812 7 (256,448,3)
1025
+ 00083/0819 7 (256,448,3)
1026
+ 00083/0824 7 (256,448,3)
1027
+ 00083/0827 7 (256,448,3)
1028
+ 00083/0841 7 (256,448,3)
1029
+ 00083/0963 7 (256,448,3)
1030
+ 00084/0047 7 (256,448,3)
1031
+ 00084/0319 7 (256,448,3)
1032
+ 00084/0334 7 (256,448,3)
1033
+ 00084/0363 7 (256,448,3)
1034
+ 00084/0493 7 (256,448,3)
1035
+ 00084/0655 7 (256,448,3)
1036
+ 00084/0752 7 (256,448,3)
1037
+ 00084/0813 7 (256,448,3)
1038
+ 00084/0886 7 (256,448,3)
1039
+ 00084/0948 7 (256,448,3)
1040
+ 00084/0976 7 (256,448,3)
1041
+ 00085/0512 7 (256,448,3)
1042
+ 00085/0641 7 (256,448,3)
1043
+ 00085/0653 7 (256,448,3)
1044
+ 00085/0655 7 (256,448,3)
1045
+ 00085/0697 7 (256,448,3)
1046
+ 00085/0698 7 (256,448,3)
1047
+ 00085/0700 7 (256,448,3)
1048
+ 00085/0703 7 (256,448,3)
1049
+ 00085/0705 7 (256,448,3)
1050
+ 00085/0709 7 (256,448,3)
1051
+ 00085/0713 7 (256,448,3)
1052
+ 00085/0739 7 (256,448,3)
1053
+ 00085/0750 7 (256,448,3)
1054
+ 00085/0763 7 (256,448,3)
1055
+ 00085/0765 7 (256,448,3)
1056
+ 00085/0769 7 (256,448,3)
1057
+ 00085/0863 7 (256,448,3)
1058
+ 00085/0868 7 (256,448,3)
1059
+ 00085/0927 7 (256,448,3)
1060
+ 00085/0936 7 (256,448,3)
1061
+ 00085/0965 7 (256,448,3)
1062
+ 00085/0969 7 (256,448,3)
1063
+ 00085/0974 7 (256,448,3)
1064
+ 00085/0981 7 (256,448,3)
1065
+ 00085/0982 7 (256,448,3)
1066
+ 00085/1000 7 (256,448,3)
1067
+ 00086/0003 7 (256,448,3)
1068
+ 00086/0009 7 (256,448,3)
1069
+ 00086/0011 7 (256,448,3)
1070
+ 00086/0028 7 (256,448,3)
1071
+ 00086/0032 7 (256,448,3)
1072
+ 00086/0034 7 (256,448,3)
1073
+ 00086/0035 7 (256,448,3)
1074
+ 00086/0042 7 (256,448,3)
1075
+ 00086/0064 7 (256,448,3)
1076
+ 00086/0066 7 (256,448,3)
1077
+ 00086/0095 7 (256,448,3)
1078
+ 00086/0099 7 (256,448,3)
1079
+ 00086/0101 7 (256,448,3)
1080
+ 00086/0104 7 (256,448,3)
1081
+ 00086/0115 7 (256,448,3)
1082
+ 00086/0116 7 (256,448,3)
1083
+ 00086/0284 7 (256,448,3)
1084
+ 00086/0291 7 (256,448,3)
1085
+ 00086/0295 7 (256,448,3)
1086
+ 00086/0302 7 (256,448,3)
1087
+ 00086/0318 7 (256,448,3)
1088
+ 00086/0666 7 (256,448,3)
1089
+ 00086/0797 7 (256,448,3)
1090
+ 00086/0851 7 (256,448,3)
1091
+ 00086/0855 7 (256,448,3)
1092
+ 00086/0874 7 (256,448,3)
1093
+ 00086/0878 7 (256,448,3)
1094
+ 00086/0881 7 (256,448,3)
1095
+ 00086/0883 7 (256,448,3)
1096
+ 00086/0896 7 (256,448,3)
1097
+ 00086/0899 7 (256,448,3)
1098
+ 00086/0903 7 (256,448,3)
1099
+ 00086/0989 7 (256,448,3)
1100
+ 00087/0008 7 (256,448,3)
1101
+ 00087/0429 7 (256,448,3)
1102
+ 00087/0511 7 (256,448,3)
1103
+ 00088/0241 7 (256,448,3)
1104
+ 00088/0319 7 (256,448,3)
1105
+ 00088/0323 7 (256,448,3)
1106
+ 00088/0411 7 (256,448,3)
1107
+ 00088/0427 7 (256,448,3)
1108
+ 00088/0452 7 (256,448,3)
1109
+ 00088/0463 7 (256,448,3)
1110
+ 00088/0476 7 (256,448,3)
1111
+ 00088/0496 7 (256,448,3)
1112
+ 00088/0559 7 (256,448,3)
1113
+ 00089/0058 7 (256,448,3)
1114
+ 00089/0061 7 (256,448,3)
1115
+ 00089/0069 7 (256,448,3)
1116
+ 00089/0077 7 (256,448,3)
1117
+ 00089/0096 7 (256,448,3)
1118
+ 00089/0099 7 (256,448,3)
1119
+ 00089/0100 7 (256,448,3)
1120
+ 00089/0211 7 (256,448,3)
1121
+ 00089/0380 7 (256,448,3)
1122
+ 00089/0381 7 (256,448,3)
1123
+ 00089/0384 7 (256,448,3)
1124
+ 00089/0390 7 (256,448,3)
1125
+ 00089/0393 7 (256,448,3)
1126
+ 00089/0394 7 (256,448,3)
1127
+ 00089/0395 7 (256,448,3)
1128
+ 00089/0406 7 (256,448,3)
1129
+ 00089/0410 7 (256,448,3)
1130
+ 00089/0412 7 (256,448,3)
1131
+ 00089/0703 7 (256,448,3)
1132
+ 00089/0729 7 (256,448,3)
1133
+ 00089/0930 7 (256,448,3)
1134
+ 00089/0952 7 (256,448,3)
1135
+ 00090/0062 7 (256,448,3)
1136
+ 00090/0101 7 (256,448,3)
1137
+ 00090/0213 7 (256,448,3)
1138
+ 00090/0216 7 (256,448,3)
1139
+ 00090/0268 7 (256,448,3)
1140
+ 00090/0406 7 (256,448,3)
1141
+ 00090/0411 7 (256,448,3)
1142
+ 00090/0442 7 (256,448,3)
1143
+ 00090/0535 7 (256,448,3)
1144
+ 00090/0542 7 (256,448,3)
1145
+ 00090/0571 7 (256,448,3)
1146
+ 00090/0934 7 (256,448,3)
1147
+ 00090/0938 7 (256,448,3)
1148
+ 00090/0947 7 (256,448,3)
1149
+ 00091/0066 7 (256,448,3)
1150
+ 00091/0448 7 (256,448,3)
1151
+ 00091/0451 7 (256,448,3)
1152
+ 00091/0454 7 (256,448,3)
1153
+ 00091/0457 7 (256,448,3)
1154
+ 00091/0467 7 (256,448,3)
1155
+ 00091/0470 7 (256,448,3)
1156
+ 00091/0477 7 (256,448,3)
1157
+ 00091/0583 7 (256,448,3)
1158
+ 00091/0981 7 (256,448,3)
1159
+ 00091/0994 7 (256,448,3)
1160
+ 00092/0112 7 (256,448,3)
1161
+ 00092/0119 7 (256,448,3)
1162
+ 00092/0129 7 (256,448,3)
1163
+ 00092/0146 7 (256,448,3)
1164
+ 00092/0149 7 (256,448,3)
1165
+ 00092/0608 7 (256,448,3)
1166
+ 00092/0643 7 (256,448,3)
1167
+ 00092/0646 7 (256,448,3)
1168
+ 00092/0766 7 (256,448,3)
1169
+ 00092/0768 7 (256,448,3)
1170
+ 00092/0779 7 (256,448,3)
1171
+ 00093/0081 7 (256,448,3)
1172
+ 00093/0085 7 (256,448,3)
1173
+ 00093/0135 7 (256,448,3)
1174
+ 00093/0241 7 (256,448,3)
1175
+ 00093/0277 7 (256,448,3)
1176
+ 00093/0283 7 (256,448,3)
1177
+ 00093/0320 7 (256,448,3)
1178
+ 00093/0598 7 (256,448,3)
1179
+ 00094/0159 7 (256,448,3)
1180
+ 00094/0253 7 (256,448,3)
1181
+ 00094/0265 7 (256,448,3)
1182
+ 00094/0267 7 (256,448,3)
1183
+ 00094/0269 7 (256,448,3)
1184
+ 00094/0281 7 (256,448,3)
1185
+ 00094/0293 7 (256,448,3)
1186
+ 00094/0404 7 (256,448,3)
1187
+ 00094/0593 7 (256,448,3)
1188
+ 00094/0612 7 (256,448,3)
1189
+ 00094/0638 7 (256,448,3)
1190
+ 00094/0656 7 (256,448,3)
1191
+ 00094/0668 7 (256,448,3)
1192
+ 00094/0786 7 (256,448,3)
1193
+ 00094/0870 7 (256,448,3)
1194
+ 00094/0897 7 (256,448,3)
1195
+ 00094/0900 7 (256,448,3)
1196
+ 00094/0944 7 (256,448,3)
1197
+ 00094/0946 7 (256,448,3)
1198
+ 00094/0952 7 (256,448,3)
1199
+ 00094/0969 7 (256,448,3)
1200
+ 00094/0973 7 (256,448,3)
1201
+ 00094/0981 7 (256,448,3)
1202
+ 00095/0088 7 (256,448,3)
1203
+ 00095/0125 7 (256,448,3)
1204
+ 00095/0130 7 (256,448,3)
1205
+ 00095/0142 7 (256,448,3)
1206
+ 00095/0151 7 (256,448,3)
1207
+ 00095/0180 7 (256,448,3)
1208
+ 00095/0192 7 (256,448,3)
1209
+ 00095/0194 7 (256,448,3)
1210
+ 00095/0195 7 (256,448,3)
1211
+ 00095/0204 7 (256,448,3)
1212
+ 00095/0245 7 (256,448,3)
1213
+ 00095/0315 7 (256,448,3)
1214
+ 00095/0321 7 (256,448,3)
1215
+ 00095/0324 7 (256,448,3)
1216
+ 00095/0327 7 (256,448,3)
1217
+ 00095/0730 7 (256,448,3)
1218
+ 00095/0731 7 (256,448,3)
1219
+ 00095/0741 7 (256,448,3)
1220
+ 00095/0948 7 (256,448,3)
1221
+ 00096/0407 7 (256,448,3)
1222
+ 00096/0420 7 (256,448,3)
1223
+ 00096/0435 7 (256,448,3)
1224
+ 00096/0682 7 (256,448,3)
1225
+ 00096/0865 7 (256,448,3)
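The Vimeo90K meta files use the same line format, but the clip name is a "<folder>/<subfolder>" key into the Vimeo-90K septuplet tree, where the seven frames are conventionally stored as im1.png .. im7.png. A small sketch of that mapping (the root path is a placeholder):

import os.path as osp

def vimeo_clip_frames(root, clip_key, num_frames=7):
    # clip_key such as '00001/0625'; frames follow the im1.png..im7.png convention
    return [osp.join(root, clip_key, f'im{i}.png') for i in range(1, num_frames + 1)]

print(vimeo_clip_frames('datasets/vimeo90k/sequences', '00001/0625'))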
basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt ADDED
The diff for this file is too large to render. See raw diff
basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt ADDED
@@ -0,0 +1,1613 @@
1
+ 00001/0266 7 (256,448,3)
2
+ 00001/0268 7 (256,448,3)
3
+ 00001/0275 7 (256,448,3)
4
+ 00001/0278 7 (256,448,3)
5
+ 00001/0287 7 (256,448,3)
6
+ 00001/0291 7 (256,448,3)
7
+ 00001/0627 7 (256,448,3)
8
+ 00001/0636 7 (256,448,3)
9
+ 00001/0804 7 (256,448,3)
10
+ 00001/0837 7 (256,448,3)
11
+ 00001/0849 7 (256,448,3)
12
+ 00001/0851 7 (256,448,3)
13
+ 00001/0852 7 (256,448,3)
14
+ 00001/0986 7 (256,448,3)
15
+ 00001/0991 7 (256,448,3)
16
+ 00002/0007 7 (256,448,3)
17
+ 00002/0008 7 (256,448,3)
18
+ 00002/0016 7 (256,448,3)
19
+ 00002/0036 7 (256,448,3)
20
+ 00002/0091 7 (256,448,3)
21
+ 00002/0093 7 (256,448,3)
22
+ 00002/0209 7 (256,448,3)
23
+ 00002/0235 7 (256,448,3)
24
+ 00002/0236 7 (256,448,3)
25
+ 00002/0241 7 (256,448,3)
26
+ 00002/0466 7 (256,448,3)
27
+ 00002/0504 7 (256,448,3)
28
+ 00002/0960 7 (256,448,3)
29
+ 00002/0961 7 (256,448,3)
30
+ 00002/0964 7 (256,448,3)
31
+ 00003/0007 7 (256,448,3)
32
+ 00003/0069 7 (256,448,3)
33
+ 00003/0345 7 (256,448,3)
34
+ 00003/0347 7 (256,448,3)
35
+ 00003/0372 7 (256,448,3)
36
+ 00003/0525 7 (256,448,3)
37
+ 00003/0652 7 (256,448,3)
38
+ 00003/0667 7 (256,448,3)
39
+ 00003/0669 7 (256,448,3)
40
+ 00003/0706 7 (256,448,3)
41
+ 00003/0713 7 (256,448,3)
42
+ 00003/0721 7 (256,448,3)
43
+ 00003/0747 7 (256,448,3)
44
+ 00003/0829 7 (256,448,3)
45
+ 00003/0916 7 (256,448,3)
46
+ 00003/0918 7 (256,448,3)
47
+ 00003/0924 7 (256,448,3)
48
+ 00003/0926 7 (256,448,3)
49
+ 00003/0927 7 (256,448,3)
50
+ 00004/0288 7 (256,448,3)
51
+ 00004/0303 7 (256,448,3)
52
+ 00004/0307 7 (256,448,3)
53
+ 00004/0628 7 (256,448,3)
54
+ 00004/0713 7 (256,448,3)
55
+ 00004/0715 7 (256,448,3)
56
+ 00004/0719 7 (256,448,3)
57
+ 00004/0727 7 (256,448,3)
58
+ 00004/0821 7 (256,448,3)
59
+ 00005/0006 7 (256,448,3)
60
+ 00005/0007 7 (256,448,3)
61
+ 00005/0012 7 (256,448,3)
62
+ 00005/0013 7 (256,448,3)
63
+ 00005/0040 7 (256,448,3)
64
+ 00005/0055 7 (256,448,3)
65
+ 00005/0119 7 (256,448,3)
66
+ 00005/0130 7 (256,448,3)
67
+ 00005/0185 7 (256,448,3)
68
+ 00005/0198 7 (256,448,3)
69
+ 00005/0270 7 (256,448,3)
70
+ 00005/0541 7 (256,448,3)
71
+ 00005/0560 7 (256,448,3)
72
+ 00005/0660 7 (256,448,3)
73
+ 00005/0682 7 (256,448,3)
74
+ 00005/0683 7 (256,448,3)
75
+ 00005/0688 7 (256,448,3)
76
+ 00005/0706 7 (256,448,3)
77
+ 00005/0728 7 (256,448,3)
78
+ 00005/0732 7 (256,448,3)
79
+ 00005/0739 7 (256,448,3)
80
+ 00005/0804 7 (256,448,3)
81
+ 00005/0805 7 (256,448,3)
82
+ 00005/0827 7 (256,448,3)
83
+ 00005/0828 7 (256,448,3)
84
+ 00005/0857 7 (256,448,3)
85
+ 00005/0861 7 (256,448,3)
86
+ 00005/0862 7 (256,448,3)
87
+ 00005/0868 7 (256,448,3)
88
+ 00005/0872 7 (256,448,3)
89
+ 00005/0933 7 (256,448,3)
90
+ 00005/0958 7 (256,448,3)
91
+ 00005/0960 7 (256,448,3)
92
+ 00006/0087 7 (256,448,3)
93
+ 00006/0090 7 (256,448,3)
94
+ 00006/0351 7 (256,448,3)
95
+ 00006/0353 7 (256,448,3)
96
+ 00006/0558 7 (256,448,3)
97
+ 00006/0588 7 (256,448,3)
98
+ 00006/0619 7 (256,448,3)
99
+ 00006/0621 7 (256,448,3)
100
+ 00006/0748 7 (256,448,3)
101
+ 00006/0796 7 (256,448,3)
102
+ 00006/0805 7 (256,448,3)
103
+ 00006/0807 7 (256,448,3)
104
+ 00007/0236 7 (256,448,3)
105
+ 00007/0240 7 (256,448,3)
106
+ 00007/0243 7 (256,448,3)
107
+ 00007/0246 7 (256,448,3)
108
+ 00007/0247 7 (256,448,3)
109
+ 00007/0252 7 (256,448,3)
110
+ 00007/0322 7 (256,448,3)
111
+ 00007/0458 7 (256,448,3)
112
+ 00007/0492 7 (256,448,3)
113
+ 00007/0658 7 (256,448,3)
114
+ 00007/0717 7 (256,448,3)
115
+ 00007/0722 7 (256,448,3)
116
+ 00007/0725 7 (256,448,3)
117
+ 00007/0740 7 (256,448,3)
118
+ 00007/0748 7 (256,448,3)
119
+ 00007/0749 7 (256,448,3)
120
+ 00007/0759 7 (256,448,3)
121
+ 00007/0772 7 (256,448,3)
122
+ 00007/0783 7 (256,448,3)
123
+ 00007/0787 7 (256,448,3)
124
+ 00007/0883 7 (256,448,3)
125
+ 00008/0033 7 (256,448,3)
126
+ 00008/0035 7 (256,448,3)
127
+ 00008/0091 7 (256,448,3)
128
+ 00008/0154 7 (256,448,3)
129
+ 00008/0966 7 (256,448,3)
130
+ 00008/0987 7 (256,448,3)
131
+ 00009/0108 7 (256,448,3)
132
+ 00009/0607 7 (256,448,3)
133
+ 00009/0668 7 (256,448,3)
134
+ 00009/0683 7 (256,448,3)
135
+ 00009/0941 7 (256,448,3)
136
+ 00009/0949 7 (256,448,3)
137
+ 00009/0962 7 (256,448,3)
138
+ 00009/0972 7 (256,448,3)
139
+ 00009/0974 7 (256,448,3)
140
+ 00010/0014 7 (256,448,3)
141
+ 00010/0018 7 (256,448,3)
142
+ 00010/0043 7 (256,448,3)
143
+ 00010/0099 7 (256,448,3)
144
+ 00010/0252 7 (256,448,3)
145
+ 00010/0296 7 (256,448,3)
146
+ 00010/0413 7 (256,448,3)
147
+ 00010/0422 7 (256,448,3)
148
+ 00010/0516 7 (256,448,3)
149
+ 00010/0525 7 (256,448,3)
150
+ 00010/0556 7 (256,448,3)
151
+ 00010/0701 7 (256,448,3)
152
+ 00010/0740 7 (256,448,3)
153
+ 00010/0772 7 (256,448,3)
154
+ 00010/0831 7 (256,448,3)
155
+ 00010/0925 7 (256,448,3)
156
+ 00011/0013 7 (256,448,3)
157
+ 00011/0016 7 (256,448,3)
158
+ 00011/0017 7 (256,448,3)
159
+ 00011/0249 7 (256,448,3)
160
+ 00011/0826 7 (256,448,3)
161
+ 00011/0827 7 (256,448,3)
162
+ 00011/0831 7 (256,448,3)
163
+ 00011/0833 7 (256,448,3)
164
+ 00011/0835 7 (256,448,3)
165
+ 00011/0998 7 (256,448,3)
166
+ 00012/0023 7 (256,448,3)
167
+ 00012/0024 7 (256,448,3)
168
+ 00012/0027 7 (256,448,3)
169
+ 00012/0037 7 (256,448,3)
170
+ 00012/0444 7 (256,448,3)
171
+ 00012/0445 7 (256,448,3)
172
+ 00012/0451 7 (256,448,3)
173
+ 00012/0461 7 (256,448,3)
174
+ 00012/0521 7 (256,448,3)
175
+ 00012/0758 7 (256,448,3)
176
+ 00012/0760 7 (256,448,3)
177
+ 00012/0771 7 (256,448,3)
178
+ 00012/0903 7 (256,448,3)
179
+ 00012/0909 7 (256,448,3)
180
+ 00013/0581 7 (256,448,3)
181
+ 00013/0786 7 (256,448,3)
182
+ 00013/0789 7 (256,448,3)
183
+ 00013/0791 7 (256,448,3)
184
+ 00013/0798 7 (256,448,3)
185
+ 00013/0802 7 (256,448,3)
186
+ 00013/0820 7 (256,448,3)
187
+ 00013/0850 7 (256,448,3)
188
+ 00013/0854 7 (256,448,3)
189
+ 00013/0894 7 (256,448,3)
190
+ 00013/0919 7 (256,448,3)
191
+ 00013/0999 7 (256,448,3)
192
+ 00014/0001 7 (256,448,3)
193
+ 00014/0014 7 (256,448,3)
194
+ 00014/0018 7 (256,448,3)
195
+ 00014/0244 7 (256,448,3)
196
+ 00014/0475 7 (256,448,3)
197
+ 00014/0483 7 (256,448,3)
198
+ 00014/0680 7 (256,448,3)
199
+ 00014/0700 7 (256,448,3)
200
+ 00014/0701 7 (256,448,3)
201
+ 00014/0706 7 (256,448,3)
202
+ 00014/0712 7 (256,448,3)
203
+ 00014/0713 7 (256,448,3)
204
+ 00014/0717 7 (256,448,3)
205
+ 00014/0719 7 (256,448,3)
206
+ 00014/0728 7 (256,448,3)
207
+ 00014/0734 7 (256,448,3)
208
+ 00014/0736 7 (256,448,3)
209
+ 00014/0738 7 (256,448,3)
210
+ 00014/0742 7 (256,448,3)
211
+ 00014/0745 7 (256,448,3)
212
+ 00014/0746 7 (256,448,3)
213
+ 00014/0750 7 (256,448,3)
214
+ 00014/0769 7 (256,448,3)
215
+ 00014/0774 7 (256,448,3)
216
+ 00014/0781 7 (256,448,3)
217
+ 00014/0782 7 (256,448,3)
218
+ 00014/0852 7 (256,448,3)
219
+ 00014/0853 7 (256,448,3)
220
+ 00014/0855 7 (256,448,3)
221
+ 00014/0867 7 (256,448,3)
222
+ 00014/0876 7 (256,448,3)
223
+ 00014/0881 7 (256,448,3)
224
+ 00014/0890 7 (256,448,3)
225
+ 00014/0914 7 (256,448,3)
226
+ 00015/0033 7 (256,448,3)
227
+ 00015/0113 7 (256,448,3)
228
+ 00015/0125 7 (256,448,3)
229
+ 00015/0185 7 (256,448,3)
230
+ 00015/0194 7 (256,448,3)
231
+ 00015/0202 7 (256,448,3)
232
+ 00015/0312 7 (256,448,3)
233
+ 00015/0688 7 (256,448,3)
234
+ 00015/0698 7 (256,448,3)
235
+ 00015/0788 7 (256,448,3)
236
+ 00015/0854 7 (256,448,3)
237
+ 00015/0863 7 (256,448,3)
238
+ 00015/0864 7 (256,448,3)
239
+ 00015/0918 7 (256,448,3)
240
+ 00015/0931 7 (256,448,3)
241
+ 00016/0276 7 (256,448,3)
242
+ 00016/0301 7 (256,448,3)
243
+ 00016/0306 7 (256,448,3)
244
+ 00016/0324 7 (256,448,3)
245
+ 00016/0362 7 (256,448,3)
246
+ 00016/0364 7 (256,448,3)
247
+ 00016/0370 7 (256,448,3)
248
+ 00016/0378 7 (256,448,3)
249
+ 00016/0379 7 (256,448,3)
250
+ 00016/0402 7 (256,448,3)
251
+ 00016/0405 7 (256,448,3)
252
+ 00016/0418 7 (256,448,3)
253
+ 00016/0419 7 (256,448,3)
254
+ 00016/0435 7 (256,448,3)
255
+ 00016/0501 7 (256,448,3)
256
+ 00016/0561 7 (256,448,3)
257
+ 00016/0562 7 (256,448,3)
258
+ 00016/0569 7 (256,448,3)
259
+ 00016/0591 7 (256,448,3)
260
+ 00016/0599 7 (256,448,3)
261
+ 00016/0711 7 (256,448,3)
262
+ 00016/0713 7 (256,448,3)
263
+ 00016/0813 7 (256,448,3)
264
+ 00016/0953 7 (256,448,3)
265
+ 00016/0960 7 (256,448,3)
266
+ 00016/0961 7 (256,448,3)
267
+ 00017/0519 7 (256,448,3)
268
+ 00017/0523 7 (256,448,3)
269
+ 00017/0588 7 (256,448,3)
270
+ 00017/0608 7 (256,448,3)
271
+ 00017/0609 7 (256,448,3)
272
+ 00017/0719 7 (256,448,3)
273
+ 00017/0721 7 (256,448,3)
274
+ 00017/0727 7 (256,448,3)
275
+ 00017/0728 7 (256,448,3)
276
+ 00017/0769 7 (256,448,3)
277
+ 00017/0775 7 (256,448,3)
278
+ 00017/0787 7 (256,448,3)
279
+ 00017/0797 7 (256,448,3)
280
+ 00018/0043 7 (256,448,3)
281
+ 00018/0206 7 (256,448,3)
282
+ 00018/0209 7 (256,448,3)
283
+ 00018/0211 7 (256,448,3)
284
+ 00018/0216 7 (256,448,3)
285
+ 00018/0220 7 (256,448,3)
286
+ 00018/0221 7 (256,448,3)
287
+ 00018/0252 7 (256,448,3)
288
+ 00018/0260 7 (256,448,3)
289
+ 00018/0331 7 (256,448,3)
290
+ 00018/0333 7 (256,448,3)
291
+ 00018/0447 7 (256,448,3)
292
+ 00018/0523 7 (256,448,3)
293
+ 00019/0014 7 (256,448,3)
294
+ 00019/0015 7 (256,448,3)
295
+ 00019/0019 7 (256,448,3)
296
+ 00019/0049 7 (256,448,3)
297
+ 00019/0109 7 (256,448,3)
298
+ 00019/0114 7 (256,448,3)
299
+ 00019/0125 7 (256,448,3)
300
+ 00019/0137 7 (256,448,3)
301
+ 00019/0140 7 (256,448,3)
302
+ 00019/0148 7 (256,448,3)
303
+ 00019/0153 7 (256,448,3)
304
+ 00019/0155 7 (256,448,3)
305
+ 00019/0158 7 (256,448,3)
306
+ 00019/0159 7 (256,448,3)
307
+ 00019/0160 7 (256,448,3)
308
+ 00019/0162 7 (256,448,3)
309
+ 00019/0279 7 (256,448,3)
310
+ 00019/0282 7 (256,448,3)
311
+ 00019/0409 7 (256,448,3)
312
+ 00019/0427 7 (256,448,3)
313
+ 00019/0430 7 (256,448,3)
314
+ 00019/0545 7 (256,448,3)
315
+ 00019/0555 7 (256,448,3)
316
+ 00019/0558 7 (256,448,3)
317
+ 00019/0650 7 (256,448,3)
318
+ 00019/0681 7 (256,448,3)
319
+ 00019/0747 7 (256,448,3)
320
+ 00019/0748 7 (256,448,3)
321
+ 00019/0749 7 (256,448,3)
322
+ 00019/0752 7 (256,448,3)
323
+ 00019/0768 7 (256,448,3)
324
+ 00019/0772 7 (256,448,3)
325
+ 00019/0773 7 (256,448,3)
326
+ 00019/0777 7 (256,448,3)
327
+ 00019/0795 7 (256,448,3)
328
+ 00019/0806 7 (256,448,3)
329
+ 00019/0815 7 (256,448,3)
330
+ 00019/0840 7 (256,448,3)
331
+ 00019/0844 7 (256,448,3)
332
+ 00019/0848 7 (256,448,3)
333
+ 00019/0853 7 (256,448,3)
334
+ 00019/0863 7 (256,448,3)
335
+ 00019/0888 7 (256,448,3)
336
+ 00019/0894 7 (256,448,3)
337
+ 00019/0901 7 (256,448,3)
338
+ 00019/0995 7 (256,448,3)
339
+ 00021/0030 7 (256,448,3)
340
+ 00021/0035 7 (256,448,3)
341
+ 00021/0039 7 (256,448,3)
342
+ 00021/0041 7 (256,448,3)
343
+ 00021/0044 7 (256,448,3)
344
+ 00021/0045 7 (256,448,3)
345
+ 00021/0264 7 (256,448,3)
346
+ 00021/0330 7 (256,448,3)
347
+ 00021/0332 7 (256,448,3)
348
+ 00021/0333 7 (256,448,3)
349
+ 00021/0336 7 (256,448,3)
350
+ 00021/0337 7 (256,448,3)
351
+ 00021/0338 7 (256,448,3)
352
+ 00021/0343 7 (256,448,3)
353
+ 00021/0472 7 (256,448,3)
354
+ 00021/0667 7 (256,448,3)
355
+ 00021/0731 7 (256,448,3)
356
+ 00021/0779 7 (256,448,3)
357
+ 00021/0805 7 (256,448,3)
358
+ 00021/0814 7 (256,448,3)
359
+ 00021/0818 7 (256,448,3)
360
+ 00021/0874 7 (256,448,3)
361
+ 00022/0008 7 (256,448,3)
362
+ 00022/0010 7 (256,448,3)
363
+ 00022/0231 7 (256,448,3)
364
+ 00022/0323 7 (256,448,3)
365
+ 00022/0337 7 (256,448,3)
366
+ 00022/0359 7 (256,448,3)
367
+ 00022/0377 7 (256,448,3)
368
+ 00022/0378 7 (256,448,3)
369
+ 00022/0385 7 (256,448,3)
370
+ 00022/0393 7 (256,448,3)
371
+ 00022/0424 7 (256,448,3)
372
+ 00022/0582 7 (256,448,3)
373
+ 00022/0583 7 (256,448,3)
374
+ 00022/0605 7 (256,448,3)
375
+ 00022/0632 7 (256,448,3)
376
+ 00022/0633 7 (256,448,3)
377
+ 00022/0666 7 (256,448,3)
378
+ 00022/0671 7 (256,448,3)
379
+ 00022/0673 7 (256,448,3)
380
+ 00022/0702 7 (256,448,3)
381
+ 00022/0852 7 (256,448,3)
382
+ 00022/0853 7 (256,448,3)
383
+ 00022/0971 7 (256,448,3)
384
+ 00023/0037 7 (256,448,3)
385
+ 00023/0224 7 (256,448,3)
386
+ 00023/0308 7 (256,448,3)
387
+ 00023/0393 7 (256,448,3)
388
+ 00023/0633 7 (256,448,3)
389
+ 00023/0637 7 (256,448,3)
390
+ 00023/0638 7 (256,448,3)
391
+ 00023/0770 7 (256,448,3)
392
+ 00023/0786 7 (256,448,3)
393
+ 00023/0898 7 (256,448,3)
394
+ 00024/0247 7 (256,448,3)
395
+ 00024/0251 7 (256,448,3)
396
+ 00024/0267 7 (256,448,3)
397
+ 00024/0288 7 (256,448,3)
398
+ 00024/0530 7 (256,448,3)
399
+ 00024/0569 7 (256,448,3)
400
+ 00024/0587 7 (256,448,3)
401
+ 00024/0730 7 (256,448,3)
402
+ 00024/0736 7 (256,448,3)
403
+ 00024/0742 7 (256,448,3)
404
+ 00024/0832 7 (256,448,3)
405
+ 00024/0936 7 (256,448,3)
406
+ 00025/0044 7 (256,448,3)
407
+ 00025/0047 7 (256,448,3)
408
+ 00025/0540 7 (256,448,3)
409
+ 00025/0552 7 (256,448,3)
410
+ 00025/0554 7 (256,448,3)
411
+ 00025/0559 7 (256,448,3)
412
+ 00025/0572 7 (256,448,3)
413
+ 00025/0576 7 (256,448,3)
414
+ 00025/0699 7 (256,448,3)
415
+ 00025/0709 7 (256,448,3)
416
+ 00025/0743 7 (256,448,3)
417
+ 00025/0767 7 (256,448,3)
418
+ 00025/0780 7 (256,448,3)
419
+ 00025/0782 7 (256,448,3)
420
+ 00025/0784 7 (256,448,3)
421
+ 00025/0791 7 (256,448,3)
422
+ 00025/0889 7 (256,448,3)
423
+ 00025/0890 7 (256,448,3)
424
+ 00025/0894 7 (256,448,3)
425
+ 00025/0896 7 (256,448,3)
426
+ 00025/0898 7 (256,448,3)
427
+ 00025/0905 7 (256,448,3)
428
+ 00025/0999 7 (256,448,3)
429
+ 00026/0003 7 (256,448,3)
430
+ 00026/0005 7 (256,448,3)
431
+ 00026/0011 7 (256,448,3)
432
+ 00026/0017 7 (256,448,3)
433
+ 00026/0036 7 (256,448,3)
434
+ 00026/0129 7 (256,448,3)
435
+ 00026/0131 7 (256,448,3)
436
+ 00026/0161 7 (256,448,3)
437
+ 00026/0177 7 (256,448,3)
438
+ 00026/0178 7 (256,448,3)
439
+ 00026/0180 7 (256,448,3)
440
+ 00026/0298 7 (256,448,3)
441
+ 00026/0307 7 (256,448,3)
442
+ 00026/0308 7 (256,448,3)
443
+ 00026/0312 7 (256,448,3)
444
+ 00026/0352 7 (256,448,3)
445
+ 00026/0440 7 (256,448,3)
446
+ 00026/0706 7 (256,448,3)
447
+ 00026/0708 7 (256,448,3)
448
+ 00026/0715 7 (256,448,3)
449
+ 00026/0769 7 (256,448,3)
450
+ 00026/0777 7 (256,448,3)
451
+ 00026/0779 7 (256,448,3)
452
+ 00026/0789 7 (256,448,3)
453
+ 00026/0924 7 (256,448,3)
454
+ 00026/0928 7 (256,448,3)
455
+ 00026/0932 7 (256,448,3)
456
+ 00026/0935 7 (256,448,3)
457
+ 00027/0118 7 (256,448,3)
458
+ 00027/0121 7 (256,448,3)
459
+ 00027/0155 7 (256,448,3)
460
+ 00027/0168 7 (256,448,3)
461
+ 00027/0196 7 (256,448,3)
462
+ 00027/0289 7 (256,448,3)
463
+ 00027/0294 7 (256,448,3)
464
+ 00027/0803 7 (256,448,3)
465
+ 00028/0016 7 (256,448,3)
466
+ 00028/0045 7 (256,448,3)
467
+ 00028/0063 7 (256,448,3)
468
+ 00028/0601 7 (256,448,3)
469
+ 00028/0638 7 (256,448,3)
470
+ 00028/0733 7 (256,448,3)
471
+ 00028/0736 7 (256,448,3)
472
+ 00028/0741 7 (256,448,3)
473
+ 00028/0753 7 (256,448,3)
474
+ 00028/0770 7 (256,448,3)
475
+ 00028/0771 7 (256,448,3)
476
+ 00028/0777 7 (256,448,3)
477
+ 00028/0950 7 (256,448,3)
478
+ 00028/0951 7 (256,448,3)
479
+ 00029/0048 7 (256,448,3)
480
+ 00029/0060 7 (256,448,3)
481
+ 00029/0362 7 (256,448,3)
482
+ 00029/0399 7 (256,448,3)
483
+ 00029/0404 7 (256,448,3)
484
+ 00029/0412 7 (256,448,3)
485
+ 00029/0416 7 (256,448,3)
486
+ 00029/0418 7 (256,448,3)
487
+ 00029/0428 7 (256,448,3)
488
+ 00030/0131 7 (256,448,3)
489
+ 00030/0135 7 (256,448,3)
490
+ 00030/0150 7 (256,448,3)
491
+ 00030/0245 7 (256,448,3)
492
+ 00030/0339 7 (256,448,3)
493
+ 00030/0472 7 (256,448,3)
494
+ 00030/0482 7 (256,448,3)
495
+ 00030/0500 7 (256,448,3)
496
+ 00030/0501 7 (256,448,3)
497
+ 00030/0697 7 (256,448,3)
498
+ 00030/0707 7 (256,448,3)
499
+ 00030/0733 7 (256,448,3)
500
+ 00030/0743 7 (256,448,3)
501
+ 00030/0747 7 (256,448,3)
502
+ 00030/0754 7 (256,448,3)
503
+ 00030/0755 7 (256,448,3)
504
+ 00030/0759 7 (256,448,3)
505
+ 00030/0762 7 (256,448,3)
506
+ 00030/0764 7 (256,448,3)
507
+ 00030/0767 7 (256,448,3)
508
+ 00030/0794 7 (256,448,3)
509
+ 00030/0796 7 (256,448,3)
510
+ 00030/0799 7 (256,448,3)
511
+ 00030/0814 7 (256,448,3)
512
+ 00030/0823 7 (256,448,3)
513
+ 00030/0829 7 (256,448,3)
514
+ 00030/0833 7 (256,448,3)
515
+ 00030/0848 7 (256,448,3)
516
+ 00030/0853 7 (256,448,3)
517
+ 00030/0861 7 (256,448,3)
518
+ 00031/0182 7 (256,448,3)
519
+ 00031/0275 7 (256,448,3)
520
+ 00031/0279 7 (256,448,3)
521
+ 00031/0555 7 (256,448,3)
522
+ 00031/0648 7 (256,448,3)
523
+ 00031/0663 7 (256,448,3)
524
+ 00031/0680 7 (256,448,3)
525
+ 00031/0880 7 (256,448,3)
526
+ 00031/0922 7 (256,448,3)
527
+ 00031/0925 7 (256,448,3)
528
+ 00031/0928 7 (256,448,3)
529
+ 00032/0025 7 (256,448,3)
530
+ 00032/0377 7 (256,448,3)
531
+ 00032/0378 7 (256,448,3)
532
+ 00032/0382 7 (256,448,3)
533
+ 00032/0384 7 (256,448,3)
534
+ 00032/0386 7 (256,448,3)
535
+ 00032/0389 7 (256,448,3)
536
+ 00032/0391 7 (256,448,3)
537
+ 00032/0393 7 (256,448,3)
538
+ 00032/0492 7 (256,448,3)
539
+ 00032/0497 7 (256,448,3)
540
+ 00032/0505 7 (256,448,3)
541
+ 00032/0523 7 (256,448,3)
542
+ 00032/0542 7 (256,448,3)
543
+ 00032/0544 7 (256,448,3)
544
+ 00032/0712 7 (256,448,3)
545
+ 00032/0847 7 (256,448,3)
546
+ 00032/0850 7 (256,448,3)
547
+ 00032/0875 7 (256,448,3)
548
+ 00033/0062 7 (256,448,3)
549
+ 00033/0063 7 (256,448,3)
550
+ 00033/0098 7 (256,448,3)
551
+ 00033/0101 7 (256,448,3)
552
+ 00033/0105 7 (256,448,3)
553
+ 00033/0114 7 (256,448,3)
554
+ 00033/0432 7 (256,448,3)
555
+ 00033/0441 7 (256,448,3)
556
+ 00033/0606 7 (256,448,3)
557
+ 00033/0611 7 (256,448,3)
558
+ 00033/0634 7 (256,448,3)
559
+ 00033/0787 7 (256,448,3)
560
+ 00033/0792 7 (256,448,3)
561
+ 00033/0802 7 (256,448,3)
562
+ 00033/0825 7 (256,448,3)
563
+ 00033/0835 7 (256,448,3)
564
+ 00034/0249 7 (256,448,3)
565
+ 00034/0253 7 (256,448,3)
566
+ 00034/0254 7 (256,448,3)
567
+ 00034/0282 7 (256,448,3)
568
+ 00034/0318 7 (256,448,3)
569
+ 00034/0319 7 (256,448,3)
570
+ 00034/0323 7 (256,448,3)
571
+ 00034/0336 7 (256,448,3)
572
+ 00034/0348 7 (256,448,3)
573
+ 00034/0356 7 (256,448,3)
574
+ 00034/0379 7 (256,448,3)
575
+ 00034/0387 7 (256,448,3)
576
+ 00034/0575 7 (256,448,3)
577
+ 00034/0608 7 (256,448,3)
578
+ 00034/0663 7 (256,448,3)
579
+ 00034/0811 7 (256,448,3)
580
+ 00034/0812 7 (256,448,3)
581
+ 00034/0946 7 (256,448,3)
582
+ 00034/0948 7 (256,448,3)
583
+ 00034/0950 7 (256,448,3)
584
+ 00035/0204 7 (256,448,3)
585
+ 00035/0243 7 (256,448,3)
586
+ 00035/0308 7 (256,448,3)
587
+ 00035/0465 7 (256,448,3)
588
+ 00035/0478 7 (256,448,3)
589
+ 00035/0523 7 (256,448,3)
590
+ 00035/0540 7 (256,448,3)
591
+ 00035/0544 7 (256,448,3)
592
+ 00035/0556 7 (256,448,3)
593
+ 00035/0568 7 (256,448,3)
594
+ 00035/0570 7 (256,448,3)
595
+ 00035/0609 7 (256,448,3)
596
+ 00035/0643 7 (256,448,3)
597
+ 00035/0644 7 (256,448,3)
598
+ 00035/0645 7 (256,448,3)
599
+ 00035/0646 7 (256,448,3)
600
+ 00035/0650 7 (256,448,3)
601
+ 00035/0661 7 (256,448,3)
602
+ 00035/0724 7 (256,448,3)
603
+ 00035/0725 7 (256,448,3)
604
+ 00035/0850 7 (256,448,3)
605
+ 00035/0863 7 (256,448,3)
606
+ 00035/0870 7 (256,448,3)
607
+ 00035/0951 7 (256,448,3)
608
+ 00036/0038 7 (256,448,3)
609
+ 00036/0062 7 (256,448,3)
610
+ 00036/0423 7 (256,448,3)
611
+ 00036/0737 7 (256,448,3)
612
+ 00036/0750 7 (256,448,3)
613
+ 00036/0751 7 (256,448,3)
614
+ 00036/0754 7 (256,448,3)
615
+ 00036/0929 7 (256,448,3)
616
+ 00037/0085 7 (256,448,3)
617
+ 00037/0113 7 (256,448,3)
618
+ 00037/0130 7 (256,448,3)
619
+ 00037/0153 7 (256,448,3)
620
+ 00037/0169 7 (256,448,3)
621
+ 00037/0263 7 (256,448,3)
622
+ 00037/0272 7 (256,448,3)
623
+ 00037/0273 7 (256,448,3)
624
+ 00037/0275 7 (256,448,3)
625
+ 00037/0280 7 (256,448,3)
626
+ 00037/0399 7 (256,448,3)
627
+ 00037/0456 7 (256,448,3)
628
+ 00037/0853 7 (256,448,3)
629
+ 00037/0855 7 (256,448,3)
630
+ 00037/0856 7 (256,448,3)
631
+ 00037/0857 7 (256,448,3)
632
+ 00037/0925 7 (256,448,3)
633
+ 00037/0947 7 (256,448,3)
634
+ 00038/0148 7 (256,448,3)
635
+ 00038/0533 7 (256,448,3)
636
+ 00038/0534 7 (256,448,3)
637
+ 00038/0560 7 (256,448,3)
638
+ 00038/0562 7 (256,448,3)
639
+ 00038/0566 7 (256,448,3)
640
+ 00038/0578 7 (256,448,3)
641
+ 00038/0652 7 (256,448,3)
642
+ 00038/0674 7 (256,448,3)
643
+ 00038/0685 7 (256,448,3)
644
+ 00038/0686 7 (256,448,3)
645
+ 00038/0692 7 (256,448,3)
646
+ 00038/0736 7 (256,448,3)
647
+ 00039/0035 7 (256,448,3)
648
+ 00039/0105 7 (256,448,3)
649
+ 00039/0109 7 (256,448,3)
650
+ 00039/0121 7 (256,448,3)
651
+ 00039/0128 7 (256,448,3)
652
+ 00039/0129 7 (256,448,3)
653
+ 00039/0132 7 (256,448,3)
654
+ 00039/0137 7 (256,448,3)
655
+ 00039/0157 7 (256,448,3)
656
+ 00039/0496 7 (256,448,3)
657
+ 00039/0502 7 (256,448,3)
658
+ 00039/0526 7 (256,448,3)
659
+ 00039/0529 7 (256,448,3)
660
+ 00039/0682 7 (256,448,3)
661
+ 00039/0690 7 (256,448,3)
662
+ 00039/0693 7 (256,448,3)
663
+ 00039/0703 7 (256,448,3)
664
+ 00039/0725 7 (256,448,3)
665
+ 00039/0734 7 (256,448,3)
666
+ 00040/0518 7 (256,448,3)
667
+ 00040/0728 7 (256,448,3)
668
+ 00040/0774 7 (256,448,3)
669
+ 00040/0812 7 (256,448,3)
670
+ 00040/0818 7 (256,448,3)
671
+ 00040/0827 7 (256,448,3)
672
+ 00040/0914 7 (256,448,3)
673
+ 00040/0917 7 (256,448,3)
674
+ 00040/0918 7 (256,448,3)
675
+ 00040/0924 7 (256,448,3)
676
+ 00040/0925 7 (256,448,3)
677
+ 00041/0004 7 (256,448,3)
678
+ 00041/0006 7 (256,448,3)
679
+ 00041/0013 7 (256,448,3)
680
+ 00041/0059 7 (256,448,3)
681
+ 00041/0110 7 (256,448,3)
682
+ 00041/0291 7 (256,448,3)
683
+ 00041/0366 7 (256,448,3)
684
+ 00041/0388 7 (256,448,3)
685
+ 00041/0434 7 (256,448,3)
686
+ 00041/0436 7 (256,448,3)
687
+ 00041/0450 7 (256,448,3)
688
+ 00041/0457 7 (256,448,3)
689
+ 00041/0460 7 (256,448,3)
690
+ 00041/0468 7 (256,448,3)
691
+ 00041/0471 7 (256,448,3)
692
+ 00041/0474 7 (256,448,3)
693
+ 00041/0809 7 (256,448,3)
694
+ 00041/0844 7 (256,448,3)
695
+ 00041/0858 7 (256,448,3)
696
+ 00041/0874 7 (256,448,3)
697
+ 00041/0876 7 (256,448,3)
698
+ 00042/0020 7 (256,448,3)
699
+ 00042/0205 7 (256,448,3)
700
+ 00042/0206 7 (256,448,3)
701
+ 00042/0432 7 (256,448,3)
702
+ 00042/0563 7 (256,448,3)
703
+ 00042/0569 7 (256,448,3)
704
+ 00042/0575 7 (256,448,3)
705
+ 00042/0576 7 (256,448,3)
706
+ 00042/0888 7 (256,448,3)
707
+ 00042/0892 7 (256,448,3)
708
+ 00042/0943 7 (256,448,3)
709
+ 00042/0944 7 (256,448,3)
710
+ 00043/0126 7 (256,448,3)
711
+ 00043/0130 7 (256,448,3)
712
+ 00043/0136 7 (256,448,3)
713
+ 00043/0233 7 (256,448,3)
714
+ 00043/0235 7 (256,448,3)
715
+ 00043/0237 7 (256,448,3)
716
+ 00043/0277 7 (256,448,3)
717
+ 00043/0301 7 (256,448,3)
718
+ 00043/0302 7 (256,448,3)
719
+ 00043/0303 7 (256,448,3)
720
+ 00043/0308 7 (256,448,3)
721
+ 00043/0309 7 (256,448,3)
722
+ 00043/0314 7 (256,448,3)
723
+ 00043/0713 7 (256,448,3)
724
+ 00043/0715 7 (256,448,3)
725
+ 00043/0923 7 (256,448,3)
726
+ 00044/0095 7 (256,448,3)
727
+ 00044/0255 7 (256,448,3)
728
+ 00044/0864 7 (256,448,3)
729
+ 00044/0892 7 (256,448,3)
730
+ 00044/0898 7 (256,448,3)
731
+ 00044/0993 7 (256,448,3)
732
+ 00044/0995 7 (256,448,3)
733
+ 00044/0997 7 (256,448,3)
734
+ 00045/0001 7 (256,448,3)
735
+ 00045/0006 7 (256,448,3)
736
+ 00045/0269 7 (256,448,3)
737
+ 00045/0276 7 (256,448,3)
738
+ 00045/0280 7 (256,448,3)
739
+ 00045/0281 7 (256,448,3)
740
+ 00045/0282 7 (256,448,3)
741
+ 00045/0284 7 (256,448,3)
742
+ 00045/0550 7 (256,448,3)
743
+ 00045/0571 7 (256,448,3)
744
+ 00045/0629 7 (256,448,3)
745
+ 00045/0631 7 (256,448,3)
746
+ 00045/0659 7 (256,448,3)
747
+ 00045/0693 7 (256,448,3)
748
+ 00045/0807 7 (256,448,3)
749
+ 00045/0810 7 (256,448,3)
750
+ 00045/0826 7 (256,448,3)
751
+ 00045/0849 7 (256,448,3)
752
+ 00045/0946 7 (256,448,3)
753
+ 00045/0987 7 (256,448,3)
754
+ 00045/0990 7 (256,448,3)
755
+ 00046/0104 7 (256,448,3)
756
+ 00046/0477 7 (256,448,3)
757
+ 00046/0490 7 (256,448,3)
758
+ 00046/0491 7 (256,448,3)
759
+ 00046/0509 7 (256,448,3)
760
+ 00046/0513 7 (256,448,3)
761
+ 00046/0603 7 (256,448,3)
762
+ 00046/0723 7 (256,448,3)
763
+ 00046/0744 7 (256,448,3)
764
+ 00046/0746 7 (256,448,3)
765
+ 00046/0750 7 (256,448,3)
766
+ 00046/0852 7 (256,448,3)
767
+ 00046/0927 7 (256,448,3)
768
+ 00046/0928 7 (256,448,3)
769
+ 00046/0929 7 (256,448,3)
770
+ 00046/0931 7 (256,448,3)
771
+ 00046/0936 7 (256,448,3)
772
+ 00046/0939 7 (256,448,3)
773
+ 00046/0947 7 (256,448,3)
774
+ 00046/0948 7 (256,448,3)
775
+ 00046/0950 7 (256,448,3)
776
+ 00046/0955 7 (256,448,3)
777
+ 00046/0961 7 (256,448,3)
778
+ 00047/0023 7 (256,448,3)
779
+ 00047/0029 7 (256,448,3)
780
+ 00047/0035 7 (256,448,3)
781
+ 00047/0058 7 (256,448,3)
782
+ 00047/0061 7 (256,448,3)
783
+ 00047/0065 7 (256,448,3)
784
+ 00047/0068 7 (256,448,3)
785
+ 00047/0072 7 (256,448,3)
786
+ 00047/0074 7 (256,448,3)
787
+ 00047/0148 7 (256,448,3)
788
+ 00047/0594 7 (256,448,3)
789
+ 00047/0782 7 (256,448,3)
790
+ 00047/0787 7 (256,448,3)
791
+ 00047/0860 7 (256,448,3)
792
+ 00047/0889 7 (256,448,3)
793
+ 00047/0893 7 (256,448,3)
794
+ 00047/0894 7 (256,448,3)
795
+ 00047/0902 7 (256,448,3)
796
+ 00047/0975 7 (256,448,3)
797
+ 00047/0995 7 (256,448,3)
798
+ 00048/0033 7 (256,448,3)
799
+ 00048/0113 7 (256,448,3)
800
+ 00048/0115 7 (256,448,3)
801
+ 00048/0120 7 (256,448,3)
802
+ 00048/0129 7 (256,448,3)
803
+ 00048/0136 7 (256,448,3)
804
+ 00048/0327 7 (256,448,3)
805
+ 00048/0329 7 (256,448,3)
806
+ 00048/0341 7 (256,448,3)
807
+ 00048/0343 7 (256,448,3)
808
+ 00048/0345 7 (256,448,3)
809
+ 00048/0346 7 (256,448,3)
810
+ 00048/0355 7 (256,448,3)
811
+ 00048/0359 7 (256,448,3)
812
+ 00048/0363 7 (256,448,3)
813
+ 00048/0378 7 (256,448,3)
814
+ 00048/0386 7 (256,448,3)
815
+ 00048/0387 7 (256,448,3)
816
+ 00048/0388 7 (256,448,3)
817
+ 00048/0428 7 (256,448,3)
818
+ 00048/0439 7 (256,448,3)
819
+ 00048/0507 7 (256,448,3)
820
+ 00048/0510 7 (256,448,3)
821
+ 00048/0512 7 (256,448,3)
822
+ 00048/0514 7 (256,448,3)
823
+ 00048/0539 7 (256,448,3)
824
+ 00048/0542 7 (256,448,3)
825
+ 00048/0544 7 (256,448,3)
826
+ 00048/0631 7 (256,448,3)
827
+ 00048/0632 7 (256,448,3)
828
+ 00048/0636 7 (256,448,3)
829
+ 00048/0640 7 (256,448,3)
830
+ 00048/0644 7 (256,448,3)
831
+ 00048/0653 7 (256,448,3)
832
+ 00048/0655 7 (256,448,3)
833
+ 00048/0658 7 (256,448,3)
834
+ 00048/0667 7 (256,448,3)
835
+ 00048/0688 7 (256,448,3)
836
+ 00048/0708 7 (256,448,3)
837
+ 00049/0005 7 (256,448,3)
838
+ 00049/0074 7 (256,448,3)
839
+ 00049/0077 7 (256,448,3)
840
+ 00049/0084 7 (256,448,3)
841
+ 00049/0516 7 (256,448,3)
842
+ 00049/0800 7 (256,448,3)
843
+ 00049/0900 7 (256,448,3)
844
+ 00050/0607 7 (256,448,3)
845
+ 00050/0661 7 (256,448,3)
846
+ 00050/0665 7 (256,448,3)
847
+ 00050/0685 7 (256,448,3)
848
+ 00050/0711 7 (256,448,3)
849
+ 00051/0068 7 (256,448,3)
850
+ 00051/0069 7 (256,448,3)
851
+ 00051/0076 7 (256,448,3)
852
+ 00051/0569 7 (256,448,3)
853
+ 00051/0801 7 (256,448,3)
854
+ 00051/0927 7 (256,448,3)
855
+ 00051/0945 7 (256,448,3)
856
+ 00051/0952 7 (256,448,3)
857
+ 00051/0976 7 (256,448,3)
858
+ 00051/0985 7 (256,448,3)
859
+ 00052/0012 7 (256,448,3)
860
+ 00052/0015 7 (256,448,3)
861
+ 00052/0052 7 (256,448,3)
862
+ 00052/0056 7 (256,448,3)
863
+ 00052/0060 7 (256,448,3)
864
+ 00052/0157 7 (256,448,3)
865
+ 00052/0265 7 (256,448,3)
866
+ 00052/0788 7 (256,448,3)
867
+ 00052/0790 7 (256,448,3)
868
+ 00052/0793 7 (256,448,3)
869
+ 00052/0816 7 (256,448,3)
870
+ 00052/0824 7 (256,448,3)
871
+ 00052/0918 7 (256,448,3)
872
+ 00052/0933 7 (256,448,3)
873
+ 00052/0947 7 (256,448,3)
874
+ 00053/0232 7 (256,448,3)
875
+ 00053/0277 7 (256,448,3)
876
+ 00053/0362 7 (256,448,3)
877
+ 00053/0577 7 (256,448,3)
878
+ 00053/0609 7 (256,448,3)
879
+ 00053/0612 7 (256,448,3)
880
+ 00053/0628 7 (256,448,3)
881
+ 00053/0629 7 (256,448,3)
882
+ 00053/0633 7 (256,448,3)
883
+ 00053/0659 7 (256,448,3)
884
+ 00053/0667 7 (256,448,3)
885
+ 00053/0671 7 (256,448,3)
886
+ 00053/0797 7 (256,448,3)
887
+ 00053/0804 7 (256,448,3)
888
+ 00053/0807 7 (256,448,3)
889
+ 00053/0952 7 (256,448,3)
890
+ 00053/0970 7 (256,448,3)
891
+ 00053/0981 7 (256,448,3)
892
+ 00053/0999 7 (256,448,3)
893
+ 00054/0003 7 (256,448,3)
894
+ 00054/0013 7 (256,448,3)
895
+ 00054/0020 7 (256,448,3)
896
+ 00054/0022 7 (256,448,3)
897
+ 00054/0023 7 (256,448,3)
898
+ 00054/0044 7 (256,448,3)
899
+ 00054/0051 7 (256,448,3)
900
+ 00054/0063 7 (256,448,3)
901
+ 00054/0065 7 (256,448,3)
902
+ 00054/0145 7 (256,448,3)
903
+ 00054/0153 7 (256,448,3)
904
+ 00054/0203 7 (256,448,3)
905
+ 00054/0325 7 (256,448,3)
906
+ 00054/0445 7 (256,448,3)
907
+ 00054/0448 7 (256,448,3)
908
+ 00054/0456 7 (256,448,3)
909
+ 00054/0457 7 (256,448,3)
910
+ 00054/0519 7 (256,448,3)
911
+ 00054/0524 7 (256,448,3)
912
+ 00054/0530 7 (256,448,3)
913
+ 00054/0532 7 (256,448,3)
914
+ 00054/0535 7 (256,448,3)
915
+ 00054/0574 7 (256,448,3)
916
+ 00054/0760 7 (256,448,3)
917
+ 00054/0767 7 (256,448,3)
918
+ 00054/0837 7 (256,448,3)
919
+ 00055/0011 7 (256,448,3)
920
+ 00055/0109 7 (256,448,3)
921
+ 00055/0111 7 (256,448,3)
922
+ 00055/0117 7 (256,448,3)
923
+ 00055/0119 7 (256,448,3)
924
+ 00055/0182 7 (256,448,3)
925
+ 00055/0192 7 (256,448,3)
926
+ 00055/0193 7 (256,448,3)
927
+ 00055/0200 7 (256,448,3)
928
+ 00055/0204 7 (256,448,3)
929
+ 00055/0207 7 (256,448,3)
930
+ 00055/0212 7 (256,448,3)
931
+ 00055/0213 7 (256,448,3)
932
+ 00055/0348 7 (256,448,3)
933
+ 00055/0423 7 (256,448,3)
934
+ 00055/0427 7 (256,448,3)
935
+ 00055/0456 7 (256,448,3)
936
+ 00055/0489 7 (256,448,3)
937
+ 00055/0689 7 (256,448,3)
938
+ 00055/0753 7 (256,448,3)
939
+ 00055/0802 7 (256,448,3)
940
+ 00055/0844 7 (256,448,3)
941
+ 00055/0850 7 (256,448,3)
942
+ 00055/0982 7 (256,448,3)
943
+ 00055/0993 7 (256,448,3)
944
+ 00056/0113 7 (256,448,3)
945
+ 00056/0148 7 (256,448,3)
946
+ 00056/0151 7 (256,448,3)
947
+ 00056/0316 7 (256,448,3)
948
+ 00056/0379 7 (256,448,3)
949
+ 00056/0380 7 (256,448,3)
950
+ 00056/0385 7 (256,448,3)
951
+ 00056/0505 7 (256,448,3)
952
+ 00056/0579 7 (256,448,3)
953
+ 00057/0254 7 (256,448,3)
954
+ 00057/0264 7 (256,448,3)
955
+ 00057/0272 7 (256,448,3)
956
+ 00057/0403 7 (256,448,3)
957
+ 00057/0501 7 (256,448,3)
958
+ 00057/0503 7 (256,448,3)
959
+ 00057/0884 7 (256,448,3)
960
+ 00058/0026 7 (256,448,3)
961
+ 00058/0029 7 (256,448,3)
962
+ 00058/0104 7 (256,448,3)
963
+ 00058/0124 7 (256,448,3)
964
+ 00058/0162 7 (256,448,3)
965
+ 00058/0288 7 (256,448,3)
966
+ 00058/0289 7 (256,448,3)
967
+ 00058/0323 7 (256,448,3)
968
+ 00058/0328 7 (256,448,3)
969
+ 00058/0329 7 (256,448,3)
970
+ 00058/0337 7 (256,448,3)
971
+ 00058/0367 7 (256,448,3)
972
+ 00058/0383 7 (256,448,3)
973
+ 00058/0395 7 (256,448,3)
974
+ 00060/0178 7 (256,448,3)
975
+ 00060/0182 7 (256,448,3)
976
+ 00061/0001 7 (256,448,3)
977
+ 00061/0003 7 (256,448,3)
978
+ 00061/0006 7 (256,448,3)
979
+ 00061/0443 7 (256,448,3)
980
+ 00061/0586 7 (256,448,3)
981
+ 00061/0587 7 (256,448,3)
982
+ 00061/0774 7 (256,448,3)
983
+ 00061/0789 7 (256,448,3)
984
+ 00061/0815 7 (256,448,3)
985
+ 00061/0817 7 (256,448,3)
986
+ 00061/0826 7 (256,448,3)
987
+ 00061/0829 7 (256,448,3)
988
+ 00061/0830 7 (256,448,3)
989
+ 00061/0832 7 (256,448,3)
990
+ 00061/0833 7 (256,448,3)
991
+ 00061/0836 7 (256,448,3)
992
+ 00061/0837 7 (256,448,3)
993
+ 00061/0839 7 (256,448,3)
994
+ 00061/0843 7 (256,448,3)
995
+ 00061/0849 7 (256,448,3)
996
+ 00061/0859 7 (256,448,3)
997
+ 00061/0861 7 (256,448,3)
998
+ 00061/0868 7 (256,448,3)
999
+ 00061/0877 7 (256,448,3)
1000
+ 00061/0889 7 (256,448,3)
1001
+ 00061/0905 7 (256,448,3)
1002
+ 00062/0115 7 (256,448,3)
1003
+ 00062/0118 7 (256,448,3)
1004
+ 00062/0125 7 (256,448,3)
1005
+ 00062/0134 7 (256,448,3)
1006
+ 00062/0142 7 (256,448,3)
1007
+ 00062/0400 7 (256,448,3)
1008
+ 00062/0457 7 (256,448,3)
1009
+ 00062/0459 7 (256,448,3)
1010
+ 00062/0560 7 (256,448,3)
1011
+ 00062/0650 7 (256,448,3)
1012
+ 00062/0655 7 (256,448,3)
1013
+ 00062/0715 7 (256,448,3)
1014
+ 00062/0847 7 (256,448,3)
1015
+ 00062/0905 7 (256,448,3)
1016
+ 00062/0981 7 (256,448,3)
1017
+ 00063/0177 7 (256,448,3)
1018
+ 00063/0230 7 (256,448,3)
1019
+ 00063/0253 7 (256,448,3)
1020
+ 00063/0257 7 (256,448,3)
1021
+ 00063/0326 7 (256,448,3)
1022
+ 00063/0530 7 (256,448,3)
1023
+ 00063/0677 7 (256,448,3)
1024
+ 00063/0759 7 (256,448,3)
1025
+ 00063/0761 7 (256,448,3)
1026
+ 00063/0777 7 (256,448,3)
1027
+ 00063/0842 7 (256,448,3)
1028
+ 00063/0900 7 (256,448,3)
1029
+ 00064/0014 7 (256,448,3)
1030
+ 00064/0028 7 (256,448,3)
1031
+ 00064/0029 7 (256,448,3)
1032
+ 00064/0030 7 (256,448,3)
1033
+ 00064/0037 7 (256,448,3)
1034
+ 00064/0044 7 (256,448,3)
1035
+ 00064/0280 7 (256,448,3)
1036
+ 00064/0285 7 (256,448,3)
1037
+ 00064/0286 7 (256,448,3)
1038
+ 00064/0291 7 (256,448,3)
1039
+ 00064/0300 7 (256,448,3)
1040
+ 00064/0303 7 (256,448,3)
1041
+ 00064/0308 7 (256,448,3)
1042
+ 00064/0314 7 (256,448,3)
1043
+ 00064/0316 7 (256,448,3)
1044
+ 00064/0317 7 (256,448,3)
1045
+ 00064/0323 7 (256,448,3)
1046
+ 00064/0435 7 (256,448,3)
1047
+ 00064/0733 7 (256,448,3)
1048
+ 00064/0848 7 (256,448,3)
1049
+ 00064/0868 7 (256,448,3)
1050
+ 00064/0888 7 (256,448,3)
1051
+ 00064/0898 7 (256,448,3)
1052
+ 00065/0116 7 (256,448,3)
1053
+ 00065/0121 7 (256,448,3)
1054
+ 00065/0122 7 (256,448,3)
1055
+ 00065/0124 7 (256,448,3)
1056
+ 00065/0125 7 (256,448,3)
1057
+ 00065/0126 7 (256,448,3)
1058
+ 00065/0136 7 (256,448,3)
1059
+ 00065/0146 7 (256,448,3)
1060
+ 00065/0147 7 (256,448,3)
1061
+ 00065/0163 7 (256,448,3)
1062
+ 00065/0170 7 (256,448,3)
1063
+ 00065/0175 7 (256,448,3)
1064
+ 00065/0176 7 (256,448,3)
1065
+ 00065/0180 7 (256,448,3)
1066
+ 00065/0184 7 (256,448,3)
1067
+ 00065/0186 7 (256,448,3)
1068
+ 00065/0332 7 (256,448,3)
1069
+ 00065/0343 7 (256,448,3)
1070
+ 00065/0365 7 (256,448,3)
1071
+ 00065/0393 7 (256,448,3)
1072
+ 00065/0394 7 (256,448,3)
1073
+ 00065/0442 7 (256,448,3)
1074
+ 00065/0459 7 (256,448,3)
1075
+ 00065/0462 7 (256,448,3)
1076
+ 00065/0476 7 (256,448,3)
1077
+ 00065/0483 7 (256,448,3)
1078
+ 00065/0590 7 (256,448,3)
1079
+ 00065/0593 7 (256,448,3)
1080
+ 00065/0595 7 (256,448,3)
1081
+ 00065/0774 7 (256,448,3)
1082
+ 00065/0947 7 (256,448,3)
1083
+ 00065/0985 7 (256,448,3)
1084
+ 00065/0986 7 (256,448,3)
1085
+ 00066/0015 7 (256,448,3)
1086
+ 00066/0043 7 (256,448,3)
1087
+ 00066/0131 7 (256,448,3)
1088
+ 00066/0157 7 (256,448,3)
1089
+ 00066/0169 7 (256,448,3)
1090
+ 00066/0374 7 (256,448,3)
1091
+ 00066/0382 7 (256,448,3)
1092
+ 00066/0481 7 (256,448,3)
1093
+ 00066/0482 7 (256,448,3)
1094
+ 00066/0491 7 (256,448,3)
1095
+ 00066/0493 7 (256,448,3)
1096
+ 00066/0494 7 (256,448,3)
1097
+ 00066/0496 7 (256,448,3)
1098
+ 00066/0680 7 (256,448,3)
1099
+ 00066/0700 7 (256,448,3)
1100
+ 00066/0887 7 (256,448,3)
1101
+ 00066/0910 7 (256,448,3)
1102
+ 00066/0918 7 (256,448,3)
1103
+ 00067/0024 7 (256,448,3)
1104
+ 00067/0059 7 (256,448,3)
1105
+ 00067/0408 7 (256,448,3)
1106
+ 00067/0414 7 (256,448,3)
1107
+ 00067/0417 7 (256,448,3)
1108
+ 00067/0419 7 (256,448,3)
1109
+ 00067/0423 7 (256,448,3)
1110
+ 00067/0441 7 (256,448,3)
1111
+ 00067/0467 7 (256,448,3)
1112
+ 00067/0471 7 (256,448,3)
1113
+ 00067/0487 7 (256,448,3)
1114
+ 00067/0494 7 (256,448,3)
1115
+ 00067/0497 7 (256,448,3)
1116
+ 00067/0513 7 (256,448,3)
1117
+ 00067/0521 7 (256,448,3)
1118
+ 00068/0111 7 (256,448,3)
1119
+ 00068/0123 7 (256,448,3)
1120
+ 00068/0126 7 (256,448,3)
1121
+ 00068/0129 7 (256,448,3)
1122
+ 00068/0270 7 (256,448,3)
1123
+ 00068/0330 7 (256,448,3)
1124
+ 00068/0407 7 (256,448,3)
1125
+ 00068/0428 7 (256,448,3)
1126
+ 00068/0544 7 (256,448,3)
1127
+ 00068/0635 7 (256,448,3)
1128
+ 00068/0637 7 (256,448,3)
1129
+ 00068/0736 7 (256,448,3)
1130
+ 00068/0738 7 (256,448,3)
1131
+ 00068/0747 7 (256,448,3)
1132
+ 00068/0748 7 (256,448,3)
1133
+ 00068/0749 7 (256,448,3)
1134
+ 00068/0762 7 (256,448,3)
1135
+ 00068/0815 7 (256,448,3)
1136
+ 00068/0981 7 (256,448,3)
1137
+ 00068/0982 7 (256,448,3)
1138
+ 00069/0187 7 (256,448,3)
1139
+ 00069/0191 7 (256,448,3)
1140
+ 00070/0001 7 (256,448,3)
1141
+ 00070/0003 7 (256,448,3)
1142
+ 00070/0340 7 (256,448,3)
1143
+ 00070/0341 7 (256,448,3)
1144
+ 00070/0342 7 (256,448,3)
1145
+ 00070/0347 7 (256,448,3)
1146
+ 00070/0372 7 (256,448,3)
1147
+ 00070/0383 7 (256,448,3)
1148
+ 00070/0389 7 (256,448,3)
1149
+ 00070/0728 7 (256,448,3)
1150
+ 00070/0813 7 (256,448,3)
1151
+ 00070/0814 7 (256,448,3)
1152
+ 00070/0823 7 (256,448,3)
1153
+ 00070/0840 7 (256,448,3)
1154
+ 00070/0843 7 (256,448,3)
1155
+ 00070/0861 7 (256,448,3)
1156
+ 00071/0111 7 (256,448,3)
1157
+ 00071/0138 7 (256,448,3)
1158
+ 00071/0143 7 (256,448,3)
1159
+ 00071/0150 7 (256,448,3)
1160
+ 00071/0508 7 (256,448,3)
1161
+ 00071/0514 7 (256,448,3)
1162
+ 00071/0550 7 (256,448,3)
1163
+ 00071/0556 7 (256,448,3)
1164
+ 00071/0600 7 (256,448,3)
1165
+ 00071/0665 7 (256,448,3)
1166
+ 00071/0670 7 (256,448,3)
1167
+ 00071/0672 7 (256,448,3)
1168
+ 00071/0673 7 (256,448,3)
1169
+ 00071/0705 7 (256,448,3)
1170
+ 00071/0706 7 (256,448,3)
1171
+ 00071/0707 7 (256,448,3)
1172
+ 00071/0774 7 (256,448,3)
1173
+ 00071/0799 7 (256,448,3)
1174
+ 00071/0814 7 (256,448,3)
1175
+ 00071/0816 7 (256,448,3)
1176
+ 00071/0819 7 (256,448,3)
1177
+ 00071/0823 7 (256,448,3)
1178
+ 00071/0828 7 (256,448,3)
1179
+ 00071/0830 7 (256,448,3)
1180
+ 00071/0839 7 (256,448,3)
1181
+ 00071/0841 7 (256,448,3)
1182
+ 00072/0192 7 (256,448,3)
1183
+ 00072/0194 7 (256,448,3)
1184
+ 00072/0197 7 (256,448,3)
1185
+ 00072/0199 7 (256,448,3)
1186
+ 00072/0285 7 (256,448,3)
1187
+ 00072/0586 7 (256,448,3)
1188
+ 00072/0795 7 (256,448,3)
1189
+ 00072/0811 7 (256,448,3)
1190
+ 00072/0812 7 (256,448,3)
1191
+ 00072/0824 7 (256,448,3)
1192
+ 00072/0831 7 (256,448,3)
1193
+ 00072/0835 7 (256,448,3)
1194
+ 00072/0837 7 (256,448,3)
1195
+ 00072/0841 7 (256,448,3)
1196
+ 00072/0962 7 (256,448,3)
1197
+ 00073/0296 7 (256,448,3)
1198
+ 00073/0299 7 (256,448,3)
1199
+ 00073/0300 7 (256,448,3)
1200
+ 00073/0301 7 (256,448,3)
1201
+ 00073/0427 7 (256,448,3)
1202
+ 00073/0428 7 (256,448,3)
1203
+ 00073/0494 7 (256,448,3)
1204
+ 00073/0615 7 (256,448,3)
1205
+ 00073/0620 7 (256,448,3)
1206
+ 00073/0624 7 (256,448,3)
1207
+ 00073/0979 7 (256,448,3)
1208
+ 00074/0226 7 (256,448,3)
1209
+ 00074/0250 7 (256,448,3)
1210
+ 00074/0284 7 (256,448,3)
1211
+ 00074/0503 7 (256,448,3)
1212
+ 00074/0614 7 (256,448,3)
1213
+ 00074/0629 7 (256,448,3)
1214
+ 00074/0762 7 (256,448,3)
1215
+ 00074/0765 7 (256,448,3)
1216
+ 00074/0900 7 (256,448,3)
1217
+ 00074/0908 7 (256,448,3)
1218
+ 00075/0352 7 (256,448,3)
1219
+ 00075/0360 7 (256,448,3)
1220
+ 00075/0361 7 (256,448,3)
1221
+ 00075/0365 7 (256,448,3)
1222
+ 00075/0383 7 (256,448,3)
1223
+ 00075/0384 7 (256,448,3)
1224
+ 00075/0386 7 (256,448,3)
1225
+ 00075/0407 7 (256,448,3)
1226
+ 00075/0410 7 (256,448,3)
1227
+ 00075/0412 7 (256,448,3)
1228
+ 00075/0413 7 (256,448,3)
1229
+ 00075/0459 7 (256,448,3)
1230
+ 00075/0504 7 (256,448,3)
1231
+ 00075/0515 7 (256,448,3)
1232
+ 00075/0518 7 (256,448,3)
1233
+ 00075/0567 7 (256,448,3)
1234
+ 00075/0681 7 (256,448,3)
1235
+ 00075/0693 7 (256,448,3)
1236
+ 00075/0728 7 (256,448,3)
1237
+ 00075/0731 7 (256,448,3)
1238
+ 00075/0804 7 (256,448,3)
1239
+ 00075/0974 7 (256,448,3)
1240
+ 00075/0975 7 (256,448,3)
1241
+ 00075/0983 7 (256,448,3)
1242
+ 00075/0997 7 (256,448,3)
1243
+ 00076/0006 7 (256,448,3)
1244
+ 00076/0007 7 (256,448,3)
1245
+ 00076/0011 7 (256,448,3)
1246
+ 00076/0013 7 (256,448,3)
1247
+ 00076/0014 7 (256,448,3)
1248
+ 00076/0027 7 (256,448,3)
1249
+ 00076/0029 7 (256,448,3)
1250
+ 00076/0037 7 (256,448,3)
1251
+ 00076/0041 7 (256,448,3)
1252
+ 00076/0055 7 (256,448,3)
1253
+ 00076/0071 7 (256,448,3)
1254
+ 00076/0172 7 (256,448,3)
1255
+ 00076/0275 7 (256,448,3)
1256
+ 00076/0286 7 (256,448,3)
1257
+ 00076/0467 7 (256,448,3)
1258
+ 00076/0481 7 (256,448,3)
1259
+ 00076/0527 7 (256,448,3)
1260
+ 00076/0895 7 (256,448,3)
1261
+ 00076/0896 7 (256,448,3)
1262
+ 00076/0906 7 (256,448,3)
1263
+ 00076/0924 7 (256,448,3)
1264
+ 00076/0964 7 (256,448,3)
1265
+ 00076/0984 7 (256,448,3)
1266
+ 00077/0317 7 (256,448,3)
1267
+ 00077/0322 7 (256,448,3)
1268
+ 00077/0333 7 (256,448,3)
1269
+ 00077/0334 7 (256,448,3)
1270
+ 00077/0480 7 (256,448,3)
1271
+ 00077/0488 7 (256,448,3)
1272
+ 00077/0490 7 (256,448,3)
1273
+ 00077/0582 7 (256,448,3)
1274
+ 00077/0586 7 (256,448,3)
1275
+ 00077/0969 7 (256,448,3)
1276
+ 00078/0007 7 (256,448,3)
1277
+ 00078/0011 7 (256,448,3)
1278
+ 00078/0153 7 (256,448,3)
1279
+ 00078/0289 7 (256,448,3)
1280
+ 00078/0312 7 (256,448,3)
1281
+ 00078/0492 7 (256,448,3)
1282
+ 00078/0580 7 (256,448,3)
1283
+ 00078/0595 7 (256,448,3)
1284
+ 00078/0814 7 (256,448,3)
1285
+ 00078/0950 7 (256,448,3)
1286
+ 00078/0955 7 (256,448,3)
1287
+ 00079/0060 7 (256,448,3)
1288
+ 00079/0067 7 (256,448,3)
1289
+ 00080/0216 7 (256,448,3)
1290
+ 00080/0308 7 (256,448,3)
1291
+ 00080/0504 7 (256,448,3)
1292
+ 00080/0552 7 (256,448,3)
1293
+ 00080/0576 7 (256,448,3)
1294
+ 00080/0583 7 (256,448,3)
1295
+ 00080/0837 7 (256,448,3)
1296
+ 00080/0839 7 (256,448,3)
1297
+ 00080/0871 7 (256,448,3)
1298
+ 00080/0877 7 (256,448,3)
1299
+ 00080/0880 7 (256,448,3)
1300
+ 00080/0969 7 (256,448,3)
1301
+ 00080/0973 7 (256,448,3)
1302
+ 00080/0980 7 (256,448,3)
1303
+ 00081/0202 7 (256,448,3)
1304
+ 00081/0203 7 (256,448,3)
1305
+ 00081/0210 7 (256,448,3)
1306
+ 00081/0268 7 (256,448,3)
1307
+ 00081/0281 7 (256,448,3)
1308
+ 00081/0283 7 (256,448,3)
1309
+ 00081/0317 7 (256,448,3)
1310
+ 00081/0327 7 (256,448,3)
1311
+ 00082/0018 7 (256,448,3)
1312
+ 00082/0025 7 (256,448,3)
1313
+ 00082/0089 7 (256,448,3)
1314
+ 00082/0140 7 (256,448,3)
1315
+ 00082/0442 7 (256,448,3)
1316
+ 00082/0465 7 (256,448,3)
1317
+ 00082/0473 7 (256,448,3)
1318
+ 00082/0481 7 (256,448,3)
1319
+ 00082/0492 7 (256,448,3)
1320
+ 00082/0495 7 (256,448,3)
1321
+ 00082/0497 7 (256,448,3)
1322
+ 00082/0502 7 (256,448,3)
1323
+ 00082/0504 7 (256,448,3)
1324
+ 00082/0506 7 (256,448,3)
1325
+ 00082/0507 7 (256,448,3)
1326
+ 00082/0510 7 (256,448,3)
1327
+ 00082/0519 7 (256,448,3)
1328
+ 00082/0523 7 (256,448,3)
1329
+ 00082/0588 7 (256,448,3)
1330
+ 00082/0597 7 (256,448,3)
1331
+ 00082/0632 7 (256,448,3)
1332
+ 00082/0751 7 (256,448,3)
1333
+ 00082/0767 7 (256,448,3)
1334
+ 00082/0771 7 (256,448,3)
1335
+ 00082/0790 7 (256,448,3)
1336
+ 00082/0804 7 (256,448,3)
1337
+ 00082/0823 7 (256,448,3)
1338
+ 00083/0052 7 (256,448,3)
1339
+ 00083/0056 7 (256,448,3)
1340
+ 00083/0113 7 (256,448,3)
1341
+ 00083/0114 7 (256,448,3)
1342
+ 00083/0122 7 (256,448,3)
1343
+ 00083/0137 7 (256,448,3)
1344
+ 00083/0270 7 (256,448,3)
1345
+ 00083/0295 7 (256,448,3)
1346
+ 00083/0303 7 (256,448,3)
1347
+ 00083/0308 7 (256,448,3)
1348
+ 00083/0586 7 (256,448,3)
1349
+ 00083/0592 7 (256,448,3)
1350
+ 00083/0640 7 (256,448,3)
1351
+ 00083/0648 7 (256,448,3)
1352
+ 00083/0654 7 (256,448,3)
1353
+ 00083/0662 7 (256,448,3)
1354
+ 00083/0666 7 (256,448,3)
1355
+ 00083/0668 7 (256,448,3)
1356
+ 00083/0669 7 (256,448,3)
1357
+ 00083/0675 7 (256,448,3)
1358
+ 00083/0679 7 (256,448,3)
1359
+ 00083/0681 7 (256,448,3)
1360
+ 00083/0682 7 (256,448,3)
1361
+ 00083/0694 7 (256,448,3)
1362
+ 00083/0695 7 (256,448,3)
1363
+ 00083/0697 7 (256,448,3)
1364
+ 00083/0704 7 (256,448,3)
1365
+ 00083/0713 7 (256,448,3)
1366
+ 00083/0721 7 (256,448,3)
1367
+ 00083/0855 7 (256,448,3)
1368
+ 00084/0109 7 (256,448,3)
1369
+ 00084/0113 7 (256,448,3)
1370
+ 00084/0306 7 (256,448,3)
1371
+ 00084/0442 7 (256,448,3)
1372
+ 00084/0669 7 (256,448,3)
1373
+ 00084/0679 7 (256,448,3)
1374
+ 00084/0685 7 (256,448,3)
1375
+ 00084/0691 7 (256,448,3)
1376
+ 00084/0768 7 (256,448,3)
1377
+ 00084/0817 7 (256,448,3)
1378
+ 00085/0027 7 (256,448,3)
1379
+ 00085/0035 7 (256,448,3)
1380
+ 00085/0038 7 (256,448,3)
1381
+ 00085/0223 7 (256,448,3)
1382
+ 00085/0233 7 (256,448,3)
1383
+ 00085/0281 7 (256,448,3)
1384
+ 00085/0287 7 (256,448,3)
1385
+ 00085/0313 7 (256,448,3)
1386
+ 00085/0521 7 (256,448,3)
1387
+ 00085/0848 7 (256,448,3)
1388
+ 00085/0855 7 (256,448,3)
1389
+ 00085/0865 7 (256,448,3)
1390
+ 00085/0952 7 (256,448,3)
1391
+ 00085/0964 7 (256,448,3)
1392
+ 00085/0973 7 (256,448,3)
1393
+ 00085/0986 7 (256,448,3)
1394
+ 00085/0993 7 (256,448,3)
1395
+ 00086/0070 7 (256,448,3)
1396
+ 00086/0075 7 (256,448,3)
1397
+ 00086/0094 7 (256,448,3)
1398
+ 00086/0103 7 (256,448,3)
1399
+ 00086/0112 7 (256,448,3)
1400
+ 00086/0288 7 (256,448,3)
1401
+ 00086/0576 7 (256,448,3)
1402
+ 00086/0580 7 (256,448,3)
1403
+ 00086/0584 7 (256,448,3)
1404
+ 00086/0599 7 (256,448,3)
1405
+ 00086/0600 7 (256,448,3)
1406
+ 00086/0602 7 (256,448,3)
1407
+ 00086/0612 7 (256,448,3)
1408
+ 00086/0629 7 (256,448,3)
1409
+ 00086/0655 7 (256,448,3)
1410
+ 00086/0679 7 (256,448,3)
1411
+ 00086/0694 7 (256,448,3)
1412
+ 00086/0695 7 (256,448,3)
1413
+ 00086/0701 7 (256,448,3)
1414
+ 00086/0760 7 (256,448,3)
1415
+ 00086/0786 7 (256,448,3)
1416
+ 00086/0845 7 (256,448,3)
1417
+ 00086/0868 7 (256,448,3)
1418
+ 00086/0889 7 (256,448,3)
1419
+ 00086/0891 7 (256,448,3)
1420
+ 00086/0927 7 (256,448,3)
1421
+ 00086/0938 7 (256,448,3)
1422
+ 00086/0946 7 (256,448,3)
1423
+ 00086/0963 7 (256,448,3)
1424
+ 00086/0969 7 (256,448,3)
1425
+ 00087/0023 7 (256,448,3)
1426
+ 00087/0029 7 (256,448,3)
1427
+ 00087/0144 7 (256,448,3)
1428
+ 00087/0148 7 (256,448,3)
1429
+ 00087/0159 7 (256,448,3)
1430
+ 00087/0174 7 (256,448,3)
1431
+ 00087/0283 7 (256,448,3)
1432
+ 00087/0284 7 (256,448,3)
1433
+ 00087/0294 7 (256,448,3)
1434
+ 00087/0296 7 (256,448,3)
1435
+ 00087/0498 7 (256,448,3)
1436
+ 00087/0502 7 (256,448,3)
1437
+ 00087/0532 7 (256,448,3)
1438
+ 00087/0557 7 (256,448,3)
1439
+ 00087/0559 7 (256,448,3)
1440
+ 00087/0574 7 (256,448,3)
1441
+ 00087/0577 7 (256,448,3)
1442
+ 00088/0006 7 (256,448,3)
1443
+ 00088/0268 7 (256,448,3)
1444
+ 00088/0320 7 (256,448,3)
1445
+ 00088/0412 7 (256,448,3)
1446
+ 00088/0431 7 (256,448,3)
1447
+ 00088/0432 7 (256,448,3)
1448
+ 00088/0465 7 (256,448,3)
1449
+ 00088/0507 7 (256,448,3)
1450
+ 00088/0565 7 (256,448,3)
1451
+ 00088/0629 7 (256,448,3)
1452
+ 00088/0831 7 (256,448,3)
1453
+ 00088/0836 7 (256,448,3)
1454
+ 00088/0972 7 (256,448,3)
1455
+ 00088/0974 7 (256,448,3)
1456
+ 00088/0980 7 (256,448,3)
1457
+ 00089/0067 7 (256,448,3)
1458
+ 00089/0244 7 (256,448,3)
1459
+ 00089/0404 7 (256,448,3)
1460
+ 00089/0416 7 (256,448,3)
1461
+ 00089/0419 7 (256,448,3)
1462
+ 00089/0428 7 (256,448,3)
1463
+ 00089/0712 7 (256,448,3)
1464
+ 00089/0713 7 (256,448,3)
1465
+ 00089/0723 7 (256,448,3)
1466
+ 00089/0727 7 (256,448,3)
1467
+ 00089/0770 7 (256,448,3)
1468
+ 00089/0809 7 (256,448,3)
1469
+ 00089/0811 7 (256,448,3)
1470
+ 00089/0888 7 (256,448,3)
1471
+ 00089/0898 7 (256,448,3)
1472
+ 00089/0903 7 (256,448,3)
1473
+ 00089/0907 7 (256,448,3)
1474
+ 00089/0911 7 (256,448,3)
1475
+ 00089/0915 7 (256,448,3)
1476
+ 00089/0926 7 (256,448,3)
1477
+ 00089/0955 7 (256,448,3)
1478
+ 00090/0027 7 (256,448,3)
1479
+ 00090/0028 7 (256,448,3)
1480
+ 00090/0032 7 (256,448,3)
1481
+ 00090/0038 7 (256,448,3)
1482
+ 00090/0076 7 (256,448,3)
1483
+ 00090/0081 7 (256,448,3)
1484
+ 00090/0086 7 (256,448,3)
1485
+ 00090/0119 7 (256,448,3)
1486
+ 00090/0258 7 (256,448,3)
1487
+ 00090/0261 7 (256,448,3)
1488
+ 00090/0447 7 (256,448,3)
1489
+ 00090/0498 7 (256,448,3)
1490
+ 00090/0514 7 (256,448,3)
1491
+ 00090/0523 7 (256,448,3)
1492
+ 00090/0530 7 (256,448,3)
1493
+ 00090/0540 7 (256,448,3)
1494
+ 00090/0548 7 (256,448,3)
1495
+ 00090/0565 7 (256,448,3)
1496
+ 00090/0578 7 (256,448,3)
1497
+ 00090/0580 7 (256,448,3)
1498
+ 00090/0581 7 (256,448,3)
1499
+ 00090/0780 7 (256,448,3)
1500
+ 00090/0940 7 (256,448,3)
1501
+ 00090/0984 7 (256,448,3)
1502
+ 00091/0023 7 (256,448,3)
1503
+ 00091/0051 7 (256,448,3)
1504
+ 00091/0317 7 (256,448,3)
1505
+ 00091/0320 7 (256,448,3)
1506
+ 00091/0582 7 (256,448,3)
1507
+ 00091/0585 7 (256,448,3)
1508
+ 00091/0588 7 (256,448,3)
1509
+ 00091/0601 7 (256,448,3)
1510
+ 00091/0602 7 (256,448,3)
1511
+ 00091/0603 7 (256,448,3)
1512
+ 00091/0634 7 (256,448,3)
1513
+ 00091/0693 7 (256,448,3)
1514
+ 00091/0741 7 (256,448,3)
1515
+ 00091/0966 7 (256,448,3)
1516
+ 00091/0973 7 (256,448,3)
1517
+ 00091/0985 7 (256,448,3)
1518
+ 00092/0007 7 (256,448,3)
1519
+ 00092/0132 7 (256,448,3)
1520
+ 00092/0270 7 (256,448,3)
1521
+ 00092/0296 7 (256,448,3)
1522
+ 00092/0611 7 (256,448,3)
1523
+ 00092/0625 7 (256,448,3)
1524
+ 00092/0627 7 (256,448,3)
1525
+ 00092/0651 7 (256,448,3)
1526
+ 00092/0652 7 (256,448,3)
1527
+ 00092/0910 7 (256,448,3)
1528
+ 00093/0075 7 (256,448,3)
1529
+ 00093/0078 7 (256,448,3)
1530
+ 00093/0100 7 (256,448,3)
1531
+ 00093/0132 7 (256,448,3)
1532
+ 00093/0133 7 (256,448,3)
1533
+ 00093/0176 7 (256,448,3)
1534
+ 00093/0177 7 (256,448,3)
1535
+ 00093/0178 7 (256,448,3)
1536
+ 00093/0181 7 (256,448,3)
1537
+ 00093/0183 7 (256,448,3)
1538
+ 00093/0184 7 (256,448,3)
1539
+ 00093/0286 7 (256,448,3)
1540
+ 00093/0304 7 (256,448,3)
1541
+ 00093/0305 7 (256,448,3)
1542
+ 00093/0319 7 (256,448,3)
1543
+ 00093/0324 7 (256,448,3)
1544
+ 00093/0325 7 (256,448,3)
1545
+ 00093/0327 7 (256,448,3)
1546
+ 00093/0331 7 (256,448,3)
1547
+ 00093/0444 7 (256,448,3)
1548
+ 00093/0450 7 (256,448,3)
1549
+ 00093/0593 7 (256,448,3)
1550
+ 00094/0032 7 (256,448,3)
1551
+ 00094/0057 7 (256,448,3)
1552
+ 00094/0139 7 (256,448,3)
1553
+ 00094/0206 7 (256,448,3)
1554
+ 00094/0211 7 (256,448,3)
1555
+ 00094/0215 7 (256,448,3)
1556
+ 00094/0218 7 (256,448,3)
1557
+ 00094/0257 7 (256,448,3)
1558
+ 00094/0329 7 (256,448,3)
1559
+ 00094/0331 7 (256,448,3)
1560
+ 00094/0332 7 (256,448,3)
1561
+ 00094/0369 7 (256,448,3)
1562
+ 00094/0370 7 (256,448,3)
1563
+ 00094/0383 7 (256,448,3)
1564
+ 00094/0385 7 (256,448,3)
1565
+ 00094/0387 7 (256,448,3)
1566
+ 00094/0399 7 (256,448,3)
1567
+ 00094/0605 7 (256,448,3)
1568
+ 00094/0648 7 (256,448,3)
1569
+ 00094/0649 7 (256,448,3)
1570
+ 00094/0759 7 (256,448,3)
1571
+ 00094/0800 7 (256,448,3)
1572
+ 00094/0894 7 (256,448,3)
1573
+ 00094/0896 7 (256,448,3)
1574
+ 00095/0089 7 (256,448,3)
1575
+ 00095/0108 7 (256,448,3)
1576
+ 00095/0109 7 (256,448,3)
1577
+ 00095/0114 7 (256,448,3)
1578
+ 00095/0128 7 (256,448,3)
1579
+ 00095/0133 7 (256,448,3)
1580
+ 00095/0150 7 (256,448,3)
1581
+ 00095/0153 7 (256,448,3)
1582
+ 00095/0154 7 (256,448,3)
1583
+ 00095/0196 7 (256,448,3)
1584
+ 00095/0209 7 (256,448,3)
1585
+ 00095/0228 7 (256,448,3)
1586
+ 00095/0230 7 (256,448,3)
1587
+ 00095/0231 7 (256,448,3)
1588
+ 00095/0242 7 (256,448,3)
1589
+ 00095/0243 7 (256,448,3)
1590
+ 00095/0253 7 (256,448,3)
1591
+ 00095/0280 7 (256,448,3)
1592
+ 00095/0281 7 (256,448,3)
1593
+ 00095/0283 7 (256,448,3)
1594
+ 00095/0314 7 (256,448,3)
1595
+ 00095/0868 7 (256,448,3)
1596
+ 00095/0894 7 (256,448,3)
1597
+ 00096/0062 7 (256,448,3)
1598
+ 00096/0347 7 (256,448,3)
1599
+ 00096/0348 7 (256,448,3)
1600
+ 00096/0359 7 (256,448,3)
1601
+ 00096/0363 7 (256,448,3)
1602
+ 00096/0373 7 (256,448,3)
1603
+ 00096/0378 7 (256,448,3)
1604
+ 00096/0387 7 (256,448,3)
1605
+ 00096/0395 7 (256,448,3)
1606
+ 00096/0396 7 (256,448,3)
1607
+ 00096/0404 7 (256,448,3)
1608
+ 00096/0653 7 (256,448,3)
1609
+ 00096/0668 7 (256,448,3)
1610
+ 00096/0679 7 (256,448,3)
1611
+ 00096/0729 7 (256,448,3)
1612
+ 00096/0736 7 (256,448,3)
1613
+ 00096/0823 7 (256,448,3)
basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt ADDED
The diff for this file is too large to render. See raw diff
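For reference, each line of these Vimeo90K meta_info files is whitespace separated: a clip identifier (sequence/subfolder), the frame count, and the frame shape as (H,W,C), e.g. "00001/0266 7 (256,448,3)". Below is a minimal, hypothetical parsing sketch; the helper name parse_meta_info and the plain-text reading are assumptions for illustration, not part of this repository.

def parse_meta_info(path):
    # Hypothetical helper: parse lines like "00001/0266 7 (256,448,3)".
    entries = []
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            clip, num_frames, shape = line.split(' ')
            h, w, c = (int(v) for v in shape.strip('()').split(','))
            entries.append({'clip': clip, 'num_frames': int(num_frames), 'shape': (h, w, c)})
    return entries

# Usage sketch (the meta_info path refers to one of the files added in this commit):
# entries = parse_meta_info('basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt')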
basicsr/data/paired_image_dataset.py ADDED
@@ -0,0 +1,106 @@
+ from torch.utils import data as data
+ from torchvision.transforms.functional import normalize
+
+ from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb, paired_paths_from_meta_info_file
+ from basicsr.data.transforms import augment, paired_random_crop
+ from basicsr.utils import FileClient, bgr2ycbcr, imfrombytes, img2tensor
+ from basicsr.utils.registry import DATASET_REGISTRY
+
+
+ @DATASET_REGISTRY.register()
+ class PairedImageDataset(data.Dataset):
+     """Paired image dataset for image restoration.
+
+     Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs.
+
+     There are three modes:
+
+     1. **lmdb**: Use lmdb files. If opt['io_backend'] == lmdb.
+     2. **meta_info_file**: Use meta information file to generate paths. \
+         If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
+     3. **folder**: Scan folders to generate paths. The rest.
+
+     Args:
+         opt (dict): Config for train datasets. It contains the following keys:
+             dataroot_gt (str): Data root path for gt.
+             dataroot_lq (str): Data root path for lq.
+             meta_info_file (str): Path for meta information file.
+             io_backend (dict): IO backend type and other kwarg.
+             filename_tmpl (str): Template for each filename. Note that the template excludes the file extension.
+                 Default: '{}'.
+             gt_size (int): Cropped patched size for gt patches.
+             use_hflip (bool): Use horizontal flips.
+             use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
+             scale (bool): Scale, which will be added automatically.
+             phase (str): 'train' or 'val'.
+     """
+
+     def __init__(self, opt):
+         super(PairedImageDataset, self).__init__()
+         self.opt = opt
+         # file client (io backend)
+         self.file_client = None
+         self.io_backend_opt = opt['io_backend']
+         self.mean = opt['mean'] if 'mean' in opt else None
+         self.std = opt['std'] if 'std' in opt else None
+
+         self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
+         if 'filename_tmpl' in opt:
+             self.filename_tmpl = opt['filename_tmpl']
+         else:
+             self.filename_tmpl = '{}'
+
+         if self.io_backend_opt['type'] == 'lmdb':
+             self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
+             self.io_backend_opt['client_keys'] = ['lq', 'gt']
+             self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt'])
+         elif 'meta_info_file' in self.opt and self.opt['meta_info_file'] is not None:
+             self.paths = paired_paths_from_meta_info_file([self.lq_folder, self.gt_folder], ['lq', 'gt'],
+                                                           self.opt['meta_info_file'], self.filename_tmpl)
+         else:
+             self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)
+
+     def __getitem__(self, index):
+         if self.file_client is None:
+             self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+         scale = self.opt['scale']
+
+         # Load gt and lq images. Dimension order: HWC; channel order: BGR;
+         # image range: [0, 1], float32.
+         gt_path = self.paths[index]['gt_path']
+         img_bytes = self.file_client.get(gt_path, 'gt')
+         img_gt = imfrombytes(img_bytes, float32=True)
+         lq_path = self.paths[index]['lq_path']
+         img_bytes = self.file_client.get(lq_path, 'lq')
+         img_lq = imfrombytes(img_bytes, float32=True)
+
+         # augmentation for training
+         if self.opt['phase'] == 'train':
+             gt_size = self.opt['gt_size']
+             # random crop
+             img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
+             # flip, rotation
+             img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot'])
+
+         # color space transform
+         if 'color' in self.opt and self.opt['color'] == 'y':
+             img_gt = bgr2ycbcr(img_gt, y_only=True)[..., None]
+             img_lq = bgr2ycbcr(img_lq, y_only=True)[..., None]
+
+         # crop the unmatched GT images during validation or testing, especially for SR benchmark datasets
+         # TODO: It is better to update the datasets, rather than force to crop
+         if self.opt['phase'] != 'train':
+             img_gt = img_gt[0:img_lq.shape[0] * scale, 0:img_lq.shape[1] * scale, :]
+
+         # BGR to RGB, HWC to CHW, numpy to tensor
+         img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
+         # normalize
+         if self.mean is not None or self.std is not None:
+             normalize(img_lq, self.mean, self.std, inplace=True)
+             normalize(img_gt, self.mean, self.std, inplace=True)
+
+         return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path}
+
+     def __len__(self):
+         return len(self.paths)
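As a quick orientation, the opt dict that PairedImageDataset expects maps one-to-one onto the keys documented in its docstring. The configuration below is only an illustrative sketch: the dataset paths are placeholders, and the chosen values (scale, gt_size, the 'disk' io_backend) are typical choices rather than anything prescribed by this commit.

# Illustrative configuration sketch; paths and values are placeholders.
opt = {
    'phase': 'train',
    'scale': 4,
    'dataroot_gt': 'datasets/example/train_HR',
    'dataroot_lq': 'datasets/example/train_LR_x4',
    'io_backend': {'type': 'disk'},
    'filename_tmpl': '{}',
    'gt_size': 128,
    'use_hflip': True,
    'use_rot': True,
}
dataset = PairedImageDataset(opt)
sample = dataset[0]  # dict with 'lq'/'gt' tensors plus 'lq_path'/'gt_path'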
basicsr/data/prefetch_dataloader.py ADDED
@@ -0,0 +1,122 @@
1
+ import queue as Queue
2
+ import threading
3
+ import torch
4
+ from torch.utils.data import DataLoader
5
+
6
+
7
+ class PrefetchGenerator(threading.Thread):
8
+ """A general prefetch generator.
9
+
10
+ Reference: https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
11
+
12
+ Args:
13
+ generator: Python generator.
14
+ num_prefetch_queue (int): Number of prefetch queue.
15
+ """
16
+
17
+ def __init__(self, generator, num_prefetch_queue):
18
+ threading.Thread.__init__(self)
19
+ self.queue = Queue.Queue(num_prefetch_queue)
20
+ self.generator = generator
21
+ self.daemon = True
22
+ self.start()
23
+
24
+ def run(self):
25
+ for item in self.generator:
26
+ self.queue.put(item)
27
+ self.queue.put(None)
28
+
29
+ def __next__(self):
30
+ next_item = self.queue.get()
31
+ if next_item is None:
32
+ raise StopIteration
33
+ return next_item
34
+
35
+ def __iter__(self):
36
+ return self
37
+
38
+
39
+ class PrefetchDataLoader(DataLoader):
40
+ """Prefetch version of dataloader.
41
+
42
+ Reference: https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
43
+
44
+ TODO:
45
+ Need to test on single gpu and ddp (multi-gpu). There is a known issue in
46
+ ddp.
47
+
48
+ Args:
49
+ num_prefetch_queue (int): Number of prefetch queue.
50
+ kwargs (dict): Other arguments for dataloader.
51
+ """
52
+
53
+ def __init__(self, num_prefetch_queue, **kwargs):
54
+ self.num_prefetch_queue = num_prefetch_queue
55
+ super(PrefetchDataLoader, self).__init__(**kwargs)
56
+
57
+ def __iter__(self):
58
+ return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)
59
+
60
+
61
+ class CPUPrefetcher():
62
+ """CPU prefetcher.
63
+
64
+ Args:
65
+ loader: Dataloader.
66
+ """
67
+
68
+ def __init__(self, loader):
69
+ self.ori_loader = loader
70
+ self.loader = iter(loader)
71
+
72
+ def next(self):
73
+ try:
74
+ return next(self.loader)
75
+ except StopIteration:
76
+ return None
77
+
78
+ def reset(self):
79
+ self.loader = iter(self.ori_loader)
80
+
81
+
82
+ class CUDAPrefetcher():
83
+ """CUDA prefetcher.
84
+
85
+ Reference: https://github.com/NVIDIA/apex/issues/304#
86
+
87
+ It may consume more GPU memory.
88
+
89
+ Args:
90
+ loader: Dataloader.
91
+ opt (dict): Options.
92
+ """
93
+
94
+ def __init__(self, loader, opt):
95
+ self.ori_loader = loader
96
+ self.loader = iter(loader)
97
+ self.opt = opt
98
+ self.stream = torch.cuda.Stream()
99
+ self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
100
+ self.preload()
101
+
102
+ def preload(self):
103
+ try:
104
+ self.batch = next(self.loader) # self.batch is a dict
105
+ except StopIteration:
106
+ self.batch = None
107
+ return None
108
+ # put tensors to gpu
109
+ with torch.cuda.stream(self.stream):
110
+ for k, v in self.batch.items():
111
+ if torch.is_tensor(v):
112
+ self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)
113
+
114
+ def next(self):
115
+ torch.cuda.current_stream().wait_stream(self.stream)
116
+ batch = self.batch
117
+ self.preload()
118
+ return batch
119
+
120
+ def reset(self):
121
+ self.loader = iter(self.ori_loader)
122
+ self.preload()
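
A rough usage sketch for the prefetchers defined above; the dataset and loader here are placeholders, and CUDAPrefetcher additionally assumes each batch is a dict of tensors (it moves every tensor value to the GPU), so the CPU variant is shown:

import torch
from torch.utils.data import DataLoader, TensorDataset

from basicsr.data.prefetch_dataloader import CPUPrefetcher

loader = DataLoader(TensorDataset(torch.randn(16, 3, 8, 8)), batch_size=4)  # placeholder data
prefetcher = CPUPrefetcher(loader)

batch = prefetcher.next()
while batch is not None:
    # a training step would consume `batch` here
    batch = prefetcher.next()
prefetcher.reset()  # rewind the loader for the next epoch
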
basicsr/data/realesrgan_dataset.py ADDED
@@ -0,0 +1,181 @@
1
+ import cv2
2
+ import math
3
+ import numpy as np
4
+ import os
5
+ import os.path as osp
6
+ import random
7
+ import time
8
+ import torch
9
+ from pathlib import Path
10
+ from torch.utils import data as data
11
+
12
+ from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
13
+ from basicsr.data.transforms import augment
14
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
15
+ from basicsr.utils.registry import DATASET_REGISTRY
16
+
17
+ @DATASET_REGISTRY.register(suffix='basicsr')
18
+ class RealESRGANDataset(data.Dataset):
19
+ """Dataset used for Real-ESRGAN model:
20
+ Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
21
+
22
+ It loads gt (Ground-Truth) images, and augments them.
23
+ It also generates blur kernels and sinc kernels for generating low-quality images.
24
+ Note that the low-quality images are generated as tensors on GPUs for faster processing.
25
+
26
+ Args:
27
+ opt (dict): Config for train datasets. It contains the following keys:
28
+ df2k_path (str): Root folder of the DF2K GT images (*.png).
29
+ wed_path (str): Root folder of the WED GT images (*.bmp).
30
+ io_backend (dict): IO backend type and other kwarg.
31
+ use_hflip (bool): Use horizontal flips.
32
+ use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
33
+ Please see more options in the codes.
34
+ """
35
+
36
+ def __init__(self, opt):
37
+ super(RealESRGANDataset, self).__init__()
38
+ self.opt = opt
39
+ self.file_client = None
40
+ self.io_backend_opt = opt['io_backend']
41
+
42
+ # collect GT image paths from the DF2K (*.png) and WED (*.bmp) folders
43
+ self.paths = sorted([str(x) for x in Path(opt['df2k_path']).glob('*.png')])
44
+ self.paths.extend(sorted([str(x) for x in Path(opt['wed_path']).glob('*.bmp')]))
45
+
46
+ # blur settings for the first degradation
47
+ self.blur_kernel_size = opt['blur_kernel_size']
48
+ self.kernel_list = opt['kernel_list']
49
+ self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability
50
+ self.blur_sigma = opt['blur_sigma']
51
+ self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels
52
+ self.betap_range = opt['betap_range'] # betap used in plateau blur kernels
53
+ self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters
54
+
55
+ # blur settings for the second degradation
56
+ self.blur_kernel_size2 = opt['blur_kernel_size2']
57
+ self.kernel_list2 = opt['kernel_list2']
58
+ self.kernel_prob2 = opt['kernel_prob2']
59
+ self.blur_sigma2 = opt['blur_sigma2']
60
+ self.betag_range2 = opt['betag_range2']
61
+ self.betap_range2 = opt['betap_range2']
62
+ self.sinc_prob2 = opt['sinc_prob2']
63
+
64
+ # a final sinc filter
65
+ self.final_sinc_prob = opt['final_sinc_prob']
66
+
67
+ self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21
68
+ # TODO: the kernel range is hard-coded for now; it should come from the config file
69
+ self.pulse_tensor = torch.zeros(21, 21).float() # convolving with the pulse (delta) kernel leaves the image unblurred
70
+ self.pulse_tensor[10, 10] = 1
71
+
72
+ def __getitem__(self, index):
73
+ if self.file_client is None:
74
+ self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
75
+
76
+ # -------------------------------- Load gt images -------------------------------- #
77
+ # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
78
+ gt_path = self.paths[index]
79
+ # avoid errors caused by high latency in reading files
80
+ retry = 3
81
+ while retry > 0:
82
+ try:
83
+ img_bytes = self.file_client.get(gt_path, 'gt')
84
+ except (IOError, OSError) as e:
85
+ # logger = get_root_logger()
86
+ # logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}')
87
+ # switch to another (random) file to read
88
+ index = random.randint(0, self.__len__() - 1)  # randint is inclusive at both ends
89
+ gt_path = self.paths[index]
90
+ time.sleep(1) # sleep 1s for occasional server congestion
91
+ else:
92
+ break
93
+ finally:
94
+ retry -= 1
95
+ img_gt = imfrombytes(img_bytes, float32=True)
96
+
97
+ # -------------------- Do augmentation for training: flip, rotation -------------------- #
98
+ img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])
99
+
100
+ # crop or pad to 400
101
+ # TODO: 400 is hard-coded. You may change it accordingly
102
+ h, w = img_gt.shape[0:2]
103
+ crop_pad_size = 400
104
+ # pad
105
+ if h < crop_pad_size or w < crop_pad_size:
106
+ pad_h = max(0, crop_pad_size - h)
107
+ pad_w = max(0, crop_pad_size - w)
108
+ img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
109
+ # crop
110
+ if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
111
+ h, w = img_gt.shape[0:2]
112
+ # randomly choose top and left coordinates
113
+ top = random.randint(0, h - crop_pad_size)
114
+ left = random.randint(0, w - crop_pad_size)
115
+ img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]
116
+
117
+ # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
118
+ kernel_size = random.choice(self.kernel_range)
119
+ if np.random.uniform() < self.opt['sinc_prob']:
120
+ # this sinc filter setting is for kernel sizes ranging from 7 to 21
121
+ if kernel_size < 13:
122
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
123
+ else:
124
+ omega_c = np.random.uniform(np.pi / 5, np.pi)
125
+ kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
126
+ else:
127
+ kernel = random_mixed_kernels(
128
+ self.kernel_list,
129
+ self.kernel_prob,
130
+ kernel_size,
131
+ self.blur_sigma,
132
+ self.blur_sigma, [-math.pi, math.pi],
133
+ self.betag_range,
134
+ self.betap_range,
135
+ noise_range=None)
136
+ # pad kernel
137
+ pad_size = (21 - kernel_size) // 2
138
+ kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
139
+
140
+ # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
141
+ kernel_size = random.choice(self.kernel_range)
142
+ if np.random.uniform() < self.opt['sinc_prob2']:
143
+ if kernel_size < 13:
144
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
145
+ else:
146
+ omega_c = np.random.uniform(np.pi / 5, np.pi)
147
+ kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
148
+ else:
149
+ kernel2 = random_mixed_kernels(
150
+ self.kernel_list2,
151
+ self.kernel_prob2,
152
+ kernel_size,
153
+ self.blur_sigma2,
154
+ self.blur_sigma2, [-math.pi, math.pi],
155
+ self.betag_range2,
156
+ self.betap_range2,
157
+ noise_range=None)
158
+
159
+ # pad kernel
160
+ pad_size = (21 - kernel_size) // 2
161
+ kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
162
+
163
+ # ------------------------------------- the final sinc kernel ------------------------------------- #
164
+ if np.random.uniform() < self.opt['final_sinc_prob']:
165
+ kernel_size = random.choice(self.kernel_range)
166
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
167
+ sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
168
+ sinc_kernel = torch.FloatTensor(sinc_kernel)
169
+ else:
170
+ sinc_kernel = self.pulse_tensor
171
+
172
+ # BGR to RGB, HWC to CHW, numpy to tensor
173
+ img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
174
+ kernel = torch.FloatTensor(kernel)
175
+ kernel2 = torch.FloatTensor(kernel2)
176
+
177
+ return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
178
+ return return_d
179
+
180
+ def __len__(self):
181
+ return len(self.paths)
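
A sketch of the option dict this dataset reads; the paths and degradation values below are illustrative placeholders, not the settings shipped with any particular config:

from basicsr.data.realesrgan_dataset import RealESRGANDataset

opt = {
    'df2k_path': '/path/to/DF2K',   # folder of *.png GT images (placeholder path)
    'wed_path': '/path/to/WED',     # folder of *.bmp GT images (placeholder path)
    'io_backend': {'type': 'disk'},
    'use_hflip': True,
    'use_rot': False,
    # first-degradation blur settings
    'blur_kernel_size': 21,
    'kernel_list': ['iso', 'aniso'],
    'kernel_prob': [0.5, 0.5],
    'blur_sigma': [0.2, 3.0],
    'betag_range': [0.5, 4.0],
    'betap_range': [1.0, 2.0],
    'sinc_prob': 0.1,
    # second-degradation blur settings
    'blur_kernel_size2': 21,
    'kernel_list2': ['iso', 'aniso'],
    'kernel_prob2': [0.5, 0.5],
    'blur_sigma2': [0.2, 1.5],
    'betag_range2': [0.5, 4.0],
    'betap_range2': [1.0, 2.0],
    'sinc_prob2': 0.1,
    'final_sinc_prob': 0.8,
}

dataset = RealESRGANDataset(opt)
# dataset[0] -> dict with 'gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'
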
basicsr/data/realesrgan_paired_dataset.py ADDED
@@ -0,0 +1,106 @@
1
+ import os
2
+ from torch.utils import data as data
3
+ from torchvision.transforms.functional import normalize
4
+
5
+ from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb
6
+ from basicsr.data.transforms import augment, paired_random_crop
7
+ from basicsr.utils import FileClient, imfrombytes, img2tensor
8
+ from basicsr.utils.registry import DATASET_REGISTRY
9
+
10
+
11
+ @DATASET_REGISTRY.register(suffix='basicsr')
12
+ class RealESRGANPairedDataset(data.Dataset):
13
+ """Paired image dataset for image restoration.
14
+
15
+ Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs.
16
+
17
+ There are three modes:
18
+
19
+ 1. **lmdb**: Use lmdb files. If opt['io_backend'] == lmdb.
20
+ 2. **meta_info_file**: Use meta information file to generate paths. \
21
+ If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
22
+ 3. **folder**: Scan folders to generate paths. The rest.
23
+
24
+ Args:
25
+ opt (dict): Config for train datasets. It contains the following keys:
26
+ dataroot_gt (str): Data root path for gt.
27
+ dataroot_lq (str): Data root path for lq.
28
+ meta_info (str): Path for meta information file.
29
+ io_backend (dict): IO backend type and other kwarg.
30
+ filename_tmpl (str): Template for each filename. Note that the template excludes the file extension.
31
+ Default: '{}'.
32
+ gt_size (int): Cropped patch size for gt patches.
33
+ use_hflip (bool): Use horizontal flips.
34
+ use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
35
+ scale (int): Scale factor, which will be added automatically.
36
+ phase (str): 'train' or 'val'.
37
+ """
38
+
39
+ def __init__(self, opt):
40
+ super(RealESRGANPairedDataset, self).__init__()
41
+ self.opt = opt
42
+ self.file_client = None
43
+ self.io_backend_opt = opt['io_backend']
44
+ # mean and std for normalizing the input images
45
+ self.mean = opt['mean'] if 'mean' in opt else None
46
+ self.std = opt['std'] if 'std' in opt else None
47
+
48
+ self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
49
+ self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}'
50
+
51
+ # file client (lmdb io backend)
52
+ if self.io_backend_opt['type'] == 'lmdb':
53
+ self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
54
+ self.io_backend_opt['client_keys'] = ['lq', 'gt']
55
+ self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt'])
56
+ elif 'meta_info' in self.opt and self.opt['meta_info'] is not None:
57
+ # disk backend with meta_info
58
+ # each line in the meta_info file holds the relative paths of a GT image and its LQ image, separated by ', '
59
+ with open(self.opt['meta_info']) as fin:
60
+ paths = [line.strip() for line in fin]
61
+ self.paths = []
62
+ for path in paths:
63
+ gt_path, lq_path = path.split(', ')
64
+ gt_path = os.path.join(self.gt_folder, gt_path)
65
+ lq_path = os.path.join(self.lq_folder, lq_path)
66
+ self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)]))
67
+ else:
68
+ # disk backend
69
+ # it will scan the whole folder to get meta info
70
+ # it will be time-consuming for folders with too many files; using an extra meta-info txt file is recommended
71
+ self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)
72
+
73
+ def __getitem__(self, index):
74
+ if self.file_client is None:
75
+ self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
76
+
77
+ scale = self.opt['scale']
78
+
79
+ # Load gt and lq images. Dimension order: HWC; channel order: BGR;
80
+ # image range: [0, 1], float32.
81
+ gt_path = self.paths[index]['gt_path']
82
+ img_bytes = self.file_client.get(gt_path, 'gt')
83
+ img_gt = imfrombytes(img_bytes, float32=True)
84
+ lq_path = self.paths[index]['lq_path']
85
+ img_bytes = self.file_client.get(lq_path, 'lq')
86
+ img_lq = imfrombytes(img_bytes, float32=True)
87
+
88
+ # augmentation for training
89
+ if self.opt['phase'] == 'train':
90
+ gt_size = self.opt['gt_size']
91
+ # random crop
92
+ img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
93
+ # flip, rotation
94
+ img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot'])
95
+
96
+ # BGR to RGB, HWC to CHW, numpy to tensor
97
+ img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
98
+ # normalize
99
+ if self.mean is not None or self.std is not None:
100
+ normalize(img_lq, self.mean, self.std, inplace=True)
101
+ normalize(img_gt, self.mean, self.std, inplace=True)
102
+
103
+ return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path}
104
+
105
+ def __len__(self):
106
+ return len(self.paths)
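
For the meta_info mode above, each line pairs the relative path of a GT image with that of its LQ counterpart, separated by ', '. A made-up two-line example of such a file (file names are placeholders):

gt/0001.png, lq/0001.png
gt/0002.png, lq/0002.png

The relative paths are then joined with dataroot_gt and dataroot_lq respectively before loading.
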
basicsr/data/reds_dataset.py ADDED
@@ -0,0 +1,352 @@
1
+ import numpy as np
2
+ import random
3
+ import torch
4
+ from pathlib import Path
5
+ from torch.utils import data as data
6
+
7
+ from basicsr.data.transforms import augment, paired_random_crop
8
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
9
+ from basicsr.utils.flow_util import dequantize_flow
10
+ from basicsr.utils.registry import DATASET_REGISTRY
11
+
12
+
13
+ @DATASET_REGISTRY.register()
14
+ class REDSDataset(data.Dataset):
15
+ """REDS dataset for training.
16
+
17
+ The keys are generated from a meta info txt file.
18
+ basicsr/data/meta_info/meta_info_REDS_GT.txt
19
+
20
+ Each line contains:
21
+ 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
22
+ a white space.
23
+ Examples:
24
+ 000 100 (720,1280,3)
25
+ 001 100 (720,1280,3)
26
+ ...
27
+
28
+ Key examples: "000/00000000"
29
+ GT (gt): Ground-Truth;
30
+ LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
31
+
32
+ Args:
33
+ opt (dict): Config for train dataset. It contains the following keys:
34
+ dataroot_gt (str): Data root path for gt.
35
+ dataroot_lq (str): Data root path for lq.
36
+ dataroot_flow (str, optional): Data root path for flow.
37
+ meta_info_file (str): Path for meta information file.
38
+ val_partition (str): Validation partition types. 'REDS4' or 'official'.
39
+ io_backend (dict): IO backend type and other kwarg.
40
+ num_frame (int): Window size for input frames.
41
+ gt_size (int): Cropped patch size for gt patches.
42
+ interval_list (list): Interval list for temporal augmentation.
43
+ random_reverse (bool): Random reverse input frames.
44
+ use_hflip (bool): Use horizontal flips.
45
+ use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
46
+ scale (int): Scale factor, which will be added automatically.
47
+ """
48
+
49
+ def __init__(self, opt):
50
+ super(REDSDataset, self).__init__()
51
+ self.opt = opt
52
+ self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
53
+ self.flow_root = Path(opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
54
+ assert opt['num_frame'] % 2 == 1, (f'num_frame should be an odd number, but got {opt["num_frame"]}')
55
+ self.num_frame = opt['num_frame']
56
+ self.num_half_frames = opt['num_frame'] // 2
57
+
58
+ self.keys = []
59
+ with open(opt['meta_info_file'], 'r') as fin:
60
+ for line in fin:
61
+ folder, frame_num, _ = line.split(' ')
62
+ self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])
63
+
64
+ # remove the video clips used in validation
65
+ if opt['val_partition'] == 'REDS4':
66
+ val_partition = ['000', '011', '015', '020']
67
+ elif opt['val_partition'] == 'official':
68
+ val_partition = [f'{v:03d}' for v in range(240, 270)]
69
+ else:
70
+ raise ValueError(f'Wrong validation partition {opt["val_partition"]}. '
71
+ f"Supported ones are ['official', 'REDS4'].")
72
+ self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]
73
+
74
+ # file client (io backend)
75
+ self.file_client = None
76
+ self.io_backend_opt = opt['io_backend']
77
+ self.is_lmdb = False
78
+ if self.io_backend_opt['type'] == 'lmdb':
79
+ self.is_lmdb = True
80
+ if self.flow_root is not None:
81
+ self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
82
+ self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
83
+ else:
84
+ self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
85
+ self.io_backend_opt['client_keys'] = ['lq', 'gt']
86
+
87
+ # temporal augmentation configs
88
+ self.interval_list = opt['interval_list']
89
+ self.random_reverse = opt['random_reverse']
90
+ interval_str = ','.join(str(x) for x in opt['interval_list'])
91
+ logger = get_root_logger()
92
+ logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
93
+ f'random reverse is {self.random_reverse}.')
94
+
95
+ def __getitem__(self, index):
96
+ if self.file_client is None:
97
+ self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
98
+
99
+ scale = self.opt['scale']
100
+ gt_size = self.opt['gt_size']
101
+ key = self.keys[index]
102
+ clip_name, frame_name = key.split('/') # key example: 000/00000000
103
+ center_frame_idx = int(frame_name)
104
+
105
+ # determine the neighboring frames
106
+ interval = random.choice(self.interval_list)
107
+
108
+ # ensure not exceeding the borders
109
+ start_frame_idx = center_frame_idx - self.num_half_frames * interval
110
+ end_frame_idx = center_frame_idx + self.num_half_frames * interval
111
+ # each clip has 100 frames starting from 0 to 99
112
+ while (start_frame_idx < 0) or (end_frame_idx > 99):
113
+ center_frame_idx = random.randint(0, 99)
114
+ start_frame_idx = (center_frame_idx - self.num_half_frames * interval)
115
+ end_frame_idx = center_frame_idx + self.num_half_frames * interval
116
+ frame_name = f'{center_frame_idx:08d}'
117
+ neighbor_list = list(range(start_frame_idx, end_frame_idx + 1, interval))
118
+ # random reverse
119
+ if self.random_reverse and random.random() < 0.5:
120
+ neighbor_list.reverse()
121
+
122
+ assert len(neighbor_list) == self.num_frame, (f'Wrong length of neighbor list: {len(neighbor_list)}')
123
+
124
+ # get the GT frame (as the center frame)
125
+ if self.is_lmdb:
126
+ img_gt_path = f'{clip_name}/{frame_name}'
127
+ else:
128
+ img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
129
+ img_bytes = self.file_client.get(img_gt_path, 'gt')
130
+ img_gt = imfrombytes(img_bytes, float32=True)
131
+
132
+ # get the neighboring LQ frames
133
+ img_lqs = []
134
+ for neighbor in neighbor_list:
135
+ if self.is_lmdb:
136
+ img_lq_path = f'{clip_name}/{neighbor:08d}'
137
+ else:
138
+ img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
139
+ img_bytes = self.file_client.get(img_lq_path, 'lq')
140
+ img_lq = imfrombytes(img_bytes, float32=True)
141
+ img_lqs.append(img_lq)
142
+
143
+ # get flows
144
+ if self.flow_root is not None:
145
+ img_flows = []
146
+ # read previous flows
147
+ for i in range(self.num_half_frames, 0, -1):
148
+ if self.is_lmdb:
149
+ flow_path = f'{clip_name}/{frame_name}_p{i}'
150
+ else:
151
+ flow_path = (self.flow_root / clip_name / f'{frame_name}_p{i}.png')
152
+ img_bytes = self.file_client.get(flow_path, 'flow')
153
+ cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False) # uint8, [0, 255]
154
+ dx, dy = np.split(cat_flow, 2, axis=0)
155
+ flow = dequantize_flow(dx, dy, max_val=20, denorm=False) # we use max_val 20 here.
156
+ img_flows.append(flow)
157
+ # read next flows
158
+ for i in range(1, self.num_half_frames + 1):
159
+ if self.is_lmdb:
160
+ flow_path = f'{clip_name}/{frame_name}_n{i}'
161
+ else:
162
+ flow_path = (self.flow_root / clip_name / f'{frame_name}_n{i}.png')
163
+ img_bytes = self.file_client.get(flow_path, 'flow')
164
+ cat_flow = imfrombytes(img_bytes, flag='grayscale', float32=False) # uint8, [0, 255]
165
+ dx, dy = np.split(cat_flow, 2, axis=0)
166
+ flow = dequantize_flow(dx, dy, max_val=20, denorm=False) # we use max_val 20 here.
167
+ img_flows.append(flow)
168
+
169
+ # for random cropping; at this point img_flows and img_lqs have the same
170
+ # spatial size
171
+ img_lqs.extend(img_flows)
172
+
173
+ # randomly crop
174
+ img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path)
175
+ if self.flow_root is not None:
176
+ img_lqs, img_flows = img_lqs[:self.num_frame], img_lqs[self.num_frame:]
177
+
178
+ # augmentation - flip, rotate
179
+ img_lqs.append(img_gt)
180
+ if self.flow_root is not None:
181
+ img_results, img_flows = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'], img_flows)
182
+ else:
183
+ img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
184
+
185
+ img_results = img2tensor(img_results)
186
+ img_lqs = torch.stack(img_results[0:-1], dim=0)
187
+ img_gt = img_results[-1]
188
+
189
+ if self.flow_root is not None:
190
+ img_flows = img2tensor(img_flows)
191
+ # add the zero center flow
192
+ img_flows.insert(self.num_half_frames, torch.zeros_like(img_flows[0]))
193
+ img_flows = torch.stack(img_flows, dim=0)
194
+
195
+ # img_lqs: (t, c, h, w)
196
+ # img_flows: (t, 2, h, w)
197
+ # img_gt: (c, h, w)
198
+ # key: str
199
+ if self.flow_root is not None:
200
+ return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
201
+ else:
202
+ return {'lq': img_lqs, 'gt': img_gt, 'key': key}
203
+
204
+ def __len__(self):
205
+ return len(self.keys)
206
+
207
+
208
+ @DATASET_REGISTRY.register()
209
+ class REDSRecurrentDataset(data.Dataset):
210
+ """REDS dataset for training recurrent networks.
211
+
212
+ The keys are generated from a meta info txt file.
213
+ basicsr/data/meta_info/meta_info_REDS_GT.txt
214
+
215
+ Each line contains:
216
+ 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
217
+ a white space.
218
+ Examples:
219
+ 000 100 (720,1280,3)
220
+ 001 100 (720,1280,3)
221
+ ...
222
+
223
+ Key examples: "000/00000000"
224
+ GT (gt): Ground-Truth;
225
+ LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
226
+
227
+ Args:
228
+ opt (dict): Config for train dataset. It contains the following keys:
229
+ dataroot_gt (str): Data root path for gt.
230
+ dataroot_lq (str): Data root path for lq.
231
+ dataroot_flow (str, optional): Data root path for flow.
232
+ meta_info_file (str): Path for meta information file.
233
+ val_partition (str): Validation partition types. 'REDS4' or 'official'.
234
+ io_backend (dict): IO backend type and other kwarg.
235
+ num_frame (int): Window size for input frames.
236
+ gt_size (int): Cropped patch size for gt patches.
237
+ interval_list (list): Interval list for temporal augmentation.
238
+ random_reverse (bool): Random reverse input frames.
239
+ use_hflip (bool): Use horizontal flips.
240
+ use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
241
+ scale (int): Scale factor, which will be added automatically.
242
+ """
243
+
244
+ def __init__(self, opt):
245
+ super(REDSRecurrentDataset, self).__init__()
246
+ self.opt = opt
247
+ self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
248
+ self.num_frame = opt['num_frame']
249
+
250
+ self.keys = []
251
+ with open(opt['meta_info_file'], 'r') as fin:
252
+ for line in fin:
253
+ folder, frame_num, _ = line.split(' ')
254
+ self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])
255
+
256
+ # remove the video clips used in validation
257
+ if opt['val_partition'] == 'REDS4':
258
+ val_partition = ['000', '011', '015', '020']
259
+ elif opt['val_partition'] == 'official':
260
+ val_partition = [f'{v:03d}' for v in range(240, 270)]
261
+ else:
262
+ raise ValueError(f'Wrong validation partition {opt["val_partition"]}. '
263
+ f"Supported ones are ['official', 'REDS4'].")
264
+ if opt['test_mode']:
265
+ self.keys = [v for v in self.keys if v.split('/')[0] in val_partition]
266
+ else:
267
+ self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition]
268
+
269
+ # file client (io backend)
270
+ self.file_client = None
271
+ self.io_backend_opt = opt['io_backend']
272
+ self.is_lmdb = False
273
+ if self.io_backend_opt['type'] == 'lmdb':
274
+ self.is_lmdb = True
275
+ if hasattr(self, 'flow_root') and self.flow_root is not None:
276
+ self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
277
+ self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
278
+ else:
279
+ self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
280
+ self.io_backend_opt['client_keys'] = ['lq', 'gt']
281
+
282
+ # temporal augmentation configs
283
+ self.interval_list = opt.get('interval_list', [1])
284
+ self.random_reverse = opt.get('random_reverse', False)
285
+ interval_str = ','.join(str(x) for x in self.interval_list)
286
+ logger = get_root_logger()
287
+ logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
288
+ f'random reverse is {self.random_reverse}.')
289
+
290
+ def __getitem__(self, index):
291
+ if self.file_client is None:
292
+ self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
293
+
294
+ scale = self.opt['scale']
295
+ gt_size = self.opt['gt_size']
296
+ key = self.keys[index]
297
+ clip_name, frame_name = key.split('/') # key example: 000/00000000
298
+
299
+ # determine the neighboring frames
300
+ interval = random.choice(self.interval_list)
301
+
302
+ # ensure not exceeding the borders
303
+ start_frame_idx = int(frame_name)
304
+ if start_frame_idx > 100 - self.num_frame * interval:
305
+ start_frame_idx = random.randint(0, 100 - self.num_frame * interval)
306
+ end_frame_idx = start_frame_idx + self.num_frame * interval
307
+
308
+ neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
309
+
310
+ # random reverse
311
+ if self.random_reverse and random.random() < 0.5:
312
+ neighbor_list.reverse()
313
+
314
+ # get the neighboring LQ and GT frames
315
+ img_lqs = []
316
+ img_gts = []
317
+ for neighbor in neighbor_list:
318
+ if self.is_lmdb:
319
+ img_lq_path = f'{clip_name}/{neighbor:08d}'
320
+ img_gt_path = f'{clip_name}/{neighbor:08d}'
321
+ else:
322
+ img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
323
+ img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png'
324
+
325
+ # get LQ
326
+ img_bytes = self.file_client.get(img_lq_path, 'lq')
327
+ img_lq = imfrombytes(img_bytes, float32=True)
328
+ img_lqs.append(img_lq)
329
+
330
+ # get GT
331
+ img_bytes = self.file_client.get(img_gt_path, 'gt')
332
+ img_gt = imfrombytes(img_bytes, float32=True)
333
+ img_gts.append(img_gt)
334
+
335
+ # randomly crop
336
+ img_gts, img_lqs = paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path)
337
+
338
+ # augmentation - flip, rotate
339
+ img_lqs.extend(img_gts)
340
+ img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
341
+
342
+ img_results = img2tensor(img_results)
343
+ img_gts = torch.stack(img_results[len(img_lqs) // 2:], dim=0)
344
+ img_lqs = torch.stack(img_results[:len(img_lqs) // 2], dim=0)
345
+
346
+ # img_lqs: (t, c, h, w)
347
+ # img_gts: (t, c, h, w)
348
+ # key: str
349
+ return {'lq': img_lqs, 'gt': img_gts, 'key': key}
350
+
351
+ def __len__(self):
352
+ return len(self.keys)
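
As a small standalone illustration of the key generation shared by both REDS datasets above, using two meta-info lines in the documented format (both example clips happen to belong to the REDS4 validation split):

# standalone sketch of the key generation used by REDSDataset / REDSRecurrentDataset
meta_lines = ['000 100 (720,1280,3)', '011 100 (720,1280,3)']
keys = []
for line in meta_lines:
    folder, frame_num, _ = line.split(' ')
    keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))])

val_partition = ['000', '011', '015', '020']  # REDS4 split
train_keys = [v for v in keys if v.split('/')[0] not in val_partition]
print(len(keys), len(train_keys))  # -> 200 0, since both example clips are in REDS4
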