superprpogresor committed d7a32af
Parent(s): bf3fef2

Upload 2 files
- 4x_Valar_v1.pth +3 -0
- train_valar_v1.yml +225 -0
4x_Valar_v1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90c3192bef43e4baaa095c04751868065f23c52d98c1b42e6d0916bfeda75646
size 67544144
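The entry above is a Git LFS pointer, so only the hash and size (67,544,144 bytes, about 64 MiB) live in the repository; the weights themselves are fetched from LFS storage. As a rough, assumption-laden sketch (the local filename and the optional 'params' wrapper key are guesses, not part of this commit), a downloaded copy could be sanity-checked like this:

import torch

# Hypothetical local copy of the uploaded checkpoint.
CKPT = "4x_Valar_v1.pth"

state = torch.load(CKPT, map_location="cpu")
# Some trainers wrap the generator weights in an extra dict; unwrap if present.
if isinstance(state, dict) and "params" in state:
    state = state["params"]

print(len(state), "tensors,",
      round(sum(v.numel() for v in state.values()) / 1e6, 1), "M parameters")
for name, tensor in list(state.items())[:5]:
    print(name, tuple(tensor.shape))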
train_valar_v1.yml
ADDED
@@ -0,0 +1,225 @@
name: 4x_Valar_v1
use_tb_logger: false
model: sr
scale: 4
gpu_ids: [0]
use_amp: false
use_swa: false
use_cem: false

# Dataset options:
datasets:
  train:
    name: AdobeMIT5k
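    # (presumably the MIT-Adobe FiveK photo set, paired as HR/LR under ../mit5k)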
    mode: aligned
    dataroot_HR: [
      '../mit5k/hr',
      ] # high resolution / ground truth images
    dataroot_LR: [
      '../mit5k/lr',
      ] # low resolution images
    subset_file: null
    use_shuffle: true
    znorm: false
    n_workers: 4
    batch_size: 1
    virtual_batch_size: 1
    preprocess: crop
    crop_size: 112
    image_channels: 3

    # AdaTarget
    use_atg: true
    atg_start_iter_rel: 0.83
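    # (relative to niter: 4e5 set further down, i.e. AdaTarget switches on around iteration 332k)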

    # Color space conversion
    # color: 'y'
    # color_LR: 'y'
    # color_HR: 'y'

    # Rotations augmentations:
    use_flip: true
    use_rot: true
    use_hrrot: false

    # Presets and on the fly (OTF) augmentations

    # Resize Options
    lr_downscale: true
    lr_downscale_types: [linear, bicubic, realistic]
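    # ('realistic' presumably draws from the estimated kernels under dataroot_kernels below)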

    aug_downscale: 0.5
    resize_strat: pre

    # Blur degradations
    #lr_blur: true
    #lr_blur_types: {sinc: 0.05, iso: 0.1, aniso: 0.1}
    #iso:
    #  p: 0.4
    #  min_kernel_size: 1
    #  kernel_size: 5
    #  sigmaX: [0.1, 1.0]
    #  noise: null

    #aniso:
    #  p: 0.3
    #  min_kernel_size: 1
    #  kernel_size: 3
    #  sigmaX: [0.1, 1.0]
    #  sigmaY: [0.1, 1.0]
    #  angle: [0, 180]
    #  noise: null

    #sinc:
    #  p: 0.2
    #  min_kernel_size: 1
    #  kernel_size: 3
    #  min_cutoff: null

    lr_noise: true
    lr_noise_types: {JPEG: 3, camera: 1.6, patches: 2.5, clean: 1.5}
    hr_unsharp_mask: true
    hr_rand_unsharp: 1

    camera:
      p: 0.25
      demosaic_fn: malvar
      xyz_arr: D50
      rg_range: [0.7, 3.0]
      bg_range: [0.7, 3.0]

    jpeg:
      p: 0.75
      min_quality: 30
      max_quality: 95

    unsharp:
      p: 0.12
      blur_algo: median
      kernel_size: 1
      strength: 0.10
      unsharp_algo: laplacian

    dataroot_kernels: '../mit5k/kernelgan_hr/'
    noise_data: '../mit5k/noise_patches_path/'

    # pre_crop: true
    # hr_downscale: true
    # hr_downscale_amt: [2, 1.75, 1.5, 1]
    # shape_change: reshape_lr

path:
  root: './'
  #pretrain_model_G: '../models/4x_RRDB_ESRGAN.pth'
  #pretrain_model_Loc: '../models/locnet.pth'
  #resume_state: './experiments/4x_Valar_v1/training_state/latest.state'

# Generator options:
network_G:
  which_model_G: esrgan
  plus: true
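  # (plus: true presumably selects the ESRGAN+ variant of the RRDB generator)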
  gaussian_noise: true

# Discriminator options:
network_D: unet

train:
  # Optimizer options:
  optim_G: AdamP
  optim_D: AdamP

  # Schedulers options:
  lr_scheme: MultiStepLR
  lr_steps_rel: [0.1, 0.2, 0.4, 0.6]
  lr_gamma: 0.5
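  # (with niter: 4e5 these relative steps fall at iterations 40k, 80k, 160k and 240k, halving the learning rate each time)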

  # For SWA scheduler
  swa_start_iter_rel: 0.75
  swa_lr: 1e-4
  swa_anneal_epochs: 10
  swa_anneal_strategy: "cos"
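  # (left inactive here since use_swa: false at the top of the file)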

  # Losses:
  pixel_criterion: clipl1 # pixel (content) loss
  pixel_weight: 0.25
  perceptual_opt:
    perceptual_layers: {"conv1_2": 0.1, "conv2_2": 0.1, "conv3_4": 1.0, "conv4_4": 1.0, "conv5_4": 1.0}
    use_input_norm: true
    perceptual_weight: 1.05
    style_weight: 0
  feature_criterion: l1 # feature loss (VGG feature network)
  feature_weight: 1
  cx_type: contextual # contextual loss
  cx_weight: 0.3
  cx_vgg_layers: {conv_3_2: 1.0, conv_4_2: 1.0}
  # hfen_criterion: l1 # hfen
  # hfen_weight: 1e-6
  # grad_type: grad-4d-l1 # image gradient loss
  # grad_weight: 4e-1
  #tv_type: normal # total variation
  #tv_weight: 1e-5
  #tv_norm: 1
  #ssim_type: ms-ssim # structural similarity
  #ssim_weight: 1
  #lpips_weight: 0.6 # perceptual loss
  #lpips_type: net-lin
  #lpips_net: squeeze

  # Experimental losses
  # spl_type: spl # spatial profile loss
  # spl_weight: 0.1
  # of_type: overflow # overflow loss
  # of_weight: 0.2
  # range_weight: 1 # range loss
  # fft_type: fft # FFT loss
  # fft_weight: 0.1
  color_criterion: color-l1cosinesim # color consistency loss
  color_weight: 1.0
  # avg_criterion: avg-l1 # averaging downscale loss
  # avg_weight: 5
  # ms_criterion: multiscale-l1 # multi-scale pixel loss
  # ms_weight: 1e-2
  # fdpl_type: fdpl # frequency domain-based perceptual loss
  # fdpl_weight: 1e-3

  # Adversarial loss:
  gan_type: vanilla
  gan_weight: 1e-1
  # freeze_loc: 4
  # For wgan-gp:
  # D_update_ratio: 1
  # D_init_iters: 0
  # gp_weigth: 10
  # Feature matching (if using the discriminator_vgg_128_fea or discriminator_vgg_fea):
  # gan_featmaps: true
  # dis_feature_criterion: cb # discriminator feature loss
  # dis_feature_weight: 0.01

  # Differentiable Augmentation for Data-Efficient GAN Training
  # diffaug: true
  # dapolicy: 'color,transl_zoom,flip,rotate,cutout'

  # Batch (Mixup) augmentations
  mixup: true
  mixopts: [blend, rgb, mixup, cutmix, cutmixup] # , "cutout", "cutblur"]
  mixprob: [0.5, 0.5, 1.0, 1.0, 1.0] #, 1.0, 1.0]
  # mixalpha: [0.6, 1.0, 1.2, 0.7, 0.7] #, 0.001, 0.7]
  aux_mixprob: 1.0
  # aux_mixalpha: 1.2
  ## mix_p: 1.2

  # Frequency Separator
  fs: true
  lpf_type: average
  hpf_type: average

  # Other training options:
  manual_seed: 0
  niter: 4e5
  warmup_iter: -1
  # overwrite_val_imgs: true

logger:
  print_freq: 100
  save_checkpoint_freq: 5e3
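  # (a checkpoint every 5000 iterations, roughly 80 over the 4e5-iteration run)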
  overwrite_chkp: false