raannakasturi committed
Commit 38e5267
1 Parent(s): e86e3b2

Delete x1_ITF_SkinDiffDetail_Lite_v1.yml

Files changed (1):
  1. x1_ITF_SkinDiffDetail_Lite_v1.yml +0 -238
x1_ITF_SkinDiffDetail_Lite_v1.yml DELETED
@@ -1,238 +0,0 @@
- # python train.py -opt options/sr/x1_ITF_SkinDiffDetail_Lite_v1.yml
- name: x1_ITF_SkinDiffDetail_Lite_v1
- # the name that defines the experiment and the directory that will be created in the experiments directory.
- # name: debug_001_template # use the "debug" or "debug_nochkp" prefix in the name to run a test session and check everything is working. Does validation and state saving every 8 iterations. Remove "debug" to run the real training session.
- use_tb_logger: false
- # whether to enable TensorBoard logging. Output will be saved in: traiNNer/tb_logger/
- model: sr
- # the model training strategy to be used. Depends on the type of model, from: https://github.com/victorca25/traiNNer/tree/master/codes/models
- scale: 1 # the scale factor that will be used for training for super-resolution cases. Default is "1".
- gpu_ids: [0] # the list of `CUDA_VISIBLE_DEVICES` that will be used during training, i.e. for two GPUs, use [0, 1]. The batch size should be a multiple of the number of 'gpu_ids', since images will be distributed from the batch to each GPU.
- use_amp: true # select to use PyTorch's Automatic Mixed Precision package to train in low-precision FP16 mode (lowers VRAM requirements).
- use_swa: false # select to use Stochastic Weight Averaging
- use_cem: false # select to use CEM during training. https://github.com/victorca25/traiNNer/tree/master/codes/models/modules/architectures/CEM
-
- # Dataset options:
- datasets: # configure the datasets
-   train: # the stage the dataset will be used for (training)
-     name: x1_ITF_SkinDiffDetail_Lite_v1 # the name of your dataset (only informative)
-     mode: aligned
-     # dataset mode: https://github.com/victorca25/traiNNer/tree/master/codes/data
-     dataroot_HR: [
-       #'K:/TRAINING/data/Skin_Diff2Nrml/hr_clean_tiles/'
-       '../datasets/Skin_DiffDetail/hr/'
-     ]
-     dataroot_LR: [
-       #'K:/TRAINING/data/Skin_Diff2Nrml/lr_clean_tiles/'
-       '../datasets/Skin_DiffDetail/lr_soft/'
-     ] # low resolution images
-     subset_file: null
-     use_shuffle: true
-     znorm: false
-     n_workers: 8
-     batch_size: 12
-     virtual_batch_size: 12
-     preprocess: crop
-     crop_size: 64
-     image_channels: 3
-
-     # Color space conversion
-     # color: 'y'
-     # color_LR: 'y'
-     # color_HR: 'y'
-
-     # LR and HR modifiers.
-     # aug_downscale: 0.2
-     # shape_change: reshape_lr
-
-     # Enable random downscaling of HR images (will fix LR pair to correct size)
-     hr_downscale: true
-     hr_downscale_types: [0, 3]
-     hr_downscale_amount: [1, 2, 4]
-     # #pre_crop: true
-
-     # Presets and on-the-fly (OTF) augmentations
-     #augs_strategy: combo
-     #add_blur_preset: custom_blur
-     #add_resize_preset: custom_resize
-     #add_noise_preset: custom_noise
-     #aug_downscale: 0.2
-     resize_strat: pre
-
-     # On-the-fly generation of LR:
-     # dataroot_kernels: 'KERNEL PATH !!!! CHANGE THIS OR COMMENT OUT'
-     #lr_downscale: false
-     #lr_downscale_types: ["linear", "bicubic", "nearest_aligned"]
-
-     # Flip and rotation augmentations:
-     use_flip: true
-     use_rot: true
-     use_hrrot: true
-
-     # Noise and blur augmentations:
-     #lr_blur: true
-     #lr_blur_types: {sinc: 0.2, iso: 0.2, ansio2: 0.4, sinc2: 0.2, clean: 3}
-     #noise_data: 'K:/TRAINING/traiNNer/noise_patches/'
-     #lr_noise: true
-     #lr_noise_types: {camera: 0.1, jpeg: 0.8, clean: 3}
-     #lr_noise2: false
-     #lr_noise_types2: {jpeg: 1, webp: 0, clean: 2, camera: 2}
-     #hr_noise: false
-     #hr_noise_types: {gaussian: 1, clean: 4}
-
-     # Color augmentations
-     # lr_fringes: false
-     # lr_fringes_chance: 0.4
-     # auto_levels: HR
-     # rand_auto_levels: 0.7
-     #lr_unsharp_mask: true
-     #lr_rand_unsharp: 0.7
-     # hr_unsharp_mask: true
-     # hr_rand_unsharp: 1
-
-     # Augmentations for classification or (maybe) inpainting networks:
-     # lr_cutout: false
-     # lr_erasing: false
-
-   #val:
-     #name: val_set14_part
-     #mode: aligned
-     #dataroot_B: '../datasets/val/hr'
-     #dataroot_A: '../datasets/val/lr'
-
-     #znorm: false
-
-     # Color space conversion:
-     # color: 'y'
-     # color_LR: 'y'
-     # color_HR: 'y'
-
-
- path:
-   root: '../'
-   pretrain_model_G: '../experiments/pretrained_models/1x_DIV2K-Lite_SpongeBC1-Lite_interp.pth'
-   # pretrain_model_D: 'K:/TRAINING/data/models/x1_ITF_SkinDiff2Nrm_Lite_v3_208500_D.pth'
-   resume_state: '../experiments/x1_ITF_SkinDiffDetail_Lite_v1/training_state/latest.state'
-
- # Generator options:
- network_G: esrgan-lite # configurations for the Generator network
-
-
- # Discriminator options:
- network_D:
-   # ESRGAN (default) | PPON:
-   which_model_D: multiscale # discriminator_vgg_128 | discriminator_vgg | discriminator_vgg_128_fea (feature extraction) | patchgan | multiscale
-   norm_type: batch
-   act_type: leakyrelu
-   mode: CNA # CNA | NAC
-   nf: 32
-   in_nc: 3
-   nlayer: 3 # only for patchgan and multiscale
-   num_D: 3 # only for multiscale
-
- train:
-   # Optimizer options:
-   optim_G: adamp
-   optim_D: adamp
-
-   # Scheduler options:
-   lr_scheme: MultiStepLR
-   lr_steps_rel: [50000, 100000, 200000, 300000]
-   lr_gamma: 0.5
-
-   # For the SWA scheduler:
-   swa_start_iter_rel: 0.05
-   swa_lr: 1e-4
-   swa_anneal_epochs: 10
-   swa_anneal_strategy: "cos"
-
-   # Losses:
-   pixel_criterion: l1 # pixel (content) loss
-   pixel_weight: 0.05
-   feature_criterion: l1 # feature loss (VGG feature network)
-   feature_weight: 0.3
-   cx_type: contextual # contextual loss
-   cx_weight: 1
-   cx_vgg_layers: {conv_3_2: 1, conv_4_2: 1}
-   #hfen_criterion: l1 # HFEN loss
-   #hfen_weight: 1e-6
-   #grad_type: grad-4d-l1 # image gradient loss
-   #grad_weight: 4e-1
-   # tv_type: normal # total variation
-   # tv_weight: 1e-5
-   # tv_norm: 1
-   ssim_type: ssim # structural similarity
-   ssim_weight: 0.05
-   lpips_weight: 0.25 # [.25] perceptual loss
-   lpips_type: net-lin
-   lpips_net: squeeze
-
-   # Experimental losses:
-   # spl_type: spl # spatial profile loss
-   # spl_weight: 0.1
-   #of_type: overflow # overflow loss
-   #of_weight: 0.1
-   # range_weight: 1 # range loss
-   # fft_type: fft # FFT loss
-   # fft_weight: 0.2 #[.2]
-   color_criterion: color-l1cosinesim # color consistency loss
-   color_weight: 0.1
-   # avg_criterion: avg-l1 # averaging downscale loss
-   # avg_weight: 5
-   # ms_criterion: multiscale-l1 # multi-scale pixel loss
-   # ms_weight: 1e-2
-   #fdpl_type: fdpl # frequency domain-based perceptual loss
-   #fdpl_weight: 1e-3
-
-   # Adversarial loss:
-   #gan_type: vanilla
-   #gan_weight: 4e-3
-   # freeze_loc: 4
-   # For wgan-gp:
-   # D_update_ratio: 1
-   # D_init_iters: 0
-   # gp_weigth: 10
-   # Feature matching (if using discriminator_vgg_128_fea or discriminator_vgg_fea):
-   # gan_featmaps: true
-   # dis_feature_criterion: cb # discriminator feature loss
-   # dis_feature_weight: 0.01
-
-   # For PPON:
-   # p1_losses: [pix]
-   # p2_losses: [pix-multiscale, ms-ssim]
-   # p3_losses: [fea]
-   # ppon_stages: [1000, 2000]
-
-   # Differentiable Augmentation for Data-Efficient GAN Training:
-   # diffaug: true
-   # dapolicy: 'color,transl_zoom,flip,rotate,cutout'
-
-   # Batch (Mixup) augmentations:
-   #mixup: false
-   #mixopts: [blend, rgb, mixup, cutmix, cutmixup] # , "cutout", "cutblur"]
-   #mixprob: [1.0, 1.0, 1.0, 1.0, 1.0] #, 1.0, 1.0]
-   #mixalpha: [0.6, 1.0, 1.2, 0.7, 0.7] #, 0.001, 0.7]
-   #aux_mixprob: 1.0
-   #aux_mixalpha: 1.2
-   # mix_p: 1.2
-
-   # Frequency Separator:
-   #fs: true
-   #lpf_type: average
-   #hpf_type: average
-
-   # Other training options:
-   manual_seed: 0
-   niter: 250000
-   # warmup_iter: -1
-   #val_freq: 5e3
-   # overwrite_val_imgs: true
-   # val_comparison: true
-   # metrics: 'psnr,ssim,lpips'
-   #grad_clip: auto
-   #grad_clip_value: 0.1 # "auto"
-
- logger:
-   print_freq: 50
-   save_checkpoint_freq: 500
-   overwrite_chkp: false
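
For context on the deleted options: `use_amp: true` enables PyTorch's Automatic Mixed Precision. A minimal sketch of the pattern AMP training follows, with a stand-in model, data and loss rather than traiNNer internals; tensor shapes mirror the config (batch_size 12, 3 channels, 64px crops, scale 1):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Conv2d(3, 3, 3, padding=1).to(device)  # stand-in for the G network
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))

lr_batch = torch.rand(12, 3, 64, 64, device=device)
hr_batch = torch.rand(12, 3, 64, 64, device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=(device == "cuda")):  # FP16 forward pass
    sr = model(lr_batch)
    loss = torch.nn.functional.l1_loss(sr, hr_batch)
scaler.scale(loss).backward()  # scaled backward avoids FP16 gradient underflow
scaler.step(optimizer)
scaler.update()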
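The scheduler block (`lr_scheme: MultiStepLR`, `lr_steps_rel: [50000, 100000, 200000, 300000]`, `lr_gamma: 0.5`) halves the learning rate at each milestone. A sketch with PyTorch's built-in scheduler, assuming the listed steps are used directly as absolute iteration milestones (how traiNNer derives absolute milestones from `lr_steps_rel` is up to the framework):

import torch

# Stand-in parameter/optimizer; traiNNer would build these from the config.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=2e-4)

# LR is multiplied by gamma (0.5) at each milestone. Note that under this
# reading, the 300000 milestone lies past niter (250000) and never fires.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[50000, 100000, 200000, 300000], gamma=0.5)

for it in range(250000):  # niter: 250000
    optimizer.step()      # the actual training step would go here
    scheduler.step()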
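`use_swa` is disabled in this config, but the `swa_*` keys map onto PyTorch's SWA utilities: an averaged copy of the weights plus a constant-LR schedule annealed into with cosine decay. A sketch of that correspondence with a stand-in model, assuming `swa_start_iter_rel: 0.05` means averaging starts after 5% of `niter`:

import torch
from torch.optim.swa_utils import AveragedModel, SWALR

model = torch.nn.Conv2d(3, 3, 3, padding=1)
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)

swa_model = AveragedModel(model)  # keeps the running weight average
# swa_lr: 1e-4, swa_anneal_epochs: 10, swa_anneal_strategy: "cos"
swa_scheduler = SWALR(optimizer, swa_lr=1e-4,
                      anneal_epochs=10, anneal_strategy="cos")

swa_start = int(0.05 * 250000)   # swa_start_iter_rel * niter
for it in range(250000):
    optimizer.step()             # normal training step
    if it >= swa_start:
        swa_model.update_parameters(model)
        swa_scheduler.step()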
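The active losses form a weighted sum over six criteria (pixel L1 at 0.05, VGG feature at 0.3, contextual at 1, SSIM at 0.05, LPIPS at 0.25, color consistency at 0.1). A hedged sketch of that combination; only the pixel term is implemented inline, and the commented names are hypothetical placeholders for traiNNer's loss modules, not real functions:

import torch
import torch.nn.functional as F

def generator_loss(sr: torch.Tensor, hr: torch.Tensor) -> torch.Tensor:
    # Weighted sum mirroring the config weights above.
    loss = 0.05 * F.l1_loss(sr, hr)  # pixel_criterion: l1, pixel_weight: 0.05
    # The remaining terms need their own networks/modules and are omitted:
    # loss += 0.3  * vgg_feature_l1(sr, hr)     # feature_weight: 0.3
    # loss += 1.0  * contextual_loss(sr, hr)    # cx_weight: 1
    # loss += 0.05 * (1 - ssim(sr, hr))         # ssim_weight: 0.05
    # loss += 0.25 * lpips_dist(sr, hr)         # lpips_weight: 0.25
    # loss += 0.1  * color_l1cosinesim(sr, hr)  # color_weight: 0.1
    return loss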