audio:
  chunk_size: 261632
  dim_f: 4096
  dim_t: 512
  hop_length: 512
  n_fft: 8192
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.001
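These STFT settings are mutually consistent: with centered framing, 261632 samples at hop_length 512 give 261632/512 + 1 = 512 frames (dim_t), and n_fft 8192 yields 8192/2 + 1 = 4097 frequency bins, which are cropped to dim_f = 4096. A minimal sketch (assuming PyTorch; not the repository's own code) to verify the shapes:

```python
import torch

chunk_size, n_fft, hop_length = 261632, 8192, 512
x = torch.randn(2, chunk_size)  # num_channels = 2, sample_rate = 44100

spec = torch.stft(
    x,
    n_fft=n_fft,
    hop_length=hop_length,
    window=torch.hann_window(n_fft),
    center=True,
    return_complex=True,
)
print(spec.shape)         # torch.Size([2, 4097, 512])
spec = spec[:, :4096, :]  # crop the top bin to reach dim_f = 4096
```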
model:
  encoder_name: tu-maxvit_large_tf_512 # see https://github.com/qubvel/segmentation_models.pytorch#encoders- for available encoders
  decoder_type: unet # unet, fpn
  act: gelu
  num_channels: 128
  num_subbands: 8
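encoder_name is forwarded to segmentation_models.pytorch, where the tu- prefix selects a timm backbone (here MaxViT-Large at 512 px). A hedged sketch of how decoder_type and encoder_name could be dispatched; the repository's wrapper also handles the spectrogram/subband plumbing, and in_channels/classes below are illustrative assumptions, not config values:

```python
import segmentation_models_pytorch as smp

DECODERS = {"unet": smp.Unet, "fpn": smp.FPN}  # the two decoder_type options

model = DECODERS["unet"](
    encoder_name="tu-maxvit_large_tf_512",  # "tu-" = timm encoder in smp
    encoder_weights=None,
    in_channels=32,  # assumption: 2 audio channels x 2 (re/im) x 8 subbands
    classes=32,      # assumption: output mirrors the input layout
)
```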
training:
  batch_size: 8
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - vocals
  - other
  lr: 5.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: null
  num_epochs: 1000
  num_steps: 2000
  augmentation: false # enable augmentations from audiomentations and pedalboard
  augmentation_type: simple1
  use_mp3_compress: false # Deprecated
  augmentation_mix: true # Mix several stems of the same type with some probability
  augmentation_loudness: true # randomly change the loudness of each stem
  augmentation_loudness_type: 1 # Type 1 or 2
  augmentation_loudness_min: 0.5
  augmentation_loudness_max: 1.5
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adamw
  other_fix: true # needed when checking on the multisong dataset whether "other" is actually instrumental
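patience and reduce_factor match the signature of torch's ReduceLROnPlateau, and optimizer: adamw maps onto torch.optim.AdamW. A sketch of that pairing (assuming a model object and a higher-is-better validation metric such as SDR; not the repository's training loop):

```python
import torch

optimizer = torch.optim.AdamW(model.parameters(), lr=5.0e-05)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode="max",   # assumption: validation metric improves upward (e.g. SDR)
    factor=0.95,  # reduce_factor
    patience=2,   # patience
)

for epoch in range(1000):       # num_epochs
    ...                         # 2000 training steps per epoch (num_steps)
    scheduler.step(validate())  # validate() is a hypothetical helper
```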
inference:
  batch_size: 1
  dim_t: 512
  num_overlap: 4
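num_overlap: 4 implies chunked inference in which windows advance by chunk_size / num_overlap and overlapping predictions are averaged. A hedged sketch of that scheme with uniform averaging (the repository's inference code may weight or window chunks differently):

```python
import torch
import torch.nn.functional as F

def separate(model, mix, chunk_size=261632, num_overlap=4):
    """Average overlapped model predictions over a full mixture.

    mix: (channels, samples); model maps (1, C, chunk_size) -> same shape.
    """
    step = chunk_size // num_overlap
    out = torch.zeros_like(mix)
    counts = torch.zeros_like(mix)
    for start in range(0, mix.shape[-1], step):
        end = min(start + chunk_size, mix.shape[-1])
        chunk = mix[..., start:end]
        if chunk.shape[-1] < chunk_size:  # zero-pad the tail chunk
            chunk = F.pad(chunk, (0, chunk_size - chunk.shape[-1]))
        with torch.no_grad():
            pred = model(chunk.unsqueeze(0)).squeeze(0)[..., : end - start]
        out[..., start:end] += pred
        counts[..., start:end] += 1.0
    return out / counts
```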