audio:
  chunk_size: 352800
  dim_f: 1024
  dim_t: 801 # not used here; the STFT settings in the model section take effect
  hop_length: 441 # not used here; the STFT settings in the model section take effect
  n_fft: 2048
  num_channels: 2
  sample_rate: 44100
  min_mean_abs: 0.000
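# Note: chunk_size / sample_rate = 352800 / 44100 = 8 s of audio per chunk,
# and chunk_size / hop_length + 1 = 352800 / 441 + 1 = 801 STFT frames,
# which is where dim_t above comes from.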
model:
  dim: 512
  depth: 12
  stereo: true
  num_stems: 1
  time_transformer_depth: 1
  freq_transformer_depth: 1
  linear_transformer_depth: 0
  freqs_per_bands: !!python/tuple
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 2
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 4
  - 12
  - 12
  - 12
  - 12
  - 12
  - 12
  - 12
  - 12
  - 24
  - 24
  - 24
  - 24
  - 24
  - 24
  - 24
  - 24
  - 48
  - 48
  - 48
  - 48
  - 48
  - 48
  - 48
  - 48
  - 128
  - 129
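  # Sanity check: these band widths must tile every STFT bin exactly once:
  # 24*2 + 12*4 + 8*12 + 8*24 + 8*48 + 128 + 129 = 1025 = stft_n_fft // 2 + 1
  # (= dim_freqs_in below). Assuming cfg is the parsed config (see the loading
  # note at the end of this section), this can be verified with:
  #   assert sum(cfg["model"]["freqs_per_bands"]) == cfg["model"]["stft_n_fft"] // 2 + 1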
  dim_head: 64
  heads: 8
  attn_dropout: 0.1
  ff_dropout: 0.1
  flash_attn: true
  dim_freqs_in: 1025
  stft_n_fft: 2048
  stft_hop_length: 441
  stft_win_length: 2048
  stft_normalized: false
  mask_estimator_depth: 2
  multi_stft_resolution_loss_weight: 1.0
  multi_stft_resolutions_window_sizes: !!python/tuple
  - 4096
  - 2048
  - 1024
  - 512
  - 256
  multi_stft_hop_size: 147
  multi_stft_normalized: false
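# Loading note: the !!python/tuple tags above require a PyYAML loader that
# constructs Python tuples; yaml.SafeLoader rejects them, yaml.FullLoader
# accepts them. The keys in this section appear to mirror the constructor
# keywords of the BSRoformer class from lucidrains' bs-roformer package, so
# the block can likely be splatted straight into it. A minimal sketch, not
# the repo's actual loader (filename illustrative):
#   import yaml
#   from bs_roformer import BSRoformer
#
#   with open("config_vocals_bs_roformer.yaml") as f:
#       cfg = yaml.load(f, Loader=yaml.FullLoader)
#   model = BSRoformer(**cfg["model"])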
training:
  batch_size: 2
  gradient_accumulation_steps: 1
  grad_clip: 0
  instruments:
  - vocals
  - other
  lr: 1.0e-05
  patience: 2
  reduce_factor: 0.95
  target_instrument: vocals
  num_epochs: 1000
  num_steps: 1000
  q: 0.95
  coarse_loss_clip: true
  ema_momentum: 0.999
  optimizer: adam
  other_fix: true # needed when validating on a multisong dataset, to check whether "other" is actually the instrumental
  use_amp: true # enable mixed-precision (float16) training; usually this should stay true
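# Scheduler note: patience and reduce_factor together suggest a
# ReduceLROnPlateau-style schedule (an assumption about the training script,
# which is not shown here): if the validation metric stalls for 2 epochs,
# the learning rate is multiplied by 0.95.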
inference:
  batch_size: 4
  dim_t: 1101
  num_overlap: 2
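# Inference note: audio is split into overlapping chunks, processed
# batch_size chunks at a time, and the per-chunk outputs are blended. With
# num_overlap: 2 the chunk hop is presumably chunk_size / 2, so every sample
# is covered by two windows; the exact blending (plain averaging vs. windowed
# overlap-add) is up to the inference script.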