name: "MultiMaskMultiSourceBandSplitRNN" audio: chunk_size: 264600 num_channels: 2 sample_rate: 44100 min_mean_abs: 0.001 model: in_channel: 1 stems: ['speech', 'music', 'effects'] band_specs: "musical" n_bands: 64 fs: 44100 require_no_overlap: false require_no_gap: true normalize_channel_independently: false treat_channel_as_feature: true n_sqm_modules: 8 emb_dim: 128 rnn_dim: 256 bidirectional: true rnn_type: "GRU" mlp_dim: 512 hidden_activation: "Tanh" hidden_activation_kwargs: null complex_mask: true n_fft: 2048 win_length: 2048 hop_length: 512 window_fn: "hann_window" wkwargs: null power: null center: true normalized: true pad_mode: "constant" onesided: true training: batch_size: 4 gradient_accumulation_steps: 4 grad_clip: 0 instruments: - speech - music - effects lr: 9.0e-05 patience: 2 reduce_factor: 0.95 target_instrument: null num_epochs: 1000 num_steps: 1000 augmentation: false # enable augmentations by audiomentations and pedalboard augmentation_type: simple1 use_mp3_compress: false # Deprecated augmentation_mix: true # Mix several stems of the same type with some probability augmentation_loudness: true # randomly change loudness of each stem augmentation_loudness_type: 1 # Type 1 or 2 augmentation_loudness_min: 0.5 augmentation_loudness_max: 1.5 q: 0.95 coarse_loss_clip: true ema_momentum: 0.999 optimizer: adam other_fix: true # it's needed for checking on multisong dataset if other is actually instrumental use_amp: true # enable or disable usage of mixed precision (float16) - usually it must be true inference: batch_size: 1 dim_t: 256 num_overlap: 4