# pytorch_lightning==1.8.0
seed_everything: 123
trainer:
  logger:
  - class_path: pytorch_lightning.loggers.TensorBoardLogger
    init_args:
      save_dir: lightning_logs
      name: tmp
      version: null
      log_graph: false
      default_hp_metric: true
      prefix: ''
      sub_dir: null
      comment: ''
      purge_step: null
      max_queue: 10
      flush_secs: 120
      filename_suffix: ''
  - class_path: pytorch_lightning.loggers.WandbLogger
    init_args:
      name: null
      save_dir: .
      version: null
      offline: false
      dir: null
      id: null
      anonymous: null
      project: tmp
      log_model: false
      experiment: null
      prefix: ''
      job_type: null
      config: null
      entity: null
      reinit: null
      tags: null
      group: null
      notes: null
      magic: null
      config_exclude_keys: null
      config_include_keys: null
      mode: null
      allow_val_change: null
      resume: null
      force: null
      tensorboard: null
      sync_tensorboard: null
      monitor_gym: null
      save_code: null
      settings: null
  enable_checkpointing: true
  callbacks:
  - class_path: pytorch_lightning.callbacks.ModelCheckpoint
    init_args:
      dirpath: null
      filename: best-val-loss-{epoch}-{step}
      monitor: loss/loss/val
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: min
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: null
      save_on_train_epoch_end: null
  - class_path: pytorch_lightning.callbacks.ModelCheckpoint
    init_args:
      dirpath: null
      filename: best-eer-loss-{epoch}-{step}
      monitor: EER evaluation proj/val
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: min
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: null
      save_on_train_epoch_end: null
  - class_path: pytorch_lightning.callbacks.ModelCheckpoint
    init_args:
      dirpath: null
      filename: best-invar-val-{epoch}-{step}
      monitor: loss/invariance/val
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: min
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: null
      save_on_train_epoch_end: null
  - class_path: pytorch_lightning.callbacks.ModelCheckpoint
    init_args:
      dirpath: null
      filename: best-order-val-{epoch}-{step}
      monitor: Order evaluation mean proj/val
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: min
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: null
      save_on_train_epoch_end: null
  - class_path: pytorch_lightning.callbacks.ModelCheckpoint
    init_args:
      dirpath: null
      filename: cptk-{epoch}-{step}
      monitor: null
      verbose: false
      save_last: null
      save_top_k: -1
      save_weights_only: false
      mode: min
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: 50
      save_on_train_epoch_end: null
  - class_path: pytorch_lightning.callbacks.RichProgressBar
    init_args:
      refresh_rate: 1
      leave: false
      theme:
        description: white
        progress_bar: '#6206E0'
        progress_bar_finished: '#6206E0'
        progress_bar_pulse: '#6206E0'
        batch_progress: white
        time: grey54
        processing_speed: grey70
        metrics: white
      console_kwargs: null
  - class_path: callbacks.hypersphere.HypersphereEvaluation
    init_args:
      normalize: true
      use_projections: false
  - class_path: callbacks.hypersphere.HypersphereEvaluation
    init_args:
      normalize: true
      use_projections: true
  - class_path: callbacks.lr_logger.LearningRateLogger
  - class_path: bernardo.callbacks.evaluation.OrderEvaluation
    init_args:
      log_n_epochs: 5
      on_train: true
      use_projection: true
  - class_path: bernardo.callbacks.evaluation.EEREvaluation
    init_args:
      use_more_neg: false
      log_n_epochs: 5
      on_train: false
      use_projection: true
  default_root_dir: null
  gradient_clip_val: null
  gradient_clip_algorithm: null
  num_nodes: 1
  num_processes: null
  devices:
  - 1
  gpus: null
  auto_select_gpus: false
  tpu_cores: null
  ipus: null
  enable_progress_bar: true
  overfit_batches: 0.0
  track_grad_norm: -1
  check_val_every_n_epoch: 1
  fast_dev_run: false
  accumulate_grad_batches: null
  max_epochs: 500
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  val_check_interval: null
  log_every_n_steps: 50
  accelerator: gpu
  strategy: null
  sync_batchnorm: false
  precision: 32
  enable_model_summary: true
  num_sanity_val_steps: 2
  resume_from_checkpoint: null
  profiler: null
  benchmark: null
  deterministic: null
  reload_dataloaders_every_n_epochs: 0
  auto_lr_find: false
  replace_sampler_ddp: true
  detect_anomaly: false
  auto_scale_batch_size: false
  plugins: null
  amp_backend: native
  amp_level: null
  move_metrics_to_cpu: false
  multiple_trainloader_mode: max_size_cycle
  inference_mode: true
ckpt_path: null
model:
  class_path: models.byol.BYOL
  init_args:
    module:
      class_path: networks.my_siamese_arm.SiameseArm
      init_args:
        encoder:
          class_path: bernardo.models.model.Encoder
          init_args:
            backbone: efficientnet_b0
            feature_extractor:
              class_path: bernardo.models.model.FeatureExtractor
              init_args:
                spec_layer: melspectogram
                n_fft: 2048
                hop_length: 512
            embedding_dim: 1000
            pretrained: true
            progress: true
            stochastic_depth_prob: 0.2
            norm_layer: null
        projector:
          class_path: bernardo.models.model.Projection
          init_args:
            input_dim: 1000
            output_dim: 128
            nonlinearity: null
            is_identity: false
            l2_normalize: true
        predictor:
          class_path: networks.mlp.MLP
          init_args:
            dims:
            - 128
            - 1024
            - 128
            activation: true
            use_batchnorm: true
            batchnorm_fn: null
            last_layer: null
            bias: null
            layer_init: null
        normalize_representations: false
        normalize_projections: true
    loss_fn:
      class_path: torch.nn.MSELoss
      init_args:
        size_average: null
        reduce: null
        reduction: mean
    weight_callback:
      class_path: callbacks.ma_updates.MAWeightUpdate
      init_args:
        initial_tau: 0.99
        max_epochs: 1000
        should_update: true
    optimizer:
      class_path: utils.optim.Adam
      init_args:
        lr: 3.0e-05
        betas:
        - 0.9
        - 0.999
        eps: 1.0e-08
        weight_decay: 1.5e-06
        amsgrad: false
    scheduler:
      class_path: utils.optim.LinearWarmupCosineAnnealing
      init_args:
        warmup_epochs: 10
        max_epochs: 1000
        warmup_start_lr: 0.0
        eta_min: 0.0
        last_epoch: -1
data:
  class_path: bernardo.data.vocals.VocalsDataModule
  init_args:
    augs_neg:
      enable: false
      gaussian_noise: 0.5
      pitch_shift_naive: 0
      time_stretch: 0
      gain: 0.5
      shift: 0
      parametric_eq: 0
      tanh_distortion: 0
      time_mask: 0
      formant_shift_parselmouth: 0
      pitch_shift_parselmouth: 0
      pitch_range_parselmouth: 0
      pitch_shift_parselmouth_prob: 0
    positive_examples: same_clip
    dataset_dirs:
    - tencys_vocals
    - ghero_vocals_3
    - ghero_vocals_4
    batch_size: 120
    batch_size_val: 120
    nr_samples: 176000
    normalize: true
    num_workers: 40
    sr: 44100
    batch_sampling_mode: sample_clips
    eval_frac: 0.105
    group_name_is_folder: true
    group_by_artist: true
    augs:
      enable: true
      gaussian_noise: 0.5
      pitch_shift_naive: 0
      time_stretch: 0
      gain: 0.5
      shift: 0
      parametric_eq: 0
      tanh_distortion: 0
      time_mask: 0.5
      formant_shift_parselmouth: 0
      pitch_shift_parselmouth:
      - 1
      - 1.4
      pitch_range_parselmouth: 1.5
      pitch_shift_parselmouth_prob: 0.5
    transform_override: false
    verbose: true
    use_random_loader: false
    max_groups: -1
    multi_epoch: 1
    classification: false
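# Usage sketch: this file follows the LightningCLI saved-config layout, so assuming the
# project exposes a LightningCLI entry point (e.g. a hypothetical main.py, not part of
# this config), training would typically be launched with:
#   python main.py fit --config config.yaml
# Individual keys can then be overridden on the command line, e.g. --trainer.max_epochs=1000.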