# Source: BernardoTorres — commit "add paper models" (e60d96b)
# pytorch_lightning==1.9.3
seed_everything: 123
trainer:
  logger:
    class_path: pytorch_lightning.loggers.TensorBoardLogger
    init_args:
      save_dir: logs
      name: exp_uniformity_sameclip
      version: null
      log_graph: false
      default_hp_metric: true
      prefix: ''
      sub_dir: null
  enable_checkpointing: true
  callbacks:
    # Track the best checkpoint for each monitored validation metric,
    # plus a periodic unconditional snapshot every 25 epochs.
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: best-val-loss-{epoch}-{step}
        monitor: loss/val
        verbose: false
        save_last: null
        save_top_k: 1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: null
        save_on_train_epoch_end: null
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: best-eer-val-{epoch}-{step}
        monitor: EER evaluation proj/val
        verbose: false
        save_last: null
        save_top_k: 1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: null
        save_on_train_epoch_end: null
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: best-rank-val-{epoch}-{step}
        monitor: Order evaluation mean proj/val
        verbose: false
        save_last: null
        save_top_k: 1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: null
        save_on_train_epoch_end: null
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: best-alignment-val-{epoch}-{step}
        monitor: Alignment evaluation proj/val
        verbose: false
        save_last: null
        save_top_k: 1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: null
        save_on_train_epoch_end: null
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: best-uniformity-val-{epoch}-{step}
        monitor: Uniformity evaluation proj/val
        verbose: false
        save_last: null
        save_top_k: 1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: null
        save_on_train_epoch_end: null
    # Unconditional snapshot (monitor: null, save_top_k: -1) every 25 epochs.
    - class_path: pytorch_lightning.callbacks.ModelCheckpoint
      init_args:
        dirpath: null
        filename: cptk-{epoch}-{step}
        monitor: null
        verbose: false
        save_last: null
        save_top_k: -1
        save_weights_only: false
        mode: min
        auto_insert_metric_name: true
        every_n_train_steps: null
        train_time_interval: null
        every_n_epochs: 25
        save_on_train_epoch_end: null
    # Project-local evaluation callbacks; these emit the metrics monitored
    # by the checkpoint callbacks above every 5 epochs.
    - class_path: callbacks.evaluation.OrderEvaluation
      init_args:
        log_n_epochs: 5
        on_train: true
        use_projection: true
    - class_path: callbacks.evaluation.EEREvaluation
      init_args:
        use_more_neg: false
        log_n_epochs: 5
        on_train: false
        use_projection: true
    - class_path: callbacks.evaluation.HypersphereEvaluation
      init_args:
        log_n_epochs: 5
        on_train: true
        use_projection: true
  default_root_dir: null
  gradient_clip_val: null
  gradient_clip_algorithm: null
  num_nodes: 1
  num_processes: null
  devices: null
  gpus: 2
  auto_select_gpus: null
  tpu_cores: null
  ipus: null
  enable_progress_bar: true
  overfit_batches: 0.0
  track_grad_norm: -1
  check_val_every_n_epoch: 1
  fast_dev_run: false
  accumulate_grad_batches: null
  max_epochs: 100000
  min_epochs: null
  max_steps: 1000000000
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  val_check_interval: null
  log_every_n_steps: 50
  accelerator: gpu
  strategy: null
  sync_batchnorm: false
  precision: 32
  enable_model_summary: true
  num_sanity_val_steps: 2
  resume_from_checkpoint: null
  profiler: null
  benchmark: null
  deterministic: null
  reload_dataloaders_every_n_epochs: 0
  auto_lr_find: false
  replace_sampler_ddp: true
  detect_anomaly: false
  auto_scale_batch_size: false
  plugins: null
  amp_backend: null
  amp_level: null
  move_metrics_to_cpu: false
  multiple_trainloader_mode: max_size_cycle
  inference_mode: true
# NOTE(review): placed at top level per the PL 1.9 CLI dump schema — confirm
# against the original (indentation was lost in this copy).
ckpt_path: null
model:
  class_path: models.trainer.ContrastiveTrainer
  init_args:
    feature_extractor:
      # NOTE(review): "melspectogram" (sic) is the key the model code expects;
      # do not "fix" the spelling here without changing the consumer.
      spec_layer: melspectogram
      n_fft: 2048
      hop_length: 512
    backbone:
      backbone: efficientnet_b0
      pretrained: true
      embedding_dim: 1000
    projection:
      input_dim: 1000
      output_dim: 128
      l2_normalize: true
    optimizer1_init:
      class_path: torch.optim.Adam
      init_args:
        lr: 0.0001
        weight_decay: 1.0e-05
    # Loss configuration: alignment + uniformity terms enabled; the
    # contrastive / VICReg-style terms are all switched off.
    use_contrastive_loss: false
    temp: 0.1
    nr_negative: 64
    decouple: false
    use_norm_reg: false
    max_norm_hinge: 4.0
    norm_hinge_fact: 10.0
    use_invariance_loss: false
    fact_inv_loss: 1.0
    use_covariance_reg: false
    fact_cov: 1.0
    use_variance_reg: false
    fact_var: 1.0
    gamma: 1.0
    use_vicreg_loss: false
    use_align_loss: true
    fact_align_loss: 1.0
    fact_unif_loss: 1.0
    use_uniform_loss: true
    mask_batch: false
    compute_test_loss: false
data:
  class_path: data.vocals.VocalsDataModule
  init_args:
    # Augmentations for negative examples — disabled as a whole (enable: false);
    # the per-augmentation values below are inert while disabled.
    augs_neg:
      enable: false
      gaussian_noise: 0.5
      pitch_shift_naive: 0
      time_stretch: 0
      gain: 0.5
      shift: 0
      parametric_eq: 0
      tanh_distortion: 0
      time_mask: 0
      formant_shift_parselmouth: 0
      pitch_shift_parselmouth: 0
      pitch_range_parselmouth: 0
      pitch_shift_parselmouth_prob: 0
    positive_examples: same_clip
    dataset_dirs:
      - tencys_vocals
      - ghero_vocals_3
      - ghero_vocals_4
    batch_size: 55
    batch_size_val: 55
    nr_samples: 176000
    normalize: true
    num_workers: 40
    sr: 44100
    batch_sampling_mode: sample_clips
    eval_frac: 0.11
    group_name_is_folder: true
    group_by_artist: true
    # Augmentations for positive examples (active).
    augs:
      enable: true
      gaussian_noise: 0.5
      pitch_shift_naive: 0
      time_stretch: 0
      gain: 0.5
      shift: 0
      parametric_eq: 0
      tanh_distortion: 0
      time_mask: 0.5
      formant_shift_parselmouth: 0
      # [min, max] shift factors; applied with probability below.
      pitch_shift_parselmouth:
        - 1
        - 1.3
      pitch_range_parselmouth: 1.5
      pitch_shift_parselmouth_prob: 0.5
    transform_override: false
    verbose: true
    use_random_loader: false
    max_groups: -1
    multi_epoch: 1
    classification: false