# Provenance (web-page chrome from the original scrape, preserved as comments):
# Author: Hannes Kuchelmeister
# Commit: 596687f — "run experiments comparing MSE and MAE"
# raw / history blame / 1.55 kB
# Top-level run settings for this Hydra experiment (focus regression, MAE variant).

# Directory the run was launched from; Hydra changes cwd per run, so data
# paths are anchored to the original working directory.
original_work_dir: ${hydra:runtime.cwd}
data_dir: ${original_work_dir}/data
# Print the composed config at startup.
print_config: true
ignore_warnings: true
# Run the training loop and the test loop, respectively.
train: true
test: true
# Global seed for reproducibility.
seed: 12345
# Experiment name; also used as the logger version (see logger.tensorboard.version).
name: focusMAE
# LightningDataModule configuration, instantiated by Hydra via _target_.
# NOTE(review): original paste had all nesting flattened to column 0, which
# made these duplicate/null root keys — indentation restored here.
datamodule:
  _target_: src.datamodules.focus_datamodule.FocusDataModule
  data_dir: ${data_dir}/focus150
  csv_file: ${data_dir}/focus150/metadata.csv
  batch_size: 128
  # Fractions for the train/val/test split; presumably must sum to 1.0 —
  # confirm against FocusDataModule.
  train_val_test_split_percentage:
    - 0.7
    - 0.15
    - 0.15
  # 0 workers = load data in the main process.
  num_workers: 0
  pin_memory: false
# LightningModule (MLP regressor) configuration, instantiated via _target_.
# Indentation restored — see note on datamodule.
model:
  _target_: src.models.focus_module.FocusLitModule
  # Flattened input size; presumably 150*150*3 for the focus150 dataset —
  # confirm against the datamodule.
  input_size: 67500
  lin1_size: 128
  lin2_size: 256
  lin3_size: 64
  # Single scalar output (regression target).
  output_size: 1
  lr: 0.0173
  weight_decay: 0.0005
# PyTorch Lightning callbacks; each child mapping is instantiated via _target_.
# Indentation restored — see note on datamodule.
callbacks:
  model_checkpoint:
    _target_: pytorch_lightning.callbacks.ModelCheckpoint
    # Track validation MAE; lower is better.
    monitor: val/mae
    mode: min
    save_top_k: 1
    save_last: true
    verbose: false
    dirpath: checkpoints/
    # e.g. "epoch_007"; metric name is not prepended.
    filename: epoch_{epoch:03d}
    auto_insert_metric_name: false
  early_stopping:
    _target_: pytorch_lightning.callbacks.EarlyStopping
    monitor: val/mae
    mode: min
    # Generous patience: stop only after 100 epochs without improvement.
    patience: 100
    min_delta: 0
  model_summary:
    _target_: pytorch_lightning.callbacks.RichModelSummary
    # -1 = show all layers.
    max_depth: -1
  rich_progress_bar:
    _target_: pytorch_lightning.callbacks.RichProgressBar
# Experiment loggers, instantiated via _target_.
# Indentation restored — see note on datamodule.
logger:
  tensorboard:
    _target_: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
    save_dir: tensorboard/
    # name=null keeps logs directly under save_dir; the experiment name
    # is used as the version subdirectory instead.
    name: null
    version: ${name}
    log_graph: false
    default_hp_metric: true
    prefix: ''
# pytorch_lightning.Trainer configuration.
# Indentation restored — see note on datamodule.
trainer:
  _target_: pytorch_lightning.Trainer
  # 0 = CPU-only training.
  gpus: 0
  min_epochs: 1
  max_epochs: 100
  resume_from_checkpoint: null