---
# Training-run configuration (energizer active-learning stack, Hydra-style:
# `_target_` keys name the class to instantiate).
# NOTE(review): this file was recovered from a whitespace-collapsed source;
# the nesting below is reconstructed from key semantics — confirm it against
# the consuming tool's config schema before relying on it.

# Hardware / numerics settings for the estimator (trainer).
estimator:
  accelerator: gpu
  precision: bf16-true
  deterministic: true
  tf32_mode: high
  convert_to_bettertransformer: false

# Callback specifications, instantiated from their `_target_` import paths.
callbacks:
  timer:
    _target_: energizer.active_learning.callbacks.Timer
  lr_monitor:
    _target_: energizer.callbacks.lr_monitor.LearningRateMonitor
  model_checkpoint:
    _target_: energizer.callbacks.model_checkpoint.ModelCheckpoint
    dirpath: .checkpoints
    stage: train
    # Quoted: a plain `1:epoch` scalar is valid YAML but fragile (a colon in
    # a plain scalar is one edit away from a mapping parse). The string value
    # is unchanged.
    frequency: '1:epoch'

# Experiment loggers.
loggers:
  tensorboard:
    _target_: energizer.loggers.TensorBoardLogger
    root_dir: ./
    name: tb_logs
    version: null

# Dataloader settings.
data:
  batch_size: 32
  eval_batch_size: 128
  shuffle: true
  replacement: false
  data_seed: 42
  drop_last: false
  num_workers: 8
  pin_memory: true
  persistent_workers: false
  multiprocessing_context: null
  max_length: 512

# Training-loop (fit stage) settings.
fit:
  max_epochs: 20
  optimizer_kwargs:
    name: adamw
    lr: 3.0e-05
    init_kwargs:
      fused: true
  scheduler_kwargs:
    name: constant_schedule_with_warmup
    num_warmup_steps: 2000
  log_interval: 100
  enable_progress_bar: true
  limit_train_batches: null
  limit_validation_batches: null

# Model selection.
model:
  name: bert-tiny
  revision: null
  seed: 42

# Top-level run settings.
# NOTE(review): `log_interval` / `enable_progress_bar` / `seed` also appear
# inside nested sections above — presumably intentional (per-stage overrides);
# verify with the consumer.
log_interval: 100
enable_progress_bar: true
limit_batches: null
seed: 42
experiment_group: training
run_name: bert-tiny_2024-03-04T17-34-08
data_path: /home/pl487/coreset-project/data/processed
dataset: mnli