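# Training-arguments dump for donut_experiment_23. The field set
# (predict_with_generate, generation_*) suggests a transformers
# Seq2SeqTrainingArguments object serialized with PyYAML. The !!python/object
# tags require yaml.load(..., Loader=yaml.Loader) or yaml.unsafe_load to
# round-trip; yaml.safe_load will reject them.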
__cached__setup_devices: !!python/object/apply:torch.device
- cuda
- 0
_n_gpu: 1
accelerator_config: !!python/object:transformers.trainer_pt_utils.AcceleratorConfig
  dispatch_batches: null
  even_batches: true
  gradient_accumulation_kwargs: null
  split_batches: false
  use_seedable_sampler: true
adafactor: false
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
auto_find_batch_size: false
bf16: false
bf16_full_eval: false
data_seed: null
dataloader_drop_last: false
dataloader_num_workers: 0
dataloader_persistent_workers: false
dataloader_pin_memory: true
dataloader_prefetch_factor: null
ddp_backend: null
ddp_broadcast_buffers: null
ddp_bucket_cap_mb: null
ddp_find_unused_parameters: null
ddp_timeout: 1800
debug: []
deepspeed: null
deepspeed_plugin: null
disable_tqdm: false
dispatch_batches: null
distributed_state: !!python/object:accelerate.state.PartialState
  _cpu: false
  backend: null
  debug: false
  device: !!python/object/apply:torch.device
  - cuda
  distributed_type: !!python/object/apply:accelerate.utils.dataclasses.DistributedType
  - 'NO'
  fork_launched: false
  local_process_index: 0
  num_processes: 1
  process_index: 0
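# __cached__setup_devices (above) and distributed_state here are runtime state
# captured at serialization time, not user-settable arguments; they record a
# single-process run (DistributedType 'NO', num_processes: 1) on one CUDA GPU.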
do_eval: true
do_predict: false
do_train: false
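# do_train/do_eval are flags read by the example scripts rather than by
# Trainer.train() itself, so do_train: false here may simply be the default
# rather than an indication that no training happened.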
eval_accumulation_steps: null
eval_delay: 0
eval_do_concat_batches: true
eval_steps: null
evaluation_strategy: !!python/object/apply:transformers.trainer_utils.IntervalStrategy
- epoch
fp16: true
fp16_backend: auto
fp16_full_eval: false
fp16_opt_level: O1
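# fp16: true enables mixed-precision training; with the backend left on auto
# this should resolve to torch.cuda.amp on a CUDA device, in which case
# fp16_opt_level (an Apex-era setting) has no effect.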
fsdp: []
fsdp_config:
  min_num_params: 0
  xla: false
  xla_fsdp_grad_ckpt: false
  xla_fsdp_v2: false
fsdp_min_num_params: 0
fsdp_transformer_layer_cls_to_wrap: null
full_determinism: false
generation_config: null
generation_max_length: null
generation_num_beams: null
gradient_accumulation_steps: 2
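# Effective train batch size = per_device_train_batch_size (1, below)
# * gradient_accumulation_steps (2) * number of GPUs (1) = 2.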
gradient_checkpointing: false
gradient_checkpointing_kwargs: null
greater_is_better: false
group_by_length: false
half_precision_backend: auto
hub_always_push: false
hub_model_id: donut_experiment_23
hub_private_repo: false
hub_strategy: !!python/object/apply:transformers.trainer_utils.HubStrategy
- every_save
hub_token: null
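# push_to_hub: true (below) with hub_strategy every_save: each checkpoint save
# is also pushed to the donut_experiment_23 Hub repo.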
ignore_data_skip: false
include_inputs_for_metrics: false
include_num_input_tokens_seen: false
include_tokens_per_second: false
jit_mode_eval: false
label_names: null
label_smoothing_factor: 0.0
learning_rate: 2.0e-05
length_column_name: length
load_best_model_at_end: true
local_rank: 0
log_level: passive
log_level_replica: warning
log_on_each_node: true
logging_dir: model_runs/donut_experiment_23/runs/Jul12_18-16-07_ip-172-16-163-175.ec2.internal
logging_first_step: false
logging_nan_inf_filter: true
logging_steps: 100
logging_strategy: !!python/object/apply:transformers.trainer_utils.IntervalStrategy
- steps
lr_scheduler_kwargs: {}
lr_scheduler_type: !!python/object/apply:transformers.trainer_utils.SchedulerType
- linear
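# Linear schedule with warmup_ratio/warmup_steps of 0 (below): the learning
# rate decays linearly from 2.0e-05 to 0 over the run, with no warmup phase.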
max_grad_norm: 1.0
max_steps: -1
metric_for_best_model: loss
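# With load_best_model_at_end: true, metric_for_best_model: loss, and
# greater_is_better: false, the Trainer reloads the checkpoint with the
# lowest eval loss at the end of training.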
mp_parameters: ''
neftune_noise_alpha: null
no_cuda: false
num_train_epochs: 4
optim: !!python/object/apply:transformers.training_args.OptimizerNames
- adamw_torch
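# adamw_torch uses the adam_beta1/adam_beta2/adam_epsilon values above
# (betas (0.9, 0.999), eps 1e-08) together with weight_decay: 0.01 (below).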
optim_args: null
optim_target_modules: null
output_dir: model_runs/donut_experiment_23
overwrite_output_dir: false
past_index: -1
per_device_eval_batch_size: 1
per_device_train_batch_size: 1
per_gpu_eval_batch_size: null
per_gpu_train_batch_size: null
predict_with_generate: true
prediction_loss_only: false
push_to_hub: true
push_to_hub_model_id: null
push_to_hub_organization: null
push_to_hub_token: null
ray_scope: last
remove_unused_columns: true
report_to:
- tensorboard
resume_from_checkpoint: null
run_name: model_runs/donut_experiment_23
save_on_each_node: false
save_only_model: false
save_safetensors: true
save_steps: 500
save_strategy: !!python/object/apply:transformers.trainer_utils.IntervalStrategy
- epoch
save_total_limit: 2
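# save_strategy epoch writes one checkpoint per epoch; save_steps: 500 only
# applies under the steps strategy and is ignored here. save_total_limit: 2
# rotates old checkpoints, and the best one should survive rotation since
# load_best_model_at_end is set.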
seed: 42
skip_memory_metrics: true
sortish_sampler: false
split_batches: null
tf32: null
torch_compile: false
torch_compile_backend: null
torch_compile_mode: null
torchdynamo: null
tpu_metrics_debug: false
tpu_num_cores: null
use_cpu: false
use_ipex: false
use_legacy_prediction_loop: false
use_mps_device: false
warmup_ratio: 0.0
warmup_steps: 0
weight_decay: 0.01