long-t5-tglobal-base-16384-booksum-V12 / training_metadata.json
{
"output_dir":"/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
"overwrite_output_dir":true,
"do_train":false,
"do_eval":false,
"do_predict":false,
"evaluation_strategy":"no",
"prediction_loss_only":false,
"per_device_train_batch_size":2,
"per_device_eval_batch_size":1,
"per_gpu_train_batch_size":"None",
"per_gpu_eval_batch_size":"None",
"gradient_accumulation_steps":32,
"eval_accumulation_steps":"None",
"eval_delay":0,
"learning_rate":0.001,
"weight_decay":0,
"adam_beta1":0.9,
"adam_beta2":0.999,
"adam_epsilon":1e-08,
"max_grad_norm":1,
"num_train_epochs":1,
"max_steps":-1,
"lr_scheduler_type":"constant_with_warmup",
"warmup_ratio":0.03,
"warmup_steps":0,
"log_level":-1,
"log_level_replica":-1,
"log_on_each_node":true,
"logging_dir":"/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum/logs",
"logging_strategy":"steps",
"logging_first_step":false,
"logging_steps":2,
"logging_nan_inf_filter":true,
"save_strategy":"steps",
"save_steps":25,
"save_total_limit":1,
"save_on_each_node":false,
"no_cuda":false,
"use_mps_device":false,
"seed":42,
"data_seed":"None",
"jit_mode_eval":false,
"use_ipex":false,
"bf16":false,
"fp16":false,
"fp16_opt_level":"O1",
"half_precision_backend":"auto",
"bf16_full_eval":false,
"fp16_full_eval":false,
"tf32":"None",
"local_rank":0,
"xpu_backend":"None",
"tpu_num_cores":"None",
"tpu_metrics_debug":false,
"debug":"[]",
"dataloader_drop_last":false,
"eval_steps":"None",
"dataloader_num_workers":0,
"past_index":-1,
"run_name":"/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
"disable_tqdm":false,
"remove_unused_columns":true,
"label_names":"None",
"load_best_model_at_end":false,
"metric_for_best_model":"None",
"greater_is_better":"None",
"ignore_data_skip":false,
"sharded_ddp":"[]",
"fsdp":"[]",
"fsdp_min_num_params":0,
"fsdp_transformer_layer_cls_to_wrap":"None",
"deepspeed":"None",
"label_smoothing_factor":0.0,
"optim":"adamw_hf",
"adafactor":false,
"group_by_length":false,
"length_column_name":"length",
"report_to":"['tensorboard']",
"ddp_find_unused_parameters":"None",
"ddp_bucket_cap_mb":"None",
"dataloader_pin_memory":true,
"skip_memory_metrics":false,
"use_legacy_prediction_loop":false,
"push_to_hub":true,
"resume_from_checkpoint":"None",
"hub_model_id":"long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
"hub_strategy":"end",
"hub_token":"<HUB_TOKEN>",
"hub_private_repo":true,
"gradient_checkpointing":true,
"include_inputs_for_metrics":false,
"fp16_backend":"auto",
"push_to_hub_model_id":"None",
"push_to_hub_organization":"None",
"push_to_hub_token":"<PUSH_TO_HUB_TOKEN>",
"_n_gpu":1,
"mp_parameters":"",
"auto_find_batch_size":false,
"full_determinism":false,
"torchdynamo":"None",
"ray_scope":"last",
"ddp_timeout":1800,
"sortish_sampler":false,
"predict_with_generate":false,
"generation_max_length":"None",
"generation_num_beams":"None",
"train_batch_size":2,
"eval_batch_size":1,
"configs_src":"long-t5-tglobal-base-16384-booksum-V11-ft25-booksum",
"use_adam8bit":false,
"use_adan_optim":false,
"optim_params":"(Adafactor (\nParameter Group 0\n beta1: None\n clip_threshold: 1.0\n decay_rate: -0.8\n eps: (1e-30, 0.001)\n lr: 0.0\n relative_step: True\n scale_parameter: True\n warmup_init: True\n weight_decay: 0.0\n), <transformers.optimization.AdafactorSchedule object at 0x7f6ab0187890>)"
}
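
For reference, a minimal sketch (not part of the original training script) of how this metadata could be loaded back into a `transformers.Seq2SeqTrainingArguments` object. It assumes the file is saved locally as `training_metadata.json` and that your installed `transformers` release still accepts these argument names; script-specific extras such as `configs_src`, `use_adam8bit`, `use_adan_optim`, and `optim_params`, plus derived fields like `train_batch_size`, are filtered out before construction.

```python
# Sketch: rebuild Seq2SeqTrainingArguments from the metadata dump above.
# Assumes the JSON is stored as "training_metadata.json" in the working directory.
import json
from dataclasses import fields

from transformers import Seq2SeqTrainingArguments

with open("training_metadata.json") as f:
    metadata = json.load(f)

# Keep only keys that Seq2SeqTrainingArguments accepts in __init__; everything
# else (script-specific extras, derived/read-only fields such as "_n_gpu",
# and arguments removed in newer transformers releases) is dropped.
init_keys = {f.name for f in fields(Seq2SeqTrainingArguments) if f.init}
kwargs = {k: v for k, v in metadata.items() if k in init_keys}

# Note: a few values saved by older transformers versions (e.g. log_level: -1)
# may need adjusting before use on current releases.
args = Seq2SeqTrainingArguments(**kwargs)
print(args.learning_rate, args.gradient_accumulation_steps, args.lr_scheduler_type)
```

The key-filtering step is what keeps the sketch portable: removed or renamed `TrainingArguments` fields (e.g. `xpu_backend`, `sharded_ddp`) are simply ignored instead of raising a `TypeError` at construction time.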