pszemraj committed on
Commit
04252c2
1 Parent(s): ade0ddb

Upload pegasus-x-large-booksum-WIP2-ft1-booksum-VM_training_metadata.json

Browse files
pegasus-x-large-booksum-WIP2-ft1-booksum-VM_training_metadata.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"output_dir": "/home/runs/model-checkpoints", "overwrite_output_dir": true, "do_train": false, "do_eval": false, "do_predict": false, "evaluation_strategy": "no", "prediction_loss_only": false, "per_device_train_batch_size": 4, "per_device_eval_batch_size": 1, "per_gpu_train_batch_size": "None", "per_gpu_eval_batch_size": "None", "gradient_accumulation_steps": 32, "eval_accumulation_steps": "None", "eval_delay": 0, "learning_rate": 6e-05, "weight_decay": 0.01, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1, "num_train_epochs": 2, "max_steps": -1, "lr_scheduler_type": "constant_with_warmup", "warmup_ratio": 0.0, "warmup_steps": 0, "log_level": -1, "log_level_replica": -1, "log_on_each_node": true, "logging_dir": "/home/runs/logs", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 2, "logging_nan_inf_filter": true, "save_strategy": "steps", "save_steps": 10, "save_total_limit": 2, "save_on_each_node": false, "no_cuda": false, "use_mps_device": false, "seed": 42, "data_seed": "None", "jit_mode_eval": false, "use_ipex": false, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": true, "local_rank": 0, "xpu_backend": "None", "tpu_num_cores": "None", "tpu_metrics_debug": false, "debug": "[]", "dataloader_drop_last": false, "eval_steps": "None", "dataloader_num_workers": 0, "past_index": -1, "run_name": "/home/runs/model-checkpoints", "disable_tqdm": false, "remove_unused_columns": true, "label_names": "None", "load_best_model_at_end": false, "metric_for_best_model": "None", "greater_is_better": "None", "ignore_data_skip": false, "sharded_ddp": "[]", "fsdp": "[]", "fsdp_min_num_params": 0, "fsdp_transformer_layer_cls_to_wrap": "None", "deepspeed": "None", "label_smoothing_factor": 0.0, "optim": "adamw_hf", "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": "['tensorboard', 'wandb']", "ddp_find_unused_parameters": "None", "ddp_bucket_cap_mb": "None", "dataloader_pin_memory": true, "skip_memory_metrics": false, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": "None", "hub_model_id": "pegasus-x-large-booksum-WIP2-ft1-booksum-VM", "hub_strategy": "end", "hub_token": "<HUB_TOKEN>", "hub_private_repo": true, "gradient_checkpointing": true, "include_inputs_for_metrics": false, "fp16_backend": "auto", "push_to_hub_model_id": "None", "push_to_hub_organization": "None", "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>", "_n_gpu": 1, "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": "None", "ray_scope": "last", "ddp_timeout": 1800, "sortish_sampler": false, "predict_with_generate": false, "generation_max_length": "None", "generation_num_beams": "None", "train_batch_size": 4, "eval_batch_size": 1, "configs_src": "pegasus-x-large-booksum-WIP2-ft1-booksum-VM", "use_adam8bit": false, "use_adan_optim": true, "optim_params": "(Adan (\nParameter Group 0\n betas: (0.01, 0.08, 0.001)\n eps: 1e-08\n lr: 6e-05\n restart_cond: None\n weight_decay: 0.01\n\nParameter Group 1\n betas: (0.01, 0.08, 0.001)\n eps: 1e-08\n lr: 6e-05\n restart_cond: None\n weight_decay: 0.0\n), None)", "decay_rate_ada": -0.8}